max_stars_repo_path stringlengths 3-269 | max_stars_repo_name stringlengths 4-119 | max_stars_count int64 0-191k | id stringlengths 1-7 | content stringlengths 6-1.05M | score float64 0.23-5.13 | int_score int64 0-5
---|---|---|---|---|---|---|
temperature.py | pso-code/shiny-domotic | 0 | 12794751 | import glob
import time
import datetime
def read_temperature_file(location):
# Opens the file containing the temperature
temperature_file = open(location)
# Reading the file...
content = temperature_file.read()
# Closing file after reading
temperature_file.close()
return content
def extract_temperature_from_content(content):
# We don't care about the first line; the temperature is given on the second line of the file
second_line = content.split("\n")[1]
temperature = second_line.split("t=")[1]
# Return the temperature in degrees Celsius
return (float(temperature) / 1000)
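# Illustrative w1_slave file content for a DS18B20-family sensor (the raw values below are made up):
#   6e 01 4b 46 7f ff 0c 10 5d : crc=5d YES
#   6e 01 4b 46 7f ff 0c 10 5d t=22875
# For this content, extract_temperature_from_content() would return 22.875 (degrees Celsius).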
def save_temperature_into_file(temperature, date, file_location):
file = open(file_location, "a")
file.write(str(date) + " ")
file.write(str(temperature) + '\r\n')
file.close()
# Retrieve all the temperature sensors that are plugged in and detected
routes_sensors = glob.glob("/sys/bus/w1/devices/28*/w1_slave")
if len(routes_sensors) > 0 :
c = 1
date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
for sensor in routes_sensors :
file_content = read_temperature_file(sensor)
temperature = extract_temperature_from_content(file_content)
print ("[" + str(date) + "/" + sensor + "] Sensor's temperature #" + str(c) + " : " + str(temperature))
c += 1
# time.sleep(60)
else :
print("Sensor not found. Please check your setup.")
#if len(routes_capteurs) > 0 :
#while True:
# date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# contenu_fichier = lire_fichier(routes_capteurs[0])
# temperature = extraire_temperature(contenu_fichier)
# sauvegarde(temperature, date, "Temperature.txt")
# time.sleep(60)
| 3.234375 | 3 |
django_socketio_chat/utils.py | leukeleu/django-socketio-chat | 6 | 12794752 | from rest_framework.renderers import JSONRenderer
from django.utils import simplejson
def prepare_for_emit(obj):
"""
Prepare the object for emit() by Tornadio2's (too simple) JSON renderer
- render to JSON using Django REST Framework 2's JSON renderer
- convert back to _simple_ Python object using Django's simplejson
"""
json = JSONRenderer().render(obj)
return simplejson.loads(json)
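# Hypothetical usage sketch (the serializer and socket names are assumptions, not part of this module).
# The DRF render + simplejson round-trip leaves only plain dicts/lists/strings, which Tornadio2's
# simple JSON encoder can emit safely:
#   payload = prepare_for_emit(MessageSerializer(message).data)
#   socket.emit('message', payload)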
| 2.328125 | 2 |
coding-challenges/array-problems/search-insert-position/solution.py | mcqueenjordan/learning_sandbox | 1 | 12794753 | class Solution:
def searchInsert(self, nums, target):
"""
Given a sorted array and a target value, return the index if the target
is found. If not, return the index where it would be if it were
inserted in order. You may assume no duplicates in the array.
"""
return binary_search(nums, target, 0, len(nums) - 1)
def binary_search(nums, target, low, hi):
print("low", low, "high", hi)
candidate = nums[midpoint(low, hi)]
if abs(hi - low) <= 1:
# base case: the answer is low, hi, or one past hi
if nums[low] >= target:
return low
elif nums[hi] >= target:
return hi
else:
return hi + 1
if candidate == target:
return midpoint(low, hi)
elif candidate < target:
return binary_search(nums, target, midpoint(low, hi), hi)
elif candidate > target:
return binary_search(nums, target, low, midpoint(low, hi))
def midpoint(low, hi):
return (low + hi) // 2  # integer division: the midpoint is used as a list index
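# Minimal usage sketch (inputs are assumed, not part of the original file):
#   s = Solution()
#   s.searchInsert([1, 3, 5, 6], 5)  # -> 2 (target found)
#   s.searchInsert([1, 3, 5, 6], 2)  # -> 1 (insertion position)
#   s.searchInsert([1, 3, 5, 6], 7)  # -> 4 (insert at the end)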
| 4 | 4 |
47.Replace_list_-1_0_1.py | gptakhil/Python_Practice_Beginner | 2 | 12794754 | <reponame>gptakhil/Python_Practice_Beginner
mylist = [8,4,7,6,2,3,5,-2,-1,0,1,-6,-8,5,0,-9]
print(mylist)
for i in range(len(mylist)):
if mylist[i]>0:
mylist[i]=1
elif mylist[i]<0:
mylist[i]=-1
else: mylist[i]=0
print(mylist) | 3.75 | 4 |
cwProject/cwApp/migrations/0003_car.py | cs-fullstack-2019-spring/django-models3-cw-autumn-ragland | 0 | 12794755 | <filename>cwProject/cwApp/migrations/0003_car.py<gh_stars>0
# Generated by Django 2.0.6 on 2019-02-21 17:52
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cwApp', '0002_auto_20190221_1735'),
]
operations = [
migrations.CreateModel(
name='Car',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('make', models.CharField(default='', max_length=200)),
('model', models.CharField(default='', max_length=200)),
('year', models.DateField(default=datetime.date.today)),
],
),
]
| 1.890625 | 2 |
worker.py | mdylan2/propertyfinderscraper | 0 | 12794756 | <reponame>mdylan2/propertyfinderscraper
from rq import Connection, Worker
from dash_rq_demo import app, queue
from dash_rq_demo.core import conn
if __name__ == "__main__":
with app.server.app_context():
with Connection(conn):
w = Worker([queue])
w.work()
| 1.421875 | 1 |
src/cvdata/mask.py | edumotya/cvdata | 15 | 12794757 | <reponame>edumotya/cvdata
import argparse
import collections.abc
import concurrent.futures
import json
import logging
import math
import os
import random
from typing import Dict
import cv2
import numpy as np
import six
import tensorflow as tf
from tensorflow.compat.v1.python_io import TFRecordWriter
from tqdm import tqdm
from cvdata.utils import image_dimensions, matching_ids
# ------------------------------------------------------------------------------
# set up a basic, global _logger which will write to the console
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
def _class_labels_to_ids(
labels_path: str,
) -> Dict:
"""
Reads a text file, which is assumed to contain one class label per line, and
returns a dictionary with class labels as keys mapped to the class ID (i.e.
the label's line number).
So a labels file like so:
cat
dog
panda
will result in a dictionary like so:
{
"cat": 1,
"dog": 2,
"panda": 3,
}
:param labels_path: path to a file containing class labels used in
a segmentation dataset, with one class label per line
:return: dictionary mapping class labels to ID values
"""
class_labels = {}
with open(labels_path, "r") as class_labels_file:
class_id = 1
for class_label in class_labels_file:
class_labels[class_label.strip()] = class_id
class_id += 1
return class_labels
# ------------------------------------------------------------------------------
def _int64_list_feature(
values,
) -> tf.train.Feature:
"""
Returns a TF-Feature of int64_list.
:param values:
:return:
"""
if not isinstance(values, collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
values = [values]
return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
# ------------------------------------------------------------------------------
def _bytes_list_feature(
values: str,
) -> tf.train.Feature:
"""
Returns a TF-Feature of bytes.
:param values a string
:return TF-Feature of bytes
"""
def norm2bytes(value):
return value.encode() if isinstance(value, str) and six.PY3 else value
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[norm2bytes(values)]))
# ------------------------------------------------------------------------------
def _build_write_tfrecord(
args: Dict,
):
"""
Builds and writes a TFRecord with image and segmentation (mask) features.
:param args: dictionary containing the following function arguments:
output_path: the path of the TFRecord file to be written
shard_id: shard ID (for multi-shard TFRecord datasets)
num_per_shard: number of images/masks per shard
num_images: total number of images in dataset
file_ids: file IDs for image/mask files
images_dir: directory containing image files
masks_dir: directory containing mask files corresponding to the images
"""
with TFRecordWriter(args["output_path"]) as tfrecord_writer:
start_idx = args["shard_id"] * args["num_per_shard"]
end_idx = min((args["shard_id"] + 1) * args["num_per_shard"], args["num_images"])
for i in range(start_idx, end_idx):
print(f'\r>> Converting image {i + 1}/{len(args["file_ids"])} '
f'shard {args["shard_id"]}')
# read the image
image_file_name = args["file_ids"][i] + ".jpg"
image_path = os.path.join(args["images_dir"], image_file_name)
image_data = tf.io.gfile.GFile(image_path, 'rb').read()
width, height, _ = image_dimensions(image_path)
# read the semantic segmentation annotation (mask)
mask_path = os.path.join(args["masks_dir"], args["file_ids"][i] + ".png")
seg_data = tf.io.gfile.GFile(mask_path, 'rb').read()
seg_width, seg_height, _ = image_dimensions(mask_path)
if height != seg_height or width != seg_width:
raise RuntimeError('Shape mismatched between image and mask.')
# Convert to tf example.
example = tf.train.Example(features=tf.train.Features(feature={
'image/encoded': _bytes_list_feature(image_data),
'image/filename': _bytes_list_feature(image_file_name),
'image/format': _bytes_list_feature('jpeg'),
'image/height': _int64_list_feature(height),
'image/width': _int64_list_feature(width),
'image/channels': _int64_list_feature(3),
'image/segmentation/class/encoded': (_bytes_list_feature(seg_data)),
'image/segmentation/class/format': _bytes_list_feature('png'),
}))
tfrecord_writer.write(example.SerializeToString())
# ------------------------------------------------------------------------------
def masked_dataset_to_tfrecords(
images_dir: str,
masks_dir: str,
tfrecord_dir: str,
num_shards: int = 1,
dataset_base_name: str = "tfrecord",
train_pct: float = 1.0,
):
"""
Creates TFRecord files corresponding to a dataset of JPG images with
corresponding set PNG masks.
:param images_dir: directory containing image files
:param masks_dir: directory containing mask files corresponding to the images
:param tfrecord_dir: directory where the output TFRecord files will be written
:param num_shards: number of shards
:param dataset_base_name: base name of the TFRecord files to be produced
:param train_pct: the percentage of images/masks to use for training, with
(1.0 minus this value as the validation percentage), if this value is 1.0
then no split will occur
"""
masks_ext = ".png"
images_ext = ".jpg"
file_ids = list(matching_ids(masks_dir, images_dir, masks_ext, images_ext))
random.shuffle(file_ids)
# create a mapping of base file names and subsets of file IDs
if train_pct < 1.0:
# get the correct file name prefix for the TFRecord files
# based on the presence of a specified file base name
tfrecord_file_prefix_train = "train"
tfrecord_file_prefix_valid = "valid"
if dataset_base_name != "":
tfrecord_file_prefix_train = tfrecord_file_prefix_train + "_" + dataset_base_name
tfrecord_file_prefix_valid = tfrecord_file_prefix_valid + "_" + dataset_base_name
# get the split index to use for splitting into train/valid sets
split_index = int(len(file_ids) * train_pct)
# map the file prefixes to the sets of file IDs for the split sections
split_names_to_ids = {
tfrecord_file_prefix_train: file_ids[:split_index],
tfrecord_file_prefix_valid: file_ids[split_index:],
}
# report the number of samples in each split section
_logger.info(f"TFRecord dataset contains {len(file_ids[:split_index])} training samples")
_logger.info(f"TFRecord dataset contains {len(file_ids[split_index:])} validation samples")
else:
# we'll just have one base file name mapped to all file IDs
if "" == dataset_base_name:
tfrecord_file_prefix = "tfrecord"
else:
tfrecord_file_prefix = dataset_base_name
# map the file prefixes to the set of file IDs
split_names_to_ids = {
tfrecord_file_prefix: file_ids,
}
# report the number of samples
_logger.info(f"TFRecord dataset contains {len(file_ids)} samples (no train/valid split)")
# create an iterable of arguments that will be mapped to concurrent future processes
args_iterable = []
for base_name, file_ids in split_names_to_ids.items():
num_images = len(file_ids)
num_per_shard = int(math.ceil(num_images / num_shards))
for shard_id in range(num_shards):
output_filename = os.path.join(
tfrecord_dir,
f'{base_name}-{str(shard_id).zfill(5)}-of-{str(num_shards).zfill(5)}.tfrecord',
)
tfrecord_writing_args = {
"output_path": output_filename,
"shard_id": shard_id,
"num_per_shard": num_per_shard,
"num_images": num_images,
"file_ids": file_ids,
"images_dir": images_dir,
"masks_dir": masks_dir,
}
args_iterable.append(tfrecord_writing_args)
# use a ProcessPoolExecutor to facilitate creating the TFRecords in parallel
with concurrent.futures.ProcessPoolExecutor() as executor:
# map the TFRecord creation function to the iterable of arguments
_logger.info(f"Building TFRecords in directory {tfrecord_dir} ")
executor.map(_build_write_tfrecord, args_iterable)
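# Hypothetical invocation sketch (paths and values are assumptions, mirroring the CLI example in __main__):
#   masked_dataset_to_tfrecords(
#       images_dir="/data/lesions/images",
#       masks_dir="/data/lesions/masks",
#       tfrecord_dir="/data/lesions/tfrecords",
#       num_shards=12,
#       dataset_base_name="lesions",
#       train_pct=0.8,   # 80% train / 20% validation split
#   )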
# ------------------------------------------------------------------------------
def vgg_to_masks(
images_dir: str,
annotations_file: str,
masks_dir: str,
class_labels_file: str,
combine_into_one: bool = False,
):
"""
Creates mask files from annotations specified in a JSON file exported from
the VGG Image Annotator (VIA) tool.
:param images_dir: directory containing JPG image files
:param annotations_file : annotation file containing segmentation (mask)
regions, expected to be in the JSON format created by the VGG Image
Annotator tool
:param masks_dir: directory where PNG mask files will be written
:param class_labels_file: text file containing one class label per line
:param combine_into_one: if True then combine all mask regions for an image
into a single mask file
"""
# arguments validation
if not os.path.exists(images_dir):
raise ValueError(f"Invalid images directory path: {images_dir}")
elif not os.path.exists(annotations_file):
raise ValueError(f"Invalid annotations file path: {annotations_file}")
# make the masks directory if it doesn't already exist
os.makedirs(masks_dir, exist_ok=True)
# load the contents of the annotation JSON file (created
# using the VIA tool) and initialize the annotations dictionary
annotations = json.loads(open(annotations_file).read())
image_annotations = {}
# loop over the file ID and annotations themselves (values)
for data in annotations.values():
# store the data in the dictionary using the filename as the key
image_annotations[data["filename"]] = data
# get a dictionary of class labels to class IDs
class_labels = _class_labels_to_ids(class_labels_file)
_logger.info("Generating mask files...")
for image_file_name in tqdm(os.listdir(images_dir)):
# skip any files without a *.jpg extension
if not image_file_name.endswith(".jpg"):
continue
file_id = os.path.splitext(image_file_name)[0]
# grab the image info and then grab the annotation data for
# the current image based on the unique image ID
annotation = image_annotations[image_file_name]
# get the image's dimensions
width, height, _ = image_dimensions(os.path.join(images_dir, image_file_name))
# if combining all regions into a single mask file
# then we'll only need to allocate the mask array once
if combine_into_one:
# allocate memory for the region mask
region_mask = np.zeros((height, width, 3), dtype="uint8")
# loop over each of the annotated regions
for (i, region) in enumerate(annotation["regions"]):
# if not combining all regions into a single mask file then
# we'll need to reallocate the mask array for each mask region
if not combine_into_one:
# allocate memory for the region mask
region_mask = np.zeros((height, width, 3), dtype="uint8")
# grab the shape and region attributes
shape_attributes = region["shape_attributes"]
region_attributes = region["region_attributes"]
# find the class ID corresponding to the region's class attribute
class_label = region_attributes["class"]
if class_label not in class_labels:
raise ValueError(
"No corresponding class ID found for the class label "
f"found in the region attributes -- label: {class_label}",
)
else:
class_id = class_labels[class_label]
# get the array of (x, y)-coordinates for the region's mask polygon
x_coords = shape_attributes["all_points_x"]
y_coords = shape_attributes["all_points_y"]
coords = zip(x_coords, y_coords)
poly_coords = [[x, y] for x, y in coords]
pts = np.array(poly_coords, np.int32)
# reshape the points to (<# of coordinates>, 1, 2)
pts = pts.reshape((-1, 1, 2))
# draw the polygon mask, using the class ID as the mask value
cv2.fillPoly(region_mask, [pts], color=[class_id]*3)
# if not combining all masks into a single file
# then write this mask into its own file
if not combine_into_one:
# write the mask file
mask_file_name = f"{file_id}_segmentation_{i}.png"
cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)
# write a combined mask file, if requested
if combine_into_one:
# write the mask file
mask_file_name = f"{file_id}_segmentation.png"
cv2.imwrite(os.path.join(masks_dir, mask_file_name), region_mask)
_logger.info("Done")
# ------------------------------------------------------------------------------
def main():
# parse the command line arguments
args_parser = argparse.ArgumentParser()
args_parser.add_argument(
"--images",
required=True,
type=str,
help="path to directory containing input image files",
)
args_parser.add_argument(
"--masks",
required=False,
type=str,
help="path to directory where mask files will be written "
"(or found if used as an input)",
)
args_parser.add_argument(
"--tfrecords",
required=False,
type=str,
help="path to directory where TFRecord output files will be written",
)
args_parser.add_argument(
"--annotations",
required=False,
type=str,
help="path to annotation file",
)
args_parser.add_argument(
"--in_format",
required=False,
type=str,
choices=["coco", "openimages", "png", "vgg"],
help="format of input annotations",
)
args_parser.add_argument(
"--out_format",
required=False,
type=str,
choices=["png", "tfrecord"],
help="format of output annotations/masks",
)
args_parser.add_argument(
"--classes",
required=False,
type=str,
help="path of the class labels file listing one class per line",
)
args_parser.add_argument(
"--combine",
default=False,
action='store_true',
help="combine all regions/classes into a single mask file",
)
args_parser.add_argument(
"--shards",
required=False,
default=1,
type=int,
help="number of shard files to use when converting to TFRecord format",
)
args_parser.add_argument(
"--train_pct",
required=False,
default=1.0,
type=float,
help="percentage of images/masks to use for the training subset "
"(validation subset will equal 1.0 - train_pct), if 1.0 then "
"no splitting will occur",
)
args_parser.add_argument(
"--base_name",
required=False,
type=str,
default="",
help="base name of the TFRecord files",
)
args = vars(args_parser.parse_args())
if args["in_format"] == "vgg":
if args["out_format"] == "png":
vgg_to_masks(
args["images"],
args["annotations"],
args["masks"],
args["classes"],
args["combine"],
)
elif args["in_format"] == "png":
if args["out_format"] == "tfrecord":
masked_dataset_to_tfrecords(
args["images"],
args["masks"],
args["tfrecords"],
args["shards"],
args["base_name"],
args["train_pct"],
)
else:
raise ValueError(f"Unsupported output format: {args['out_format']}")
else:
raise ValueError(f"Unsupported input format: {args['in_format']}")
# ------------------------------------------------------------------------------
if __name__ == "__main__":
"""
Usage:
For creating masks from VIA annotations:
$ python mask.py --in_format vgg \
--images /data/images \
--annotations /data/via_annotations.json \
--masks /data/masks
For creating TFRecords from a masked dataset with an 80% training and 20% validation split:
$ python mask.py --images /data/lesions/images \
--masks /data/lesions/masks \
--in_format png --out_format tfrecord \
--tfrecords /data/lesions/tfrecords \
--shards 12 -- train_pct 0.8
"""
# run this module's main function
main()
| 2.453125 | 2 |
app/internal/module/video/encode.py | kuropengin/SHINtube-video-api | 0 | 12794758 | <gh_stars>0
from .filemanager import filemanager
from ..command_run import command_run
from ..logger import logger
import json
import asyncio
import os
from typing import List
from dataclasses import dataclass
class encoder_class:
def __init__(self):
# Sample video used for the encoder test
self.sample_dir = "./sample"
self.sample_video = "video.mp4"
# Resolution: bitrate (Mbps)
self.bitrate = {
1080: 4.3,
720: 2.3,
480: 1.2,
360: 0.65,
240: 0.24,
160: 0.24
}
# Available encoders
self.encoder_available = {
"vaapi": False,
"nvenc_hw_decode": False,
"nvenc_sw_decode": False,
"software": False
}
# Number of concurrent encodes
self.encode_worker = 0
# Encoders currently in use
self.encoder_used_status = {
"vaapi": False,
"nvenc_hw_decode": False,
"nvenc_sw_decode": False,
"software": False
}
def get_bitrate(quality: str = "high"):
pass
return
def audio_encode_command(
self,
folderpath: str,
filename: str,):
"""
Command for extracting the audio track.
"""
command = [
"ffmpeg",
"-hide_banner",
"-y",
f"-i {folderpath}/{filename}",
"-vn",
"-b:a 192k",
"-aac_coder twoloop",
"-start_number 0",
"-hls_time 6",
"-hls_list_size 0",
"-f hls",
f"{folderpath}/audio.m3u8"
]
return command
def software_encode_command(
self,
folderpath: str,
filename: str,
resolution: int,
thread: int = 0) -> List[str]:
"""
Command for software encoding.
Slow.
"""
command = [
"ffmpeg",
"-hide_banner",
"-y",
"-vsync 1",
f"-threads {thread}",
f"-i {folderpath}/{filename}",
"-r 30",
"-g 180",
f"-threads {thread}",
"-vcodec libx264",
"-bf 8",
f"-b:v {self.bitrate[resolution]}M",
f"-bufsize {self.bitrate[resolution]*6}M",
"-an",
"-start_number 0",
"-hls_time 6",
"-hls_list_size 0",
"-f hls",
f"-vf scale=-2:{resolution}",
f"{folderpath}/{resolution}p.m3u8"
]
return command
def vaapi_encode_command(
self,
folderpath: str,
filename: str,
resolution: int,
vaapi_device: str = "/dev/dri/renderD128") -> List[str]:
"""
Command for VAAPI (Intel) encoding.
Encodes with VBR rate control.
"""
command = [
"ffmpeg",
"-hide_banner",
"-y",
"-vsync 1",
f"-init_hw_device vaapi=intel:{vaapi_device}",
"-hwaccel vaapi",
"-hwaccel_output_format vaapi",
"-hwaccel_device intel",
"-filter_hw_device intel",
f"-i {folderpath}/{filename}",
"-r 30",
"-g 180",
"-vcodec h264_vaapi",
"-rc_mode VBR",
"-bf 8",
f"-b:v {self.bitrate[resolution]}M",
f"-bufsize {self.bitrate[resolution]*6}M",
"-an",
f"-vf 'format=nv12|vaapi,hwupload,scale_vaapi=w=-2:h={resolution}'",
"-profile high",
"-compression_level 0",
"-start_number 0",
"-hls_time 6",
"-hls_list_size 0",
"-f hls",
f"{folderpath}/{resolution}p.m3u8"]
return command
def nvenc_hw_decode_encode_command(
self,
folderpath: str,
filename: str,
resolution: int,) -> List[str]:
"""
Command for NVENC encoding. HW decoding is used for the input video.
Encodes with VBR rate control.
To work around errors, the actual output resolution is reduced by 1.
"""
command = [
"/opt/bin/ffmpeg",
"-hide_banner",
"-y",
"-vsync 1",
"-init_hw_device cuda",
"-hwaccel cuda",
"-hwaccel_output_format cuda",
f"-i {folderpath}/{filename}",
"-r 30",
"-g 180",
"-c:v h264_nvenc",
f"-b:v {self.bitrate[resolution]}M",
f"-bufsize {self.bitrate[resolution]*6}M",
"-an",
"-preset medium",
"-profile:v high",
"-bf 4",
"-b_ref_mode 2",
"-temporal-aq 1",
f"-vf scale_cuda=-2:{resolution-1}",
"-hls_time 6",
"-hls_list_size 0",
"-f hls",
f"{folderpath}/{resolution}p.m3u8",
]
return command
def nvenc_sw_decode_encode_command(
self,
folderpath: str,
filename: str,
resolution: int,) -> List[str]:
"""
Command for NVENC encoding. SW decoding is used for the input video.
Encodes with VBR rate control.
To work around errors, the actual output resolution is reduced by 1.
"""
command = [
"/opt/bin/ffmpeg",
"-hide_banner",
"-y",
"-vsync 1",
"-init_hw_device cuda",
"-hwaccel_output_format cuda",
f"-i {folderpath}/{filename}",
"-r 30",
"-g 180",
"-c:v h264_nvenc",
f"-b:v {self.bitrate[resolution]}M",
f"-bufsize {self.bitrate[resolution]*6}M",
"-an",
"-preset medium",
"-profile:v high",
"-bf 4",
"-b_ref_mode 2",
"-temporal-aq 1",
f"-vf hwupload,scale_cuda=-2:{resolution-1}",
"-hls_time 6",
"-hls_list_size 0",
"-f hls",
f"{folderpath}/{resolution}p.m3u8",
]
return command
def thumbnail_command(
self,
folderpath: str,
filename: str,
resolution: int,
s: int = 5) -> List[str]:
"""
Command for thumbnail generation.
The argument s is the position (in seconds) in the video at which the frame is captured.
"""
command = [
"ffmpeg",
"-hide_banner",
"-y",
f"-i {folderpath}/{filename}",
f"-ss {s}",
"-vframes 1",
"-f image2",
f"-vf scale=-2:{resolution}",
f"{folderpath}/thumbnail_{resolution}.jpg"
]
return command
async def thumbnail(self, folderpath: str, filename: str):
command = self.thumbnail_command(folderpath, filename, 360)
await command_run(" ".join(command), "./")
command = self.thumbnail_command(folderpath, filename, 720)
await command_run(" ".join(command), "./")
pass
def video_info_command(self, folderpath: str, filename: str):
command = [
"ffprobe",
"-loglevel quiet",
"-show_streams",
"-print_format json",
f"{folderpath}/{filename}",
]
return command
@dataclass
class video_info_class:
"""Class for keeping track of an item in inventory."""
is_video: bool = False
is_audio: bool = False
width: int = 0
height: int = 0
async def get_video_info(
self, folderpath: str, filename: str) -> video_info_class:
command = self.video_info_command(folderpath, filename)
result = await command_run(" ".join(command), "./")
try:
result = json.loads(result.stdout)
except ValueError:
result = {}
obj = self.video_info_class()
if "streams" not in result:
return obj
for stream in result["streams"]:
if "codec_type" in stream:
if "audio" == stream["codec_type"]:
obj.is_audio = True
elif "video" == stream["codec_type"]:
obj.is_video = True
obj.width = stream["width"]
obj.height = stream["height"]
return obj
class encode_command_class:
def __init__(self, encoder, command):
self.encoder: str = encoder
self.command: List[str] = command
async def get_encode_command(
self,
folderpath: str,
filename: str,
resolution: int,) -> encode_command_class:
if self.encode_worker == 0:
await self.encode_test()
# Search for an available encoder
use_encoder = None
while True:
for encoder in self.encoder_available:
# If the encoder is available and not currently in use
if self.encoder_available[encoder] and \
not self.encoder_used_status[encoder]:
# Mark the encoder as in use
self.encoder_used_status[encoder] = True
use_encoder = encoder
break
else:
# Wait when no encoder is available
await asyncio.sleep(10)
continue
# If the inner loop broke out, break the outer loop as well
break
# Software encoding
if use_encoder == "software":
command = self.software_encode_command(
folderpath, filename, resolution)
# VAAPI encoding
elif use_encoder == "vaapi":
command = self.vaapi_encode_command(
folderpath, filename, resolution)
# NVENC (HW decode) encoding
elif use_encoder == "nvenc_hw_decode":
command = self.nvenc_hw_decode_encode_command(
folderpath, filename, resolution)
# NVENC (SW decode) encoding
elif use_encoder == "nvenc_sw_decode":
command = self.nvenc_sw_decode_encode_command(
folderpath, filename, resolution)
result = self.encode_command_class(use_encoder, command)
return result
async def encode_audio(
self,
folderpath: str,
filename: str,
force: bool = False):
# If the audio.m3u8 file already exists
audio_path = f"{folderpath}/audio.m3u8"
if os.path.isfile(audio_path) or force:
return True
# Create an empty audio.m3u8
with open(audio_path, "w"):
pass
# Encode the audio
command = self.audio_encode_command(folderpath, filename)
await command_run(" ".join(command), "./")
playlist_path = f"{folderpath}/playlist.m3u8"
await filemanager.write_playlist(playlist_path, "audio")
audio_done_path = f"{folderpath}/audio.done"
# Create an empty audio.done
with open(audio_done_path, "w"):
pass
return True
async def encode(
self,
folderpath: str,
filename: str,
resolution: int,):
logger.info("エンコード開始")
input_video_info = await self.get_video_info(folderpath, filename)
if input_video_info.is_audio:
await self.encode_audio(folderpath, filename)
encoder = await self.get_encode_command(folderpath, filename, resolution)
logger.info(f"エンコーダ{encoder.encoder}を使用")
# エンコード実行
result = await command_run(" ".join(encoder.command), "./")
logger.info("エンコード完了")
# エンコーダーを開放
self.encoder_used_status[encoder.encoder] = False
if result.returncode == 0:
return True
else:
logger.error(f"encoder error {folderpath}")
logger.error(" ".join(encoder.command))
logger.error(result.stdout)
logger.error(result.stderr)
return False
async def encode_test(self):
"""
Test which encoders are available.
"""
logger.info("エンコードテスト開始")
self.encode_worker = 0
# Test VAAPI
command = self.vaapi_encode_command(
self.sample_dir, self.sample_video, 1080)
result = await command_run(" ".join(command), "./")
if result.returncode == 0:
self.encoder_available["vaapi"] = True
self.encode_worker += 1
# Test NVENC (HW decode)
command = self.nvenc_hw_decode_encode_command(
self.sample_dir, self.sample_video, 1080)
result = await command_run(" ".join(command), "./")
if result.returncode == 0:
self.encoder_available["nvenc_hw_decode"] = True
self.encode_worker += 1
# Test NVENC (SW decode)
command = self.nvenc_sw_decode_encode_command(
self.sample_dir, self.sample_video, 1080)
result = await command_run(" ".join(command), "./")
if result.returncode == 0:
self.encoder_available["nvenc_sw_decode"] = True
self.encode_worker += 1
# If only software encoding is usable
if self.encode_worker == 0:
self.encoder_available["software"] = True
self.encode_worker = 1
logger.info("エンコードテスト完了!!")
logger.info(f"{self.encoder_available}")
return self.encoder_available
encoder = encoder_class()
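# Minimal usage sketch (the folder layout and file name are assumptions, not defined here):
#   import asyncio
#   asyncio.run(encoder.encode("./video/some_id", "1.mp4", 720))
# encode() picks a free encoder (VAAPI / NVENC / software), writes the {resolution}p.m3u8
# HLS playlist and segments next to the input file, and encodes the audio track once per folder.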
| 2.171875 | 2 |
GhidraGdb.py | ArcTropics/GhidraGdb | 0 | 12794759 | from pwn import *
import sys
import os
from pathlib import Path
from threading import Thread
from clients.GhidraCommandClient import GhidraCommandClient
class GhidraGdb:
"""The main class which encapsulates the whole GhidraGdb framework
"""
def __init__(self, process=None):
self.fifo = None
self.process = process
self.FIFO = "/tmp/gdbPipe"
try:
os.mkfifo(self.FIFO)
except Exception as e:
print(e)
if not "File exists" in str(e):
print("sys.exit")
return
self.client = GhidraCommandClient(self)
self.parserMode = None
self.breakpointAddr = None
self.currRet = None
self.removals = []
def removeBpByPattern(self, pattern):
"""Removes a breakpoint before it is inserted
:param pattern: the pattern to identify the breakpoint
:return: None
"""
self.removals.append(pattern)
def excAndGet(self, exc, strip=True):
"""This function executes a command within the gdb session
:param exc: String value containing the gdb command
:param strip: Boolean, optional - remove the EOF delimiter automatically(this might create issues in some cases) - default: True
:return: String value containing the gdb response unparsed
"""
self.currRet = ""
self.parserMode = "GETDAT"
self.gdb.execute(exc.split("\n")[0])
self.gdb.execute("print \"ggdb__EOF\"")
while self.parserMode == "GETDAT":
time.sleep(0.01)
if strip:
return self.currRet.split("$")[0]
else:
return self.currRet
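# Hypothetical usage sketch (the command shown is an assumption):
#   regs = ggdb.excAndGet("info registers")
#   print(regs)
# excAndGet() runs the command, then a sentinel print, waits until the reader thread sees
# "ggdb__EOF" in the gdb pipe, and returns the output captured before that sentinel.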
def readFifo(self, fifo):
"""read the ouput of the gdbPipe te receive the data
:param fifo: the fifo object to read from
:return: None
"""
while True:
#time.sleep(0.05)
line = fifo.readline()
if len(line) > 2:
line = line.replace("\n", "")
if self.parserMode == "WAITBP":
if "Breakpoint" in line:
for part in line.split(" "):
if "0x" in part:
self.breakpointAddr = part.split("x")[1]
#print("found Breakpoint Address: " + self.breakpointAddr)
elif self.parserMode == "GETDAT":
self.currRet = self.currRet + line + "\n"
if "ggdb__EOF" in line:
self.parserMode = "WAITBP"
def setupFifo(self, FIFO):
"""Create the Fifo which is used to read the data comming from the gdb
:param FIFO: The filename where the fifo will be created
:return: None
"""
print("setting up fifo now: " + str(FIFO))
with open(FIFO, 'r') as fifo:
self.fifo = fifo
print("fiifo opened")
self.readFifo(fifo)
def setupFifoNonBlock(self, Fifo):
"""Run the function "setupFifo" in None-blocking mode
:param FIFO: The filename where the fifo will be created
:return: None
"""
Thread(target=self.setupFifo, args=(Fifo,), daemon=True).start()
def setupGdbInteractive(self):
"""Setup the GdbSession as an interactive shell(the user can interact with GDB as usual) - Non-blocking
:return: None
"""
Thread(target=self.process.interactive).start()
def getProcOffset(self, procName):
"""Get the Proc Offset of a particular mapping
:param procName: String value containing the Name of the mapping
:return: The start Address of the mapped space
"""
while self.checkThreadRunning():
time.sleep(0.05)
print("getting proc mapping")
#get the proc mappings from gdb
procMappings = self.excAndGet("i proc mappings")
proc_maps = []
#get and format the memory mappings which are mapping the main executable
for line in procMappings.split("\n"):
if procName in line:
ln = line.replace("\t", " ")
#turn multiple whitespaces into single whitespaces
while " " in ln:
ln = ln.replace(" ", " ")
#create an array, containing the different columns
arr = ln.split(" ")
if len(arr[0]) < 2:
arr.pop(0)
proc_maps.append(arr)
## get the lowest Start Address
offset = 0
procStartAddresss = 0
for i, map in enumerate(proc_maps):
if i == 0 or offset > int(map[3].split("x")[1],16) :
offset = int(map[3].split("x")[1],16)
procStartAddresss = map[0]
return procStartAddresss
def run(self, cmd, interactive=True, startCommands="", args=""):
"""This is the entry function that spawns a new process and connects the debugger to it
:param String cmd: value containing the path to your executable
:param Boolean interactive: optional - open a regular GDB Window which the user can interact with. Default: True
:param String startCommands: optional - Initial GDB Commands which are executed before the program starts
:param String args: - Arguments to start the executable with
"""
#connect reader thread to read gdb pipe
self.setupFifoNonBlock(self.FIFO)
self.process = gdb.debug(cmd, '''
set logging file /tmp/gdbPipe
set logging on
starti''' + str(args) + "\n" + startCommands, api=True)
self.gdb = self.process.gdb
#self
if interactive:
self.setupGdbInteractive()
self.runtimeAnalysisNonBlock()
#we need to calculate the offset between Ghidra and the process mapping here (Because of ...)
imageBase = self.client.br.remote_eval("str(getState().getCurrentProgram().getAddressMap().getImageBase())")
procOffset = self.getProcOffset(Path(cmd).name)
if procOffset == 0:
return self.process, False
print("Found proc offset: " + str(procOffset))
#calculate final dynamic offset
self.procOffset = str(hex(int(procOffset.split("x")[1],16) - int(imageBase,16)))
print("final offset: " + str(self.procOffset))
print("EXECUTING GDB BP SETUP")
for bp in self.client.breakpoints:
skip = False
for line in bp.pyExc.split("\n"):
for line2 in self.removals:
if line2 in line:
skip = True
if skip:
continue
print("ADDING BP")
bp.rebuiltWithOffset(self.procOffset)
bp.setHitLimit(0)
ret = self.excAndGet(str(bp.setup))
#we parse the number of the breakpoint (in gdb)
parts = ret.split(" ")
parse = False
number = 0
for part in parts:
if parse:
try:
number = int(part)
except:
pass
if "Breakpoint" in part:
parse = True
bp.number = number
print("return from setup: " + str(ret))
#self.gdb.execute(str(bp.setup))
self.gdb.execute(str("continue"))
return self.process, True
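# Minimal end-to-end sketch (the binary path and function list are assumptions):
#   ggdb = GhidraGdb()
#   ggdb.analyze(["main"])           # build breakpoints from the Ghidra code/comments
#   proc, ok = ggdb.run("./target")  # spawn the binary under GDB and install those breakpoints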
def setupGdb(self, interactive=True, startCommands=""):
""" Deprecated - attaches the gdb to an existing program instance instead of spawning the program
:param interactive: interactive: Boolean, optional - open a regular GDB Window which the user can interact with. Default: True
:param startCommands: String - Initial GDB Commands which are executed before the program starts
:return: None
"""
#connect reader thread to read gdb pipe
self.setupFifoNonBlock(self.FIFO)
self.pid, self.gdb = gdb.attach(self.process, '''
set logging file /tmp/gdbPipe
set logging on
''' + startCommands, api=True)
if interactive:
self.setupGdbInteractive()
self.runtimeAnalysisNonBlock()
def analyze(self, funcs):
"""Analyze the Ghidra project - this command will create all the functions, breakpoints and classes from the Ghidra Code/Comments
:param funcs: A list of functions which are to be analyzed
:return: None
"""
self.client.analyze(funcs)
def runtimeAnalysis(self):
"""This function runs arbitrary code in either python or GDB everytime a breakpoint is hit
:return: None
"""
#the first breakpoint has to install the other breakpoints - then continue ...
while self.checkThreadRunning():
time.sleep(0.05)
#time.sleep(5)
print("CONTINUE")
self.parserMode = "WAITBP"
while True:
time.sleep(0.05)
while self.checkThreadRunning():
time.sleep(0.05)
finBp = None
try:
if self.breakpointAddr:
#print("breakpoint hit")
for bp in self.client.breakpoints:
if bp.address.split("x")[1] in self.breakpointAddr:
finBp = bp
self.breakpointAddr = None
break
except:
continue
if not finBp:
continue
finBp.hit()
#todo - this has to be in parallel
for line in finBp.pyExc.split("\n"):
if len(line) > 1:
try:
finBp.exec_(line)
except Exception as e:
print("Exception during code execution: " + str(line))
print(str(e))
for line in finBp.dbExc.split("\n"):
if len(line) > 0:
try:
self.gdb.execute(line)
if line[0] == "c" or "continue" in line:
finBp.deactivate()
except Exception as e:
print("Error in GDB execution of:" + str(line))
print("Exception: " + str(e))
def runtimeAnalysisNonBlock(self):
"""Run the function 'runtimeAnalysis' in Non-blocking mode
:return: None
"""
Thread(target=self.runtimeAnalysis, daemon=True).start()
#check if current thread is running ... (if gdb hits breakpoint ...)
def checkThreadRunning(self):
"""check if the current GDB Thread is running
:return: Boolean - True if the Thread is running
"""
#Todo -- check this
try:
#print(dir(self.gdb.conn.root.gdb))#.selected_inferior().threads())
#print(dir(self.gdb.conn.root.gdb.InferiorThread))
#print(self.gdb.conn.root.gdb.selected_thread().is_running())
#if self.gdb.conn.root.gdb.selected_inferior().threads()[0].is_running():
if self.gdb.conn.root.gdb.selected_thread().is_running():
return True
else:
return False
except Exception as e:
return True
| 2.546875 | 3 |
Adapter/python/AdapterPattern.py | hanmomhanda/DesignPatternStudy | 4 | 12794760 | class Target:
def print_weak(self):
raise NotImplementedError
def print_strong(self):
raise NotImplementedError
class Adaptee:
def __init__(self, message):
self.message = message
def print_paren(self):
print("(" + self.message + ")")
def print_asterisk(self):
print("*" + self.message + "*")
class Adapter(Target):
def __init__(self, adaptee):
self.adaptee = adaptee
def print_weak(self):
self.adaptee.print_paren()
def print_strong(self):
self.adaptee.print_asterisk()
def run():
target = Adapter(Adaptee("Adaptee is adapted to Target by Adapter"))
target.print_weak()
target.print_strong()
run()
| 3.09375 | 3 |
Crypto-Nexus/ChatCommon.py | idnrfc/Crypto-Nexus | 1 | 12794761 | <filename>Crypto-Nexus/ChatCommon.py
"""
Development environment: PyQt5 x64, Python 3.4.3 x64, Windows 8.1 x64
File: ChatCommon.py
Contents: just a collection of the common, frequently used variables
"""
class ChatCommon:
# Variables for specifying the type... just the ones used frequently
TYPE_OF_CAESAR = 0
TYPE_OF_TRANSPOSITION = 1
TYPE_OF_AFFINE = 2
# Used when a message is not in the format defined by the program
TYPE_OF_WRONG_MESSAGE = 0
# Miscellaneous variables: regular expression, Caesar cipher default character set, etc.
message_regularexpression = '\[(?P<type>[H]?[PM][AOS][GDC])\](?P<msg_all>(?P<hmac_value>[a-z0-9]*)[\s]?(?P<msg>.+))'
caesar_letters = ' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~'
RECV_BUFFER = 1024
is_response_HMSG = False
hmac_auth_success = False
stop = False
PAG = None
MAC = None
| 2.1875 | 2 |
example_004_oop_decorator_pattern.py | s-c-23/Elements-of-Software-Design | 3 | 12794762 | <reponame>s-c-23/Elements-of-Software-Design
"""The Decorator pattern is used to dynamically add a new features/functionalities
to an object without changing its implementation. It differs from inheritance because
the new functionalities are attached to that particular object on-demand,
not to the entire subclass."""
class HashTag:
"""Represents a hash tag text."""
def __init__(self, text):
"""A HastTag is a simple text string."""
self._text = text
def render(self):
"""This function represents a text rending in html format."""
return self._text
# def len(self):
# """Just in case if we want to know the lenght."""
# return len(self._text)
class BoldWrapper(HashTag):
"""Wraps a tag in <b>"""
def __init__(self, wrapped):
super().__init__(self)
self._wrapped = wrapped
def render(self):
return f"<b>{self._wrapped.render()}</b>"
class ItalicWrapper(HashTag):
"""Wraps a tag in <i>"""
def __init__(self, wrapped):
super().__init__(self)
self._wrapped = wrapped
def render(self):
return f"<i>{self._wrapped.render()}</i>"
def main():
"""This main function implements a test example run of this decorator pattern implementation"""
simple_hello = HashTag("#helloWorld!")
bold_hello = BoldWrapper(simple_hello)
italic_and_bold_hello = ItalicWrapper(bold_hello)
print("before: ", simple_hello.render())
print("after: ", bold_hello.render())
print("after: ", italic_and_bold_hello.render())
if __name__ == "__main__":
main()
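# Expected console output of main() (illustrative):
#   before:  #helloWorld!
#   after:  <b>#helloWorld!</b>
#   after:  <i><b>#helloWorld!</b></i>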
| 4.375 | 4 |
tests/core/specifications/test_object_of_account_specification.py | douwevandermeij/fractal | 2 | 12794763 | from dataclasses import make_dataclass
from fractal.core.specifications.object_of_account_specification import (
ObjectOfAccountSpecification,
)
def test_object_of_account_specification():
spec = ObjectOfAccountSpecification("abc", "def")
DC = make_dataclass("DC", [("id", str), ("account_id", str)])
assert spec.is_satisfied_by(DC(**dict(id="abc", account_id="def")))
| 2.578125 | 3 |
benchmark/bench_dd.py | Watch-Later/recipes | 1,418 | 12794764 | <reponame>Watch-Later/recipes
#!/usr/bin/python3
import re, subprocess
bs = 1
count = 1024 * 1024
while bs <= 1024 * 1024 * 8:
args = ['dd', 'if=/dev/zero', 'of=/dev/null', 'bs=%d' % bs, 'count=%d' % count]
result = subprocess.run(args, capture_output=True)
seconds = 0
message = str(result.stderr)
if m := re.search('copied, (.*?) s, ', message):
seconds = float(m.group(1))
elif m := re.search('bytes transferred in (.*?) secs', message):
seconds = float(m.group(1))
else:
print('Unable to parse dd output:\n%s' % message)
break
print('bs=%7d count=%7d %6.3fs %8.3fus/record %9.3fMB/s' %
(bs, count, seconds, seconds * 1e6 / count, bs * count / 1e6 / seconds))
bs *= 2
if seconds > 1:
count /= 2
result = """
Raspberry Pi 4 running FreeBSD 13-RELEASE:
freebsd% python3.9 bench_dd.py
bs= 1 count=1048576 3.307s 3.154us/record 0.317MB/s
bs= 2 count= 524288 1.682s 3.209us/record 0.623MB/s
bs= 4 count= 262144 0.824s 3.144us/record 1.272MB/s
bs= 8 count= 262144 0.855s 3.262us/record 2.453MB/s
bs= 16 count= 262144 0.831s 3.171us/record 5.046MB/s
bs= 32 count= 262144 0.813s 3.101us/record 10.321MB/s
bs= 64 count= 262144 0.848s 3.236us/record 19.779MB/s
bs= 128 count= 262144 0.848s 3.235us/record 39.569MB/s
bs= 256 count= 262144 0.863s 3.293us/record 77.746MB/s
bs= 512 count= 262144 0.844s 3.220us/record 159.029MB/s
bs= 1024 count= 262144 0.894s 3.411us/record 300.221MB/s
bs= 2048 count= 262144 0.984s 3.755us/record 545.461MB/s
bs= 4096 count= 262144 1.106s 4.219us/record 970.906MB/s
bs= 8192 count= 131072 0.675s 5.148us/record 1591.372MB/s
bs= 16384 count= 131072 0.917s 6.992us/record 2343.125MB/s
bs= 32768 count= 131072 1.385s 10.567us/record 3100.959MB/s
bs= 65536 count= 65536 1.189s 18.144us/record 3611.984MB/s
bs= 131072 count= 32768 1.130s 34.500us/record 3799.209MB/s
bs= 262144 count= 16384 1.155s 70.499us/record 3718.413MB/s
bs= 524288 count= 8192 1.264s 154.328us/record 3397.221MB/s
bs=1048576 count= 4096 1.543s 376.625us/record 2784.138MB/s
bs=2097152 count= 2048 2.041s 996.766us/record 2103.957MB/s
bs=4194304 count= 1024 2.441s 2383.790us/record 1759.511MB/s
bs=8388608 count= 512 2.690s 5253.455us/record 1596.779MB/s
Raspberry Pi 4 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ python3 bench_dd.py
bs= 1 count=1048576 1.067s 1.018us/record 0.982MB/s
bs= 2 count= 524288 0.529s 1.009us/record 1.982MB/s
bs= 4 count= 524288 0.540s 1.030us/record 3.885MB/s
bs= 8 count= 524288 0.537s 1.025us/record 7.805MB/s
bs= 16 count= 524288 0.533s 1.016us/record 15.741MB/s
bs= 32 count= 524288 0.537s 1.023us/record 31.265MB/s
bs= 64 count= 524288 1.527s 2.913us/record 21.972MB/s
bs= 128 count= 262144 0.758s 2.892us/record 44.258MB/s
bs= 256 count= 262144 0.760s 2.899us/record 88.300MB/s
bs= 512 count= 262144 0.768s 2.930us/record 174.728MB/s
bs= 1024 count= 262144 0.795s 3.034us/record 337.543MB/s
bs= 2048 count= 262144 0.817s 3.117us/record 657.138MB/s
bs= 4096 count= 262144 0.886s 3.378us/record 1212.454MB/s
bs= 8192 count= 262144 1.406s 5.365us/record 1527.034MB/s
bs= 16384 count= 131072 1.294s 9.875us/record 1659.057MB/s
bs= 32768 count= 65536 1.245s 19.003us/record 1724.402MB/s
bs= 65536 count= 32768 1.227s 37.450us/record 1749.962MB/s
bs= 131072 count= 16384 1.264s 77.148us/record 1698.972MB/s
bs= 262144 count= 8192 1.257s 153.500us/record 1707.781MB/s
bs= 524288 count= 4096 1.303s 318.062us/record 1648.385MB/s
bs=1048576 count= 2048 1.503s 733.804us/record 1428.960MB/s
bs=2097152 count= 1024 1.839s 1796.094us/record 1167.618MB/s
bs=4194304 count= 512 1.833s 3580.527us/record 1171.421MB/s
bs=8388608 count= 256 1.860s 7266.406us/record 1154.437MB/s
Raspberry Pi 4 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 1.464s 1.396us/record 0.716MB/s
bs= 2 count= 524288 0.729s 1.390us/record 1.439MB/s
bs= 4 count= 524288 0.735s 1.402us/record 2.852MB/s
bs= 8 count= 524288 0.740s 1.411us/record 5.670MB/s
bs= 16 count= 524288 0.746s 1.423us/record 11.246MB/s
bs= 32 count= 524288 0.737s 1.407us/record 22.750MB/s
bs= 64 count= 524288 0.738s 1.408us/record 45.465MB/s
bs= 128 count= 524288 0.745s 1.421us/record 90.060MB/s
bs= 256 count= 524288 0.752s 1.434us/record 178.504MB/s
bs= 512 count= 524288 0.780s 1.488us/record 344.122MB/s
bs= 1024 count= 524288 0.831s 1.585us/record 645.859MB/s
bs= 2048 count= 524288 0.914s 1.742us/record 1175.405MB/s
bs= 4096 count= 524288 1.096s 2.090us/record 1960.027MB/s
bs= 8192 count= 262144 0.750s 2.861us/record 2863.609MB/s
bs= 16384 count= 262144 1.125s 4.290us/record 3819.446MB/s
bs= 32768 count= 131072 1.001s 7.638us/record 4289.905MB/s
bs= 65536 count= 65536 0.975s 14.882us/record 4403.740MB/s
bs= 131072 count= 65536 1.834s 27.978us/record 4684.865MB/s
bs= 262144 count= 32768 2.088s 63.717us/record 4114.190MB/s
bs= 524288 count= 16384 2.347s 143.225us/record 3660.587MB/s
bs=1048576 count= 8192 3.553s 433.748us/record 2417.480MB/s
bs=2097152 count= 4096 5.754s 1404.768us/record 1492.881MB/s
bs=4194304 count= 2048 6.109s 2982.832us/record 1406.148MB/s
bs=8388608 count= 1024 6.307s 6159.189us/record 1361.966MB/s
Raspberry Pi 4 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
bs= 1 count=1048576 5.409s 5.159us/record 0.194MB/s
bs= 2 count= 524288 2.828s 5.393us/record 0.371MB/s
bs= 4 count= 262144 1.415s 5.397us/record 0.741MB/s
bs= 8 count= 131072 0.682s 5.202us/record 1.538MB/s
bs= 16 count= 131072 0.719s 5.483us/record 2.918MB/s
bs= 32 count= 131072 0.674s 5.143us/record 6.222MB/s
bs= 64 count= 131072 0.704s 5.373us/record 11.911MB/s
bs= 128 count= 131072 0.711s 5.425us/record 23.593MB/s
bs= 256 count= 131072 0.690s 5.262us/record 48.655MB/s
bs= 512 count= 131072 0.714s 5.449us/record 93.955MB/s
bs= 1024 count= 131072 0.707s 5.392us/record 189.911MB/s
bs= 2048 count= 131072 0.751s 5.728us/record 357.517MB/s
bs= 4096 count= 131072 0.802s 6.116us/record 669.720MB/s
bs= 8192 count= 131072 1.038s 7.916us/record 1034.902MB/s
bs= 16384 count= 65536 0.833s 12.712us/record 1288.837MB/s
bs= 32768 count= 65536 1.325s 20.212us/record 1621.207MB/s
bs= 65536 count= 32768 1.282s 39.113us/record 1675.575MB/s
bs= 131072 count= 16384 1.211s 73.936us/record 1772.773MB/s
bs= 262144 count= 8192 1.185s 144.619us/record 1812.651MB/s
bs= 524288 count= 4096 1.091s 266.418us/record 1967.912MB/s
bs=1048576 count= 2048 1.372s 670.063us/record 1564.891MB/s
bs=2097152 count= 1024 1.543s 1507.129us/record 1391.488MB/s
bs=4194304 count= 512 1.650s 3223.105us/record 1301.324MB/s
bs=8388608 count= 256 1.583s 6185.391us/record 1356.197MB/s
================================================================
Raspberry Pi 3 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 1.507s 1.437us/record 0.696MB/s
bs= 2 count= 524288 0.753s 1.437us/record 1.392MB/s
bs= 4 count= 524288 0.757s 1.444us/record 2.770MB/s
bs= 8 count= 524288 0.762s 1.454us/record 5.503MB/s
bs= 16 count= 524288 0.763s 1.456us/record 10.992MB/s
bs= 32 count= 524288 0.767s 1.463us/record 21.878MB/s
bs= 64 count= 524288 0.897s 1.711us/record 37.394MB/s
bs= 128 count= 524288 0.899s 1.715us/record 74.630MB/s
bs= 256 count= 524288 0.925s 1.764us/record 145.141MB/s
bs= 512 count= 524288 0.943s 1.799us/record 284.672MB/s
bs= 1024 count= 524288 1.013s 1.933us/record 529.725MB/s
bs= 2048 count= 262144 0.565s 2.155us/record 950.259MB/s
bs= 4096 count= 262144 0.671s 2.559us/record 1600.774MB/s
bs= 8192 count= 262144 0.996s 3.799us/record 2156.141MB/s
bs= 16384 count= 262144 1.627s 6.208us/record 2639.224MB/s
bs= 32768 count= 131072 1.456s 11.111us/record 2949.152MB/s
bs= 65536 count= 65536 1.365s 20.821us/record 3147.534MB/s
bs= 131072 count= 32768 1.324s 40.391us/record 3245.109MB/s
bs= 262144 count= 16384 1.301s 79.400us/record 3301.561MB/s
bs= 524288 count= 8192 1.369s 167.107us/record 3137.440MB/s
bs=1048576 count= 4096 1.862s 454.695us/record 2306.109MB/s
bs=2097152 count= 2048 2.197s 1072.520us/record 1955.351MB/s
bs=4194304 count= 1024 2.454s 2396.406us/record 1750.247MB/s
bs=8388608 count= 512 2.584s 5046.152us/record 1662.377MB/s
Raspberry Pi 3 running Ubuntu server 21.04 arm64, kernel 5.11
$ ./bench_dd.py
bs= 1 count=1048576 10.017s 9.553us/record 0.105MB/s
bs= 2 count= 524288 5.021s 9.577us/record 0.209MB/s
bs= 4 count= 262144 2.505s 9.554us/record 0.419MB/s
bs= 8 count= 131072 1.251s 9.546us/record 0.838MB/s
bs= 16 count= 65536 0.631s 9.623us/record 1.663MB/s
bs= 32 count= 65536 0.629s 9.605us/record 3.332MB/s
bs= 64 count= 65536 0.630s 9.606us/record 6.663MB/s
bs= 128 count= 65536 0.636s 9.700us/record 13.195MB/s
bs= 256 count= 65536 0.634s 9.667us/record 26.481MB/s
bs= 512 count= 65536 0.635s 9.687us/record 52.854MB/s
bs= 1024 count= 65536 0.645s 9.840us/record 104.064MB/s
bs= 2048 count= 65536 0.655s 10.002us/record 204.760MB/s
bs= 4096 count= 65536 0.688s 10.498us/record 390.177MB/s
bs= 8192 count= 65536 0.903s 13.782us/record 594.390MB/s
bs= 16384 count= 65536 1.343s 20.487us/record 799.712MB/s
bs= 32768 count= 32768 1.105s 33.717us/record 971.844MB/s
bs= 65536 count= 16384 0.987s 60.240us/record 1087.909MB/s
bs= 131072 count= 16384 1.854s 113.177us/record 1158.110MB/s
bs= 262144 count= 8192 1.801s 219.850us/record 1192.377MB/s
bs= 524288 count= 4096 1.796s 438.547us/record 1195.511MB/s
bs=1048576 count= 2048 1.972s 963.125us/record 1088.723MB/s
bs=2097152 count= 1024 2.151s 2100.605us/record 998.356MB/s
bs=4194304 count= 512 2.253s 4400.293us/record 953.187MB/s
bs=8388608 count= 256 2.306s 9005.898us/record 931.457MB/s
Raspberry Pi 3 running Debian 11 arm64, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 2.171s 2.070us/record 0.483MB/s
bs= 2 count= 524288 1.069s 2.039us/record 0.981MB/s
bs= 4 count= 262144 0.543s 2.071us/record 1.931MB/s
bs= 8 count= 262144 0.539s 2.058us/record 3.888MB/s
bs= 16 count= 262144 0.543s 2.070us/record 7.730MB/s
bs= 32 count= 262144 0.543s 2.072us/record 15.443MB/s
bs= 64 count= 262144 0.544s 2.077us/record 30.817MB/s
bs= 128 count= 262144 0.552s 2.105us/record 60.802MB/s
bs= 256 count= 262144 0.557s 2.126us/record 120.423MB/s
bs= 512 count= 262144 0.572s 2.184us/record 234.471MB/s
bs= 1024 count= 262144 0.599s 2.286us/record 447.998MB/s
bs= 2048 count= 262144 0.656s 2.501us/record 818.834MB/s
bs= 4096 count= 262144 0.767s 2.926us/record 1399.933MB/s
bs= 8192 count= 262144 1.018s 3.883us/record 2109.512MB/s
bs= 16384 count= 131072 0.757s 5.776us/record 2836.329MB/s
bs= 32768 count= 131072 1.252s 9.549us/record 3431.527MB/s
bs= 65536 count= 65536 1.116s 17.026us/record 3849.261MB/s
bs= 131072 count= 32768 1.052s 32.093us/record 4084.183MB/s
bs= 262144 count= 16384 1.045s 63.790us/record 4109.505MB/s
bs= 524288 count= 8192 1.092s 133.292us/record 3933.372MB/s
bs=1048576 count= 4096 2.321s 566.655us/record 1850.465MB/s
bs=2097152 count= 2048 2.984s 1457.168us/record 1439.197MB/s
bs=4194304 count= 1024 3.431s 3350.625us/record 1251.798MB/s
bs=8388608 count= 512 3.456s 6750.234us/record 1242.714MB/s
================================================================
Raspberry Pi 2 running Raspbian GNU/Linux 10 armv7, kernel 5.10
$ ./bench_dd.py
bs= 1 count=1048576 2.294s 2.188us/record 0.457MB/s
bs= 2 count= 524288 1.155s 2.203us/record 0.908MB/s
bs= 4 count= 262144 0.573s 2.187us/record 1.829MB/s
bs= 8 count= 262144 0.581s 2.215us/record 3.611MB/s
bs= 16 count= 262144 0.579s 2.210us/record 7.239MB/s
bs= 32 count= 262144 0.582s 2.221us/record 14.405MB/s
bs= 64 count= 262144 0.767s 2.926us/record 21.874MB/s
bs= 128 count= 262144 0.725s 2.767us/record 46.261MB/s
bs= 256 count= 262144 0.794s 3.028us/record 84.557MB/s
bs= 512 count= 262144 0.773s 2.951us/record 173.523MB/s
bs= 1024 count= 262144 0.799s 3.050us/record 335.763MB/s
bs= 2048 count= 262144 1.093s 4.170us/record 491.168MB/s
bs= 4096 count= 131072 0.547s 4.170us/record 982.276MB/s
bs= 8192 count= 131072 1.039s 7.929us/record 1033.159MB/s
bs= 16384 count= 65536 0.771s 11.765us/record 1392.607MB/s
bs= 32768 count= 65536 1.511s 23.059us/record 1421.036MB/s
bs= 65536 count= 32768 2.009s 61.321us/record 1068.740MB/s
bs= 131072 count= 16384 1.858s 113.374us/record 1156.103MB/s
bs= 262144 count= 8192 2.055s 250.829us/record 1045.111MB/s
bs= 524288 count= 4096 2.036s 496.960us/record 1054.989MB/s
bs=1048576 count= 2048 2.070s 1010.869us/record 1037.301MB/s
bs=2097152 count= 1024 2.084s 2035.068us/record 1030.507MB/s
bs=4194304 count= 512 2.097s 4094.844us/record 1024.289MB/s
bs=8388608 count= 256 2.096s 8189.414us/record 1024.323MB/s
Overclocking https://wiki.debian.org/RaspberryPi#Overclocking_Pi_2
arm_freq=1000
core_freq=500
sdram_freq=400
over_voltage=0
over_voltage_sdram_p=0
over_voltage_sdram_i=0
over_voltage_sdram_c=0
$ ./bench_dd.py
bs= 1 count=1048576 2.071s 1.975us/record 0.506MB/s
bs= 2 count= 524288 1.038s 1.979us/record 1.011MB/s
bs= 4 count= 262144 0.520s 1.984us/record 2.016MB/s
bs= 8 count= 262144 0.520s 1.982us/record 4.036MB/s
bs= 16 count= 262144 0.524s 2.001us/record 7.997MB/s
bs= 32 count= 262144 0.524s 1.999us/record 16.006MB/s
bs= 64 count= 262144 0.692s 2.640us/record 24.246MB/s
bs= 128 count= 262144 0.654s 2.494us/record 51.329MB/s
bs= 256 count= 262144 0.653s 2.492us/record 102.746MB/s
bs= 512 count= 262144 0.672s 2.564us/record 199.718MB/s
bs= 1024 count= 262144 0.732s 2.792us/record 366.773MB/s
bs= 2048 count= 262144 0.785s 2.993us/record 684.160MB/s
bs= 4096 count= 262144 0.968s 3.694us/record 1108.962MB/s
bs= 8192 count= 262144 1.612s 6.148us/record 1332.376MB/s
bs= 16384 count= 131072 1.504s 11.471us/record 1428.238MB/s
bs= 32768 count= 65536 1.497s 22.840us/record 1434.649MB/s
bs= 65536 count= 32768 1.432s 43.706us/record 1499.482MB/s
bs= 131072 count= 16384 1.437s 87.693us/record 1494.671MB/s
bs= 262144 count= 8192 1.426s 174.119us/record 1505.548MB/s
bs= 524288 count= 4096 1.415s 345.540us/record 1517.302MB/s
bs=1048576 count= 2048 1.428s 697.305us/record 1503.756MB/s
bs=2097152 count= 1024 1.430s 1396.846us/record 1501.348MB/s
bs=4194304 count= 512 1.442s 2815.664us/record 1489.632MB/s
bs=8388608 count= 256 1.444s 5642.461us/record 1486.693MB/s
================================================================
HP e8300, CPU i7-3770
freebsd13% ./bench_dd.py
bs= 1 count=1048576 0.728s 0.694us/record 1.440MB/s
bs= 2 count=1048576 0.573s 0.547us/record 3.658MB/s
bs= 4 count=1048576 0.565s 0.539us/record 7.418MB/s
bs= 8 count=1048576 0.575s 0.548us/record 14.595MB/s
bs= 16 count=1048576 0.572s 0.546us/record 29.329MB/s
bs= 32 count=1048576 0.574s 0.548us/record 58.435MB/s
bs= 64 count=1048576 0.573s 0.546us/record 117.174MB/s
bs= 128 count=1048576 0.568s 0.542us/record 236.122MB/s
bs= 256 count=1048576 0.577s 0.550us/record 465.528MB/s
bs= 512 count=1048576 0.585s 0.558us/record 917.797MB/s
bs= 1024 count=1048576 0.591s 0.564us/record 1815.495MB/s
bs= 2048 count=1048576 0.610s 0.582us/record 3517.599MB/s
bs= 4096 count=1048576 0.648s 0.618us/record 6624.642MB/s
bs= 8192 count=1048576 0.716s 0.683us/record 12000.920MB/s
bs= 16384 count=1048576 0.886s 0.845us/record 19391.838MB/s
bs= 32768 count=1048576 1.414s 1.349us/record 24291.204MB/s
bs= 65536 count= 524288 1.167s 2.226us/record 29446.678MB/s
bs= 131072 count= 262144 1.049s 4.001us/record 32757.097MB/s
bs= 262144 count= 131072 0.996s 7.597us/record 34507.742MB/s
bs= 524288 count= 131072 1.938s 14.784us/record 35462.791MB/s
bs=1048576 count= 65536 1.954s 29.814us/record 35170.740MB/s
bs=2097152 count= 32768 1.978s 60.353us/record 34748.329MB/s
bs=4194304 count= 16384 2.007s 122.520us/record 34233.639MB/s
bs=8388608 count= 8192 2.103s 256.698us/record 32678.930MB/s
debian11$ ./bench_dd.py
bs= 1 count=1048576 0.558s 0.532us/record 1.880MB/s
bs= 2 count=1048576 0.550s 0.524us/record 3.814MB/s
bs= 4 count=1048576 0.551s 0.526us/record 7.611MB/s
bs= 8 count=1048576 0.550s 0.525us/record 15.252MB/s
bs= 16 count=1048576 0.550s 0.524us/record 30.509MB/s
bs= 32 count=1048576 0.550s 0.524us/record 61.048MB/s
bs= 64 count=1048576 0.553s 0.527us/record 121.398MB/s
bs= 128 count=1048576 0.556s 0.530us/record 241.471MB/s
bs= 256 count=1048576 0.565s 0.538us/record 475.482MB/s
bs= 512 count=1048576 0.583s 0.556us/record 921.523MB/s
bs= 1024 count=1048576 0.608s 0.580us/record 1764.989MB/s
bs= 2048 count=1048576 0.640s 0.611us/record 3353.923MB/s
bs= 4096 count=1048576 0.701s 0.669us/record 6126.015MB/s
bs= 8192 count=1048576 0.870s 0.830us/record 9870.674MB/s
bs= 16384 count=1048576 1.191s 1.136us/record 14427.529MB/s
bs= 32768 count= 524288 1.004s 1.915us/record 17109.038MB/s
bs= 65536 count= 262144 0.822s 3.135us/record 20902.551MB/s
bs= 131072 count= 262144 1.496s 5.705us/record 22973.575MB/s
bs= 262144 count= 131072 1.468s 11.200us/record 23406.614MB/s
bs= 524288 count= 65536 1.519s 23.171us/record 22626.825MB/s
bs=1048576 count= 32768 1.495s 45.614us/record 22988.023MB/s
bs=2097152 count= 16384 1.487s 90.750us/record 23109.237MB/s
bs=4194304 count= 8192 1.474s 179.918us/record 23312.281MB/s
bs=8388608 count= 4096 1.588s 387.625us/record 21641.067MB/s
"""
| 2.328125 | 2 |
easyPlog/__init__.py | whuhit/easyPlog | 0 | 12794765 | <reponame>whuhit/easyPlog<filename>easyPlog/__init__.py
"""
@author: yangqiang
@contact: <EMAIL>
@file: __init__.py.py
@time: 2020/4/3 14:49
"""
from .easyPlog import Plog
| 1.046875 | 1 |
models/__init__.py | theorenck/pm-bot | 0 | 12794766 | <filename>models/__init__.py
from .colors import Color, Colors
from .grid import Location, Annotation, Annotations, Grid
from .minimap import Minimap
from .astar import AStarSearch | 1.4375 | 1 |
imports/dataHandler.py | mike-tr/upass-spork | 1 | 12794767 | import json
import imports.fileReader as reader
class dataBase:
def __init__(self, path, key):
self.file = reader.efile(path, key);
if(len(self.file.data) > 0):
self.json = json.loads(self.file.data)
else:
self.json = json.loads("{}")
self.json["key"] = key.decode()
def save(self):
self.file.data = json.dumps(self.json)
self.file.save(); | 3.171875 | 3 |
app/controllers/main_routes.py | HungUnicorn/mssql-prom-exporter | 1 | 12794768 | """
This is where all the general routes and controllers are defined.
"""
from flask import Blueprint
from flask import current_app as app
from flask import make_response
main_blueprint = Blueprint('main_blueprint', __name__)
@main_blueprint.route('/')
def index():
return make_response()
@main_blueprint.route('/health')
def health():
app.prom_init.up_gauge.set(1)
return make_response()
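# Minimal registration sketch (the app factory shown is illustrative; in this
# project the blueprint is presumably registered by the real application factory):
#
#   from flask import Flask
#   flask_app = Flask(__name__)
#   flask_app.register_blueprint(main_blueprint)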
| 2.625 | 3 |
trade/urls.py | RoosDaniel/Fadderjobb | 2 | 12794769 | from django.urls import path
from . import views
app_name = "trade"
urlpatterns = [
path('start/<str:receiver_username>/', views.start, name="start"),
path('<str:other_username>/', views.see_trade, name="see"),
path('change/<str:other_username>', views.change_trade, name="change"),
]
| 1.945313 | 2 |
scripts/codraw_dataset_generation/codraw_raw_to_hdf5.py | capstonecs42/GeNeVA_datasets_dev | 36 | 12794770 | <filename>scripts/codraw_dataset_generation/codraw_raw_to_hdf5.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Script to parse and read raw CoDraw data and save it in HDF5 format for GeNeVA-GAN
"""
from glob import glob
import json
import os
import pickle
import string
import cv2
import h5py
import nltk
import numpy as np
from tqdm import tqdm
import yaml
with open('config.yml', 'r') as f:
keys = yaml.load(f, Loader=yaml.FullLoader)
def replace_at_offset(msg, tok, offset, tok_replace):
before = msg[:offset]
after = msg[offset:]
after = after.replace(tok, tok_replace, 1)
return before + after
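# Illustrative check of replace_at_offset: only the occurrence at/after the given
# offset is touched, so replace_at_offset("a cat and a cat", "cat", 6, "dog")
# leaves the first "cat" intact and returns "a cat and a dog".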
def create_h5():
# load required keys
scenes_path = keys['codraw_scenes']
images_path = keys['codraw_images']
background_img = cv2.imread(keys['codraw_background'])
h5_path = keys['codraw_hdf5_folder']
spell_check = keys['codraw_spell_check']
codraw_extracted_coords = keys['codraw_extracted_coordinates']
# set height, width, scaling parameters
h, w, _ = background_img.shape
scale_x = 128. / w
scale_y = 128. / h
scaling_ratio = np.array([scale_x, scale_y, 1])
background_img = cv2.resize(background_img, (128, 128))
# load spelling corrections - obtained via Bing Spell Check API
with open(spell_check, 'rb') as f:
spell_check = pickle.load(f)
# create hdf5 files for train, val, test
h5_train = h5py.File(os.path.join(h5_path, 'codraw_train.h5'), 'w')
h5_val = h5py.File(os.path.join(h5_path, 'codraw_val.h5'), 'w')
h5_test = h5py.File(os.path.join(h5_path, 'codraw_test.h5'), 'w')
h5_train.create_dataset('background', data=background_img)
h5_val.create_dataset('background', data=background_img)
h5_test.create_dataset('background', data=background_img)
# set objects and bow (bag of words) dicts for each image
bow_dim = 0
GT_BOW = {}
GT_OBJECTS = {}
with open(codraw_extracted_coords, 'r') as f:
for line in f:
splits = line.split('\t')
image = splits[0]
split_coords = lambda x: [int(c) for c in x.split(',')]
bow = np.array([split_coords(b) for b in splits[1].split()])
bow_dim = len(bow)
GT_BOW[image] = bow[:, 0]
scaling = scaling_ratio * np.expand_dims(bow[:, 0], axis=1).repeat(3, 1)
GT_OBJECTS[image] = (bow[:, 1:] * scaling).astype(int)
# mark purely chitchat turns to be removed
chitchat = ['hi', 'done', 'ok', 'alright', 'okay', 'thanks', 'bye', 'hello']
# start saving data into hdf5; loop over all scenes
c_train = 0
c_val = 0
c_test = 0
for scene_file in tqdm(sorted(glob('{}/*json'.format(scenes_path)))):
# identify if scene belongs to train / val / test
split = scene_file.split('/')[-1].split('_')[0]
images = []
utterences = []
objects = []
coordinates = []
with open(scene_file, 'r') as f:
scene = json.load(f)
scene_id = scene['image_id']
# loop over turns in a single scene
idx = 0
prev_bow = np.zeros((bow_dim))
description = []
for i in range(len(scene['dialog'])):
bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)]
# new objects added in this turn
hamming_distance = np.sum(bow - prev_bow)
turn = scene['dialog'][i]
# lowercase all messages
teller = str.lower(turn['msg_t'])
drawer = str.lower(turn['msg_d'])
# clear chitchat turns
if teller in chitchat:
teller = ''
if drawer in chitchat:
drawer = ''
# replace with spelling suggestions returned by Bing Spell Check API
if teller in spell_check and len(spell_check[teller]['flaggedTokens']) != 0:
for flagged_token in spell_check[teller]['flaggedTokens']:
tok = flagged_token['token']
tok_offset = flagged_token['offset']
assert len(flagged_token['suggestions']) == 1
tok_replace = flagged_token['suggestions'][0]['suggestion']
teller = replace_at_offset(teller, tok, tok_offset, tok_replace)
if drawer in spell_check and len(spell_check[drawer]['flaggedTokens']) != 0:
for flagged_token in spell_check[drawer]['flaggedTokens']:
tok = flagged_token['token']
tok_offset = flagged_token['offset']
assert len(flagged_token['suggestions']) == 1
tok_replace = flagged_token['suggestions'][0]['suggestion']
drawer = replace_at_offset(drawer, tok, tok_offset, tok_replace)
# add delimiting tokens: <teller>, <drawer>
if teller != '':
description += ['<teller>'] + nltk.word_tokenize(teller)
if drawer != '':
description += ['<drawer>'] + nltk.word_tokenize(drawer)
description = [w for w in description if w not in chitchat]
description = [w for w in description if w not in string.punctuation]
bow = GT_BOW['Scene{}_{}'.format(scene_id, idx)]
coords = GT_OBJECTS['Scene{}_{}'.format(scene_id, idx)]
# if there is no image for current turn: merge with next turn
if turn['abs_d'] == '':
continue
# if no new object is added in image for current turn: merge with next turn
if hamming_distance < 1:
prev_bow = bow
idx += 1
continue
# queue image, instruction, objects bow, object coordinates for saving
if len(description) > 0:
image = cv2.imread(os.path.join(images_path, 'Scene{}_{}.png'.format(scene_id, idx)))
image = cv2.resize(image, (128, 128))
images.append(image)
utterences.append(str.join(' ', description))
objects.append(bow)
coordinates.append(coords)
description = []
idx += 1
prev_bow = bow
# add current scene's data to hdf5
if len(images) > 0:
if split == 'train':
scene = h5_train.create_group(str(c_train))
c_train += 1
elif split == 'val':
scene = h5_val.create_group(str(c_val))
c_val += 1
elif split == 'test':
scene = h5_test.create_group(str(c_test))
c_test += 1
scene.create_dataset('images', data=images)
dt = h5py.special_dtype(vlen=str)
scene.create_dataset('utterences', data=np.string_(utterences), dtype=dt)
scene.create_dataset('objects', data=np.array(objects))
scene.create_dataset('coords', data=np.array(coordinates))
scene.create_dataset('scene_id', data=scene_id)
else:
print(scene_id)
if __name__ == '__main__':
create_h5()
| 2.484375 | 2 |
project/scripts/update_eo_version.py | polystat/c2eo | 12 | 12794771 | <gh_stars>10-100
#! /usr/bin/python3
import sys
import re as regex
# Our scripts
import tools
import settings
def main():
tools.pprint()
current_version = settings.get_setting('current_eo_version')
latest_version = settings.get_setting('latest_eo_version')
is_latest_version, latest_version = is_update_needed(current_version, latest_version)
if is_latest_version:
return
found_files = tools.search_files_by_pattern('../../', 'pom.xml', recursive=True)
update_version_in_files(found_files, latest_version)
settings.set_setting('current_eo_version', latest_version)
tools.pprint('EO version updated\n')
def is_update_needed(current_version, latest_version):
compare = tools.version_compare(current_version, latest_version)
is_latest_version = False
if compare == 1:
latest_version = current_version
tools.pprint(f'Manual update latest EO version to {latest_version}', status='WARN')
elif compare == 0:
is_latest_version = True
tools.pprint('We use latest EO version', status='PASS')
tools.pprint()
else:
tools.pprint(f'We use old EO version: "{current_version}"', status='WARN')
tools.pprint(f'Start updating files')
return is_latest_version, latest_version
def update_version_in_files(files, latest_version):
tools.pprint('Updating version')
count_changed_files = 0
pattern = r'<eolang\.version>.*<\/eolang\.version>'
latest_version_declaration = f'<eolang.version>{latest_version}</eolang.version>'
for file in files:
with open(file, 'r') as f:
data = f.read()
result = regex.search(pattern, data)
if (not result) or (latest_version_declaration in result.group()):
continue
new_data = regex.sub(pattern, latest_version_declaration, data)
with open(file, 'w') as f:
f.write(new_data)
count_changed_files += 1
tools.pprint(f'{count_changed_files} files updated')
return count_changed_files
if __name__ == '__main__':
tools.move_to_script_dir(sys.argv[0])
main()
| 2.609375 | 3 |
release-assistant/test/coverage_count.py | openeuler-mirror/release-tools | 1 | 12794772 | #!/usr/bin/python3
# ******************************************************************************
# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
# licensed under the Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http://license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
# -*- coding:utf-8 -*-
"""
Count all test cases
"""
import os
import sys
import unittest
import coverage
from coverage import CoverageException
suite = unittest.TestSuite()
BASE_PATH = os.path.join(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
TEST_CASE_PATH = os.path.join(BASE_PATH, "test")
cov = coverage.coverage(include=[BASE_PATH + "/javcra/*"],
omit=["*__init__.py", "*/check_requires/*.py", "*/api/obscloud.py"])
def specify_case(file_path):
"""
Test specify test cases
Args:
file_path: test cases file path
Returns: discover result
"""
discover = unittest.defaultTestLoader.discover(
file_path, pattern="test*.py", top_level_dir=file_path
)
return discover
if __name__ == "__main__":
runner = unittest.TextTestRunner()
args = sys.argv
cov.start()
test_case_files = [
os.path.join(TEST_CASE_PATH, "test_start/"),
os.path.join(TEST_CASE_PATH, "test_modify/"),
os.path.join(TEST_CASE_PATH, "test_check/"),
os.path.join(TEST_CASE_PATH, "test_release/")
]
errors = []
failures = []
for file in test_case_files:
runner_result = runner.run(specify_case(file))
errors.extend(runner_result.errors)
failures.extend(runner_result.failures)
if any([errors, failures]):
sys.exit(1)
cov.stop()
try:
cov.report(show_missing=True)
# cov.html_report()
except CoverageException:
print("No data to report")
sys.exit(1)
| 1.882813 | 2 |
doc/workflow/examples/example_driver1.py | PyUtilib/PyUtilib | 24 | 12794773 | <reponame>PyUtilib/PyUtilib<gh_stars>10-100
import pyutilib.workflow
import pyutilib.component.core
# @usage:
import tasks_yz
driver = pyutilib.workflow.TaskDriver()
driver.register_task('TaskZ')
driver.register_task('TaskY')
print(driver.parse_args(['TaskZ','--x=3','--y=4']))
print(driver.parse_args(['TaskY','--X=3','--Y=4']))
# @:usage
| 2.109375 | 2 |
AxonDataset.py | idhamari/CapsPix2Pix | 24 | 12794774 | import numpy as np
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch
from torch.autograd import Variable
from load_memmap import *
class AxonDataset(Dataset):
"""" Inherits pytorch Dataset class to load Axon Dataset """
def __init__(self, data_name='crops64_axons_only', folder='axon_data', type='train', transform=None, resize=None, normalise=False, read='npy'):
"""
:param data_name (string)- data name to load/ save
:param folder- location of dataset
:param type - train or test dataset
"""
self.data_name = data_name
self.read = read
self.transform = transform
self.resize = resize
self.normalise = normalise
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
if self.read == 'npy':
self.x_data, self.y_data, _ = load_dataset(type, folder, data_name)
self.len_data = len(self.x_data)
elif self.read == 'image':
self.folder = os.path.join(__location__,self.data_name,'train')
images_original = [img for img in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original"))]
images_mask = [img for img in
os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask"))]
self.images_mask = images_mask
self.images_original = images_original
self.images_mask.sort()
self.images_original.sort()
self.len_data = len(images_original)
def __len__(self):
""" get length of data
example: len(data) """
return self.len_data
def __getitem__(self, idx):
"""gets samples from data according to idx
:param idx- index to take
example: data[10] -to get the 10th data sample"""
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
if self.read == 'npy':
if self.resize:
sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize))
sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize))
else:
sample_x_data = self.x_data[idx]
sample_y_data = self.y_data[idx]
elif self.read == 'image':
data_path = self.images_original[idx]
mask_path = self.images_mask[idx]
sample_x_data = plt.imread(
os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "original", data_path))
sample_y_data = (plt.imread(
os.path.join(os.path.dirname(os.path.abspath(__file__)), self.folder, "mask", mask_path))).astype(
float)
sample_x_data = torch.Tensor(sample_x_data)
sample_y_data = torch.Tensor(sample_y_data)
if len(sample_x_data.shape) == 2:
sample_x_data.unsqueeze_(0)
if len(sample_y_data.shape) == 2:
sample_y_data.unsqueeze_(0)
# normalise between [-1,1]
if self.normalise:
sample_x_data = 2*((sample_x_data - torch.min(sample_x_data))/ (torch.max(sample_x_data) - torch.min(sample_x_data)) ) - 1
data = [sample_x_data, sample_y_data]
return data
class SyntheticDataset(Dataset):
"""" Inherits pytorch Dataset class to load Synthetic Axon Dataset """
def __init__(self, num=50000, data_name='syn256', type='val', transform=None, resize=None):
"""
:param num - number of data to generate
:param data_name (string)- data name to load/ save
:param type - train or test dataset
"""
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
name_x = os.path.join(__location__, 'npy_data/' + data_name + '_x_data_' + type + '.npy')
name_y = os.path.join(__location__,'npy_data/' + data_name + '_y_data_' + type + '.npy')
name_y_points = os.path.join(__location__,'npy_data/' + data_name + '_y_points_data_' + type + '.npy')
try:
self.x_data = np.load(name_x, mmap_mode='r')
self.y_data = np.load(name_y, mmap_mode='r')
self.y_data_points = np.load(name_y_points)
except:
# if no dataset currently created, generate a new synthetic dataset with parameters args
print('no dataset with the name')
self.data_name = data_name
self.transform = transform
self.resize = resize
def read_tensor_dataset(self):
""" converts dataset to tensors """
tt = ToTensor()
x_data = tt(self.x_data)
y_data = tt(self.y_data)
return x_data, y_data
def __len__(self):
""" get length of data
example: len(data) """
return (len(self.x_data))
def __getitem__(self, idx):
"""gets samples from data according to idx
:param idx- index to take
example: data[10] -to get the 10th data sample"""
if self.resize:
sample_x_data = np.resize(np.array([self.x_data[idx]]), (1, self.resize,self.resize))
sample_y_data = np.resize(np.array([self.y_data[idx]]), (1, self.resize,self.resize))
else:
sample_x_data = self.x_data[idx]
sample_y_data = self.y_data[idx]
sample_x_data = np.expand_dims(sample_x_data, axis=0)
sample_y_data = np.expand_dims(sample_y_data, axis=0)
sample_x_data = torch.Tensor(sample_x_data)
sample_y_data = torch.Tensor(sample_y_data)
data = [sample_x_data, sample_y_data]
return data
class ToTensor:
"""Convert ndarrays in data to Tensors."""
@staticmethod
def __call__(data):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
#data = data.transpose((1, 0))
data = np.array([data])
data = torch.Tensor(data)
if torch.cuda.is_available():
data = data.cuda()
return data
@staticmethod
def data_to_tensor(x_data, y_data):
"""takes data and splits into a list of tensors- of which each list contains
tensors of several samples (i.e. one id)
:param x_data - the data
:param y_data - the labels
"""
tt = ToTensor()
x_train_temp = tt(x_data)
y_train_temp = tt(y_data)
data = [x_train_temp, y_train_temp]
return data
@staticmethod
def data_ids_to_tensor_list(x_data, y_data, ids):
"""takes data and splits into a list of tensors- of which each list contains
tensors of several samples (i.e. one id)
:param x_data - the data
:param y_data - the labels
:param ids - the ids corresponding to each sample
"""
tt = ToTensor()
unique_ids = np.unique(ids)
data = [None] * unique_ids.size
len = np.zeros(unique_ids.size).astype(int)
for i in np.arange(unique_ids.size):
ind_id = np.nonzero(unique_ids[i] == ids)[0].astype(int)
len[i] = int(ind_id.size)
x_train_temp = tt(x_data[ind_id])
y_train_temp = tt(y_data[ind_id])
data[i] = [x_train_temp[0], y_train_temp[0], len[i]]
max_len = int(np.max(len))
return data, max_len
@staticmethod
def create_variable(tensor):
"""creates a Variable tensor with gpu if available
:param tensor - the tensor to wrap with Variable """
# Do cuda() before wrapping with variable
if torch.cuda.is_available():
return Variable(tensor.cuda())
else:
return Variable(tensor)
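# Minimal usage sketch (assumes the 'axon_data' .npy crops referenced above are
# available locally); it loads one batch of image/mask pairs via a DataLoader.
if __name__ == '__main__':
    dataset = AxonDataset(data_name='crops64_axons_only', type='train', normalise=True)
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    for x_batch, y_batch in loader:
        print(x_batch.shape, y_batch.shape)  # e.g. torch.Size([4, 1, 64, 64]) each
        break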
| 2.71875 | 3 |
dev/source/RequestClass.py | Gerard-007/deenux | 0 | 12794775 | <reponame>Gerard-007/deenux
from CustomerClass import Customer
import random
class Order:
num_of_orders = 0
def __init__(self):
        self.order_ID = random.randint(0, 999999999)
        self.customer_ID = Customer.business_ID
self.ship_to_party_ID = Customer.business_address | 2.953125 | 3 |
library/connecter/ansible/yaml/read2file.py | GNHJM/lykops | 141 | 12794776 | import os
from library.connecter.ansible.yaml import Yaml_Base
from library.utils.file import read_file
from library.utils.path import get_pathlist
class Read_File(Yaml_Base):
def router(self, this_path, this_basedir=None, yaml_tpye='main', preserve=True, together=False, name='', describe=''):
'''
        Router that checks whether the YAML syntax of a file is correct.
        :parameters
            filename: the file
            name: name
            this_basedir: directory
            yaml_tpye: type of the YAML file
            preserve: whether to write the result to the database
            together: whether to return the contents of all files under this main
            name: name under which the YAML content is stored in the database
            describe: description stored with the YAML content
            zhname: short Chinese display name stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success True plus the file content (as a dict),
            on failure False plus the reason for the failure.
'''
if yaml_tpye in ('full_roles' , 'main') :
result = self.main(this_path, preserve=preserve, together=together, name=name, describe=describe)
elif yaml_tpye == 'include' :
result = self.include(this_path, this_basedir=this_basedir, file_type='tasks', preserve=preserve, name=name, describe=describe)
elif yaml_tpye == 'roles' :
result = self.roles(this_path, this_basedir=this_basedir, preserve=preserve, together=together, name=name, describe=describe)
else :
self.logger.error('检测yaml文件的语法失败,原因:参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')
return (False, '参数yaml_data' + yaml_tpye + '不是接受值,只能接受full_roles、main、include、roles')
return result
def main(self, filename, preserve=True, together=False, name='', describe=''):
'''
        Check whether a main file is syntactically correct; if it contains include and/or roles, each one is checked in turn.
        include: must be a relative path
        roles: may only contain letters and digits
        :parameters
            filename: the file
            name: name
            preserve: whether to write the result to the database
            together: whether to return the contents of all files under this main
            name: name under which the YAML content is stored in the database
            describe: description stored with the YAML content
            zhname: short Chinese display name stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success True plus the file content (as a dict),
            on failure False plus the reason for the failure.
'''
if preserve and together:
sub_preserve = False
else :
sub_preserve = preserve
result = self.yaml_loader(filename)
if result[0] :
(filename, content, yaml_data) = result[1:]
else :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,转化成yaml数据时失败,原因:' + result[1])
return (False, '文件' + filename + '转化成yaml数据时失败,' + result[1])
result = self.check_main(yaml_data)
if result[0] :
(roles_list, includefile_dict) = result[1:]
else :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1])
return (False, '文件' + filename + '未通过yaml语法检测,' + result[1])
this_basedir = os.path.dirname(filename)
include_content = {}
roles_content = {}
for file, file_type in includefile_dict.items() :
result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve)
if not result[0] :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,原因:' + result[1])
return (False, '文件' + filename + '中的include文件名为' + file + '未通过yaml语法检测,' + result[1])
else :
file = os.path.basename(file)
include_content.update({file:result[1]})
for roles in roles_list :
result = self.roles(roles, this_basedir=this_basedir, preserve=sub_preserve, together=together)
if result[0] :
include_content.update(result[2])
roles = os.path.basename(roles)
roles_content.update({roles:result[1]})
else :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,roles名为' + roles + '未通过yaml语法检测,原因:' + result[1])
return (False, '文件' + filename + '中的roles名为' + roles + '未通过yaml语法检测,' + result[1])
data = {
'main' : content,
'include': include_content,
'roles': roles_content,
}
if preserve :
result = self.write2db(name, data, 'main', describe=describe)
if not result[0] :
self.logger.error('检测yaml文件' + filename + '类型为full_roles或者main语法失败,通过yaml语法检测,但无法写入数据库,原因:' + result[1])
return (False, '文件' + filename + '通过yaml语法检测,但无法写入数据库' + result[1])
self.logger.info('检测yaml文件' + filename + '类型为full_roles或者main语法成功')
if together :
return (True, data)
else :
return (True, content)
def include(self, file, this_basedir=None, file_type='main', preserve=True, name='', describe=''):
'''
        Check whether an include file is syntactically correct.
        :parameters
            this_basedir: parent directory of the file that references this one
            file: the file
            this_path: path used when referencing the file
            file_type: type
            preserve: whether to write the result to the database
            name: name under which the YAML content is stored in the database
            describe: description stored with the YAML content
            zhname: short Chinese display name stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success True plus the include file content (as a dict, possibly empty),
            on failure False plus the reason for the failure.
'''
if file_type not in ('main', 'tasks', 'var') :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误')
return (False, '参数file_type错误')
result = self._isinclude(file)
if not result[0] :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,参数file_type错误,原因:' + result[1])
return result
if this_basedir is None or not this_basedir :
filename = file
else :
try :
filename = this_basedir + '/' + file
except :
filename = file
result = self.yaml_loader(filename)
if result[0] :
(content, yaml_data) = result[2:]
else :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,转化为yaml数据时失败,原因:' + result[1])
return (False, result[1])
result = self.check_include(yaml_data, file_type=file_type)
if not result[0] :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,语法检测未通过,原因:' + result[1])
return (False, result[1])
if preserve :
result = self.write2db(name, content, 'include', describe=describe)
if not result[0] :
self.logger.error('检测yaml文件' + file + '类型为include语法失败,但无法写入数据库,原因:' + result[1])
return (False, '无法写入数据库' + result[1])
self.logger.info('检测yaml文件' + filename + '类型为include语法成功')
return (True, content)
def roles(self, roles_path, this_basedir=None, preserve=True, together=False, name='', describe=''):
'''
        Check whether a single roles directory is syntactically correct.
        :parameters
            this_basedir: parent directory of the main file that references this roles, e.g. if /opt/lykops/example/ansible/roles/nginx/main.yaml references a roles, this value is /opt/lykops/example/ansible/roles/nginx/
            roles_path: the roles path as written in the main file that references it
            preserve: whether to write the result to the database
            together: whether to return the contents of all files under this roles
            name: name under which the YAML content is stored in the database
            describe: description stored with the YAML content
            zhname: short Chinese display name stored with the YAML content
        :return
            A tuple whose first element is the execution result:
            on success (True, contents of all files under the roles (dict, possibly empty), contents of the include files referenced by the roles (dict, possibly empty)),
            on failure False plus the reason for the failure.
'''
content_dict = {}
if preserve and together:
sub_preserve = False
else :
sub_preserve = preserve
if not name :
name = roles_path
result = self._isrolesname(name)
if not result :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')
return (False, '语法错误,roles名不符合本系统要求的,注:虽然原生ansible支持这样写')
else :
if this_basedir is None or not this_basedir:
this_roles_path = roles_path
else :
try :
this_roles_path = this_basedir + '/roles/' + roles_path
except :
this_roles_path = roles_path
include_content = {}
for this_dir in ('tasks', 'vars', 'handlers', 'meta', 'defaults') :
yaml_file = this_roles_path + '/' + this_dir + '/main.yaml'
result = read_file(yaml_file)
if not result[0] :
if this_dir == 'tasks' :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml不存在')
return (False, this_dir + '/main.yaml不存在')
continue
else :
content_dict[this_dir] = result[1]
temp_dir = this_roles_path + '/templates/'
content_dict['templates'] = {}
result = get_pathlist(temp_dir, get_death=0, max_size=4 * 1024 * 1024)
if result[0] :
temp_list = result[1]
for temp in temp_list :
result = read_file(temp)
if result[0] :
temp_file = os.path.basename(temp)
content_dict['templates'][temp_file] = result[1]
if not content_dict['templates'] :
del content_dict['templates']
result = self.check_roles(content_dict)
if result[0] :
includefile_dict = result[1]
for file, file_type in includefile_dict.items() :
result = self.include(file, this_basedir=this_basedir, file_type=file_type, preserve=sub_preserve)
if not result[0] :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,roles包含的include文件' + file + '未通过语法检测,原因:' + result[1])
return (False, 'roles包含的include文件' + file + '未通过语法检测,' + result[1])
else :
include_content.update({file:result[1]})
else :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,' + this_dir + '/main.yaml语法错误,原因:' + result[1])
return (False, this_dir + '/main.yaml语法错误,' + result[1])
data = {
'main' : {},
'include': include_content,
'roles': {name:content_dict},
}
if preserve :
result = self.write2db(name, data, 'roles', describe=describe)
if not result[0] :
self.logger.error('检测yaml文件roles名为' + roles_path + '失败,无法写入数据库,' + result[1])
return (False, '无法写入数据库,' + result[1])
self.logger.info('检测yaml文件roles名为' + roles_path + '成功')
if together :
return (True, content_dict, include_content)
else :
return (True, {}, {})
| 2.265625 | 2 |
cvs_scrapy/pipelines.py | joehwang/auto-door | 5 | 12794777 | <reponame>joehwang/auto-door
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import redis
import re
import os
class CvsScrapyPipeline(object):
def open_spider(self,spider):
self._redis = redis.Redis(host='redis', port=6379, decode_responses=True,password=os.getenv("REDISPWD"))
def process_item(self, item, spider):
if item.get("addr"):
tags=re.findall(r"(\D*[縣市])?(\D*[區鎮鄉市])?(\D*[村里])?(\D*[路大道街])", item["addr"])
item["tags"]=",".join(filter(None, [a for b in tags for a in b]))
#(\D*[縣市])(\D*[區鎮鄉市])(\D*[路大道街])
self._redis.set(item["name"], item["addr"])
self._redis.set(item["phone"], item["name"])
self._redis.lpush(item["kind"],item["name"])
return item
def close_spider(self, spider):
pass | 2.375 | 2 |
dbHelper.py | portbusy/moneytrakerBot | 1 | 12794778 | <gh_stars>1-10
import sqlite3
class DBHelper:
def __init__(self, dbname="expenses.sqlite"):
self.dbname = dbname
self.conn = sqlite3.connect(dbname)
def setup(self):
outcome = "CREATE TABLE IF NOT EXISTS outcome (date date, value float, comment varchar(50))"
income = "CREATE TABLE IF NOT EXISTS income (date date, value float, comment varchar(50))"
self.conn.execute(outcome)
self.conn.execute(income)
self.conn.commit()
def add_income(self, date, value, comment):
stmt = "INSERT INTO income (date, value, comment) VALUES (?, ?, ?)"
args = (date, value, comment)
self.conn.execute(stmt, args)
self.conn.commit()
def add_outcome(self, date, value, comment):
stmt = "INSERT INTO outcome (date, value, comment) VALUES (?, ?, ?)"
args = (date, value, comment)
self.conn.execute(stmt, args)
self.conn.commit()
def delete_income(self, value, comment):
stmt = "DELETE FROM income WHERE value = (?) AND comment = (?)"
args = (value, comment)
self.conn.execute(stmt, args)
self.conn.commit()
def delete_outcome(self, value, comment):
stmt = "DELETE FROM outcome WHERE value = (?) AND comment = (?)"
args = (value, comment)
self.conn.execute(stmt, args)
self.conn.commit()
def get_income(self, month):
cur = self.conn.cursor()
stmt = "SELECT * FROM income WHERE strftime('%m', date) = '" +month+ "'"
cur.execute(stmt)
rows = cur.fetchall()
return rows
def get_total_income(self, month):
cur = self.conn.cursor()
stmt = "SELECT SUM(value) FROM income WHERE strftime('%m', date) = '" + month + "'"
cur.execute(stmt)
total = cur.fetchone()
return total[0]
def get_outcome(self, month):
cur = self.conn.cursor()
stmt = "SELECT * FROM outcome WHERE strftime('%m', date) = '"+month+"'"
cur.execute(stmt)
rows = cur.fetchall()
return rows
def get_total_outcome(self, month):
cur = self.conn.cursor()
stmt = "SELECT SUM(value) FROM outcome WHERE strftime('%m', date) = '" + month + "'"
cur.execute(stmt)
total = cur.fetchone()
return total[0]
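# Minimal usage sketch (dates are illustrative; months are matched with
# SQLite's strftime('%m', date), hence the zero-padded '05'):
if __name__ == '__main__':
    db = DBHelper()
    db.setup()
    db.add_income('2021-05-01', 1200.0, 'salary')
    db.add_outcome('2021-05-03', 45.5, 'groceries')
    print(db.get_total_income('05'))
    print(db.get_total_outcome('05'))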
| 3.46875 | 3 |
tests/utils.py | chstan/autodiDAQt | 1 | 12794779 | <filename>tests/utils.py<gh_stars>1-10
from dataclasses import dataclass
from autodidaqt.instrument import LogicalAxisSpecification
from autodidaqt.mock import MockMotionController
@dataclass
class CoordinateOffsets:
x_off: float = 0
y_off: float = 0
z_off: float = 0
class LogicalMockMotionController(MockMotionController):
r = 3.14159 / 4
# cartesian
x_y_z = LogicalAxisSpecification(
{
"stages[0]": lambda state, x, y, z: x - y,
"stages[1]": lambda state, x, y, z: x + y,
"stages[2]": lambda state, x, y, z: z,
},
{
"x": lambda state, s0, s1, s2: (s0 + s1) / 2,
"y": lambda state, s0, s1, s2: (s1 - s0) / 2,
"z": lambda state, s0, s1, s2: s2,
},
initial_coords=(0, 0, 0),
) # (x,y,z) = (0,0,0)
# stateful transform: offset coordinates
offset_x_y_z = LogicalAxisSpecification(
{
"stages[0]": lambda state, x, y, z: x + state.x_off,
"stages[1]": lambda state, x, y, z: y + state.y_off,
"stages[2]": lambda state, x, y, z: z + state.z_off,
},
{
"x": lambda state, s0, s1, s2: s0 - state.x_off,
"y": lambda state, s0, s1, s2: s1 - state.y_off,
"z": lambda state, s0, s1, s2: s2 - state.z_off,
},
initial_coords=(0, 0, 0),
state=CoordinateOffsets,
)
| 2.53125 | 3 |
App/utils/need_login.py | msamunetogetoge/BookRecommendationApp | 0 | 12794780 | <reponame>msamunetogetoge/BookRecommendationApp
from functools import wraps
from django.shortcuts import render
from Login.models import M_User
def need_login(redirect_field_name:str, err_msg:str, view_func=None):
"""
    Use this where login is required.
    When the user is not logged in, render redirect_field_name with {'msg': err_msg} added to the context.
    Args:
        redirect_field_name (str): template to show when the user is not logged in
        view_func (function, optional): view function.
        err_msg (str): message to show when the user is not logged in.
"""
def decorator(func):
@wraps(func)
def wrapper(request, *args, **kwargs):
if request.user.is_authenticated:
return func(request, *args, **kwargs)
msg={"msg":err_msg}
return render(request, redirect_field_name, msg)
return wrapper
return decorator
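# Minimal usage sketch (template and message strings are illustrative):
#
#   @need_login('login.html', 'Please log in first.')
#   def my_view(request):
#       return render(request, 'home.html', {})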
| 2.171875 | 2 |
qp_search_project/searcher/services.py | enlighter/ndl-question-papers-search-hub | 1 | 12794781 | import json
from urllib import request
import requests
#for rest api
repository_url = 'http://10.3.100.22:8080'
restpath = '/rest'
xmlpath = '/xmlui'
def get_communities():
communities = request.urlopen(repository_url + restpath + '/communities')
communities_json = communities.read().decode('utf-8')
communities_load = json.loads(communities_json)
communities_processed = []
for dictionary in communities_load:
if dictionary['name'] and dictionary['name'] != '':
communities_processed.append(dictionary)
#print(communities_processed)
with open("test.json", 'w') as jsonfile:
text = json.dumps(communities_processed)
jsonfile.write(text)
return communities_processed
def get_by_year(cp):
for dictionary in cp:
try:
year = int(dictionary['name'])
id = dictionary['id']
print(year)
#ccj = curr_collections.read().decode('utf-8')
except:
year = 0
if year != 0:
path = repository_url + dictionary['link'] + '/collections'
print(path)
curr_collections = request.urlopen(path)
curr_json = json.loads(curr_collections.read().decode('utf-8'))
print(curr_json[0]['handle'])
path += str(curr_json[0]['id'])
temp = requests.get(path)
print(temp)
if __name__ == '__main__':
get_by_year(get_communities()) | 3.0625 | 3 |
sdk/python/pulumi_azure/network/network_security_group.py | Frassle/pulumi-azure | 0 | 12794782 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
from .. import utilities, tables
class NetworkSecurityGroup(pulumi.CustomResource):
"""
Manages a network security group that contains a list of network security rules. Network security groups enable inbound or outbound traffic to be enabled or denied.
~> **NOTE on Network Security Groups and Network Security Rules:** Terraform currently
provides both a standalone Network Security Rule resource, and allows for Network Security Rules to be defined in-line within the Network Security Group resource.
At this time you cannot use a Network Security Group with in-line Network Security Rules in conjunction with any Network Security Rule resources. Doing so will cause a conflict of rule settings and will overwrite rules.
"""
def __init__(__self__, __name__, __opts__=None, location=None, name=None, resource_group_name=None, security_rules=None, tags=None):
"""Create a NetworkSecurityGroup resource with the given unique name, props, and options."""
if not __name__:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(__name__, str):
raise TypeError('Expected resource name to be a string')
if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if not location:
raise TypeError('Missing required property location')
__props__['location'] = location
__props__['name'] = name
if not resource_group_name:
raise TypeError('Missing required property resource_group_name')
__props__['resource_group_name'] = resource_group_name
__props__['security_rules'] = security_rules
__props__['tags'] = tags
super(NetworkSecurityGroup, __self__).__init__(
'azure:network/networkSecurityGroup:NetworkSecurityGroup',
__name__,
__props__,
__opts__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
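# Minimal usage sketch (resource names and location are illustrative values):
#
#   example_nsg = NetworkSecurityGroup(
#       "example-nsg",
#       location="West Europe",
#       resource_group_name="example-resources")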
| 1.875 | 2 |
SHOOTER/bullets.py | AnshDubey1999/Customized-Space-Shooter | 0 | 12794783 | <reponame>AnshDubey1999/Customized-Space-Shooter
def bullets_remove(bullets):
    # Checks if bullets have crossed the display. If a bullet has passed the
    # current game window, remove it.
    # Returns an empty list if the bullet list is empty.
if len(bullets) == 0:
return []
else:
index = 0
while True:
if len(bullets) == 0:
break
# checks if bullet's x or y coordinates (in relation to pixels) have crossed
# the game window
if bullets[index][0] < 0 or bullets[index][0]> 900 or bullets[index][1]<0 or bullets[index][1] > 900:
bullets.remove(bullets[index])
else:
index += 1
#breaks once all bullets within list has been checked
if index == len(bullets):
break
# returns empty list if all bullets have been removed
if len(bullets) == 0:
return []
# returns the new list of bullets
else:
return bullets | 3.578125 | 4 |
Python/Pandas/Inbuilt_impootant_functions.py | themohitpapneja/Code_Dump | 0 | 12794784 | X=[4.5,6,7,1,0.9,5.6,4.3,4,4.65,3,9]
summ=sum(X)
print(summ)
Y=sorted(X)
print(Y)
X.sort()
print(X) | 3.125 | 3 |
app/__init__.py | Jhustin27/Eye-of-summit-2.0 | 0 | 12794785 | """
Project Summit is a program about the SUMMIT National Park; it shows the user all the animals in the park and their taxonomy.
"""
from app import Listas
import os
def clases(opc):
"""
    clases is a function that displays the list of the 2 animal classes found in the park, in this case Aves and Mammalia.
"""
num = 0
n=0
for n in range(24):
"""
        Each animal entry is made up of:
        :param nombre: Name
        :param clase: Class
        :param especie: Species
        :param familia: Family
        :param genero: Genus
        :param nombre comun: Common name
        :param orden: Order
:type n: int
:type c: int
:type e: int
:type f: int
:type g: int
:type n_c: int
:type o: int
"""
if opc == 1:
if Listas.clase[n] == 'Aves':
num = num + 1
print(str(num) + ".", Listas.nombre[n])
print(Listas.especies[n])
print(Listas.familia[n])
print(Listas.genero[n])
print(Listas.nombre_comun[n])
print(Listas.orden[n])
elif opc == 2:
if Listas.clase[n] == "Mammalia":
num = num + 1
print(str(num) + ".", Listas.nombre[n])
print(Listas.especies[n])
print(Listas.familia[n])
print(Listas.genero[n])
print(Listas.nombre_comun[n])
print(Listas.orden[n])
return num
if __name__ == '__main__':
print("Menu")
print("1. Lista de animales")
print("2. Lista de animales por clases")
num = 0
opm = int(input("Introduza una opcion: "))
if opm == 1:
n = 0
for n in range(24):
num = num + 1
print(str(num) + ".", Listas.nombre[n])
op = int(input("Introduzca una opcion: "))
os.system('cls')
print(Listas.nombre[op - 1])
print(Listas.clase[op - 1])
print(Listas.especies[op - 1])
print(Listas.familia[op - 1])
print(Listas.genero[op - 1])
print(Listas.nombre_comun[op - 1])
print(Listas.orden[op - 1])
else:
os.system('cls')
print("1.Aves")
print("2. Mammalia")
opc = int(input("Introduzca una opcion: "))
clases(opc)
| 3.796875 | 4 |
Python 3 - WebScraping/Teste2.py | Paimonz/Python-Estudos | 0 | 12794786 | <filename>Python 3 - WebScraping/Teste2.py
'''import requests
r = requests.get('https://www.investopedia.com/terms/f/forex.asp')
print(r.text)
print(r.encoding)
print(r.headers)
print(r.status_code)'''
html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p class="title"><b>The Dormouse's story</b></p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>"""
import requests
r = requests.get('https://www.investopedia.com/terms/f/forex.asp')
dados = r.text
from bs4 import BeautifulSoup
soup = BeautifulSoup(dados, 'html.parser')
'''print(soup.prettify())
print(soup.title)
print(soup.title.name)
print(soup.title.string)
print(soup.title.parent.name)
print(soup.p)'''
#print(soup.get_text(), end='')
with open('Teste.txt', 'w+') as arquivo:
    arquivo.write(soup.get_text())
import webbrowser
webbrowser.open('www.google.com', new=0, autoraise=True) | 3.859375 | 4 |
silk/utils/multipleprocess.py | tzm41/silk | 1 | 12794787 | <filename>silk/utils/multipleprocess.py
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import time
class MultipleProcess(object):
@staticmethod
def process(func, *args):
processes = []
for node in args[0]:
new_args = [args[0][node]] + [arg for arg in args[1:]]
p = multiprocessing.Process(target=func, args=(new_args,))
processes.append(p)
if len(processes) == 4:
is_alive = True
for each in processes:
each.start()
begin = time.time()
while is_alive:
is_alive = False
for each in processes:
is_alive = is_alive or each.is_alive()
timeout = (time.time() - begin)
if timeout >= 5:
break
processes = []
for each in processes:
each.start()
is_alive = True
while is_alive:
is_alive = False
for each in processes:
is_alive = is_alive or each.is_alive()
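# Minimal usage sketch (the worker function and node dict are illustrative):
def _example_worker(args):
    # args arrives as [node_value, extra_arg, ...], assembled by MultipleProcess.process
    print('processing', args)
if __name__ == '__main__':
    nodes = {'node1': {'ip': '10.0.0.1'}, 'node2': {'ip': '10.0.0.2'}}
    MultipleProcess.process(_example_worker, nodes, 'extra-arg')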
| 2.921875 | 3 |
services/src/trader/trader_v1.py | pjw960408/binance-trader-c1 | 0 | 12794788 | <filename>services/src/trader/trader_v1.py
import os
import gc
import time
import ccxt
import requests
import urllib3
import joblib
import pandas as pd
import numpy as np
from dataclasses import dataclass
from config import CFG
from trainer.models import PredictorV1
from database.usecase import Usecase
from exchange.custom_client import CustomClient
from .utils import nan_to_zero
from logging import getLogger
from common_utils_svc import initialize_trader_logger, Position
from dataset_builder.build_dataset import DatasetBuilder
from trainer.datasets.dataset import build_X_and_BX
logger = getLogger("trader")
initialize_trader_logger()
LAST_ENTRY_AT_FILE_PATH = "/app/storage/trader/last_entry_at.pkl"
@dataclass
class TraderV1:
usecase = Usecase()
possible_in_debt = False
commission = {"entry": 0.0004, "exit": 0.0002, "spread": 0.0004}
skip_executable_order_check = True # To prevent api limitation
def __post_init__(self):
self.custom_cli = CustomClient()
self.tradable_coins = pd.Index(self.custom_cli.tradable_coins)
self._set_params()
self._set_test_params()
self._set_bins(
prediction_abs_bins=self.prediction_abs_bins,
probability_bins=self.probability_bins,
index=self.tradable_coins,
)
self._build_dataset_builder()
self._build_model()
self._load_last_entry_at()
self._initialize_order_books()
self.cached_pricing = None
if self.skip_executable_order_check is True:
assert self.order_criterion == "capital"
def _set_params(self):
# Set params which has dependency on trader logic
self.base_currency = CFG.REPORT_PARAMS["base_currency"]
self.position_side = CFG.REPORT_PARAMS["position_side"]
self.entry_ratio = CFG.REPORT_PARAMS["entry_ratio"] * CFG.LEVERAGE
logger.info(f"[O] Info: leverage is {CFG.LEVERAGE}")
self.min_holding_minutes = CFG.REPORT_PARAMS["min_holding_minutes"]
self.max_holding_minutes = CFG.REPORT_PARAMS["max_holding_minutes"]
self.compound_interest = CFG.REPORT_PARAMS["compound_interest"]
self.order_criterion = CFG.REPORT_PARAMS["order_criterion"]
self.exit_if_achieved = CFG.REPORT_PARAMS["exit_if_achieved"]
self.achieve_ratio = CFG.REPORT_PARAMS["achieve_ratio"]
self.achieved_with_commission = CFG.REPORT_PARAMS["achieved_with_commission"]
self.max_n_updated = CFG.REPORT_PARAMS["max_n_updated"]
# Currently we accept only 0
assert self.max_n_updated == 0
self.positive_entry_threshold = CFG.REPORT_PARAMS["positive_entry_threshold"]
self.negative_entry_threshold = CFG.REPORT_PARAMS["negative_entry_threshold"]
self.exit_threshold = CFG.REPORT_PARAMS["exit_threshold"]
self.positive_probability_threshold = CFG.REPORT_PARAMS[
"positive_probability_threshold"
]
self.negative_probability_threshold = CFG.REPORT_PARAMS[
"negative_probability_threshold"
]
self.adjust_prediction = CFG.REPORT_PARAMS["adjust_prediction"]
# Currently we accept False adjust_prediction
assert self.adjust_prediction is False
self.prediction_abs_bins = CFG.PREDICTION_ABS_BINS
self.probability_bins = CFG.PROBABILITY_BINS
# Set data builder params
self.dataset_builder_params = {}
self.dataset_builder_params["features_columns"] = [
(column[0].replace("-", "/"), column[1])
for column in CFG.DATASET_PARAMS["features_columns"]
]
self.dataset_builder_params["winsorize_threshold"] = CFG.DATASET_PARAMS[
"winsorize_threshold"
]
self.dataset_builder_params["base_feature_assets"] = [
base_feature_asset.replace("-", "/")
for base_feature_asset in CFG.EXP_DATA_PARAMS["base_feature_assets"]
]
self.dataset_builder_params["asset_to_id"] = {
key.replace("-", "/"): value
for key, value in CFG.EXP_PARAMS["asset_to_id"].items()
}
self.dataset_builder_params["id_to_asset"] = {
value: key.replace("-", "/")
for key, value in CFG.EXP_PARAMS["asset_to_id"].items()
}
def _set_test_params(self):
if CFG.TEST_MODE is True:
assert self.custom_cli.test_mode is True
self.entry_ratio = 0.0001
def _set_bins(self, prediction_abs_bins, probability_bins, index):
assert (prediction_abs_bins >= 0).all().all()
assert (probability_bins >= 0).all().all()
self.positive_entry_bins = None
self.negative_entry_bins = None
self.exit_bins = None
self.positive_probability_bins = None
self.negative_probability_bins = None
if isinstance(self.positive_entry_threshold, str):
if "*" in self.positive_entry_threshold:
self.positive_entry_bins = (
prediction_abs_bins.loc[
int(self.positive_entry_threshold.split("*")[0])
]
* float(self.positive_entry_threshold.split("*")[-1])
)[index]
else:
self.positive_entry_bins = prediction_abs_bins.loc[
self.positive_entry_threshold
][index]
if isinstance(self.negative_entry_threshold, str):
if "*" in self.negative_entry_threshold:
self.negative_entry_bins = -(
prediction_abs_bins.loc[
int(self.negative_entry_threshold.split("*")[0])
]
* float(self.negative_entry_threshold.split("*")[-1])
)[index]
else:
self.negative_entry_bins = -prediction_abs_bins.loc[
self.negative_entry_threshold
][index]
if isinstance(self.exit_threshold, str):
if "*" in self.exit_threshold:
self.exit_bins = (
prediction_abs_bins.loc[int(self.exit_threshold.split("*")[0])]
* float(self.exit_threshold.split("*")[-1])
)[index]
else:
self.exit_bins = prediction_abs_bins.loc[self.exit_threshold][index]
if isinstance(self.positive_probability_threshold, str):
if "*" in self.positive_probability_threshold:
self.positive_probability_bins = (
probability_bins.loc[
int(self.positive_probability_threshold.split("*")[0])
]
* float(self.positive_probability_threshold.split("*")[-1])
)[index]
else:
self.positive_probability_bins = probability_bins.loc[
self.positive_probability_threshold
][index]
if isinstance(self.negative_probability_threshold, str):
if "*" in self.negative_probability_threshold:
self.negative_probability_bins = (
probability_bins.loc[
int(self.negative_probability_threshold.split("*")[0])
]
* float(self.negative_probability_threshold.split("*")[-1])
)[index]
else:
self.negative_probability_bins = probability_bins.loc[
self.negative_probability_threshold
][index]
def _build_dataset_builder(self):
feature_scaler = joblib.load(os.path.join(CFG.EXP_DIR, "feature_scaler.pkl"))
label_scaler = joblib.load(os.path.join(CFG.EXP_DIR, "label_scaler.pkl"))
self.dataset_builder = DatasetBuilder(
tradable_coins=self.tradable_coins,
features_columns=self.dataset_builder_params["features_columns"],
feature_scaler=feature_scaler,
label_scaler=label_scaler,
)
def _build_model(self):
self.model = PredictorV1(
exp_dir=CFG.EXP_DIR,
m_config=CFG.EXP_MODEL_PARAMS,
d_config=CFG.EXP_DATA_PARAMS,
device="cpu",
mode="predict",
)
def _store_last_entry_at(self):
joblib.dump(self.last_entry_at, LAST_ENTRY_AT_FILE_PATH)
def _load_last_entry_at(self):
if os.path.exists(LAST_ENTRY_AT_FILE_PATH):
self.last_entry_at = joblib.load(LAST_ENTRY_AT_FILE_PATH)
logger.info(f"[O] Info: loaded last_entry_at")
else:
self.last_entry_at = {key: None for key in self.tradable_coins}
# Initialize
positions = self.custom_cli.get_position_objects(with_entry_at=True)
for position in positions:
if self.last_entry_at[position.asset] is not None:
self.last_entry_at[position.asset] = max(
position.entry_at, self.last_entry_at[position.asset]
)
else:
self.last_entry_at[position.asset] = position.entry_at
def _initialize_order_books(self):
positions = self.custom_cli.get_position_objects(with_entry_at=False)
for position in positions:
orders = self.custom_cli.get_open_orders(symbol=position.asset)
# When already limit order exists, we skip it.
if len(orders) >= 1:
continue
assert position.entry_price != 0.0
self.custom_cli.exit_order(
symbol=position.asset,
order_type="limit",
position=position.side,
amount=position.qty,
price=self.compute_price_to_achieve(
position=position, entry_price=position.entry_price
),
)
logger.info(f"[O] Info: initialized order books")
def _build_features(self, pricing):
features, class_features = self.dataset_builder.build_features(rawdata=pricing)
features = self.dataset_builder.preprocess_features(
features=features,
winsorize_threshold=self.dataset_builder_params["winsorize_threshold"],
)
return pd.concat([features, class_features], axis=1)[
self.dataset_builder_params["features_columns"]
].sort_index()
def _build_inputs(self, features):
features, base_features = build_X_and_BX(
features=features.astype("float32"),
base_feature_assets=self.dataset_builder_params["base_feature_assets"],
)
inputs = []
for target_coin in self.tradable_coins:
to_input = pd.concat([base_features, features[target_coin]], axis=1)
to_input = np.swapaxes(to_input.values, 0, 1)
inputs.append(to_input)
inputs = np.stack(inputs, axis=0)
ids = [
self.dataset_builder_params["asset_to_id"][target_coin]
for target_coin in self.tradable_coins
]
return inputs, ids
def build_prediction_dict(self, last_sync_on):
query_start_on = last_sync_on - pd.Timedelta(
minutes=(1320 + CFG.EXP_MODEL_PARAMS["lookback_window"] - 1)
)
query_end_on = last_sync_on
if self.cached_pricing is None:
pricing = self.usecase.get_pricing(
start_on=query_start_on, end_on=query_end_on
)
else:
# Get extra 1 candle, cause it has potential to be changed.
pricing = self.usecase.get_pricing(
start_on=self.cached_pricing.index.levels[0][-1], end_on=query_end_on
)
pricing = pd.concat(
[
self.cached_pricing[
query_start_on : self.cached_pricing.index.levels[0][-2]
],
pricing,
]
).sort_index()
self.cached_pricing = pricing
pricing = pricing.unstack().swaplevel(0, 1, axis=1)
features = self._build_features(pricing=pricing)
inputs, ids = self._build_inputs(features=features)
pred_dict = self.model.predict(
X=inputs, id=ids, id_to_asset=self.dataset_builder_params["id_to_asset"]
)
return pred_dict
def build_positive_and_negative_assets(self, pred_dict):
# Set assets which has signals
positive_assets = self.tradable_coins[
(pred_dict["predictions"] >= self.positive_entry_bins)
& (pred_dict["probabilities"] >= self.positive_probability_bins)
]
negative_assets = self.tradable_coins[
(pred_dict["predictions"] <= self.negative_entry_bins)
& (pred_dict["probabilities"] >= self.negative_probability_bins)
]
return positive_assets, negative_assets
def is_executable(self, last_sync_on: pd.Timestamp, now: pd.Timestamp):
if last_sync_on is None:
return False
sync_min_delta = int((now - last_sync_on).total_seconds() // 60)
if sync_min_delta == 1:
last_trade_on = self.usecase.get_last_trade_on()
if last_trade_on is None:
return True
else:
if int((now - last_trade_on).total_seconds() // 60) >= 1:
return True
return False
def exit_order(self, position):
self.custom_cli.cancel_orders(symbol=position.asset)
ordered = self.custom_cli.exit_order(
symbol=position.asset,
order_type="market",
position=position.side,
amount=position.qty,
)
if ordered is None:
assert CFG.TEST_MODE is True
return
def handle_exit(self, positions, positive_assets, negative_assets, now):
for position_idx, position in enumerate(positions):
# Keep position if matched
if (position.side == "long") and (position.asset in positive_assets):
continue
if (position.side == "short") and (position.asset in negative_assets):
continue
position_entry_at = self.last_entry_at[position.asset]
passed_minutes = (now - position_entry_at).total_seconds() // 60
# Handle min_holding_minutes
if passed_minutes <= self.min_holding_minutes:
continue
# Handle max_holding_minutes
if passed_minutes >= self.max_holding_minutes:
self.exit_order(position=position)
positions[position_idx].is_exited = True
logger.info(f"[-] Exit: {str(position)}, max_holding")
continue
# Handle exit signal
if (position.side == "long") and (position.asset in negative_assets):
self.exit_order(position=position)
positions[position_idx].is_exited = True
logger.info(f"[-] Exit: {str(position)}, opposite")
continue
if (position.side == "short") and (position.asset in positive_assets):
self.exit_order(position=position)
positions[position_idx].is_exited = True
logger.info(f"[-] Exit: {str(position)}, opposite")
continue
# Delete exited positions
positions = [
position for position in positions if position.is_exited is not True
]
return positions
def check_if_opposite_position_exists(self, positions, order_asset, order_side):
if order_side == "long":
opposite_side = "short"
if order_side == "short":
opposite_side = "long"
for exist_position in positions:
if (exist_position.asset == order_asset) and (
exist_position.side == opposite_side
):
return True
return False
def compute_cost_to_order(self, position):
cache_to_order = position.entry_price * position.qty
commission_to_order = cache_to_order * (
self.commission["entry"] + self.commission["spread"]
)
return cache_to_order + commission_to_order
def check_if_already_have(self, positions, position):
for exist_position in positions:
if (exist_position.asset == position.asset) and (
exist_position.side == position.side
):
return True
return False
def check_if_executable_order(self, position):
if self.skip_executable_order_check is True:
is_enough_ammount = bool(
position.qty >= self.custom_cli.ammount_constraints[position.asset]
)
return is_enough_ammount
cache = self.custom_cli.get_cache_dict()["free"]
cost = self.compute_cost_to_order(position=position)
is_enough_cache = bool((cache - cost) >= 0)
is_enough_ammount = bool(
position.qty >= self.custom_cli.ammount_constraints[position.asset]
)
return is_enough_cache & is_enough_ammount
def compute_price_to_achieve(self, position, entry_price, predictions=None):
if predictions is not None:
prediction = predictions[position.asset]
else:
if position.side == "long":
prediction = self.positive_entry_bins[position.asset]
if position.side == "short":
prediction = self.negative_entry_bins[position.asset]
commission = self.commission
if self.achieved_with_commission is not True:
commission["entry"] = 0
commission["exit"] = 0
commission["spread"] = 0
if position.side == "long":
assert prediction >= 0
price_to_achieve = (
entry_price
* (
(prediction * self.achieve_ratio)
+ 1
+ (commission["entry"] + commission["spread"])
)
/ (1 - (commission["exit"] + commission["spread"]))
)
if position.side == "short":
assert prediction <= 0
price_to_achieve = (
entry_price
* (
(prediction * self.achieve_ratio)
+ 1
- (commission["entry"] + commission["spread"])
)
/ (1 + (commission["exit"] + commission["spread"]))
)
return price_to_achieve
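    # Illustrative numbers (achieve_ratio = 1, achieved_with_commission = True,
    # commissions as defined above): a long entry at 100.0 with prediction 0.01
    # gives 100 * (0.01 + 1 + 0.0008) / (1 - 0.0006) ≈ 101.14 as the limit exit price.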
def entry_order(self, positions, asset, side, cache_to_order, pricing, now):
if cache_to_order == 0:
return
# if opposite position exists, we dont entry
if (
self.check_if_opposite_position_exists(
positions=positions, order_asset=asset, order_side=side
)
is True
):
return
entry_price = pricing[asset]
qty = cache_to_order / entry_price
position = Position(
asset=asset, side=side, qty=qty, entry_price=entry_price, entry_at=now
)
# Currently update_position_if_already_have is not supported.
already_have = self.check_if_already_have(
positions=positions, position=position
)
if already_have is True:
self.last_entry_at[position.asset] = now
return
executable_order = self.check_if_executable_order(position=position)
if executable_order is True:
ordered = self.custom_cli.entry_order(
symbol=position.asset,
order_type="market",
position=position.side,
amount=position.qty,
)
if ordered is None:
return
self.last_entry_at[position.asset] = now
if self.exit_if_achieved is True:
self.assets_to_limit_order.append(position.asset)
logger.info(f"[+] Entry: {str(position)}")
def handle_entry(
self,
positions,
cache_to_order,
positive_assets,
negative_assets,
pricing,
predictions,
now,
):
# Set init to handle limit order
self.assets_to_limit_order = []
# Entry order
if self.position_side in ("long", "longshort"):
for order_asset in positive_assets:
self.entry_order(
positions=positions,
asset=order_asset,
side="long",
cache_to_order=cache_to_order,
pricing=pricing,
now=now,
)
if self.position_side in ("short", "longshort"):
for order_asset in negative_assets:
self.entry_order(
positions=positions,
asset=order_asset,
side="short",
cache_to_order=cache_to_order,
pricing=pricing,
now=now,
)
# Limit order
if len(self.assets_to_limit_order) > 0:
positions = self.custom_cli.get_position_objects(with_entry_at=False)
for position in positions:
if position.asset not in self.assets_to_limit_order:
continue
assert position.entry_price != 0.0
self.custom_cli.exit_order(
symbol=position.asset,
order_type="limit",
position=position.side,
amount=position.qty,
price=self.compute_price_to_achieve(
position=position,
entry_price=position.entry_price,
predictions=predictions,
),
)
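    # Added summary (not in the original source): handle_entry above first places
    # market entry orders for the signalled assets, then, when exit_if_achieved is
    # enabled, immediately posts limit exit orders at the price returned by
    # compute_price_to_achieve for every asset that was just entered.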
def run(self):
logger.info(f"[O] Start: demon of trader")
n_traded = 0
while True:
# Handle relogin
if n_traded == 60:
self.custom_cli = CustomClient()
n_traded = 0
# Main
try:
# Use timestamp without second info
now = pd.Timestamp.utcnow().floor("T")
last_sync_on = self.usecase.get_last_sync_on()
if self.is_executable(last_sync_on=last_sync_on, now=now) is True:
pred_dict = self.build_prediction_dict(last_sync_on=last_sync_on)
(
positive_assets,
negative_assets,
) = self.build_positive_and_negative_assets(pred_dict=pred_dict)
# Handle exit
positions = self.custom_cli.get_position_objects(
with_entry_at=False
)
positions = self.handle_exit(
positions=positions,
positive_assets=positive_assets,
negative_assets=negative_assets,
now=now,
)
long_positions = [
position for position in positions if position.side == "long"
]
short_positions = [
position for position in positions if position.side == "short"
]
# Compute how much use cache to order
cache_dict = self.custom_cli.get_cache_dict()
capital = cache_dict["total"]
cache = cache_dict["free"]
logger.info(
f"[_] Capital: {capital:.2f}$ | Holds: long({len(long_positions)}), short({len(short_positions)}) | Signals: pos({len(positive_assets)}), neg({len(negative_assets)})"
)
if self.compound_interest is False:
cache_to_order = self.entry_ratio
else:
if self.order_criterion == "cache":
if cache > 0:
cache_to_order = nan_to_zero(
value=(cache * self.entry_ratio)
)
else:
cache_to_order = 0
elif self.order_criterion == "capital":
# Entry with capital base
cache_to_order = nan_to_zero(
value=(capital * self.entry_ratio)
)
# Handle entry
pricing = self.custom_cli.get_last_pricing()
self.handle_entry(
positions=positions,
cache_to_order=cache_to_order,
positive_assets=positive_assets,
negative_assets=negative_assets,
pricing=pricing,
predictions=pred_dict["predictions"],
now=now,
)
# Record traded
self.usecase.insert_trade({"timestamp": now})
self._store_last_entry_at()
n_traded += 1
else:
time.sleep(0.1)
            except Exception:
                logger.error("[!] Error: ", exc_info=True)
                raise
if __name__ == "__main__":
import fire
fire.Fire(TraderV1)
| 2.046875 | 2 |
lib-other/pylib/consensus/test/test_consensus.py | endolith/Truthcoin | 161 | 12794789 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for Truthcoin's consensus functions.
Verifies that the consensus algorithm works as expected.
Check test_answers.txt for expected results.
"""
from __future__ import division, unicode_literals, absolute_import
import os
import sys
import platform
import json
import numpy as np
import numpy.ma as ma
if platform.python_version() < "2.7":
unittest = __import__("unittest2")
else:
import unittest
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir))
import consensus
def prp(o):
    print(json.dumps(o, indent=3, sort_keys=True))
class TestConsensus(unittest.TestCase):
def setUp(self):
self.votes_unmasked = np.array([
[1, 1, 0, 0],
[1, 0, 0, 0],
[1, 1, 0, 0],
[1, 1, 1, 0],
[0, 0, 1, 1],
[0, 0, 1, 1],
])
self.votes = ma.masked_array(self.votes_unmasked, np.isnan(self.votes_unmasked))
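        # Added note: the mask flags NaN entries, i.e. missing ballots; in this
        # fixture every vote is present, so nothing is actually masked out.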
def test_Factory(self):
outcome = consensus.Factory(self.votes)
self.assertAlmostEquals(outcome["Certainty"], 0.228237569613, places=11)
def test_Factory_scaled(self):
scalar_decision_params = [
{"scaled": True, "min": 0.1, "max": 0.5},
{"scaled": True, "min": 0.2, "max": 0.7},
{"scaled": False, "min": 0, "max": 1},
{"scaled": False, "min": 0, "max": 1},
]
outcome = consensus.Factory(self.votes, Scales=scalar_decision_params)
self.assertAlmostEquals(outcome["Certainty"], 0.618113325804, places=11)
def tearDown(self):
del self.votes_unmasked
del self.votes
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestConsensus)
unittest.TextTestRunner(verbosity=2).run(suite)
| 2.65625 | 3 |
happ/tests/api/admin/test_interests.py | Mafioso/happ-backend | 1 | 12794790 | from rest_framework import status
from rest_framework.test import APISimpleTestCase
from rest_framework_jwt.settings import api_settings
from happ.models import User, Interest, LogEntry
from happ.factories import (
UserFactory,
InterestFactory,
CityFactory,
)
from happ.tests import *
class Tests(APISimpleTestCase):
def test_get_without_auth(self):
"""
        Resource is not available without authentication
"""
url = prepare_url('admin-interests-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_get_with_auth_not_staff(self):
"""
        Resource is not available for non-staff users
"""
u = UserFactory()
u.set_password('<PASSWORD>')
u.save()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
url = prepare_url('admin-interests-list')
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_get_with_auth(self):
"""
        Resource is available with authentication only and for staff
"""
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
url = prepare_url('admin-interests-list')
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_search_interests(self):
"""
We can search interests
"""
Interest.objects.delete()
for i in range(3):
interest = InterestFactory(title='Hockey')
interest.save()
interest = InterestFactory(title='Beer')
interest.save()
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
url = prepare_url('admin-interests-list', query={'search': 'hoc'})
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 3)
def test_create_interest(self):
"""
we can create interest
"""
n = Interest.objects.count()
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
log_n = LogEntry.objects.count()
url = prepare_url('admin-interests-list')
interest_data = {
'title': 'NewInterest name',
'parent_id': None,
'is_global': True,
'local_cities': [],
}
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
# restricted for moderator
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.post(url, data=interest_data, format='json')
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
# ok for administrator
u.role = User.ADMINISTRATOR
u.save()
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.post(url, data=interest_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Interest.objects.count(), n+1)
self.assertEqual(response.data['title'], 'NewInterest name')
self.assertEqual(LogEntry.objects.count(), log_n+1)
# ok for root
u.role = User.ROOT
u.save()
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.post(url, data=interest_data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Interest.objects.count(), n+2)
self.assertEqual(response.data['title'], 'NewInterest name')
self.assertEqual(LogEntry.objects.count(), log_n+2)
def test_update_interest(self):
"""
we can update interest
"""
cities = map(lambda x: str(CityFactory().id), range(3))
interest = InterestFactory()
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
log_n = LogEntry.objects.count()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
url = prepare_url('admin-interests-detail', kwargs={'id': str(interest.id)})
data = {
'title': 'NewInterest name',
'parent_id': None,
'is_global': False,
'local_cities': cities,
}
n = Interest.objects.count()
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.patch(url, data=data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(Interest.objects.count(), n)
self.assertEqual(response.data['title'], 'NewInterest name')
self.assertEqual(LogEntry.objects.count(), log_n+1)
def test_delete_interest(self):
"""
we can delete interest
"""
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
log_n = LogEntry.objects.count()
i = InterestFactory()
i.save()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
url = prepare_url('admin-interests-detail', kwargs={'id': str(i.id)})
n = Interest.objects.count()
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.delete(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(Interest.objects.count(), n-1)
self.assertEqual(LogEntry.objects.count(), log_n+1)
def test_get_categories(self):
"""
Ensure that we can get only categories with api
"""
Interest.objects.delete()
for i in range(3):
interest = InterestFactory(parent=None)
inter = InterestFactory(parent=interest)
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
url = prepare_url('admin-interests-categories')
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data['results']), 3)
for data in response.data['results']:
if data['id'] == str(interest.id):
self.assertEqual(len(data['children']), 1)
else:
self.assertEqual(len(data['children']), 0)
def test_get_children(self):
"""
Ensure that we can get only children with api
"""
Interest.objects.delete()
for i in range(3):
interest = InterestFactory(parent=None)
interest = InterestFactory(parent=interest)
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
url = prepare_url('admin-interests-children')
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
self.assertNotEqual(response.data['results'][0]['parent'], None)
def test_activate(self):
"""
we can activate interest through API
"""
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
log_n = LogEntry.objects.count()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
i = InterestFactory(is_active=False)
url = prepare_url('admin-interests-activate', kwargs={'id': str(i.id)})
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.post(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
i = Interest.objects.get(id=i.id)
self.assertTrue(i.is_active)
self.assertEqual(LogEntry.objects.count(), log_n+1)
def test_deactivate(self):
"""
we can deactivate interest through API
"""
u = UserFactory(role=User.MODERATOR)
u.set_password('<PASSWORD>')
u.save()
log_n = LogEntry.objects.count()
auth_url = prepare_url('login')
data = {
'username': u.username,
'password': '<PASSWORD>'
}
response = self.client.post(auth_url, data=data, format='json')
token = response.data['token']
i = InterestFactory(is_active=True)
url = prepare_url('admin-interests-deactivate', kwargs={'id': str(i.id)})
self.client.credentials(HTTP_AUTHORIZATION='{} {}'.format(api_settings.JWT_AUTH_HEADER_PREFIX, token))
response = self.client.post(url, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
i = Interest.objects.get(id=i.id)
self.assertFalse(i.is_active)
self.assertEqual(LogEntry.objects.count(), log_n+1)
| 2.515625 | 3 |
Basico_03.py | Emgicraft/PythonGUI_PyQt5 | 0 | 12794791 | import sys
from PyQt5 import QtGui
import PyQt5.QtWidgets as qw
class Mensaje(qw.QWidget):
def __init__(self, parent=None):
qw.QWidget.__init__(self, parent)
self.setGeometry(700, 300, 640, 640)
self.setWindowTitle("Basico 03")
self.setWindowIcon(QtGui.QIcon("Recursos/Icon-Python_PyQt5.png"))
self.setToolTip("Esto es un <b><i>Widget</i></b> hecho con PyQt.") # Mensaje tooltip, puede usar RTF
qw.QToolTip.setFont(QtGui.QFont("OldEnglish", 11)) # Fuente y tamaño de fuente
apli = qw.QApplication(sys.argv)
tip = Mensaje()
tip.show()
apli.exec_() # It can also be written this way. | 2.984375 | 3 |
Paths/Re-interpolate.py | harbortype/glyphs-scripts | 23 | 12794792 | <filename>Paths/Re-interpolate.py<gh_stars>10-100
#MenuTitle: Re-interpolate
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Re-interpolates selected layers. Makes it possible to assign a keyboard shortcut to this command via Preferences > Shortcuts (in Glyphs 3) or System Preferences > Keyboard > Shortcuts > App Shortcuts (in Glyphs 2).
"""
thisFont = Glyphs.font
for thisLayer in thisFont.selectedLayers:
thisLayer.reinterpolate() | 1.867188 | 2 |
mltools/libos.py | thesfinox/mltools | 0 | 12794793 | import os
import psutil
class InfoOS:
'''
    This class retrieves information about the current OS and the host hardware.
Attributes:
os: the current OS,
kernel: the current release,
arch: the current architecture,
threads: the number of available CPU threads,
freq: the current CPU frequency,
freqm: the maximum CPU frequency,
vmtot: the total virtual memory (in MB),
vmav: the available virtual memory (in MB).
'''
def __init__(self):
'''
Constructor of the class.
'''
self.os = os.uname().sysname
self.kernel = os.uname().release
self.arch = os.uname().machine
self.threads = psutil.cpu_count()
self.freq = psutil.cpu_freq().current
self.freqm = psutil.cpu_freq().max
self.vmtot = int(psutil.virtual_memory().total / 1024 / 1024)
self.vmav = int(psutil.virtual_memory().available / 1024 / 1024)
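
# Example usage (a minimal sketch added for illustration; it is not part of the
# original module and only relies on the attributes defined above):
if __name__ == "__main__":
    info = InfoOS()
    print("{} {} ({})".format(info.os, info.kernel, info.arch))
    print("CPU: {} threads @ {:.0f}/{:.0f} MHz".format(info.threads, info.freq, info.freqm))
    print("RAM: {}/{} MB available".format(info.vmav, info.vmtot))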
| 3.359375 | 3 |
four_table.py | ChipJust/diodes | 0 | 12794794 | <filename>four_table.py
#! python3
r"""
THD = sqrt ( Sum(2, n)(Mag^2[n]) ) / Mag[1]
"""
import argparse
import re
import collections
import math
import itertools
import os
parser = argparse.ArgumentParser(description='Create fourier tables out of a collection of fourier files')
#parser.add_argument('models', nargs='+', help='The name(s) of the diode model(s) to make a test circuit for.')
class Harmonic():
def __init__(self, m):
self.Harmonic = int(m.group('Harmonic'))
self.Frequency = int(m.group('Frequency'))
self.Magnitude = float(m.group('Magnitude'))
self.Phase = float(m.group('Phase'))
self.NormMag = float(m.group('NormMag'))
self.NormPhase = float(m.group('NormPhase'))
def __str__(self):
return "{Harmonic}, {Frequency}, {Magnitude}, {Phase}, {NormMag}, {NormPhase}".format(**self.__dict__)
class FourierAnalysis():
fp = "[-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?"
re_fourier_analysis = re.compile(r"Fourier analysis for.*No. Harmonics:\s*(?P<n>\d+),\s*THD:\s*(?P<thd>{fp})\s*%".format(fp=fp), re.DOTALL)
re_harmonic = re.compile(r"^\s*(?P<Harmonic>\d+)\s+(?P<Frequency>\d+)\s+(?P<Magnitude>{fp})\s+(?P<Phase>{fp})\s+(?P<NormMag>{fp})\s+(?P<NormPhase>{fp})\s+$".format(fp=fp), re.MULTILINE)
def __init__(self, filename):
self.filename = filename
with open(filename, "r") as f:
buffer = f.read()
m = self.re_fourier_analysis.search(buffer)
if not m:
raise ValueError("{filename} does not look like a fourier analysis file. File contents follow...\n{contents}".format(filename=filename, contents=buffer))
self.n = int(m.group('n'))
self.thd = float(m.group('thd'))
self.harmonics = collections.OrderedDict()
for m in self.re_harmonic.finditer(buffer):
self.harmonics[int(m.group('Harmonic'))] = Harmonic(m)
self.distortion = {}
last = 0
for i in range(2, self.n):
try:
thd = self.harmonic_distortion(i)
except KeyError as e:
print (self.filename)
print (e)
continue
self.distortion[i] = thd - last
last = thd
def __repr__(self):
return "FourierAnalysis({filename}): n={n} thd={thd}".format(**self.__dict__)
def harmonic_distortion(self, k=0):
if k == 0:
k = self.n
elif k >= self.n:
k = self.n
else:
k = k + 1 # because the arrary is zero based
SumMag2 = 0
for i in range(2, k):
SumMag2 += math.pow(self.harmonics[i].Magnitude, 2)
return 100 * math.sqrt(SumMag2) / self.harmonics[1].Magnitude
def show_thds(filename):
fa = FourierAnalysis(filename)
print (fa)
SumNormMag = 0
for i in range(fa.n):
SumNormMag += fa.harmonics[i].NormMag
print ("SumNormMag = {SumNormMag}".format(SumNormMag=SumNormMag))
last = 0
for i in range(2, fa.n):
thd = fa.harmonic_distortion(i)
print ("{0:<3} {1:0<12.10f} {2:0<12.10f} {3:0<12.10f}".format(i, fa.harmonics[i].Magnitude, thd, thd - last))
last = thd
def flatten(positive, negative, four_folder, out_file):
fourier_file = r"{four_folder}\{positive}__{negative}.four".format(
four_folder = four_folder,
positive = positive,
negative = negative)
if not os.path.exists(fourier_file):
return
fa = FourierAnalysis (fourier_file)
h = []
last = 0
for i in range(2, fa.n):
try:
thd = fa.harmonic_distortion(i)
except KeyError as e:
print (fourier_file)
print (e)
return
h.append("{0:.4f}".format(thd - last))
last = thd
out_file.write("{positive},{negative},{thd},{harmonics}\n".format(
positive = positive,
negative = negative,
thd = fa.thd,
harmonics = ",".join(h)
))
return
def is_close(fa, m, n, ratio=2):
"""
Is the percent harmonic distortion at m close to the percent harmonic distortion
at harmonic n?
Close is defined as (larger / smaller) < ratio
"""
if m not in fa.distortion or n not in fa.distortion:
return False
if fa.distortion[m] == fa.distortion[n]:
return True
if fa.distortion[m] == 0 or fa.distortion[n] == 0:
return False
biggest = fa.distortion[m] / fa.distortion[n] if fa.distortion[m] > fa.distortion[n] else fa.distortion[n] / fa.distortion[m]
return biggest < ratio
def main():
#show_thds("testfile.four")
four_folder = r"E:\eda\fourier" # where all the .four files are
diode_list_file = r"E:\eda\diodes\diode-list.txt"
#diode_list_file = r"E:\eda\diodes\diode-list-test.txt"
out_file = r"four_table.txt"
with open(diode_list_file) as f:
diode_list = f.read().splitlines()
combo_list = [(i, i) for i in diode_list]
combo_list.extend(itertools.combinations(diode_list, 2))
fa = {}
for pd, nd in combo_list:
fourier_file = r"{four_folder}\{positive}__{negative}.four".format(
four_folder = four_folder,
positive = pd,
negative = nd)
if not os.path.exists(fourier_file):
continue
fa[(pd, nd)] = FourierAnalysis (fourier_file)
close_2_3 = {k : v for k, v in fa.items() if is_close(v, 2, 3)}
with open(out_file, "w") as four_table:
four_table.write("Positive,Negative,THD,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20\n")
for (dp, dn), v in close_2_3.items():
flatten(dp, dn, four_folder, four_table)
if __name__ == '__main__':
main()
| 2.890625 | 3 |
src/presence_analyzer/tests.py | stxnext-kindergarten/presence-analyzer-asierhej | 0 | 12794795 | <reponame>stxnext-kindergarten/presence-analyzer-asierhej
# -*- coding: utf-8 -*-
"""
Presence analyzer unit tests.
"""
from __future__ import unicode_literals
import os.path
import json
import datetime
import time
import unittest
from collections import OrderedDict
import main # pylint: disable=relative-import
import utils # pylint: disable=relative-import
import views # pylint: disable=unused-import, relative-import
from .utils import memoize
TEST_DATA_CSV = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'test_data.csv'
)
TEST_XML_DATA = os.path.join(
os.path.dirname(__file__), '..', '..', 'runtime', 'data', 'export_test.xml'
)
# pylint: disable=maybe-no-member, too-many-public-methods
class PresenceAnalyzerViewsTestCase(unittest.TestCase):
"""
Views tests.
"""
def setUp(self):
"""
Before each test, set up a environment.
"""
main.app.config.update(
{
'XML_DATA': TEST_XML_DATA,
'DATA_CSV': TEST_DATA_CSV
}
)
self.client = main.app.test_client()
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_mainpage(self):
"""
        Test that the main page renders its template.
"""
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
def test_api_users(self):
"""
Test users listing.
"""
resp = self.client.get('/api/v1/users')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data[0],
{
'user_id': 36,
'name': '<NAME>.',
'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36'
}
)
def test_presence_weekday_view(self):
"""
Test mean presence time of given user grouped by weekday.
"""
resp = self.client.get('/api/v1/presence_weekday/11')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['Weekday', 'Presence (s)'],
['Mon', 24123],
['Tue', 41885],
['Wed', 41885],
['Thu', 45968],
['Fri', 30549],
['Sat', 6426],
['Sun', 22969]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_mean_time_weekday_view(self):
"""
Test of mean presence time grouped by weekday of given user.
"""
resp = self.client.get('/api/v1/mean_time_weekday/11')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['Mon', 24123.0],
['Tue', 20942.5],
['Wed', 20942.5],
['Thu', 22984.0],
['Fri', 15274.5],
['Sat', 6426.0],
['Sun', 22969.0]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_presence_start_end(self):
"""
        Test the mean arrival time at the office and the mean leaving time.
"""
resp = self.client.get('/api/v1/presence_start_end/10')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['Mon', 0, 0],
['Tue', 34745.0, 64792.0],
['Wed', 33592.0, 58057.0],
['Thu', 38926.0, 62631.0],
['Fri', 0, 0],
['Sat', 0, 0],
['Sun', 0, 0]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_podium(self):
"""
Test five best months of work time.
"""
resp = self.client.get('/api/v1/podium/11')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['April', 1],
['July', 4],
['May', 6],
['August', 6],
['June', 7],
['September', 32]
]
)
resp = self.client.get('/api/v1/podium/9999')
data = json.loads(resp.data)
self.assertEqual(data, 'no data')
def test_five_top(self):
"""
Test top 5 workers per months in year.
"""
resp = self.client.get('/api/v1/five_top/9,2013')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/json')
data = json.loads(resp.data)
self.assertEqual(
data,
[
{
'hours': 32,
'user_id': 11,
'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/11'
},
{
'hours': 21,
'user_id': 10,
'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/10'
}
]
)
class PresenceAnalyzerUtilsTestCase(unittest.TestCase):
"""
Utility functions tests.
"""
def setUp(self):
"""
Before each test, set up a environment.
"""
main.app.config.update(
{
'XML_DATA': TEST_XML_DATA,
'DATA_CSV': TEST_DATA_CSV
}
)
def tearDown(self):
"""
Get rid of unused objects after each test.
"""
pass
def test_get_data(self):
"""
Test parsing of CSV file.
"""
data = utils.get_data()
self.assertIsInstance(data, dict)
self.assertItemsEqual(data.keys(), [10, 11, 68, 49, 176, 141, 26, 62])
sample_date = datetime.date(2013, 9, 10)
self.assertIn(sample_date, data[10])
self.assertItemsEqual(data[10][sample_date].keys(), ['start', 'end'])
self.assertEqual(
data[10][sample_date]['start'],
datetime.time(9, 39, 5)
)
def test_seconds_since_midnight(self):
"""
Test calculation of secounds since midnight.
"""
data = utils.seconds_since_midnight(datetime.time(2, 42, 23))
self.assertEqual(data, 9743)
data = utils.seconds_since_midnight(datetime.time(00, 00, 00))
self.assertEqual(data, 0)
def test_interval(self):
"""
        Test calculation of seconds between two time objects.
"""
start_example = datetime.time(13, 59, 59)
end_example = datetime.time(23, 59, 59)
data = utils.interval(start_example, end_example)
self.assertEqual(36000, data)
data = utils.interval(end_example, start_example)
self.assertEqual(-36000, data)
def test_mean(self):
"""
        Test of mean() and that an empty list returns 0.
"""
data = utils.mean([100, 100, 100])
self.assertEqual(100, data)
data = utils.mean([0.5, 0.2, 0.3, 234])
self.assertEqual(58.75, data)
data = utils.mean([])
self.assertEqual(0, data)
def test_day_start_end(self):
"""
Test start and end work times sorted by weekday.
"""
user = utils.get_data()
data = utils.day_start_end(user[10])
self.assertEqual(
data,
[
['Mon', 0, 0],
['Tue', 34745.0, 64792.0],
['Wed', 33592.0, 58057.0],
['Thu', 38926.0, 62631.0],
['Fri', 0, 0],
['Sat', 0, 0],
['Sun', 0, 0]
])
def test_xml_translator(self):
"""
        Test extraction of user data from the XML file.
"""
data = utils.xml_translator()
self.assertIsInstance(data, dict)
self.assertItemsEqual(data.keys()[:3], [36, 165, 170])
self.assertEqual(
data.values()[0],
{
'name': '<NAME>.',
'avatar': 'https://intranet.stxnext.pl:443/api/images/users/36'
}
)
def test_cache(self):
"""
Test data caching.
"""
@memoize(age_cache=20)
def short_calculation():
data = 2 + 2
data = time.time()
time.sleep(1)
return data
self.assertEqual(short_calculation(), short_calculation())
@memoize(age_cache=1)
def other_calculation():
data = 2 + 3
data = time.time()
time.sleep(2)
return data
self.assertNotEqual(other_calculation(), other_calculation())
def test_podium_result_structure_builder(self):
"""
Test building result for podium template.
"""
months = [
[], [], [], [], [], [], [276890],
[655139], [500730], [233576], [], [], []
]
data = utils.podium_result_structure_builder(months)
self.assertEqual(
data,
[
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['June', 76],
['July', 181],
['August', 139],
['September', 64],
['no data', 0],
['no data', 0],
['no data', 0]
]
)
def test_podium_data_maker(self):
"""
        Test grouping presence entries as podium data.
"""
data = utils.podium_data_maker(utils.get_data()[11])
self.assertEqual(
data,
[
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['no data', 0],
['April', 1],
['July', 4],
['May', 6],
['August', 6],
['June', 7],
['September', 32]
]
)
def test_group_by_month(self):
"""
Test grouping presence entries by month.
"""
data = utils.group_by_month(utils.get_data(), 2013)
self.assertEqual(
data,
[
{68: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{
10: [
[], [], [], [], [], [], [], [], [], [78217], [], [], []
]
},
{
11: [
[], [], [], [], [6426], [22969], [25321],
[16564], [24123], [118402], [], [], []
]
},
{141: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{176: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{49: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{26: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{62: [[], [], [], [], [], [], [], [], [], [], [], [], []]}
]
)
data = utils.group_by_month(utils.get_data(), 2011)
self.assertEqual(
data,
[
{68: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{10: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{11: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{141: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{176: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{49: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{26: [[], [], [], [], [], [], [], [], [], [], [], [], []]},
{62: [[], [], [], [], [], [], [], [], [], [], [], [], []]}
]
)
def test_five_top_workers(self):
"""
Test top 5 presence users with information about them.
"""
data = utils.five_top_workers(9, 1997)
self.assertEqual(data, [])
data = utils.five_top_workers(9, 2013)
self.assertEqual(
data,
[
{
'hours': 32, 'user_id': 11, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/11'
},
{
'hours': 21, 'user_id': 10, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/10'
}
]
)
data = utils.five_top_workers(9, 2015)
self.assertEqual(
data,
[
{
'hours': 15, 'user_id': 62, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/62'
},
{
'hours': 12, 'user_id': 141, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/141'
},
{
'hours': 11, 'user_id': 176, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/176'
},
{
'hours': 11, 'user_id': 49, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/49'
},
{
'hours': 8, 'user_id': 68, 'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/68'
}
]
)
def test_five_top_user_data(self):
"""
Test top 5 user data.
"""
dict_months = [
(10, [455386]), (11, [263049]), (12, [371559]),
(13, [394007]), (15, [432795]), (16, [513180]),
(176, [606888]), (19, [434499]), (165, [555037]),
(170, [576346]), (23, [514312]), (24, [235634]),
(141, [612478]), (26, [508050]), (26, [560624]),
(29, [385973]), (30, []), (31, []), (33, [306667]),
(36, [546225]), (48, []), (49, []), (54, []), (58, []),
]
sorted_dict = OrderedDict(
[
(141, [612478]), (176, [606888]), (170, [576346]),
(26, [560624]), (165, [555037]), (36, [546225]),
(23, [514312]), (16, [513180]), (26, [508050]),
(10, [455386]), (19, [434499]), (15, [432795]),
(13, [394007]), (29, [385973]), (12, [371559]),
(33, [306667]), (11, [263049]), (24, [235634]),
(101, [])
]
)
data = utils.five_top_user_data(dict_months, sorted_dict)
self.assertEqual(
data[0],
{
'hours': 170,
'user_id': 141,
'name': '<NAME>.',
'avatar':
'https://intranet.stxnext.pl:443/api/images/users/141'
}
)
sorted_dict = OrderedDict([(141, [612478])])
data = utils.five_top_user_data(dict_months, sorted_dict)
self.assertEqual(data, [])
def test_sorted_months_dict(self):
"""
Test sorting of months dict.
"""
dict_months = [
(10, [455386]), (11, [263049]), (12, [371559]),
(13, [394007]), (15, [432795]), (16, [513180]),
(176, [606888]), (19, [434499]), (165, [555037]),
(170, [576346]), (23, [514312]), (24, [235634]),
(141, [612478]), (26, [508050]), (26, [560624]),
(29, [385973]), (30, []), (31, []), (33, [306667]),
(36, [546225]), (48, []), (49, []), (54, []), (58, [])
]
data = utils.sorted_months_dict(dict_months)
self.assertEqual(
data,
OrderedDict(
[
(141, [612478]), (176, [606888]), (170, [576346]),
(26, [508050]), (165, [555037]), (36, [546225]),
(23, [514312]), (16, [513180]), (10, [455386]),
(19, [434499]), (15, [432795]), (13, [394007]),
(29, [385973]), (12, [371559]), (33, [306667]),
(11, [263049]), (24, [235634]), (30, []), (31, []),
(48, []), (49, []), (54, []), (58, [])
]
)
)
def test_months_sum_dict(self):
"""
        Test appending and summing time for every month.
"""
items = {
178:
{
datetime.date(2013, 9, 9):
{
'end': datetime.time(17, 14, 42),
'start': datetime.time(11, 43, 50)
}
},
179:
{
datetime.date(2013, 9, 12):
{
'end': datetime.time(18, 5, 24),
'start': datetime.time(16, 55, 24)
}
}
}
item = datetime.date(2013, 9, 9)
months = [[] for month in xrange(13)]
data = utils.months_sum_dict(2013, items, item, 178, months)
self.assertEqual(
data,
[
[], [], [], [], [], [], [], [], [], [19852], [], [], []
]
)
def test_user_validate(self):
"""
        Test checking if a user exists.
"""
months_sum = [
[], [], [], [], [], [], [550395], [632015],
[505118], [499105], [486939], [624356], [455386]
]
data = utils.user_validate(months_sum, 34654)
self.assertEqual(data, [])
data = utils.user_validate(months_sum, 141)
self.assertEqual(
data,
{
141: [
[], [], [], [], [], [], [550395], [632015],
[505118], [499105], [486939], [624356], [455386]
]
}
)
def suite():
"""
Default test suite.
"""
base_suite = unittest.TestSuite()
base_suite.addTest(unittest.makeSuite(PresenceAnalyzerViewsTestCase))
base_suite.addTest(unittest.makeSuite(PresenceAnalyzerUtilsTestCase))
return base_suite
if __name__ == '__main__':
unittest.main()
| 2.1875 | 2 |
bag/tests/test_views.py | keeks-mtl/go-tennis | 0 | 12794796 | <filename>bag/tests/test_views.py
from django.test import TestCase, Client
from django.urls import reverse
from products.models import Category, Product
class TestBagViews(TestCase):
def setUp(self):
self.client = Client()
self.home = reverse("home")
self.view_bag = reverse("view_bag")
self.category = Category.objects.create(
name="test_category",
friendly_name="Test Category"
)
self.product = Product.objects.create(
category=self.category,
sku="1",
name="test product",
description="test description",
price="2.99",
rating="4",
image="testimage.jpg",
has_sizes=False,
)
def test_view_bag_view_GET(self):
''' test the view bag page '''
response = self.client.get(self.view_bag)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "bag/bag.html")
self.assertTemplateUsed(response, "base.html")
| 2.4375 | 2 |
docs/source/sample_scripts/run_vc_pipeline.py | genestack/python-client | 2 | 12794797 | <reponame>genestack/python-client<filename>docs/source/sample_scripts/run_vc_pipeline.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
from genestack_client import (AlignedReadsQC, BioMetaKeys, BowtieApplication, FilesUtil,
SpecialFolders, VariationCaller2Application, get_connection,
make_connection_parser)
# base class to create multiple files with a CLA
class BatchFilesCreator(object):
def __init__(self, cla, base_folder, friendly_name, custom_args=None):
"""
Constructor of the general batch files creator, to create multiple files from a CLA.
:param cla: a ``CLApplication`` object, wrapper for the corresponding CLA
:param base_folder: accession of the base folder where the pipeline files will be organised into subfolders
:param friendly_name: user-friendly name of the files produced by the app ; used in the on-screen statements
and in the name of the project subfolders
:param custom_args: list of custom command-line argument strings for the files. Default is ``None``
"""
self._cla = cla
self._files_util = FilesUtil(cla.connection)
self._base_folder = base_folder
self._friendly_name = friendly_name
self._custom_args = custom_args
def create_files(self, sources):
print('Creating %s files...' % self._friendly_name)
output_folder = self._files_util.create_folder(self._friendly_name, parent=self._base_folder)
output_files = []
for i, source in enumerate(sources, 1):
output = self._create_output_file(source)
self._files_util.link_file(output, output_folder)
            print('Created %s file %s (%d/%d)' % (self._friendly_name, output, i, len(sources)))
output_files.append(output)
return output_files
# this method can be overridden in child classes to allow for more complex file creation logic
def _create_output_file(self, source):
output = self._cla.create_file(source)
if self._custom_args:
self._cla.change_command_line_arguments(output, self._custom_args)
return output
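# Added usage sketch (not in the original script): a creator is typically built as
# BatchFilesCreator(cla, base_folder_accession, "Mapped Reads", custom_args=[...])
# and then create_files(source_accessions) returns the accessions of the new files.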
# special class for Bowtie to replace the default reference genome
class BowtieBatchFilesCreator(BatchFilesCreator):
def __init__(self, cla, base_folder, friendly_name, custom_args=None, ref_genome=None):
BatchFilesCreator.__init__(self, cla, base_folder, friendly_name, custom_args)
self._ref_genome = ref_genome
def _create_output_file(self, source):
output = BatchFilesCreator._create_output_file(self, source)
# replace reference genome
if self._ref_genome:
self._files_util.remove_metainfo_value([output], BioMetaKeys.REFERENCE_GENOME)
self._cla.replace_file_reference(output, BioMetaKeys.REFERENCE_GENOME, None, self._ref_genome)
return output
# These CLA arguments correspond to all default options except the type of variants to look for (SNPs only).
# The easiest way to know the syntax of the command-line arguments for a specific app is to look at the "Parameters"
# metainfo field of a CLA file on Genestack that has the parameters you want.
VC_ARGUMENTS_NO_INDELS = ["--skip-indels -d 250 -m 1 -E --BCF --output-tags DP,DV,DP4,SP", "",
"--skip-variants indels --multiallelic-caller --variants-only"]
if __name__ == "__main__":
# parse script arguments
parser = make_connection_parser()
parser.add_argument('raw_reads_folder',
help='Genestack accession of the folder containing the raw reads files to process')
parser.add_argument('--name', default="New Project",
help='Name of the Genestack folder where to put the output files')
parser.add_argument('--ref-genome', help='Accession of the reference genome to use for the mapping step')
args = parser.parse_args()
project_name = args.name
print('Connecting to Genestack...')
# get connection and create output folder
connection = get_connection(args)
files_util = FilesUtil(connection)
created_files_folder = files_util.get_special_folder(SpecialFolders.CREATED)
project_folder = files_util.create_folder(project_name, parent=created_files_folder)
# create application wrappers and batch files creators
bowtie_app = BowtieApplication(connection)
mapped_qc_app = AlignedReadsQC(connection)
variant_calling_app = VariationCaller2Application(connection)
bowtie_creator = BowtieBatchFilesCreator(bowtie_app, project_folder, "Mapped Reads", ref_genome=args.ref_genome)
mapped_qc_creator = BatchFilesCreator(mapped_qc_app, project_folder, "Mapped Reads QC")
vc_creator = BatchFilesCreator(variant_calling_app, project_folder, "Variants", custom_args=VC_ARGUMENTS_NO_INDELS)
# collect files
print('Collecting raw reads...')
raw_reads = files_util.get_file_children(args.raw_reads_folder)
files_count = len(raw_reads)
print('Found %d files to process' % files_count)
# Create pipeline files
mapped_reads = bowtie_creator.create_files(raw_reads)
mapped_reads_qcs = mapped_qc_creator.create_files(mapped_reads)
vc_creator.create_files(mapped_reads)
print('All done! Your files are in the folder %s' % project_folder)
| 2.453125 | 2 |
topo_processor/file_system/get_fs.py | linz/topo-processor | 11 | 12794798 | from fsspec.implementations.local import LocalFileSystem
from s3fs import S3FileSystem
def is_s3_path(path: str) -> bool:
if path.startswith("s3://"):
return True
return False
def bucket_name_from_path(path: str) -> str:
path_parts = path.replace("s3://", "").split("/")
return path_parts.pop(0)
def get_fs(path: str):
if is_s3_path(path):
return S3FileSystem()
return LocalFileSystem(auto_mkdir="True")
| 2.46875 | 2 |
programmers/skill-test-lv1/get_middle_char.py | love-adela/algorithm | 3 | 12794799 | <filename>programmers/skill-test-lv1/get_middle_char.py
def solution(s):
length = len(s)
return s[length // 2] if length % 2 != 0 else s[(length // 2)-1:(length // 2)+1]
# Test
# s = 'abcde'
s = 'qwer'
print(solution(s))
#5 -> 2
#4 -> [1:3] # 1, 2
| 3.8125 | 4 |
books/models.py | oliverroick/django-tests | 0 | 12794800 | from django.db import models
from django.contrib.auth.models import User
class Book(models.Model):
title = models.CharField(max_length=100)
author = models.ForeignKey(User)
| 2.09375 | 2 |
way/python/first_steps/zadachi/podkat/podkat_k_programmistke.py | only-romano/junkyard | 0 | 12794801 | <reponame>only-romano/junkyard
#! Подкат к программистке.
# Вариант с инпутом.
def podkat():
a = str(input("Привет, солнце, посветишь для меня сегодня ?): "))
right_answers = ("да конечно посвечу афк пошли мяф го игого"
"гоу хочу хотела красава мечтаю давай ага супер пойдём")
if a.lower() in right_answers.lower():
return print('Сегодня кому-то повезло :)')
else:
while a.lower() not in right_answers.lower():
a = str(input("Некорректный ответ. Повторите попытку: "))
return print("Столько времени потеряли зря :) Могли бы уже зажигать!")
if __name__ == "__main__":
podkat()
| 3.46875 | 3 |
index.py | z0di4ckX/IP-finder | 0 | 12794802 | from flask import Flask, redirect, url_for, render_template
#import ip_finder
app = Flask(__name__)
@app.route("/<name>")
def home(name):
return render_template("index.html", content=name)
# @app.route("/<name>")
# def user(name):
# return f"Hello {name}!"
# # Working on it!
# @app.route("/<ipF>")
# def ip(ipF):
# return f"{ipF}"
# @app.route("/admin")
# def admin():
# return redirect(url_for("user", name="Admin!"))
if __name__ == "__main__":
app.run() | 2.546875 | 3 |
pingyingdict/data_clean/__init__.py | sujing863/crawler-dict | 0 | 12794803 | # -*- coding: utf-8 -*-
"""
@Time : 2021/1/9 17:28
@Author : s_jing
@File : __init__.py.py
@Software: PyCharm
"""
| 0.925781 | 1 |
src/pyglue/DocStrings/Exception.py | omenos/OpenColorIO | 7 | 12794804 |
class Exception:
"""
An exception class to throw for errors detected at runtime.
.. warning::
All functions in the Config class can potentially throw this exception.
"""
def __init__(self):
pass
| 2.21875 | 2 |
openmdao.gui/src/openmdao/gui/urls.py | OzanCKN/OpenMDAO-Framework | 3 | 12794805 | from django.conf.urls.defaults import patterns, include, url
from django.contrib.auth import views as authviews
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# favicon
(r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/images/favicon.ico'}),
# default to projdb app
(r'^$', 'projdb.views.index'),
    (r'^login/$', 'workspace.views.Exit'), # Workspace Logout menu item currently points to /login
# projects
(r'^projects/', include('projdb.urls')),
# workspace
(r'^workspace/', include('workspace.urls')),
# registration view is in projdb at the moment
(r'^accounts/register/$', 'projdb.views.register'),
# authentication
(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
(r'^accounts/logout/$', 'django.contrib.auth.views.logout_then_login'),
(r'^accounts/password_change/$', 'django.contrib.auth.views.password_change'),
(r'^accounts/password_reset/$', 'django.contrib.auth.views.password_reset'),
(r'^accounts/password_reset/done/$', 'django.contrib.auth.views.password_reset_done'),
# admin
(r'^admin/', include(admin.site.urls)),
(r'^admin/doc/', include('django.contrib.admindocs.urls')),
)
| 1.929688 | 2 |
landscape/prerequisites.py | shaneramey/landscape-cli | 0 | 12794806 | import subprocess as sp
import platform
import os.path
import logging
def install_prerequisites(os_platform):
"""
Installs prerequisites for the landscape CLI tool
Returns: None
"""
install_gsed(os_platform)
install_minikube(os_platform)
install_lastpass(os_platform)
install_vault(os_platform)
install_kubectl(os_platform)
install_helm(os_platform)
install_landscaper(os_platform)
install_terraform(os_platform)
install_helm_plugins()
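# Added note: the install_* helpers below mostly follow the same idempotent pattern:
# they skip the shell command when the target binary already exists at its expected
# path, so re-running this module is safe.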
def install_gsed(os_platform):
"""Install minikube"""
install_cmds = {
'Darwin': 'brew install gnu-sed'
}
dst = '/usr/local/bin/gsed'
if not os.path.isfile(dst):
logging.info("installing gnu-sed")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("gnu-sed already installed in {0}".format(dst))
def install_minikube(os_platform):
"""Install minikube"""
install_cmds = {
'Darwin': 'curl -LO https://storage.googleapis.com/minikube/releases/v0.22.3/minikube-darwin-amd64 && \
chmod +x minikube-darwin-amd64 && \
mv minikube-darwin-amd64 /usr/local/bin/minikube'
}
dst = '/usr/local/bin/minikube'
if not os.path.isfile(dst):
logging.info("installing minikube")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("minikube already installed in {0}".format(dst))
def install_lastpass(os_platform):
"""Install LastPass"""
install_cmds = {
'Darwin': 'brew update && brew install lastpass-cli --with-pinentry'
}
dst = '/usr/local/bin/lpass'
if not os.path.isfile(dst):
logging.info("installing lastpass")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("lastpass already installed in {0}".format(dst))
def install_vault(os_platform):
"""Installs Hashicorp Vault"""
install_cmds = {
'Darwin': 'curl -LO https://releases.hashicorp.com/vault/0.8.3/vault_0.8.3_darwin_amd64.zip && \
unzip -d /usr/local/bin/ vault_0.8.3_darwin_amd64.zip && \
rm vault_0.8.3_darwin_amd64.zip'
}
dst = '/usr/local/bin/vault'
if not os.path.isfile(dst):
logging.info("installing vault")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("vault already installed in {0}".format(dst))
def install_kubectl(os_platform):
"""Installs Kubernetes kubectl"""
install_cmds = {
'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.1/bin/darwin/amd64/kubectl && \
chmod +x kubectl && \
mv kubectl /usr/local/bin/'
}
dst = '/usr/local/bin/kubectl'
if not os.path.isfile(dst):
logging.info("installing kubectl")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("kubectl already installed in {0}".format(dst))
def install_helm(os_platform):
"""Installs Kubernetes Helm"""
install_cmds = {
'Darwin': 'curl -LO https://storage.googleapis.com/kubernetes-helm/helm-v2.7.2-darwin-amd64.tar.gz && \
tar zvxf helm-v2.7.2-darwin-amd64.tar.gz --strip-components=1 darwin-amd64/helm && \
chmod +x helm && \
mv helm /usr/local/bin/ && \
rm helm-v2.7.2-darwin-amd64.tar.gz'
}
dst = '/usr/local/bin/helm'
if not os.path.isfile(dst):
logging.info("installing helm")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("helm already installed in {0}".format(dst))
def install_landscaper(os_platform):
"""Installs Helm Landscaper"""
install_cmds = {
'Darwin': 'curl -LO https://github.com/Eneco/landscaper/releases/download/1.0.10/landscaper-1.0.11-darwin-amd64.tar.gz && \
tar zvxf landscaper-1.0.11-darwin-amd64.tar.gz landscaper && \
mv landscaper /usr/local/bin/ && \
rm landscaper-1.0.11-darwin-amd64.tar.gz'
}
dst = '/usr/local/bin/landscaper'
if not os.path.isfile(dst):
logging.info("installing landscaper")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("landscaper already installed in {0}".format(dst))
def install_terraform(os_platform):
"""Installs Terraform"""
install_cmds = {
        'Darwin': 'curl -LO https://releases.hashicorp.com/terraform/0.10.7/terraform_0.10.7_darwin_amd64.zip && \
unzip -d /usr/local/bin terraform_0.10.7_darwin_amd64.zip && \
rm terraform_0.10.7_darwin_amd64.zip'
}
dst = '/usr/local/bin/terraform'
if not os.path.isfile(dst):
logging.info("installing terraform")
sp.call(install_cmds[os_platform], shell=True)
else:
logging.info("terraform already installed in {0}".format(dst))
def install_helm_plugins():
"""Install helm plugins. Requires helm to be installed"""
plugins = {
'https://github.com/technosophos/helm-gpg': '0.1.0',
}
for plugin_url, version in plugins.items():
install_cmd = "helm plugin install {0} --version={1}".format(
plugin_url,
version)
logging.info("installing helm plugin with command: {0}".format(install_cmd))
sp.call(install_cmd, shell=True)
| 1.984375 | 2 |
pacote-download/ex(1-100)/ex098.py | gssouza2051/python-exercicios | 0 | 12794807 | <gh_stars>0
'''Faça um programa que tenha uma função chamada contador(),
que receba três parâmetros: início, fim e passo. Seu programa tem que realizar três contagens através da função criada:
a) de 1 até 10, de 1 em 1
b) de 10 até 0, de 2 em 2
c) uma contagem personalizada'''
from time import sleep
def contador(i,f,p):
print('-='*20)
print(f'contagem de {i} até {f} de {p} em {p}')
sleep(2.5)
if i < f:
cont=i
while cont <=f:
print(f'{cont} ',end='',flush=True)
sleep(0.5)
cont +=p
print('FIM!')
else:
cont=i
while cont <=f:
print(f' {cont} ',end='',flush=True)
sleep(0.5)
cont -=p
print('FIM!')
print('-='*20)
contador(1,10,1)
contador(10,0,2)
print('Agora é sua vez de personalizar a contagem!')
ini=int(input('Início: '))
fim=int(input('Fim: '))
pas=int(input('Passo: '))
contador(ini,fim,pas)
| 3.890625 | 4 |
ikalog/ui/panel/preview.py | fetus-hina/IkaLog | 285 | 12794808 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os.path
import threading
import wx
import cv2
from ikalog.utils import Localization
from ikalog.ui.events import *
_ = Localization.gettext_translation('IkaUI', fallback=True).gettext
class FileDropTarget(wx.FileDropTarget):
def __init__(self, observer):
wx.FileDropTarget.__init__(self)
self.observer = observer
def OnDropFiles(self, x, y, filenames):
self.observer.on_drop_files(x, y, filenames)
return True
class InputFilePanel(wx.Panel):
def __init__(self, *args, **kwargs):
wx.Panel.__init__(self, *args, **kwargs)
# This is used to determine if a file dialog is open or not.
self.prev_file_path = ''
# Textbox for input file
self.text_ctrl = wx.TextCtrl(self, wx.ID_ANY, '')
self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text_input)
self.button = wx.Button(self, wx.ID_ANY, _('Browse'))
self.button.Bind(wx.EVT_BUTTON, self.on_button_click)
# Drag and drop
drop_target = FileDropTarget(self)
self.text_ctrl.SetDropTarget(drop_target)
top_sizer = wx.BoxSizer(wx.HORIZONTAL)
top_sizer.Add(self.text_ctrl, proportion=1)
top_sizer.Add(self.button)
self.SetSizer(top_sizer)
def should_open_file(self, file_path):
return os.path.isfile(file_path) and self.prev_file_path != file_path
def update_button_label(self):
file_path = self.text_ctrl.GetValue()
if self.should_open_file(file_path):
self.button.SetLabel(_('Open'))
else:
self.button.SetLabel(_('Browse'))
# wx event
def on_text_input(self, event):
self.update_button_label()
# wx event
def on_button_click(self, event):
file_path = self.text_ctrl.GetValue()
if self.should_open_file(file_path):
evt = InputFileAddedEvent(input_file=file_path)
wx.PostEvent(self, evt)
self.prev_file_path = file_path
self.update_button_label()
return
# file_path is invalid. Open a file dialog.
file_dialog = wx.FileDialog(self, _('Select a video file'))
if file_dialog.ShowModal() != wx.ID_OK:
return
file_path = file_dialog.GetPath()
self.text_ctrl.SetValue(file_path)
# Callback from wx.FileDropTarget.OnDropFiles
def on_drop_files(self, x, y, filenames):
if not filenames:
return
self.text_ctrl.SetValue(filenames[0])
class PreviewPanel(wx.Panel):
def SetEventHandlerEnable(self, obj, enable):
orig_state = obj.GetEvtHandlerEnabled()
obj.SetEvtHandlerEnabled(enable)
return orig_state
# IkaLog event
def on_show_preview(self, context):
img = context['engine'].get('preview', context['engine']['frame'])
if img is None:
return False
try:
self.lock.acquire()
self.latest_frame = cv2.resize(img, self.preview_size)
self.refresh_at_next = True
finally:
self.lock.release()
# wx event
def on_input_initialized(self, event):
self.show_header(event.source)
# wx event
def on_ikalog_pause(self, event):
self._pause = event.pause
self.draw_preview()
# wx event
def on_preview_click(self, event):
evt = IkalogPauseEvent(pause=(not self._pause))
wx.PostEvent(self, evt)
# wx event
def on_enter_preview(self, event):
self._enter = True
self.draw_preview()
# wx event
def on_leave_preview(self, event):
self._enter = False
self.draw_preview()
# wx event
def on_input_file_added(self, event):
# Propagate the event to the upper level.
wx.PostEvent(self, event)
source_message = {
'amarec': _('Capture through AmarecTV'),
'dshow_capture': _('HDMI Video input (DirectShow, recommended)'),
'opencv_capture': _('HDMI Video input (OpenCV driver)'),
'screen': _('Realtime Capture from desktop'),
'file': _('Read from pre-recorded video file (for testing)'),
}
def show_header(self, source):
self.video_input_source_text.SetLabel(
PreviewPanel.source_message.get(source, ''))
self.show_input_file((source == 'file'))
def show_input_file(self, show):
self.input_file_panel.Show(show)
self.Layout()
def draw_preview(self):
frame_rgb = None
try:
self.lock.acquire()
if self.latest_frame is None:
if self._prev_bmp:
                    dc = wx.ClientDC(self.preview_panel)
                    dc.DrawBitmap(self._prev_bmp, 0, 0)
return False
width, height = self.preview_size
frame_rgb = cv2.cvtColor(self.latest_frame, cv2.COLOR_BGR2RGB)
finally:
self.lock.release()
if frame_rgb is None:
return False
bmp = wx.BitmapFromBuffer(width, height, frame_rgb)
dc = wx.ClientDC(self.preview_panel)
dc.DrawBitmap(bmp, 0, 0)
self._prev_bmp = bmp
if self._enter:
ox = int(width / 2)
oy = int(height / 2)
if self._pause:
# Draw a triangle representing 'play'.
dc.DrawPolygon([(ox - 20, oy - 30),
(ox - 20, oy + 30),
(ox + 20, oy)])
else:
# Draw two rectangles representing 'pause'.
dc.DrawRectangle(ox - 20, oy - 30, 15, 60)
dc.DrawRectangle(ox + 10, oy - 30, 15, 60)
# wx event
def OnTimer(self, event):
self.lock.acquire()
if self.latest_frame is None:
self.lock.release()
return
self.lock.release()
if not self.refresh_at_next:
return
self.draw_preview()
self.refresh_at_next = False
def __init__(self, *args, **kwargs):
self._prev_bmp = None
self._enter = False
self._pause = False
self.refresh_at_next = False
self.latest_frame = None
self.lock = threading.Lock()
wx.Panel.__init__(self, *args, **kwargs)
self.timer = wx.Timer(self)
self.timer.Start(100)
self.Bind(wx.EVT_TIMER, self.OnTimer, self.timer)
self.GetTopLevelParent().Bind(EVT_INPUT_INITIALIZED,
self.on_input_initialized)
self.GetTopLevelParent().Bind(EVT_IKALOG_PAUSE, self.on_ikalog_pause)
# Preview
self.preview_size = (640, 360)
# Preview image.
self.preview_panel = wx.Panel(self, wx.ID_ANY, size=self.preview_size)
self.preview_panel.Bind(wx.EVT_LEFT_UP, self.on_preview_click)
self.preview_panel.Bind(wx.EVT_ENTER_WINDOW, self.on_enter_preview)
self.preview_panel.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave_preview)
# Video Input
self.video_input_title_text = wx.StaticText(
self, wx.ID_ANY, _('Video Input'))
self.video_input_source_text = wx.StaticText(self, wx.ID_ANY, '')
self.input_file_panel = InputFilePanel(self, wx.ID_ANY)
self.input_file_panel.Bind(EVT_INPUT_FILE_ADDED,
self.on_input_file_added)
self.show_input_file(False)
self.video_input_source_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.video_input_source_sizer.Add(
self.video_input_source_text, flag=wx.LEFT, border=10)
self.video_input_source_sizer.Add(self.input_file_panel, proportion=1)
# Sizer to set the width of the text box to 640.
self.video_input_sizer = wx.BoxSizer(wx.VERTICAL)
self.video_input_sizer.Add(self.video_input_title_text)
self.video_input_sizer.Add(self.video_input_source_sizer,
flag=wx.EXPAND | wx.ALL, border=5)
self.video_input_sizer.Add((640, 5))
# Top sizer
self.top_sizer = wx.BoxSizer(wx.VERTICAL)
self.top_sizer.Add(self.video_input_sizer, flag=wx.ALL, border=5)
self.top_sizer.Add(self.preview_panel)
self.SetSizer(self.top_sizer)
if __name__ == "__main__":
import sys
import wx
application = wx.App()
frame = wx.Frame(None, wx.ID_ANY, 'Preview', size=(640, 360))
preview = PreviewPanel(frame, size=(640, 360))
layout = wx.BoxSizer(wx.VERTICAL)
layout.Add(preview)
frame.SetSizer(layout)
frame.Show()
application.MainLoop()
| 2.28125 | 2 |
agent.py | globocom/Tryout-agent | 0 | 12794809 | # -*- encoding: utf-8 -*-
import os
import subprocess
import settings
import git
import requests
def clone_challenge(challenge_repository, challenge_name):
try:
git.Git().clone(challenge_repository)
if not os.path.exists(challenge_name):
return "Can't download this repository", True
except git.GitCommandError:
pass
return '', False
def _run_make_command(challenge_name, make_parameter, background=False):
make_command = ["make", "-C", "{directory}".format(directory=challenge_name), make_parameter]
try:
        if background:
            bg_process = subprocess.Popen(make_command, stdin=None, stdout=None, stderr=None)
            # Popen returns immediately, so returncode is None while the
            # process is still running; only kill it if it has already
            # finished with a non-zero exit code.
            if bg_process.poll() is not None and bg_process.returncode != 0:
                bg_process.kill()
            return '', False
        else:
            output = subprocess.check_output(make_command, stderr=subprocess.STDOUT)
            return output, False
except Exception as e:
return "Have a error in make {parameter} error: {error}".format(parameter=make_parameter, error=e), True
def run_make_setup(challenge_name):
return _run_make_command(challenge_name, "setup")
def run_make_run(challenge_name):
return _run_make_command(challenge_name, "run", background=True)
def send_status(challenge_name, status_json):
requests.post(settings.API_URL, status_json)
def main():
status_json = dict()
challenge_repository = os.environ.get("REPO")
challenge_name = challenge_repository.split('/')[-1].replace('.git', '')
msg, error = clone_challenge(challenge_repository, challenge_name)
if error:
status_json['clone_error'] = msg
return
msg, setup_error = run_make_setup(challenge_name)
status_json['setup_output'] = msg
if setup_error:
return
run_make_run(challenge_name)
send_status(challenge_name, status_json)
if __name__ == '__main__':
status = main()
| 2.5625 | 3 |
awx/main/management/commands/provision_instance.py | ziegenberg/awx | 0 | 12794810 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved
import os
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.conf import settings
from awx.main.models import Instance
class Command(BaseCommand):
"""
Internal tower command.
Register this instance with the database for HA tracking.
"""
help = (
"Add instance to the database. "
"When no options are provided, values from Django settings will be used to register the current system, "
"as well as the default queues if needed (only used or enabled for Kubernetes installs). "
"Override with `--hostname`."
)
def add_arguments(self, parser):
parser.add_argument('--hostname', dest='hostname', type=str, help="Hostname used during provisioning")
parser.add_argument('--node_type', type=str, default='hybrid', choices=['control', 'execution', 'hop', 'hybrid'], help="Instance Node type")
parser.add_argument('--uuid', type=str, help="Instance UUID")
def _register_hostname(self, hostname, node_type, uuid):
if not hostname:
if not settings.AWX_AUTO_DEPROVISION_INSTANCES:
raise CommandError('Registering with values from settings only intended for use in K8s installs')
from awx.main.management.commands.register_queue import RegisterQueue
(changed, instance) = Instance.objects.register(ip_address=os.environ.get('MY_POD_IP'), node_type='control', uuid=settings.SYSTEM_UUID)
RegisterQueue(settings.DEFAULT_CONTROL_PLANE_QUEUE_NAME, 100, 0, [], is_container_group=False).register()
RegisterQueue(
settings.DEFAULT_EXECUTION_QUEUE_NAME, 100, 0, [], is_container_group=True, pod_spec_override=settings.DEFAULT_EXECUTION_QUEUE_POD_SPEC_OVERRIDE
).register()
else:
(changed, instance) = Instance.objects.register(hostname=hostname, node_type=node_type, uuid=uuid)
if changed:
print("Successfully registered instance {}".format(hostname))
else:
print("Instance already registered {}".format(instance.hostname))
self.changed = changed
@transaction.atomic
def handle(self, **options):
self.changed = False
self._register_hostname(options.get('hostname'), options.get('node_type'), options.get('uuid'))
if self.changed:
print("(changed: True)")
| 1.90625 | 2 |
src/helpers.py | vergoh/micropython-spotify-status-display | 3 | 12794811 | # reduced from https://github.com/blainegarrett/urequests2
import binascii
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'abcdefghijklmnopqrstuvwxyz'
'0123456789' '_.-')
def quote(s):
res = []
for c in s:
if c in always_safe:
res.append(c)
continue
        # Pad to two uppercase hex digits so code points below 0x10 stay valid.
        res.append('%%%02X' % ord(c))
return ''.join(res)
def quote_plus(s):
    # Quote first, then map the encoded space back to '+', so that the '+'
    # itself is not percent-encoded by quote().
    return quote(s).replace('%20', '+')
def urlencode(query):
if isinstance(query, dict):
query = query.items()
l = []
for k, v in query:
if not isinstance(v, list):
v = [v]
for value in v:
k = quote_plus(str(k))
v = quote_plus(str(value))
l.append(k + '=' + v)
return '&'.join(l)
def b64encode(s):
"""Reproduced from micropython base64"""
if not isinstance(s, (bytes, bytearray)):
raise TypeError("expected bytes, not %s" % s.__class__.__name__)
# Strip off the trailing newline
encoded = binascii.b2a_base64(s)[:-1]
return encoded
| 2.6875 | 3 |
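# Illustrative usage sketch for the helpers above (not part of the original
# module); the query keys/values and credential string are made-up examples.
params = {'q': 'hello world', 'ids': [1, 2]}
print(urlencode(params))          # e.g. q=hello+world&ids=1&ids=2
print(b64encode(b'user:secret'))  # b'dXNlcjpzZWNyZXQ='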
fluent_pages/pagetypes/textfile/page_type_plugins.py | django-fluent/django-fluent-pages | 59 | 12794812 | from django.http import HttpResponse
from fluent_pages.extensions import PageTypePlugin, page_type_pool
from .models import TextFile
@page_type_pool.register
class TextFilePlugin(PageTypePlugin):
model = TextFile
is_file = True
def get_response(self, request, textfile, **kwargs):
content_type = textfile.content_type
if content_type in TextFile.UTF8_TYPES:
content_type += "; charset=utf-8" # going to enforce this.
return HttpResponse(content=textfile.content, content_type=content_type)
| 2.09375 | 2 |
Alien Invasion/sound_fx.py | rubotic1/AlienInvaders | 0 | 12794813 |
import pygame
class Sound_fx:
    """Class that controls the game's sound."""
    def __init__(self):
        """Initialise the mixer and load the sound resources."""
        # The mixer must be initialised before Sound/Channel objects are created.
        pygame.mixer.init()
        self.init_music = 'sound/01_Title Screen.mp3'
        self.game_music = 'sound/12_Invader_Homeworld.mp3'
        self.shot = pygame.mixer.Sound('sound/shot.wav')
        self.alien = pygame.mixer.Sound('sound/mixkit-video-game-blood-pop-2361.wav')
        self.shot_cn = pygame.mixer.Channel(1)
        self.alien_cn = pygame.mixer.Channel(2)
        self.alien_cn.set_volume(0.5)
def play_init_music(self):
pygame.mixer.music.load(self.init_music)
pygame.mixer.music.play(-1)
def play_game_music(self):
pygame.mixer.music.load(self.game_music)
pygame.mixer.music.play(-1)
def play_shot(self):
self.shot_cn.play(self.shot)
def play_alien(self):
self.alien_cn.play(self.alien)
| 3.1875 | 3 |
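# Hypothetical usage of the Sound_fx class above; assumes the sound/ assets
# referenced in __init__ exist on disk. Not part of the original game code.
import pygame
pygame.init()
sfx = Sound_fx()
sfx.play_init_music()  # loop the title-screen track
sfx.play_shot()        # play the shot effect on its own channel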
resources/lib/api/models/playback.py | MatiasStorm/plugin.video.tv2play | 0 | 12794814 |
class PlayBack:
def __init__(self, playback):
self.playback = playback
self.src = playback["smil"]["video"]["src"]
self.mime_type = playback["smil"]["video"]["type"]
self.license_token = playback["smil"]["securityLicense"]["token"]
url = self.playback["smil"]["securityLicense"]["url"]
# self.license_url = url + "|Content-Type=&User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3041.0 Safari/537.36&Host=lic.drmtoday.com&x-dt-auth-token=%s|R{SSM}|JBlicense" % self.license_token
self.license_url = url
self.license_url += "|Content-Type="
self.license_url += "&Accept=*/*"
self.license_url += "&Accept-Encoding=gzip, deflate, br"
self.license_url += "&Accept-Language=en-US,en;q=0.9,da;q=0.8"
self.license_url += "&User-Agent=Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36"
self.license_url += "&sec-ch-ua=\"Google Chrome\";v=\"93\", \" Not;A Brand\";v=\"99\", \"Chromium\";v=\"93\""
self.license_url += "&sec-ch-ua-mobile=?0"
self.license_url += "&sec-ch-ua-platform=\"Linux\""
self.license_url += "&Sec-Fetch-Dest=empty"
self.license_url += "&Sec-Fetch-Mode=cors"
self.license_url += "&Sec-Fetch-Site=cross-site"
self.license_url += "&Host=lic.drmtoday.com"
self.license_url += "&x-dt-auth-token=%s|R{SSM}|JBlicense" % self.license_token
def __repr__(self):
return self.playback.__repr__()
| 2.234375 | 2 |
pokemon/migrations/0002_auto_20200224_0512.py | andresRah/PokemonDjango | 0 | 12794815 |
# Generated by Django 3.0.3 on 2020-02-24 05:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pokemon', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Evolution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('evolutionChainId', models.CharField(default='', max_length=100)),
('name', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='Pokemon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('height', models.CharField(default='', max_length=100)),
('weight', models.CharField(default='', max_length=100)),
],
),
migrations.CreateModel(
name='StatElement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('baseStat', models.CharField(default='', max_length=100)),
('effort', models.CharField(default='', max_length=100)),
('pokemon', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon')),
],
),
migrations.CreateModel(
name='StatsPokemon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=100)),
('url', models.URLField(default='', max_length=100)),
('stat', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.StatElement')),
],
),
migrations.DeleteModel(
name='User',
),
migrations.AddField(
model_name='evolution',
name='pokemon',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to='pokemon.Pokemon'),
),
]
| 1.921875 | 2 |
src/cgen/ast/literal.py | cursedclock/swiss_water_compiler | 0 | 12794816 | import enum
import struct
from .abstract import AbstractNode
from .utils import ValuedNodeMixin, NodeContext
class PrimitiveTypes(enum.Enum):
Null = 0
Bool = 1
Int = 2
Double = 3
String = 4
# aliases
NULL = 0
BOOLEANLITERAL = 1
INTLITERAL = 2
DOUBLELITERAL = 3
STRINGLITERAL = 4
bool = 1
int = 2
double = 3
string = 4
class BaseLiteralNode(AbstractNode, ValuedNodeMixin):
def __init__(self, ctx: NodeContext, value_type: str, value: str):
super(BaseLiteralNode, self).__init__(ctx)
self._value_type = BaseLiteralNode.get_type(value_type)
self._literal_value = value
def _run_scope_check(self):
pass # no checks needed
def _run_type_check(self):
pass # no checks needed
def generate_code(self):
raise NotImplementedError
@staticmethod
def get_type(value_type: str) -> PrimitiveTypes:
return PrimitiveTypes[value_type]
def get_value(self):
return self._literal_value
class StringLiteralNode(BaseLiteralNode):
def generate_code(self):
label = self.ctx.label_generator.get_label()
self.ctx.data_segment += f'{label}:\t.asciiz\t{self._literal_value}\n'
self.ctx.text_segment += f'\tla $v0, {label}\n'
class IntLiteralNode(BaseLiteralNode):
def generate_code(self):
self.ctx.text_segment += f'\tli $v0, {self._literal_value}\n'
class BoolLiteralNode(BaseLiteralNode):
def generate_code(self):
value = 1 if self._literal_value == 'true' else 0
self.ctx.text_segment += f'\tli $v0, {value}\n'
class NullLiteralNode(BaseLiteralNode):
def generate_code(self):
self.ctx.text_segment += f'\tmove $v0, $zero\n'
class DoubleLiteralNode(BaseLiteralNode):
def generate_code(self):
label = self.ctx.label_generator.get_label()
self.ctx.data_segment += f'{label}:\t.double\t{self._literal_value}\n'
self.ctx.text_segment += f'\tl.d $f0, {label}\n'
| 2.765625 | 3 |
Chapter05/01-chapter-content/filter2D_kernels.py | yaojh01/Mastering-OpenCV-4-with-Python | 2 | 12794817 | """
Comparing different kernels using cv2.filter2D()
"""
# Import required packages:
import cv2
import numpy as np
import matplotlib.pyplot as plt
def show_with_matplotlib(color_img, title, pos):
"""Shows an image using matplotlib capabilities"""
# Convert BGR image to RGB
img_RGB = color_img[:, :, ::-1]
ax = plt.subplot(3, 4, pos)
plt.imshow(img_RGB)
plt.title(title)
plt.axis('off')
# Create the dimensions of the figure and set title:
plt.figure(figsize=(12, 6))
plt.suptitle("Comparing different kernels using cv2.filter2D()", fontsize=14, fontweight='bold')
# Load the original image:
image = cv2.imread('cat-face.png')
# We try different kernels
# Identify kernel (does not modify the image)
kernel_identity = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
# Try different kernels for edge detection:
kernel_edge_detection_1 = np.array([[1, 0, -1],
[0, 0, 0],
[-1, 0, 1]])
kernel_edge_detection_2 = np.array([[0, 1, 0],
[1, -4, 1],
[0, 1, 0]])
kernel_edge_detection_3 = np.array([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
# Try different kernels for sharpening:
kernel_sharpen = np.array([[0, -1, 0],
[-1, 5, -1],
[0, -1, 0]])
kernel_unsharp_masking = -1 / 256 * np.array([[1, 4, 6, 4, 1],
[4, 16, 24, 16, 4],
[6, 24, -476, 24, 6],
[4, 16, 24, 16, 4],
[1, 4, 6, 4, 1]])
# Try different kernels for smoothing:
kernel_blur = 1 / 9 * np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
gaussian_blur = 1 / 16 * np.array([[1, 2, 1],
[2, 4, 2],
[1, 2, 1]])
# Try a kernel for embossing:
kernel_emboss = np.array([[-2, -1, 0],
[-1, 1, 1],
[0, 1, 2]])
# Try different kernels for edge detection:
sobel_x_kernel = np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
sobel_y_kernel = np.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]])
outline_kernel = np.array([[-1, -1, -1],
[-1, 8, -1],
[-1, -1, -1]])
# Apply all the kernels:
original_image = cv2.filter2D(image, -1, kernel_identity)
edge_image_1 = cv2.filter2D(image, -1, kernel_edge_detection_1)
edge_image_2 = cv2.filter2D(image, -1, kernel_edge_detection_2)
edge_image_3 = cv2.filter2D(image, -1, kernel_edge_detection_3)
sharpen_image = cv2.filter2D(image, -1, kernel_sharpen)
unsharp_masking_image = cv2.filter2D(image, -1, kernel_unsharp_masking)
blur_image = cv2.filter2D(image, -1, kernel_blur)
gaussian_blur_image = cv2.filter2D(image, -1, gaussian_blur)
emboss_image = cv2.filter2D(image, -1, kernel_emboss)
sobel_x_image = cv2.filter2D(image, -1, sobel_x_kernel)
sobel_y_image = cv2.filter2D(image, -1, sobel_y_kernel)
outline_image = cv2.filter2D(image, -1, outline_kernel)
# Show all the images:
show_with_matplotlib(original_image, "identity kernel", 1)
show_with_matplotlib(edge_image_1, "edge detection 1", 2)
show_with_matplotlib(edge_image_2, "edge detection 2", 3)
show_with_matplotlib(edge_image_3, "edge detection 3", 4)
show_with_matplotlib(sharpen_image, "sharpen", 5)
show_with_matplotlib(unsharp_masking_image, "unsharp masking", 6)
show_with_matplotlib(blur_image, "blur image", 7)
show_with_matplotlib(gaussian_blur_image, "gaussian blur image", 8)
show_with_matplotlib(emboss_image, "emboss image", 9)
show_with_matplotlib(sobel_x_image, "sobel x image", 10)
show_with_matplotlib(sobel_y_image, "sobel y image", 11)
show_with_matplotlib(outline_image, "outline image", 12)
# Show the Figure:
plt.show()
| 3.375 | 3 |
python/src/mapreduce/api/map_job/map_job_control.py | rolepoint/appengine-mapreduce | 0 | 12794818 |
#!/usr/bin/env python
"""User API for controlling Map job execution."""
from google.appengine.ext import db
from mapreduce import util
# pylint: disable=g-bad-name
# pylint: disable=protected-access
def start(job_config=None,
in_xg_transaction=False):
"""Start a new map job.
Args:
job_config: an instance of map_job.MapJobConfig.
in_xg_transaction: controls what transaction scope to use to start this MR
job. If True, there has to be an already opened cross-group transaction
scope. MR will use one entity group from it.
If False, MR will create an independent transaction to start the job
regardless of any existing transaction scopes.
Returns:
the id of this map job.
Raises:
ValueError: when in_xg_transaction is True but no transaction scope is
detected.
"""
if in_xg_transaction and not db.is_in_transaction():
raise ValueError("Expects an opened xg transaction to start mapreduce.")
# Break circular dependency.
# pylint: disable=g-import-not-at-top
from mapreduce import handlers
return handlers.StartJobHandler._start_map(
name=job_config.job_name,
mapper_spec=job_config._get_mapper_spec(),
mapreduce_params=job_config._get_mr_params(),
queue_name=job_config.queue_name,
hooks_class_name=util._obj_to_path(job_config._hooks_cls),
_app=job_config._app,
in_xg_transaction=in_xg_transaction)
| 2.140625 | 2 |
django_command_admin/apps.py | andrewp-as-is/django-admin-commands.py | 1 | 12794819 |
from django.apps import AppConfig
class Config(AppConfig):
name = 'django_command_admin'
verbose_name = 'command-admin'
| 1.453125 | 1 |
mc2d/core/__init__.py | Den4200/mc2d | 0 | 12794820 |
from mc2d.core.generators import MapGenerator
from mc2d.core.grid import Grid
from mc2d.core.inventory import Inventory
from mc2d.core.player import Player
from mc2d.core.world import World
__all__ = (
'Grid',
'Inventory',
'MapGenerator',
'Player',
'World'
)
| 1.53125 | 2 |
cogs/running.py | Piturnah/Society-voting-bot | 0 | 12794821 |
# A Cog for running, and managing your run, in elections #
import traceback
from pyrankvote import Candidate
from discord.ext import commands
from cogs import helpers
class Running(commands.Cog):
# Initialisation #
def __init__(self, bot):
self.bot = bot
# Commands #
@commands.command(name='stand', help=f'Stand for a post - DM Only. Usage: {helpers.PREFIX}stand <POST> <EMAIL ADDRESS>',
usage='<POST> <EMAIL ADDRESS>')
@commands.dm_only()
async def stand(self, context, *input):
if not input:
await context.send('Must supply the post you are running for and a valid email address, '
f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`')
return
email = input[-1]
post = ' '.join(input[:-1])
if not post:
await context.send('Must supply the post you are running for and a valid email address, '
f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`')
return
if '@' not in email:
await context.send('Must supply the post you are running for and a valid email address, '
f'usage:`{helpers.PREFIX}stand <POST> <EMAIL>`')
return
matching_posts = helpers.match_post(post)
if not matching_posts:
await context.send('Looks like that post isn\'t available for this election, '
f'use `{helpers.PREFIX}posts` to see the posts up for election')
return
post = matching_posts[0]
async with helpers.current_live_post_lock.reader_lock:
if helpers.current_live_post:
if post == helpers.current_live_post[1]:
await context.send(f'I\'m afraid voting for {post} has already begun, you cannot stand for this post')
return
author = context.author.id
members = helpers.get_members()
output_str = 'Error'
if author in helpers.registered_members:
if [i for i in helpers.standing[post] if i == helpers.registered_members[author]]:
output_str = (f'It looks like you, {members[helpers.registered_members[author]]} are already '
f'standing for the position of: {post}')
else:
helpers.standing[post][helpers.registered_members[author]] = (Candidate(members[helpers.registered_members[author]]), email, author)
output_str = (f'Congratulations {members[helpers.registered_members[author]]}, '
f'you are now standing for the position of {post}. If you no longer wish to stand, you '
f'can send `{helpers.PREFIX}standdown {post}`\n\n'
'Now you\'ll need to prepare a 2 minute speech to be given in the election call.\n'
f'If you have any questions please contact the secretary {helpers.SECRETARY_NAME}'
f'({helpers.SECRETARY_EMAIL}), or someone else on the committee.\n'
'If you can\'t make it to the actual election call, you must get in touch with the '
'secretary ASAP to sort out alternative arrangements.')
helpers.log(f'{context.author.name}({helpers.registered_members[author]}) is now standing for {post}')
helpers.email_secretary(members[helpers.registered_members[author]], post)
else:
output_str = ('Looks like you\'re not registered yet, '
f'please register using `{helpers.PREFIX}register <STUDENT NUMBER>`')
helpers.log(f'{context.author.name} has failed to stand for {post} because they are not registered')
helpers.save_standing()
await context.send(output_str)
@commands.command(name='standdown', help=f'Stand down from running for a post - DM Only. Usage: {helpers.PREFIX}standdown <POST>',
usage='<POST>')
@commands.dm_only()
async def standdown(self, context, *post):
post = ' '.join(post)
if not post:
await context.send(f'Must supply the post you are standing down from, usage: `{helpers.PREFIX}standdown <POST>`')
return
matching_posts = helpers.match_post(post)
if not matching_posts:
await context.send('Looks like that post isn\'t available for this election, '
                               f'use `{helpers.PREFIX}posts` to see the posts up for election')
return
post = matching_posts[0]
author = context.author.id
if helpers.registered_members[author] not in helpers.standing[post]:
await context.send('Looks like you weren\'t standing for this post')
return
helpers.email_secretary(str(helpers.standing[post][helpers.registered_members[author]][0]), post, stood_down=True)
del helpers.standing[post][helpers.registered_members[author]]
helpers.save_standing()
helpers.log(f'{helpers.registered_members[author]} has stood down from standing for {post}')
await context.send(f'You have stood down from running for {post}')
@commands.command(name='changename', help='Change your name as used by the bot - DM Only. '
f'Usage: {helpers.PREFIX}changename <NAME>', usage='<NAME>')
@commands.dm_only()
async def changename(self, context, *name):
name = ' '.join(name)
if not name:
await context.send(f'Must supply the name you are wanting to change to, usage: `{helpers.PREFIX}changename <NAME>`')
return
if name.startswith('\''):
name = name.strip('\'')
author = context.author.id
if author not in helpers.registered_members:
await context.send('It looks like you\'re not registered yet, you must first register using '
f'`{helpers.PREFIX}register <STUDENT NUMBER>` before you can update your name')
return
async with helpers.current_live_post_lock.reader_lock:
if helpers.current_live_post:
await context.send('I\'m afraid you can\'t change your name whilst a vote is ongoing, '
'please wait until the vote has finished')
return
author_id = helpers.registered_members[author]
helpers.preferred_names[author_id] = name
for post in helpers.standing:
if author_id in helpers.standing[post]:
helpers.standing[post][author_id] = (Candidate(name), helpers.standing[post][author_id][1], author)
helpers.save_names()
helpers.save_standing()
await context.send(f'The bot now recognises your name to be {name}')
helpers.log(f'{context.author.name}({author_id}) has changed their name to {name}')
# Error handling #
async def dm_error(self, context, error):
if isinstance(error, commands.errors.PrivateMessageOnly):
await context.send('This command is DM only, please try again in a private message to me.')
return True
@stand.error
async def stand_error(self, context, error):
traceback.print_exception(type(error), error, error.__traceback__)
await self.dm_error(context, error)
@standdown.error
async def standdown_error(self, context, error):
traceback.print_exception(type(error), error, error.__traceback__)
await self.dm_error(context, error)
@changename.error
async def changename_error(self, context, error):
traceback.print_exception(type(error), error, error.__traceback__)
await self.dm_error(context, error)
def setup(bot):
    bot.add_cog(Running(bot))
| 2.546875 | 3 |
app.py | uvcloud/sample-12factor-docker-flask | 0 | 12794822 |
from flask import Flask,render_template
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
@app.route("/welcome")
def welcome():
return render_template("welcome.html")
if __name__ == '__main__':
app.run(host='0.0.0.0',debug=True)
| 2.671875 | 3 |
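# Quick sanity check of the routes above using Flask's built-in test client
# (illustrative only, not part of the original app):
with app.test_client() as client:
    print(client.get('/').data)  # b'Hello World!'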
models/topological_sort.py | BLimmie/manga_ordering | 0 | 12794823 | from collections import defaultdict
from .metrics import calculate_metrics_list
class Graph:
"""
The code for this class is based on geeksforgeeks.com
"""
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self, u, v):
self.graph[u].append([v])
def topologicalSortUtil(self, v, visited, stack):
visited[v] = True
for i in self.graph[v]:
if not visited[i[0]]:
self.topologicalSortUtil(i[0], visited, stack)
stack.insert(0, v)
def topologicalSort(self):
visited = [False] * self.V
stack = []
for i in range(self.V):
if not visited[i]:
self.topologicalSortUtil(i, visited, stack)
return stack
def isCyclicUtil(self, v, visited, recStack):
visited[v] = True
recStack[v] = True
for neighbour in self.graph[v]:
if not visited[neighbour[0]]:
if self.isCyclicUtil(
neighbour[0], visited, recStack):
return True
elif recStack[neighbour[0]]:
self.graph[v].remove(neighbour)
return True
recStack[v] = False
return False
def isCyclic(self):
visited = [False] * self.V
recStack = [False] * self.V
for node in range(self.V):
if not visited[node]:
if self.isCyclicUtil(node, visited, recStack):
return True
return False
def convert_to_graph(logits, positions, flipped=False):
    # number of vertices: len(logits) == n * (n - 1) / 2, so recover n from it
nvert = int((2 * len(logits)) ** 0.5)+1
# create graph obj
g = Graph(nvert)
# read pred label
for logit, pos in zip(logits, positions):
if flipped:
pred = 1 if logit < 0 else 0
else:
pred = 1 if logit > 0 else 0
pos_s1, pos_s2 = pos[0], pos[1]
if pred == 0:
g.addEdge(pos_s1, pos_s2)
elif pred == 1:
g.addEdge(pos_s2, pos_s1)
    # Each isCyclic() call removes an edge that closes a cycle; loop until
    # the graph is acyclic.
    while g.isCyclic():
        pass
order = g.topologicalSort()
gold_order = list(range(nvert))
return calculate_metrics_list(order, gold_order)
| 3.3125 | 3 |
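# Small self-contained sketch of the Graph helper above on a hand-built DAG
# (vertices and edges are made up; not part of the original module):
g = Graph(4)
g.addEdge(0, 1)
g.addEdge(0, 2)
g.addEdge(1, 3)
g.addEdge(2, 3)
print(g.isCyclic())         # False - the example graph has no cycle
print(g.topologicalSort())  # [0, 2, 1, 3]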
augmentation/augmentation.py | akaver/pbt-demo-mnist | 0 | 12794824 |
import logging
import PIL
import random
import numpy as np
import torch
import torchvision.transforms.functional as TF
log = logging.getLogger(__name__)
# define augmentation functions
def auto_contrast(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
fill = img[0, 0, 0].item() if fill is None else fill
if level > 0.1:
img = TF.autocontrast(img)
return img
def blur(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
kernel_size = int(level * 4)
if kernel_size % 2 == 0:
if random.random() > 0.5:
kernel_size = kernel_size + 1
else:
kernel_size = kernel_size - 1
if kernel_size > 0:
img = TF.gaussian_blur(img, kernel_size = kernel_size)
return img
def crop(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
height, width = img.shape[1], img.shape[2]
crop_h = int(height * level)
crop_w = int(width * level)
# crop from center
# img = TF.resized_crop(img,[crop_h, crop_w])
return img
def cutout(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def equalize(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def flip_leftright(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def flip_updown(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def identity(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def posterize(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def rotate_left(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
fill = img[0, 0, 0].item() if fill is None else fill
# max 30 degrees of rotation
degrees = level * 30
img = TF.rotate(img, degrees, fill=fill)
return img
def rotate_right(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
fill = img[0, 0, 0].item() if fill is None else fill
# max 30 degrees of rotation
degrees = level * -30
img = TF.rotate(img, degrees, fill=fill)
return img
def shear_x(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def shear_y(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def smooth(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def solarize(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def translate_x(img: torch.Tensor, level: float, fill=None) -> torch.Tensor:
pass
def translate_y(img):
pass
ALL_TRANSFORMS = [
auto_contrast,
blur,
crop,
cutout,
equalize,
flip_leftright,
flip_updown,
identity,
posterize,
rotate_left,
rotate_right,
shear_x,
shear_y,
smooth,
solarize,
translate_x,
translate_y,
]
# actual working augmentations. just add more here!
ALL_TRANSFORMS = [
# auto_contrast,
blur,
rotate_left,
rotate_right,
]
NAME_TO_TRANSFORM = {t.__name__: t for t in ALL_TRANSFORMS}
TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys()
| 2.328125 | 2 |
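# Illustrative example (not in the original module): look up one of the
# working transforms by name and apply it to a dummy CHW image tensor.
img = torch.rand(3, 32, 32)
rotate = NAME_TO_TRANSFORM['rotate_left']
print(rotate(img, 0.5).shape)  # torch.Size([3, 32, 32]), rotated by ~15 degrees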
example.py | ollieglass/sqlalchemy-pg-copy | 2 | 12794825 | import sys
from sqlalchemy import create_engine
import pg_copy
if __name__ == "__main__":
engine = create_engine(sys.argv[1])
target_table = 'example_table'
objs = [
{
'id': i,
'description': f'record description {i}'
} for i in range(100_000)
]
pg_copy.insert_with_copy(engine, objs, target_table)
| 2.640625 | 3 |
Easy/Repeated String/repeatedString.py | Zealll/HackerRank | 0 | 12794826 |
def repeatedString(s, n):
dictionary = {'a': 0}
length = n // len(s)
if 'a' not in s:
return 0
for i in s:
if i == 'a':
dictionary['a'] += 1
remaining = n - len(s) * length
total = int(dictionary['a'] * length)
if remaining > 0:
for i in range(remaining):
if s[i] == 'a':
total += 1
return total
# def repeatedString(s, n):
# dictionary = {}
# length = n // len(s)
# if 'a' not in s:
# return 0
# for i in s:
# if i == 'a':
# if 'a' not in dictionary:
# dictionary['a'] = 1
# else:
# dictionary['a'] += 1
# remaining = n - len(s) * length
# total = int(dictionary['a'] * length)
# if remaining > 0:
# for i in range(remaining):
# if s[i] == 'a':
# total += 1
#             return total
| 3.71875 | 4 |
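# Quick check of the solution above against the classic sample case
# ("aba" repeated up to length 10 contains 7 occurrences of 'a'):
print(repeatedString('aba', 10))           # 7
print(repeatedString('a', 1000000000000))  # 1000000000000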
nodes/remove_directory.py | iograft/iograft | 0 | 12794827 |
# Copyright 2021 Fabrica Software, LLC
import os
import shutil
import iograft
import iobasictypes
class RemoveDirectory(iograft.Node):
"""
Remove the given directory.
"""
directory = iograft.InputDefinition("directory", iobasictypes.String())
remove_contents = iograft.InputDefinition("remove_contents",
iobasictypes.Bool(),
default_value=False)
must_exist = iograft.InputDefinition("must_exist",
iobasictypes.Bool(),
default_value=False)
@classmethod
def GetDefinition(cls):
node = iograft.NodeDefinition("remove_directory")
node.AddInput(cls.directory)
node.AddInput(cls.remove_contents)
node.AddInput(cls.must_exist)
return node
@staticmethod
def Create():
return RemoveDirectory()
def Process(self, data):
directory = iograft.GetInput(self.directory, data)
remove_contents = iograft.GetInput(self.remove_contents, data)
must_exist = iograft.GetInput(self.must_exist, data)
if must_exist and not os.path.isdir(directory):
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
raise FileNotFoundError(
"Directory {} does not exist.".format(directory))
# The directory does not exist; nothing to do.
if not os.path.isdir(directory):
return
# Based on if we are removing contents or not, use shutil or os.
if remove_contents:
shutil.rmtree(directory)
else:
os.rmdir(directory)
def LoadPlugin(plugin):
node = RemoveDirectory.GetDefinition()
plugin.RegisterNode(node, RemoveDirectory.Create)
| 2.96875 | 3 |
otter/test/cloud_client/test_clb.py | codebyravi/otter | 20 | 12794828 |
"""Tests for otter.cloud_client.clb"""
import json
from effect import sync_perform
from effect.testing import (
EQFDispatcher, const, intent_func, noop, perform_sequence)
import six
from twisted.trial.unittest import SynchronousTestCase
from otter.cloud_client import service_request
from otter.cloud_client.clb import (
CLBDeletedError,
CLBDuplicateNodesError,
CLBImmutableError,
CLBNodeLimitError,
CLBNotActiveError,
CLBPartialNodesRemoved,
CLBRateLimitError,
CLB_BATCH_DELETE_LIMIT,
NoSuchCLBError,
NoSuchCLBNodeError,
add_clb_nodes,
change_clb_node,
get_clb_health_monitor,
get_clb_node_feed,
get_clb_nodes,
get_clbs,
remove_clb_nodes)
from otter.constants import ServiceType
from otter.test.cloud_client.test_init import log_intent, service_request_eqf
from otter.test.utils import (
StubResponse,
stub_json_response,
stub_pure_response
)
from otter.util.http import APIError
from otter.util.pure_http import has_code
def assert_parses_common_clb_errors(testcase, intent, eff, lb_id):
"""
Assert that the effect produced performs the common CLB error parsing:
:class:`CLBImmutableError`, :class:`CLBDescription`,
:class:`NoSuchCLBError`, :class:`CLBRateLimitError`,
:class:`APIError`
:param :obj:`twisted.trial.unittest.TestCase` testcase: Test object
:param intent: expected ``ServiceRequest`` intent
:param eff: Effect returned from function being tested
:param lb_id: ID of load balancer being accessed in the function being
tested
"""
json_responses_and_errs = [
("Load Balancer '{0}' has a status of 'BUILD' and is "
"considered immutable.", 422, CLBImmutableError),
("Load Balancer '{0}' has a status of 'PENDING_UPDATE' and is "
"considered immutable.", 422, CLBImmutableError),
("Load Balancer '{0}' has a status of 'unexpected status' and is "
"considered immutable.", 422, CLBImmutableError),
("Load Balancer '{0}' has a status of 'PENDING_DELETE' and is "
"considered immutable.", 422, CLBDeletedError),
("The load balancer is deleted and considered immutable.",
422, CLBDeletedError),
("Load balancer not found.", 404, NoSuchCLBError),
("LoadBalancer is not ACTIVE", 422, CLBNotActiveError),
("The loadbalancer is marked as deleted.", 410, CLBDeletedError),
]
for msg, code, err in json_responses_and_errs:
msg = msg.format(lb_id)
resp = stub_pure_response(
json.dumps({'message': msg, 'code': code, 'details': ''}),
code)
with testcase.assertRaises(err) as cm:
perform_sequence([(intent, service_request_eqf(resp))], eff)
testcase.assertEqual(cm.exception,
err(msg, lb_id=six.text_type(lb_id)))
# OverLimit Retry is different because it's produced by repose
over_limit = stub_pure_response(
json.dumps({
"overLimit": {
"message": "OverLimit Retry...",
"code": 413,
"retryAfter": "2015-06-13T22:30:10Z",
"details": "Error Details..."
}
}),
413)
with testcase.assertRaises(CLBRateLimitError) as cm:
perform_sequence([(intent, service_request_eqf(over_limit))], eff)
testcase.assertEqual(
cm.exception,
CLBRateLimitError("OverLimit Retry...",
lb_id=six.text_type(lb_id)))
# Ignored errors
bad_resps = [
stub_pure_response(
json.dumps({
'message': ("Load Balancer '{0}' has a status of 'BROKEN' "
"and is considered immutable."),
'code': 422}),
422),
stub_pure_response(
json.dumps({
'message': ("The load balancer is deleted and considered "
"immutable"),
'code': 404}),
404),
stub_pure_response(
json.dumps({
'message': "Cloud load balancers is down",
'code': 500}),
500),
stub_pure_response(
json.dumps({
'message': "this is not an over limit message",
'code': 413}),
413),
stub_pure_response("random repose error message", 404),
stub_pure_response("random repose error message", 413)
]
for resp in bad_resps:
with testcase.assertRaises(APIError) as cm:
perform_sequence([(intent, service_request_eqf(resp))], eff)
testcase.assertEqual(
cm.exception,
APIError(headers={}, code=resp[0].code, body=resp[1],
method='method', url='original/request/URL'))
class CLBClientTests(SynchronousTestCase):
"""
Tests for CLB client functions, such as :obj:`change_clb_node`.
"""
@property
def lb_id(self):
"""What is my LB ID"""
return "123456"
def test_change_clb_node(self):
"""
Produce a request for modifying a node on a load balancer, which
returns a successful result on 202.
Parse the common CLB errors, and :class:`NoSuchCLBNodeError`.
"""
eff = change_clb_node(lb_id=self.lb_id, node_id='1234',
condition="DRAINING", weight=50,
_type='SECONDARY')
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS,
'PUT',
'loadbalancers/{0}/nodes/1234'.format(self.lb_id),
data={'node': {'condition': 'DRAINING',
'weight': 50, 'type': 'SECONDARY'}},
success_pred=has_code(202))
# success
dispatcher = EQFDispatcher([(
expected.intent,
service_request_eqf(stub_pure_response('', 202)))])
self.assertEqual(sync_perform(dispatcher, eff),
stub_pure_response(None, 202))
# NoSuchCLBNode failure
msg = "Node with id #1234 not found for loadbalancer #{0}".format(
self.lb_id)
no_such_node = stub_pure_response(
json.dumps({'message': msg, 'code': 404}), 404)
dispatcher = EQFDispatcher([(
expected.intent, service_request_eqf(no_such_node))])
with self.assertRaises(NoSuchCLBNodeError) as cm:
sync_perform(dispatcher, eff)
self.assertEqual(
cm.exception,
NoSuchCLBNodeError(msg, lb_id=six.text_type(self.lb_id),
node_id=u'1234'))
# all the common failures
assert_parses_common_clb_errors(self, expected.intent, eff, "123456")
def test_change_clb_node_default_type(self):
"""
Produce a request for modifying a node on a load balancer with the
default type, which returns a successful result on 202.
"""
eff = change_clb_node(lb_id=self.lb_id, node_id='1234',
condition="DRAINING", weight=50)
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS,
'PUT',
'loadbalancers/{0}/nodes/1234'.format(self.lb_id),
data={'node': {'condition': 'DRAINING',
'weight': 50, 'type': 'PRIMARY'}},
success_pred=has_code(202))
dispatcher = EQFDispatcher([(
expected.intent,
service_request_eqf(stub_pure_response('', 202)))])
self.assertEqual(sync_perform(dispatcher, eff),
stub_pure_response(None, 202))
def test_add_clb_nodes(self):
"""
Produce a request for adding nodes to a load balancer, which returns
a successful result on a 202.
Parse the common CLB errors, and a :class:`CLBDuplicateNodesError`.
"""
nodes = [{"address": "1.1.1.1", "port": 80, "condition": "ENABLED"},
{"address": "1.1.1.2", "port": 80, "condition": "ENABLED"},
{"address": "1.1.1.5", "port": 81, "condition": "ENABLED"}]
eff = add_clb_nodes(lb_id=self.lb_id, nodes=nodes)
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS,
'POST',
'loadbalancers/{0}/nodes'.format(self.lb_id),
data={'nodes': nodes},
success_pred=has_code(202))
# success
seq = [
(expected.intent, lambda i: stub_json_response({}, 202, {})),
(log_intent('request-add-clb-nodes', {}), lambda _: None)]
self.assertEqual(perform_sequence(seq, eff),
(StubResponse(202, {}), {}))
# CLBDuplicateNodesError failure
msg = ("Duplicate nodes detected. One or more nodes already "
"configured on load balancer.")
duplicate_nodes = stub_pure_response(
json.dumps({'message': msg, 'code': 422}), 422)
dispatcher = EQFDispatcher([(
expected.intent, service_request_eqf(duplicate_nodes))])
with self.assertRaises(CLBDuplicateNodesError) as cm:
sync_perform(dispatcher, eff)
self.assertEqual(
cm.exception,
CLBDuplicateNodesError(msg, lb_id=six.text_type(self.lb_id)))
# CLBNodeLimitError failure
msg = "Nodes must not exceed 25 per load balancer."
limit = stub_pure_response(
json.dumps({'message': msg, 'code': 413}), 413)
dispatcher = EQFDispatcher([(
expected.intent, service_request_eqf(limit))])
with self.assertRaises(CLBNodeLimitError) as cm:
sync_perform(dispatcher, eff)
self.assertEqual(
cm.exception,
CLBNodeLimitError(msg, lb_id=six.text_type(self.lb_id),
node_limit=25))
# all the common failures
assert_parses_common_clb_errors(self, expected.intent, eff, "123456")
def expected_node_removal_req(self, nodes=(1, 2)):
"""
:return: Expected effect for a node removal request.
"""
return service_request(
ServiceType.CLOUD_LOAD_BALANCERS,
'DELETE',
'loadbalancers/{}/nodes'.format(self.lb_id),
params={'id': map(str, nodes)},
success_pred=has_code(202))
def test_remove_clb_nodes_success(self):
"""
A DELETE request is sent, and the Effect returns None if 202 is
returned.
"""
eff = remove_clb_nodes(self.lb_id, ["1", "2"])
seq = [
(self.expected_node_removal_req().intent,
service_request_eqf(stub_pure_response({}, 202))),
]
result = perform_sequence(seq, eff)
self.assertIs(result, None)
def test_remove_clb_nodes_handles_standard_clb_errors(self):
"""
Common CLB errors about it being in a deleted state, pending update,
etc. are handled.
"""
eff = remove_clb_nodes(self.lb_id, ["1", "2"])
assert_parses_common_clb_errors(
self, self.expected_node_removal_req().intent, eff, "123456")
def test_remove_clb_nodes_non_202(self):
"""Any random HTTP response code is bubbled up as an APIError."""
eff = remove_clb_nodes(self.lb_id, ["1", "2"])
seq = [
(self.expected_node_removal_req().intent,
service_request_eqf(stub_pure_response({}, 200))),
]
self.assertRaises(APIError, perform_sequence, seq, eff)
def test_remove_clb_nodes_random_400(self):
"""Random 400s that can't be parsed are bubbled up as an APIError."""
error_bodies = [
{'validationErrors': {'messages': ['bar']}},
{'messages': 'bar'},
{'validationErrors': {'messages': []}},
"random non-json"
]
for body in error_bodies:
eff = remove_clb_nodes(self.lb_id, ["1", "2"])
seq = [
(self.expected_node_removal_req().intent,
service_request_eqf(stub_pure_response(body, 400))),
]
self.assertRaises(APIError, perform_sequence, seq, eff)
def test_remove_clb_nodes_retry_on_some_invalid_nodes(self):
"""
When CLB returns an error indicating that some of the nodes are
invalid, the request is retried without the offending nodes.
"""
node_ids = map(str, range(1, 5))
eff = remove_clb_nodes(self.lb_id, node_ids)
response = stub_pure_response(
{'validationErrors': {'messages': [
'Node ids 1,3 are not a part of your loadbalancer']}},
400)
response2 = stub_pure_response({}, 202)
seq = [
(self.expected_node_removal_req(node_ids).intent,
service_request_eqf(response)),
(self.expected_node_removal_req(["2", "4"]).intent,
service_request_eqf(response2))
]
self.assertIs(perform_sequence(seq, eff), None)
def test_remove_clb_nodes_partial_success(self):
"""
``remove_clb_nodes`` removes only CLB_BATCH_DELETE_LIMIT nodes and
raises ``CLBPartialNodesRemoved`` with remaining nodes
"""
limit = CLB_BATCH_DELETE_LIMIT
node_ids = map(str, range(limit + 2))
removed = map(six.text_type, range(limit))
not_removed = map(six.text_type, range(limit, limit + 2))
eff = remove_clb_nodes(self.lb_id, node_ids)
seq = [
(self.expected_node_removal_req(removed).intent,
service_request_eqf(stub_pure_response({}, 202))),
]
with self.assertRaises(CLBPartialNodesRemoved) as ce:
perform_sequence(seq, eff)
self.assertEqual(
ce.exception,
CLBPartialNodesRemoved(
six.text_type(self.lb_id), not_removed, removed))
def test_get_clbs(self):
"""Returns all the load balancer details from the LBs endpoint."""
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS, 'GET', 'loadbalancers')
req = get_clbs()
body = {'loadBalancers': 'lbs!'}
seq = [
(expected.intent, lambda i: stub_json_response(body)),
(log_intent('request-list-clbs', body), lambda _: None)]
self.assertEqual(perform_sequence(seq, req), 'lbs!')
def test_get_clb_nodes(self):
""":func:`get_clb_nodes` returns all the nodes for a LB."""
req = get_clb_nodes(self.lb_id)
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS,
'GET', 'loadbalancers/123456/nodes')
body = {'nodes': 'nodes!'}
seq = [
(expected.intent, lambda i: stub_json_response(body)),
(log_intent('request-list-clb-nodes', body), lambda _: None)]
self.assertEqual(perform_sequence(seq, req), 'nodes!')
def test_get_clb_nodes_error_handling(self):
""":func:`get_clb_nodes` parses the common CLB errors."""
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS,
'GET', 'loadbalancers/123456/nodes')
assert_parses_common_clb_errors(
self, expected.intent, get_clb_nodes(self.lb_id), "123456")
def test_get_clb_health_mon(self):
"""
:func:`get_clb_health_monitor` calls
``GET .../loadbalancers/lb_id/healthmonitor`` and returns setting
inside {"healthMonitor": ...}
"""
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS,
'GET', 'loadbalancers/123456/healthmonitor')
settings = {
"type": "CONNECT",
"delay": 10,
"timeout": 10,
"attemptsBeforeDeactivation": 3
}
body = {"healthMonitor": settings}
seq = [
(expected.intent, const(stub_json_response(body))),
(log_intent('request-get-clb-healthmon', body), noop)
]
self.assertEqual(
perform_sequence(seq, get_clb_health_monitor(self.lb_id)),
settings)
def test_get_clb_health_mon_error(self):
"""
:func:`get_clb_health_monitor` parses the common CLB errors.
"""
expected = service_request(
ServiceType.CLOUD_LOAD_BALANCERS, 'GET',
'loadbalancers/123456/healthmonitor')
assert_parses_common_clb_errors(
self, expected.intent, get_clb_health_monitor(self.lb_id),
self.lb_id)
class GetCLBNodeFeedTests(SynchronousTestCase):
"""
Tests for :func:`get_clb_node_feed`
"""
def test_calls_read_entries(self):
"""
Calls `cf.read_entries` with CLB servicetype and atom URL and returns
the feed part of the result
"""
from otter.cloud_client.clb import cf
self.patch(cf, "read_entries", intent_func("re"))
eff = get_clb_node_feed("12", "13")
seq = [
(("re", ServiceType.CLOUD_LOAD_BALANCERS,
"loadbalancers/12/nodes/13.atom", {}, cf.Direction.NEXT,
"request-get-clb-node-feed"),
const((["feed1"], {"param": "2"})))
]
self.assertEqual(perform_sequence(seq, eff), ["feed1"])
def test_error_handling(self):
"""
Parses regular CLB errors and raises corresponding exceptions
"""
svc_intent = service_request(
ServiceType.CLOUD_LOAD_BALANCERS, "GET",
"loadbalancers/12/nodes/13.atom", params={},
json_response=False).intent
assert_parses_common_clb_errors(
self, svc_intent, get_clb_node_feed("12", "13"), "12")
| 1.921875 | 2 |
3d-tracking/tools/visualize_kitti.py | sadjadasghari/3d-vehicle-tracking | 603 | 12794829 | import os
import re
import sys
import argparse
import json
import numpy as np
from glob import glob
import cv2
from utils.plot_utils import RandomColor
def parse_args():
parser = argparse.ArgumentParser(
description='Monocular 3D Tracking Visualizer',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('set', choices=['gta', 'kitti'])
parser.add_argument('split', choices=['train', 'val', 'test'],
help='Which data split to use in testing')
parser.add_argument('--session', default='623',
help='Name of the session, to separate exp')
parser.add_argument('--epoch', default='100',
help='How many epochs you used to separate exp')
parser.add_argument('--flag', default='kf3doccdeep_age15_aff0.1_hit0_80m_pd',
help='Flags for running evaluation code')
parser.add_argument('--save_vid', action='store_true', default=False,
help='Flags for saving video')
parser.add_argument('--save_txt', action='store_true', default=False,
help='Flags for saving txt')
parser.add_argument('--dry_run', action='store_true', default=False,
help='Show command without running')
parser.add_argument('--overwrite', action='store_true', default=False,
help='Overwrite the output files')
args = parser.parse_args()
return args
print(' '.join(sys.argv))
args = parse_args()
if args.set == 'kitti':
IMAGE_PATH = 'data/kitti_tracking/{SPLIT}ing/image_02/{SEQ}/*.png'.format(**{'SPLIT': args.split, 'SEQ': '{:04d}'})
re_pattern = re.compile('[0-9]{4}')
else:
IMAGE_PATH = 'data/gta5_tracking/{SPLIT}/image/{SEQ}/*.jpg'.format(**{'SPLIT': args.split, 'SEQ': '{}'})
re_pattern = re.compile('rec_(.{8})_(.+)_(.+)h(.+)m_(.+[0-9])')
SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format(
**{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SPLIT': args.split})
out_name = '{SESS}_{EP}_{SET}_{SETTING}'.format(
**{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SETTING': args.flag})
FONT = cv2.FONT_HERSHEY_SIMPLEX
FOURCC = cv2.VideoWriter_fourcc(*'mp4v')
fps = 15
np.random.seed(777)
rm_color = RandomColor(30)
tid2color = {}
def mkdir(path):
if not os.path.isdir(path):
print("Making directory {}".format(path))
os.makedirs(path) # Use with care
def gen_result(out_path, out_name, save_vid=False, save_txt=True,
dry_run=False, overwrite=False):
print("Reading meta data...")
info = json.load(open('{}{}.json'.format(out_path, out_name), 'r'))
if not dry_run: mkdir('{}{}/data/'.format(out_path, out_name))
for seqid in range(len(info)):
file_seq = re_pattern.search(info[seqid]['filename']).group(0)
print('Reading {} from {}{}...'.format(file_seq, out_path, out_name))
if dry_run:
continue
seqout = []
vid_name = '{}{}/data/{}.mp4'.format(out_path, out_name, file_seq)
txt_name = '{}{}/data/{}.txt'.format(out_path, out_name, file_seq)
if not overwrite:
if not os.path.isfile(txt_name) and save_txt:
pass
elif not os.path.isfile(vid_name) and save_vid:
pass
else:
print("SKIP running. Generated file {} Found".format(txt_name))
continue
if save_vid:
images = sorted(glob(IMAGE_PATH.format(file_seq)))
img = cv2.imread(images[0])
vidsize = (img.shape[1], img.shape[0]) # height, width
out = cv2.VideoWriter(vid_name, FOURCC, fps, vidsize)
demoinfo = info[seqid]['frames']
for idx, frame in enumerate(demoinfo):
if save_vid:
img = cv2.imread(images[idx])
img = cv2.putText(img, str(idx), (20, 30),
cv2.FONT_HERSHEY_COMPLEX, 1,
(180, 180, 180), 2)
for trk in frame['hypotheses']:
x1, y1, x2, y2, conf = trk['det_box']
xc, yc = trk['xc'], trk['yc']
if save_vid:
if trk['id'] not in tid2color:
tid2color[trk['id']] = rm_color.get_random_color(scale=255)
img = cv2.rectangle(img, (int(xc-1), int(yc-1)), (int(xc+1), int(yc+1)),
tid2color[trk['id']], 2)
img = cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)),
tid2color[trk['id']], 4)
img = cv2.putText(img, str(int(trk['id'])), (int(x1), int(y1)),
cv2.FONT_HERSHEY_COMPLEX, 1,
tid2color[trk['id']], 2)
img = cv2.putText(img, str(int(trk['depth'])), (int(x2)-14, int(y2)),
cv2.FONT_HERSHEY_COMPLEX, 0.8,
tid2color[trk['id']], 2)
if save_txt:
'''
submit_txt = ' '.join([
str(idx),
str(int(trk['id'])),
'Car',
'-1 -1',
trk['alpha'],
str(x1), str(y1), str(x2), str(y2),
trk['dim'],
trk['loc'],
trk['rot'],
str(conf)])
'''
submit_txt = ' '.join([
str(idx),
str(int(trk['id'])),
'Car',
'-1 -1 -10',
str(x1), str(y1), str(x2), str(y2),
'-1 -1 -1',
'-1000 -1000 -1000 -10',
str(conf)])
#'''
submit_txt += '\n'
seqout.append(submit_txt)
if save_vid: out.write(img)
if save_txt:
print("{} saved.".format(txt_name))
with open(txt_name, 'w') as f:
f.writelines(seqout)
if save_vid:
print("{} saved.".format(vid_name))
out.release()
if __name__ == '__main__':
# Not using out_name, too slow
output_list = [os.path.splitext(item)[0] for item in os.listdir(SAVE_PATH) if item.endswith('_pd.json')]
my_list = ['none', 'kf2ddeep', 'kf3doccdeep', 'lstmdeep', 'lstmoccdeep']
for dir_name in output_list:
print(dir_name)
save_vid = args.save_vid
if save_vid:
is_in = False
for ml in my_list:
is_in = is_in or (ml in dir_name)
save_vid = is_in
gen_result(SAVE_PATH,
dir_name,
save_vid=save_vid,
save_txt=args.save_txt,
dry_run=args.dry_run,
overwrite=args.overwrite
)
| 2.53125 | 3 |
Decision_tree/code.py | AnurodhRaina/ga-learner-dsmp-repo | 1 | 12794830 | # --------------
#Importing header files
import pandas as pd
from sklearn.model_selection import train_test_split as tts
# Code starts here
data= pd.read_csv(path)
X= data.drop(['customer.id','paid.back.loan'],1)
y=data['paid.back.loan']
X_train, X_test, y_train, y_test = tts(X,y,random_state=0,test_size=0.3)
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
# Code starts here
import pandas as pd
from sklearn.model_selection import train_test_split as tts
# Code starts here
fully_paid = y_train.value_counts()
plt.figure()
fully_paid.plot(kind='bar')
# Code ends here
# --------------
#Importing header files
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['int.rate'] = X_train['int.rate'].str.replace('%','').astype(float)
X_train['int.rate'] = X_train['int.rate']/100
X_test['int.rate'] = X_test['int.rate'].str.replace('%','').astype(float)
X_test['int.rate'] = X_test['int.rate']/100
num_df = X_train.select_dtypes(include = np.number)
cat_df = X_train.select_dtypes(exclude = np.number)
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
# Code ends
cols = list(num_df)
fig, axes = plt.subplots(nrows =9, ncols= 1)
for i in range(1,9):
sns.boxplot(x=y_train, y=num_df[cols[i]], ax=axes[i])
# --------------
# Code starts here
# Code ends here
cols= list(cat_df)
fig, axes = plt.subplots(nrows = 2, ncols= 2)
for i in range (0,2):
for j in range(0,2):
sns.countplot(x=X_train[cols[i*2+j]], hue=y_train, ax=axes[i,j])
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import LabelEncoder
# Code starts here
for i in list(cat_df):
    # fillna returns a new Series, so assign the result back to the column
    X_train[i] = X_train[i].fillna('NA')
    le = LabelEncoder()
    X_train[i] = le.fit_transform(X_train[i])
    X_test[i] = X_test[i].fillna('NA')
    le = LabelEncoder()
    X_test[i] = le.fit_transform(X_test[i])
#y_test = y_test.str.replace('No',0)
y_train.replace({'No':0,'Yes':1},inplace=True)
y_test.replace({'No':0,'Yes':1},inplace=True)
# Code ends here
from sklearn.metrics import accuracy_score
model = DecisionTreeClassifier(random_state = 0)
model.fit(X_train, y_train)
y_preds = model.predict(X_test)
acc= accuracy_score(y_test, y_preds)
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2 = DecisionTreeClassifier(random_state =0)
p_tree = GridSearchCV(estimator=model_2, param_grid=parameter_grid, cv=5)
p_tree.fit(X_train,y_train)
# Code ends here
ypreds2 = p_tree.predict(X_test)
acc_2 = accuracy_score(y_test, ypreds2)
acc_2
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big=pydotplus.graph_from_dot_data(dot_data)
# show graph - do not delete/modify the code below this line
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here
| 2.953125 | 3 |
swift3/__init__.py | ntk148v/swift3 | 10 | 12794831 | # Copyright (c) 2012-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
__all__ = ['version_info', 'version']
try:
# First, try to get our version out of PKG-INFO. If we're installed,
# this'll let us find our version without pulling in pbr. After all, if
# we're installed on a system, we're not in a Git-managed source tree, so
# pbr doesn't really buy us anything.
__version__ = pkg_resources.get_provider(
pkg_resources.Requirement.parse('swift3')).version
except pkg_resources.DistributionNotFound:
# No PKG-INFO? We're probably running from a checkout, then. Let pbr do
# its thing to figure out a version number.
import pbr.version
__version__ = pbr.version.VersionInfo('swift3').release_string()
#: Version information ``(major, minor, revision)``.
version_info = tuple(map(int, __version__.split('.')[:3]))
#: Version string ``'major.minor.revision'``.
version = '.'.join(map(str, version_info))
| 2.125 | 2 |
test/single/test_task_service.py | Infi-zc/horovod | 7,676 | 12794832 | # Copyright 2021 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import io
import re
import unittest
from horovod.runner.common.service.task_service import BasicTaskService, BasicTaskClient
from horovod.runner.common.util import secret
class FaultyStream:
"""This stream raises an exception after some text has been written."""
def __init__(self, stream):
self.stream = stream
self.raised = False
def write(self, b):
if not self.raised and len(self.stream.getvalue()) > 1024:
self.raised = True
raise RuntimeError()
self.stream.write(b)
def close(self):
pass
class TaskServiceTest(unittest.TestCase):
cmd = 'for i in {1..10000}; do echo "a very very useful log line #$i"; done'
cmd_single_line = f'{cmd} | wc'
@staticmethod
def cmd_with(stdout, stderr):
return f"bash -c '{stderr} >&2 & {stdout}'"
def test_run_command(self):
key = secret.make_secret_key()
service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1)
client.run_command(self.cmd_with(self.cmd_single_line, self.cmd_single_line), {})
exit = client.wait_for_command_exit_code()
self.assertEqual(0, exit)
self.assertEqual((True, 0), client.command_result())
finally:
service.shutdown()
def test_stream_command_output(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd, self.cmd),
capture_stdout=True, capture_stderr=True,
prefix_output_with_timestamp=True
)
def test_stream_command_output_stdout(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd, self.cmd_single_line),
capture_stdout=True, capture_stderr=False,
prefix_output_with_timestamp=True
)
def test_stream_command_output_stderr(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd_single_line, self.cmd),
capture_stdout=False, capture_stderr=True,
prefix_output_with_timestamp=True
)
def test_stream_command_output_neither(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd_single_line, self.cmd_single_line),
capture_stdout=False, capture_stderr=False,
prefix_output_with_timestamp=True
)
def test_stream_command_output_un_prefixed(self):
self.do_test_stream_command_output(
self.cmd_with(self.cmd, self.cmd),
capture_stdout=True, capture_stderr=True,
prefix_output_with_timestamp=False
)
def do_test_stream_command_output(self,
command,
capture_stdout, capture_stderr,
prefix_output_with_timestamp):
stdout = io.StringIO()
stderr = io.StringIO()
key = secret.make_secret_key()
service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=1)
stdout_t, stderr_t = client.stream_command_output(stdout, stderr)
client.run_command(command, {},
capture_stdout=capture_stdout, capture_stderr=capture_stderr,
prefix_output_with_timestamp=prefix_output_with_timestamp)
client.wait_for_command_termination(delay=0.2)
self.assertEqual((True, 0), client.command_result())
if stdout_t is not None:
stdout_t.join(1.0)
self.assertEqual(False, stdout_t.is_alive())
if stderr_t is not None:
stderr_t.join(1.0)
self.assertEqual(False, stderr_t.is_alive())
finally:
service.shutdown()
stdout = stdout.getvalue()
stderr = stderr.getvalue()
# remove timestamps from each line in outputs
if prefix_output_with_timestamp:
stdout_no_ts = re.sub('^[^[]+', '', stdout, flags=re.MULTILINE)
stderr_no_ts = re.sub('^[^[]+', '', stderr, flags=re.MULTILINE)
# test we are removing something (hopefully timestamps)
if capture_stdout:
self.assertNotEqual(stdout_no_ts, stdout)
if capture_stderr:
self.assertNotEqual(stderr_no_ts, stderr)
stdout = stdout_no_ts
stderr = stderr_no_ts
# remove prefix
        stdout_no_prefix = re.sub(r'\[0\]<stdout>:', '', stdout, flags=re.MULTILINE)
        stderr_no_prefix = re.sub(r'\[0\]<stderr>:', '', stderr, flags=re.MULTILINE)
# test we are removing something (hopefully prefixes)
if capture_stdout:
self.assertNotEqual(stdout_no_prefix, stdout)
if capture_stderr:
self.assertNotEqual(stderr_no_prefix, stderr)
stdout = stdout_no_prefix
stderr = stderr_no_prefix
if capture_stdout and capture_stderr:
# both streams should be equal
self.assertEqual(stdout, stderr)
# streams should have meaningful number of lines and characters
if capture_stdout:
self.assertTrue(len(stdout) > 1024)
self.assertTrue(len(stdout.splitlines()) > 10)
if capture_stderr:
self.assertTrue(len(stderr) > 1024)
self.assertTrue(len(stderr.splitlines()) > 10)
def test_stream_command_output_reconnect(self):
self.do_test_stream_command_output_reconnect(attempts=3, succeeds=True)
def test_stream_command_output_no_reconnect(self):
self.do_test_stream_command_output_reconnect(attempts=1, succeeds=None)
def do_test_stream_command_output_reconnect(self, attempts, succeeds):
key = secret.make_secret_key()
stdout = io.StringIO()
stderr = io.StringIO()
stdout_s = FaultyStream(stdout)
stderr_s = FaultyStream(stderr)
service = BasicTaskService('test service', 0, key, nics=None, verbose=2)
try:
client = BasicTaskClient('test service', service.addresses(), key, verbose=2, attempts=attempts)
stdout_t, stderr_t = client.stream_command_output(stdout_s, stderr_s)
client.run_command(self.cmd_with(self.cmd, self.cmd), {},
capture_stdout=True, capture_stderr=True,
prefix_output_with_timestamp=False)
client.wait_for_command_termination(delay=0.2)
terminated, exit = client.command_result()
self.assertEqual(True, terminated)
if succeeds is not None:
self.assertEqual(succeeds, exit == 0)
if stdout_t is not None:
stdout_t.join(1.0)
self.assertEqual(False, stdout_t.is_alive())
if stderr_t is not None:
stderr_t.join(1.0)
self.assertEqual(False, stderr_t.is_alive())
finally:
service.shutdown()
stdout = stdout.getvalue()
stderr = stderr.getvalue()
        # we are likely to lose some lines, so the output is hard to evaluate precisely
if succeeds:
self.assertGreaterEqual(len(stdout), 1024)
self.assertGreater(len(stdout.splitlines()), 10)
self.assertTrue(stdout_s.raised)
self.assertGreaterEqual(len(stderr), 1024)
self.assertGreater(len(stderr.splitlines()), 10)
self.assertTrue(stderr_s.raised)
# assert stdout and stderr similarity (how many lines both have in common)
            stdout = re.sub(r'\[0\]<stdout>:', '', stdout, flags=re.MULTILINE)
            stderr = re.sub(r'\[0\]<stderr>:', '', stderr, flags=re.MULTILINE)
stdout_set = set(stdout.splitlines())
stderr_set = set(stderr.splitlines())
intersect = stdout_set.intersection(stderr_set)
self.assertGreater(len(intersect) / min(len(stdout_set), len(stderr_set)), 0.90)
else:
# we might have retrieved data only for one of stdout and stderr
# so we expect some data for at least one of them
self.assertGreaterEqual(len(stdout) + len(stderr), 1024)
self.assertGreater(len(stdout.splitlines()) + len(stderr.splitlines()), 10)
self.assertTrue(stdout_s.raised or stderr_s.raised)
| 2.078125 | 2 |
anchore_engine/services/policy_engine/engine/policy/gates/conditions.py | Talanor/anchore-engine | 0 | 12794833 | from collections import namedtuple
from anchore_engine.services.policy_engine.engine.policy.params import InputValidator
from anchore_engine.services.policy_engine.engine.policy.gate import Gate, GateMeta, BaseTrigger
class AttributeListValidator(InputValidator):
def __init__(self, attrs):
self.attrs = attrs
def validation_criteria(self):
return 'In: {}'.format(','.join(self.attrs))
def __call__(self, *args, **kwargs):
        if args and args[0]:
            # filter() and map() return lazy iterators on Python 3, so the original
            # bool(filter(...)) was always truthy; use explicit comprehensions instead.
            parts = [x.strip() for x in args[0].split(',')]
            return all(x in self.attrs for x in parts)
else:
return False
CheckOperation = namedtuple('CheckOperation', ['requires_rvalue','eval_function'])
class CheckOperations(InputValidator):
"""
A very generic condition validator. Child classes can override the __conditions__ list for different values.
"""
# Map of tuples from an operator name to a tuple of (bool, function) where arg 0 is whether an rvalue is required and arg 1 is function taking 2 args to return evaluation
def __init__(self, ops):
"""
:param ops: a dict of string keys mapped to CheckOperation tuples
"""
self.ops = ops
def get_op(self, name):
return self.ops[name]
def validation_criteria(self):
return 'In: {}'.format(','.join(self.ops.keys()))
def __call__(self, *args, **kwargs):
if args and args[0]:
return args[0].strip() in self.ops.keys()
return False
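
# A minimal usage sketch (illustrative only, not part of the original module; the names are
# hypothetical and assume the list/all() form of AttributeListValidator.__call__ above):
#
#   size_validator = AttributeListValidator(['size', 'mode', 'sha256'])
#   assert size_validator('size,mode')           # every item is in the allowed list
#   assert not size_validator('size,owner')      # 'owner' is not allowed
#
#   ops = CheckOperations({'=': CheckOperation(requires_rvalue=True,
#                                              eval_function=lambda a, b: a == b)})
#   assert ops('=')                              # known operator name
#   assert ops.get_op('=').eval_function(1, 1)   # evaluate the check itself
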
from anchore_engine.services.policy_engine.engine.policy.gate import BaseTrigger, Gate
#
#
# class MetadataConditionGate(Gate):
# """
# A generic conditional check gate on specific data items in the image metadata.
# """
# __gate_name__ = 'attribute_condition'
#
# class ExistsTrigger(BaseTrigger):
# __trigger_name__ = 'exists'
# __params__ = {'key': str}
#
# class LikeTrigger(BaseTrigger):
# __trigger_name__ = 'like_match'
# __params__ = {
# 'key': str,
# 'pattern': str,
# }
#
# class EqualsTrigger(BaseTrigger):
# __trigger_name__ = 'equals'
# __params__ = {
# 'key': str,
# 'value': str
# }
#
# class NotExists(BaseTrigger):
# __trigger_name__ = 'not_exists'
# __params__ = {'key': str}
#
# @staticmethod
# def resolve_key(key, image_obj):
# """
# Resolves a text key to a specific attribute of an image and returns it.
# Examples:
# $image.dockerfile.from -> image.dockerfile_contents['from']
#
#
# :param key:
# :param image_obj:
# :return:
# """
# # Resolves a key to a specific image element and retrieves it from the image object
# key_components = key.split('.')
# if key_components[0] != '$image':
# raise ValueError('Invalid key format: {}. Must be $image.p1.p2.p3...pN')
# else:
# key_components.pop()
#
# obj = image_obj
# for k in key_components:
# obj = model.get_lookup(k, obj)
#
#
#
# # TODO: zhill - Just jotted down these notes for future work
#
# # Powerful, but need to ensure consistency, may need to add statement Ids to the language to facilitate
# # direct references here
# class BooleanOperatorGate(Gate):
# __gate_name__ = 'combiner'
#
# class AndTrigger(BaseTrigger):
# __trigger_name__ = 'and'
# __params__ = {
# 'gate_1': str,
# 'trigger_1': str,
# 'result_1': str,
# 'gate_2': str,
# 'trigger_2': str,
# 'result_2': str
# }
#
# class OrTrigger(BaseTrigger):
# __trigger_name__ = 'or'
# __params__ = {
# 'gate_1': str,
# 'trigger_1': str,
# 'result_1': str,
# 'gate_2': str,
# 'trigger_2': str,
# 'result_2': str
# }
#
# class XorTrigger(BaseTrigger):
# __trigger_name__ = 'xor'
# __params__ = {
# 'gate_1': str,
# 'trigger_1': str,
# 'result_1': str,
# 'gate_2': str,
# 'trigger_2': str,
# 'result_2': str
# }
#
# class NotTrigger(BaseTrigger):
# __trigger_name__ = 'not'
# __params__ = {
# 'gate_1': str,
# 'trigger_1': str,
# 'result_1': str
# }
#
#
#
| 2.46875 | 2 |
wyrdin/core/backend/generic.py | RichardLitt/wyrd-django-dev | 0 | 12794834 | <filename>wyrdin/core/backend/generic.py
#!/usr/bin/python3
#-*- coding: utf-8 -*-
# This code is PEP8-compliant. See http://www.python.org/dev/peps/pep-0008/.
"""
Wyrd In: Time tracker and task manager
CC-Share Alike 2012 © The Wyrd In team
https://github.com/WyrdIn
"""
class DBObject(object):
_next_id = 0
def __init__(self, id=None):
"""Creates a new database-enabled object.
Keyword arguments:
- id: an ID (a number) of the object, if a specific one is
required; if ID is supplied, it has to be non-negative
integer larger than any of IDs for this type of object
assigned so far
"""
cls = type(self) # the actual (most specific) class of self
if id is not None and id >= cls._next_id:
self._id = id
else:
self._id = cls._next_id
cls._next_id = self._id + 1
@property
def id(self):
return self._id
def short_repr(self):
"""Returns a short string which identifies the object and its type
within the set of objects created in this WyrdIn application.
"""
raise NotImplementedError(('{cls} does not implement the '
"required method `short_repr'.").format(
cls=type(self).__name__))
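
# Illustrative sketch (hypothetical subclass, not part of the original module): every
# concrete DBObject subclass keeps its own _next_id counter once instantiated.
#
#   class Task(DBObject):
#       def short_repr(self):
#           return 'task#{0}'.format(self.id)
#
#   a, b = Task(), Task()    # a.id == 0, b.id == 1
#   c = Task(id=10)          # accepted because 10 >= Task._next_id (which is 2 here)
#   d = Task()               # d.id == 2 -- the counter only advances on auto-assigned ids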
| 2.375 | 2 |
output/models/saxon_data/id/id011_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 12794835 | <filename>output/models/saxon_data/id/id011_xsd/__init__.py
from output.models.saxon_data.id.id011_xsd.id011 import (
Doc,
Para,
)
__all__ = [
"Doc",
"Para",
]
| 1.21875 | 1 |
linked-lists/linked_list_test.py | jrandson/data-structures | 0 | 12794836 | <reponame>jrandson/data-structures<gh_stars>0
import unittest
from linked_list import LinkedList
class LinkListTest(unittest.TestCase):
def test_add_node(self):
ll = LinkedList()
self.assertEqual(True,ll.is_empty())
self.assertEqual('',ll.show_elements())
ll.add_node(10)
self.assertEqual(False,ll.is_empty())
self.assertEqual('10->',ll.show_elements())
ll.add_node(8)
ll.add_node(1)
self.assertEqual('1->8->10->',ll.show_elements())
def test_add_new_tail(self):
ll = LinkedList()
ll.add_node(10)
ll.add_node(8)
ll.add_new_tail(5)
self.assertEqual('8->10->5->',ll.show_elements())
ll.add_node(6)
self.assertEqual('6->8->10->5->',ll.show_elements())
def test_top(self):
ll = LinkedList()
self.assertEqual(None,ll.top())
ll.add_node(10)
ll.add_node(8)
ll.add_new_tail(5)
self.assertEqual(8,ll.top())
def test_pop(self):
ll = LinkedList()
self.assertEqual(None,ll.pop())
self.assertEqual('',ll.show_elements())
ll.add_node(10)
ll.add_node(8)
ll.add_new_tail(5)
self.assertEqual(8,ll.pop())
self.assertEqual('10->5->',ll.show_elements())
def test_remove_node(self):
ll = LinkedList()
ll.add_node(10)
ll.add_node(8)
ll.add_node(12)
ll.add_new_tail(5)
self.assertEqual('12->8->10->5->',ll.show_elements())
ll.remove_node(10)
self.assertEqual('12->8->5->',ll.show_elements())
ll.remove_node(12)
self.assertEqual('8->5->',ll.show_elements())
ll.remove_node(5)
self.assertEqual('8->',ll.show_elements())
def test_remove_repeated_with_buffer(self):
ll = LinkedList()
ll.add_node(10)
ll.add_node(8)
ll.add_node(12)
ll.add_node(8)
self.assertEqual('8->12->8->10->',ll.show_elements())
ll.remove_repeated_with_buffer()
self.assertEqual('12->8->10->',ll.show_elements())
ll.add_node(10)
ll.add_node(12)
ll.add_node(3)
ll.remove_repeated_with_buffer()
self.assertEqual('3->12->8->10->',ll.show_elements())
    def test_remove_repeated_without_buffer(self):
ll = LinkedList()
ll.add_node(10)
ll.add_node(8)
ll.add_node(12)
ll.add_node(8)
self.assertEqual('8->12->8->10->',ll.show_elements())
ll.remove_repeated_without_buffer()
self.assertEqual('12->8->10->',ll.show_elements())
ll.add_node(10)
ll.add_node(12)
ll.add_node(3)
ll.remove_repeated_without_buffer()
self.assertEqual('3->12->8->10->',ll.show_elements())
    def test_find_last_n(self):
ll = LinkedList()
ll.add_node(10)
ll.add_node(8)
ll.add_node(12)
ll.add_node(3)
ll.add_node(5)
ll.add_node(7)
self.assertEqual([3,12,8,10],ll.find_last_n(3))
self.assertEqual([7,5,3,12,8,10],ll.find_last_n(1))
self.assertEqual([10],ll.find_last_n(6))
def test_soma(self):
l1 = populate_liste(513)
self.assertEqual('3->1->5->',l1.show_elements())
self.assertEqual(513,get_number_from_list(l1))
l2 = populate_liste(295)
self.assertEqual('5->9->2->',l2.show_elements())
self.assertEqual(295,get_number_from_list(l2))
self.assertEqual('8->0->8->',soma_lista(l1,l2).show_elements())
def test_add_sort(self):
ll = LinkedList()
self.assertEqual('3->',ll.add_sort(3))
self.assertEqual('5->3->',ll.add_sort(5))
self.assertEqual('5->4->3->',ll.add_sort(4))
self.assertEqual('5->4->3->1->',ll.add_sort(1))
self.assertEqual('5->4->3->2->1->',ll.add_sort(2))
def test_add_sort2(self):
ll = LinkedList()
self.assertEqual('3->',ll.add_sort2(3))
self.assertEqual('3->5->',ll.add_sort2(5))
self.assertEqual('3->4->5->',ll.add_sort2(4))
self.assertEqual('1->3->4->5->',ll.add_sort2(1))
self.assertEqual('1->2->3->4->5->',ll.add_sort2(2))
def populate_liste(n):
ll = LinkedList()
for i in str(n):
ll.add_node(i)
return ll
def get_number_from_list(lista):
n = ''
node = lista._head
while not node == None:
n = str(node._element) + n
node = node._next
return int(n)
def soma_lista(l1, l2):
n1 = get_number_from_list(l1)
n2 = get_number_from_list(l2)
n3 = n1 + n2
l3 = populate_liste(n3)
return l3
if __name__ == '__main__':
unittest.main()
| 3.90625 | 4 |
cut_twist_process/cut_part.py | ericosmic/2019-CCF-BDCI-OCR-MCZJ-OCR-IdentificationIDElement | 886 | 12794837 | # -*- coding: utf-8 -*-
# @Time : 19-11-19 22:25
# @Author : <NAME>
# @Reference : None
# @File : cut_twist_join.py
# @IDE : PyCharm Community Edition
"""
将身份证正反面从原始图片中切分出来。
需要的参数有:
1.图片所在路径。
输出结果为:
切分后的身份证正反面图片。
"""
import os
import cv2
import numpy as np
def point_judge(center, bbox):
"""
用于将矩形框的边界按顺序排列
:param center: 矩形中心的坐标[x, y]
:param bbox: 矩形顶点坐标[[x1, y1], [x2, y2], [x3, y3], [x4, y4]]
:return: 矩形顶点坐标,依次是 左下, 右下, 左上, 右上
"""
left = []
right = []
for i in range(4):
if bbox[i][0] > center[0]: # 只要是x坐标比中心点坐标大,一定是右边
right.append(bbox[i])
else:
left.append(bbox[i])
if right[0][1] > right[1][1]: # 如果y点坐标大,则是右上
right_down = right[1]
right_up = right[0]
else:
right_down = right[0]
right_up = right[1]
if left[0][1] > left[1][1]: # 如果y点坐标大,则是左上
left_down = left[1]
left_up = left[0]
else:
left_down = left[0]
left_up = left[1]
return left_down, right_down, left_up, right_up
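
# Illustrative example with assumed values (not from the original code). Note that, per the
# comparisons above, "up" means the larger y value and "down" the smaller one:
#
#   corners = [[10, 90], [90, 90], [10, 10], [90, 10]]
#   left_down, right_down, left_up, right_up = point_judge([50, 50], corners)
#   # left_down == [10, 10], right_down == [90, 10], left_up == [10, 90], right_up == [90, 90]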
def gray_and_fliter(img, image_name='1.jpg', save_path='./'): # 转为灰度图并滤波,后面两个参数调试用
"""
将图片灰度化,并滤波
:param img: 输入RGB图片
:param image_name: 输入图片名称,测试时使用
:param save_path: 滤波结果保存路径,测试时使用
:return: 灰度化、滤波后图片
"""
# img = cv2.imread(image_path + image_name) # 读取图片
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 转换为灰度图片
# cv2.imwrite(os.path.join(save_path, image_name + '_gray.jpg'), img_gray) # 保存,方便查看
img_blurred = cv2.filter2D(img_gray, -1,
kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)) # 对图像进行滤波,是锐化操作
img_blurred = cv2.filter2D(img_blurred, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
# cv2.imwrite(os.path.join(save_path, img_name + '_blurred.jpg'), img_blurred) # 锐化, 这里的卷积核可以更改
return img_blurred
def gradient_and_binary(img_blurred, image_name='1.jpg', save_path='./'): # 将灰度图二值化,后面两个参数调试用
"""
求取梯度,二值化
:param img_blurred: 滤波后的图片
:param image_name: 图片名,测试用
:param save_path: 保存路径,测试用
:return: 二值化后的图片
"""
gradX = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=1, dy=0)
gradY = cv2.Sobel(img_blurred, ddepth=cv2.CV_32F, dx=0, dy=1)
img_gradient = cv2.subtract(gradX, gradY)
img_gradient = cv2.convertScaleAbs(img_gradient) # sobel算子,计算梯度, 也可以用canny算子替代
# 这里改进成自适应阈值,貌似没用
img_thresh = cv2.adaptiveThreshold(img_gradient, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, -3)
# cv2.imwrite(os.path.join(save_path, img_name + '_binary.jpg'), img_thresh) # 二值化 阈值未调整好
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
img_closed = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
img_closed = cv2.morphologyEx(img_closed, cv2.MORPH_OPEN, kernel)
img_closed = cv2.erode(img_closed, None, iterations=9)
img_closed = cv2.dilate(img_closed, None, iterations=9) # 腐蚀膨胀
# 这里调整了kernel大小(减小),腐蚀膨胀次数后(增大),出错的概率大幅减小
return img_closed
def find_bbox(img, img_closed): # 寻找身份证正反面区域
"""
根据二值化结果判定并裁剪出身份证正反面区域
:param img: 原始RGB图片
:param img_closed: 二值化后的图片
:return: 身份证正反面区域
"""
(contours, _) = cv2.findContours(img_closed.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE) # 求出框的个数
# 这里opencv如果版本不对(4.0或以上)会报错,只需把(contours, _)改成 (_, contours, _)
contours = sorted(contours, key=cv2.contourArea, reverse=True) # 按照面积大小排序
countours_res = []
for i in range(0, len(contours)):
area = cv2.contourArea(contours[i]) # 计算面积
if (area <= 0.4 * img.shape[0] * img.shape[1]) and (area >= 0.05 * img.shape[0] * img.shape[1]):
# 人为设定,身份证正反面框的大小不会超过整张图片大小的0.4,不会小于0.05(这个参数随便设置的)
rect = cv2.minAreaRect(contours[i]) # 最小外接矩,返回值有中心点坐标,矩形宽高,倾斜角度三个参数
box = cv2.boxPoints(rect)
left_down, right_down, left_up, right_up = point_judge([int(rect[0][0]), int(rect[0][1])], box)
src = np.float32([left_down, right_down, left_up, right_up]) # 这里注意必须对应
dst = np.float32([[0, 0], [int(max(rect[1][0], rect[1][1])), 0], [0, int(min(rect[1][0], rect[1][1]))],
[int(max(rect[1][0], rect[1][1])),
int(min(rect[1][0], rect[1][1]))]]) # rect中的宽高不清楚是个怎么机制,但是对于身份证,肯定是宽大于高,因此加个判定
m = cv2.getPerspectiveTransform(src, dst) # 得到投影变换矩阵
result = cv2.warpPerspective(img, m, (int(max(rect[1][0], rect[1][1])), int(min(rect[1][0], rect[1][1]))),
flags=cv2.INTER_CUBIC) # 投影变换
countours_res.append(result)
return countours_res # 返回身份证区域
def find_cut_line(img_closed_original): # 对于正反面粘连情况的处理,求取最小点作为中线
"""
根据规则,强行将粘连的区域切分
:param img_closed_original: 二值化图片
:return: 处理后的二值化图片
"""
img_closed = img_closed_original.copy()
img_closed = img_closed // 250
#print(img_closed.shape)
width_sum = img_closed.sum(axis=1) # 沿宽度方向求和,统计宽度方向白点个数
start_region_flag = 0
start_region_index = 0 # 身份证起始点高度值
end_region_index = 0 # 身份证结束点高度值
for i in range(img_closed_original.shape[0]): # 1000是原始图片高度值,当然, 这里也可以用 img_closed_original.shape[0]替代
if start_region_flag == 0 and width_sum[i] > 330:
start_region_flag = 1
start_region_index = i # 判定第一个白点个数大于330的是身份证区域的起始点
if width_sum[i] > 330:
end_region_index = i # 只要白点个数大于330,便认为是身份证区域,更新结束点
# 身份证区域中白点最少的高度值,认为这是正反面的交点
# argsort函数中,只取width_sum中判定区域开始和结束的部分,因此结果要加上开始点的高度值
min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]
img_closed_original[min_line_position][:] = 0
for i in range(1, 11): # 参数可变,分割10个点
temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i]
if abs(temp_line_position - min_line_position) < 30: # 限定范围,在最小点距离【-30, 30】的区域内
img_closed_original[temp_line_position][:] = 0 # 强制变为0
return img_closed_original
def cut_part_img(img, cut_percent):
"""
# 从宽度和高度两个方向,裁剪身份证边缘
:param img: 身份证区域
:param cut_percent: 裁剪的比例
:return: 裁剪后的身份证区域
"""
height, width, _ = img.shape
height_num = int(height * cut_percent) # 需要裁剪的高度值
h_start = 0 + height_num // 2 # 左右等比例切分
h_end = height - height_num // 2 - 1
width_num = int(width * cut_percent) # 需要裁剪的宽度值
w_start = 0 + width_num // 2
w_end = width - width_num // 2 - 1
return img[h_start:h_end, w_start:w_end] # 返回裁剪后的图片
def preprocess_cut_one_img(img_path, img_name, save_path='./save_imgs/', problem_path='./problem_save/'): # 处理一张图片
"""
裁剪出一张图片中的身份证正反面区域
:param img_path: 图片所在路径
:param img_name: 图片名称
:param save_path: 结果保存路径 测试用
:param problem_path: 出错图片中间结果保存 测试用
:return: 身份证正反面图片
"""
img_path_name = os.path.join(img_path, img_name)
if not os.path.exists(img_path_name): # 判断图片是否存在
print('img {name} is not exits'.format(name=img_path_name))
return 1, [] # 图片不存在,直接返回,报错加一
img = cv2.imread(img_path_name) # 读取图片
img_blurred = gray_and_fliter(img, img_name) # 灰度化并滤波
img_t = cv2.filter2D(img, -1, kernel=np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32))
# 对图像进行锐化
img_binary = gradient_and_binary(img_blurred) # 二值化
res_bbox = find_bbox(img_t, img_binary) # 切分正反面
if len(res_bbox) != 2: # 异常处理
print('Error happened when cut img {name}, try exception cut program '.format(name=img_path_name))
# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_blurred.jpg'), img_blurred)
# cv2.imwrite(os.path.join(problem_path, img_name.split('.')[0] + '_binary.jpg'), img_binary)
# cv2.imwrite(os.path.join(problem_path, img_name), img) # 调试用,保存中间处理结果
img_binary = find_cut_line(img_binary) # 强制分割正反面
res_bbox = find_bbox(img_t, img_binary)
if len(res_bbox) != 2: # 纠正失败
print('Failed to cut img {name}, exception program end'.format(name=img_path_name))
return 1, None
else: # 纠正成功
print('Correctly cut img {name}, exception program end'.format(name=img_path_name))
return 0, res_bbox
else: # 裁剪过程正常
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
# cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_original.jpg'), img)
return 0, res_bbox
def process_img(img_path, save_path, problem_path):
"""
切分一个目录下的所有图片
:param img_path: 图片所在路径
:param save_path: 结果保存路径
:param problem_path: 问题图片保存路径
:return: None
"""
if not os.path.exists(img_path): # 判断图片路径是否存在
print('img path {name} is not exits, program break.'.format(name=img_path))
return
if not os.path.exists(save_path): # 保存路径不存在,则创建路径
os.makedirs(save_path)
if not os.path.exists(problem_path): # 保存路径不存在,则创建路径
os.makedirs(problem_path)
img_names = os.listdir(img_path)
error_count = 0
error_names = []
for img_name in img_names:
error_temp, res_bbox = preprocess_cut_one_img(img_path, img_name, save_path, problem_path)
error_count += error_temp
if error_temp == 0:
cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_0.jpg'), cut_part_img(res_bbox[0], 0.0))
cv2.imwrite(os.path.join(save_path, img_name.split('.')[0] + '_1.jpg'), cut_part_img(res_bbox[1], 0.0))
else:
error_names.append(img_name)
print('total error number is: ', error_count)
print('error images mame :')
for error_img_name in error_names:
print(error_img_name)
return
if __name__ == '__main__':
origin_img_path = './problem_imgs/'
cutted_save_path = './res_imgs/'
cut_problem_path = './temp_imgs/'
#process_img(img_path=origin_img_path, save_path=cutted_save_path, problem_path=cut_problem_path)
| 2.578125 | 3 |
bluetooth_write.py | DethCount/usb-gamepad | 0 | 12794838 | import time
import concurrent
import asyncio
import bleak
async def main():
loop = asyncio.new_event_loop()
client = bleak.BleakClient('D8:A9:8B:7E:1E:D2')
is_connected = await client.connect()
print(is_connected)
response = await client.write_gatt_char('0000ffe1-0000-1000-8000-00805f9b34fb', b'MOVE X 0.000000')
print(response)
if __name__ == "__main__":
asyncio.run(main())
| 2.5 | 2 |
finance_ml/sampling/utils.py | BTETON/finance_ml | 446 | 12794839 | <gh_stars>100-1000
import pandas as pd
def get_ind_matrix(bar_idx, t1):
ind_m = pd.DataFrame(0, index=bar_idx,
columns=range(t1.shape[0]))
    for i, (t0_, t1_) in enumerate(t1.items()):
ind_m.loc[t0_:t1_, i] = 1
return ind_m
def get_avg_uniq(ind_m, c=None):
if c is None:
c = ind_m.sum(axis=1)
ind_m = ind_m.loc[c > 0]
c = c.loc[c > 0]
u = ind_m.div(c, axis=0)
avg_u = u[u > 0].mean()
avg_u = avg_u.fillna(0)
return avg_u
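

if __name__ == '__main__':
    # Minimal usage sketch with hypothetical data (not part of the original module):
    # two overlapping label windows over five bars, then their average uniqueness.
    bar_idx = pd.RangeIndex(5)            # five price bars: 0..4
    t1 = pd.Series({0: 2, 1: 3})          # event 0 spans bars 0-2, event 1 spans bars 1-3
    ind_m = get_ind_matrix(bar_idx, t1)   # 5x2 matrix of 0/1 concurrency indicators
    print(ind_m)
    print(get_avg_uniq(ind_m))            # ~0.667 for both events here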
| 2.515625 | 3 |
setup.py | aozhiwei/srap | 0 | 12794840 | <reponame>aozhiwei/srap
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
if sys.version_info < (2, 5):
sys.exit('Python 2.5 or greater is required.')
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import srap
with open('LICENSE') as fp:
license = fp.read()
setup(name = 'srap',
version = srap.__version__,
description = 'Simple reflect annotation protocol lib.',
long_description = '',
author = '<NAME>',
author_email = '<EMAIL>',
maintainer = '<NAME>',
maintainer_email = '<EMAIL>',
url = 'https://github.com/aozhiwei/srap',
packages = ['srap'],
license = license,
platforms = ['any'],
classifiers = []
)
| 1.84375 | 2 |
docs/source/guide/examples/test_future.py | enthought/traits-futures | 10 | 12794841 | <filename>docs/source/guide/examples/test_future.py
# (C) Copyright 2018-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
Example of testing a simple future using the GuiTestAssistant.
"""
import unittest
from pyface.toolkit import toolkit_object
from traits_futures.api import submit_call, TraitsExecutor
#: Maximum timeout for blocking calls, in seconds. A successful test should
#: never hit this timeout - it's there to prevent a failing test from hanging
#: forever and blocking the rest of the test suite.
SAFETY_TIMEOUT = 5.0
#: Note that the GuiTestAssistant is currently only available for Qt, not
#: for wxPython. To run this unit test, you'll need PyQt or PySide 2 installed.
GuiTestAssistant = toolkit_object("util.gui_test_assistant:GuiTestAssistant")
class TestMyFuture(GuiTestAssistant, unittest.TestCase):
def setUp(self):
GuiTestAssistant.setUp(self)
self.traits_executor = TraitsExecutor()
def tearDown(self):
# Request the executor to stop, and wait for that stop to complete.
self.traits_executor.shutdown(timeout=SAFETY_TIMEOUT)
GuiTestAssistant.tearDown(self)
def test_my_future(self):
future = submit_call(self.traits_executor, pow, 3, 5)
# Wait for the future to complete.
self.assertEventuallyTrueInGui(
lambda: future.done, timeout=SAFETY_TIMEOUT
)
self.assertEqual(future.result, 243)
if __name__ == "__main__":
unittest.main()
| 2.328125 | 2 |
main/validating-credit-card-number/validating-credit-card-number.py | EliahKagan/old-practice-snapshot | 0 | 12794842 | #!/usr/bin/env python3
import re
CCNUM = re.compile(r'(?!.*(\d)(?:\D?\1){3})[456]\d{3}(-?)(?:\d{4}\2){2}\d{4}')
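# Pattern breakdown (explanatory comments, added for readability):
#   (?!.*(\d)(?:\D?\1){3})  - reject any digit repeated 4 or more times in a row,
#                             even when the repeats straddle a hyphen
#   [456]\d{3}              - first group of 4 digits, starting with 4, 5 or 6
#   (-?)                    - optional hyphen separator, captured so it must be used consistently
#   (?:\d{4}\2){2}\d{4}     - three more groups of 4 digits joined by that same separator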
for _ in range(int(input())):
print('Valid' if CCNUM.fullmatch(input().strip()) else 'Invalid')
| 3.234375 | 3 |
tests/obj/business_test.py | ruchir594/messenger-bot-yelp-aws | 1 | 12794843 | <filename>tests/obj/business_test.py
# -*- coding: UTF-8 -*-
import io
import json
from tests.testing import resource_filename
from yelp.obj.business import Business
from yelp.obj.business import Category
def test_init_business():
with io.open(resource_filename('json/business_response.json')) as biz:
response = json.load(biz)
business = Business(response)
assert business.id == response['id']
def test_business_category_is_tuple():
with io.open(resource_filename('json/business_response.json')) as biz:
response = json.load(biz)
business = Business(response)
assert type(business.categories[0]) is Category
assert business.categories[0].name == "Indian"
assert business.categories[0].alias == "indpak"
| 2.53125 | 3 |
src/palette.py | Guillaume227/supercoco | 2 | 12794844 | <reponame>Guillaume227/supercoco<gh_stars>1-10
from .elems import Elements, Categories
from . import elems
from . import media
import os
import pygame
from . import menu
_Selection = None
RepertoiresVrac = ['decors Vrac', 'Cap Vrac']
def DecorsVrac(repertoire):
vrac_dir = os.path.join(media.MEDIA_REP, repertoire)
return [item for item in os.listdir(vrac_dir) if item.split('.')[-1] in ['png', 'gif', 'jpeg', 'jpg']]
def GetElements():
Elems = Elements.copy()
for Repertoire in RepertoiresVrac:
Elems[Repertoire] = DecorsVrac(Repertoire)
return Elems
ListElements = GetElements()
class Palette(menu.ElemInterface):
def __init__(self, **kwargs):
menu.ElemInterface.__init__(self, pos=(0, 0), alpha_fond=150, **kwargs)
self.IndexSelection = [0, 0]
self.dim_vignette = 32, 32
self.dim_ecran = pygame.display.get_surface().get_size()
self.IndexType = 0
self.marge_HG = 0, 16
self.nomb_vignettes = [(self.dim_ecran[i] - self.marge_HG[i]) // self.dim_vignette[i] for i in (0, 1)]
coin_HG = [(self.dim_ecran[i] - self.nomb_vignettes[i] * self.dim_vignette[i]) // 2 for i in (0, 1)]
self.coin_HG = [max(self.marge_HG[i], coin_HG[i]) for i in (0, 1)]
self.grilles = [[[None for _ in range(self.nomb_vignettes[1])] for _ in range(self.nomb_vignettes[0])] for _
in Categories]
self.emplit_grille()
def emplit_grille(self):
for cat_index, categorie in enumerate(Categories):
for i, elem in enumerate(ListElements[categorie]):
li = i % self.nomb_vignettes[1]
col = 1 + i // self.nomb_vignettes[1]
if isinstance(elem, str):
elem_name = elem
image = os.path.join(media.MEDIA_REP, categorie, elem)
else:
elem_name = elem.__name__
if hasattr(elem, 'nomImages'):
image = elem.nomImages[0]
else:
image = None
if image:
# Retaille
image_obj = pygame.transform.scale(media.charge_image(image), self.dim_vignette)
else:
image_obj = None
self.grilles[cat_index][col][li] = elem, image_obj, elem_name
for cat_index, _categorie in enumerate(Categories):
for CatIndex2, _categorie in enumerate(Categories):
self.grilles[cat_index][0][CatIndex2] = self.grilles[CatIndex2][1][0]
def index_pour_pos(self, pos):
""" index col, index ligne"""
return [(pos[i] - self.coin_HG[i]) // self.dim_vignette[i] for i in (0, 1)]
def pos_pour_index(self, index):
""" index col, index ligne"""
return [index[i] * self.dim_vignette[i] + self.coin_HG[i] for i in (0, 1)]
@property
def valeur(self):
val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]]
if val is not None:
return val[0]
@valeur.setter
def valeur(self, val):
pass
def affiche(self, surface):
for colIndex, col in enumerate(self.grilles[self.IndexType]):
for liIndex, elem in enumerate(col):
if elem:
elem, image, _nom = elem
if image:
pos = self.pos_pour_index((colIndex, liIndex))
surface.blit(image, pos)
etiquette = Categories[self.IndexType]
if self.IndexSelection[0] != 0:
val = self.grilles[self.IndexType][self.IndexSelection[0]][self.IndexSelection[1]]
if val:
etiquette += ' : ' + val[2]
# Marque la selection d'une ombre
Rect = pygame.Rect(self.pos_pour_index(self.IndexSelection), self.dim_vignette)
pygame.draw.rect(surface, pygame.Color(0, 255, 0, 100), Rect, 1)
self.affiche_ligne(surface, etiquette)
def mettre_a_jour(self, e):
if e.type == pygame.MOUSEMOTION:
pos = pygame.mouse.get_pos()
self.IndexSelection = self.index_pour_pos(pos)
elif e.type == pygame.KEYDOWN:
if e.key == pygame.K_UP:
self.IndexSelection[1] -= 1
elif e.key == pygame.K_DOWN:
self.IndexSelection[1] += 1
elif e.key == pygame.K_LEFT:
self.IndexSelection[0] -= 1
elif e.key == pygame.K_RIGHT:
self.IndexSelection[0] += 1
if self.IndexSelection[0] == 0:
cat_index = min(max(0, self.IndexSelection[1]), len(Categories) - 1)
if self.IndexType != cat_index:
self.IndexType = cat_index
self.IndexSelection[1] = self.IndexType
else:
self.IndexSelection[1] %= len(ListElements[Categories[self.IndexType]])
for i in 0, 1:
self.IndexSelection[i] %= self.nomb_vignettes[i]
def Selecte():
selection = Palette().boucle()
pygame.event.clear()
if selection:
if isinstance(selection, str):
sel = elems.Dessinable(pos=None, images=[selection])
else:
sel = selection(pos=None)
sel.efface()
return sel
def EditFields(item):
return sorted([AttrName for AttrName in dir(item) if AttrName.endswith('_') and not AttrName.startswith('_')])
class EditeurElem(menu.EditeurElem):
def alafin(self):
# Verifie que les photos existent (pas de coquille dans le nom)
for AttrName, champ in self.champs:
if AttrName == 'photos_':
for val in champ.valeur:
try:
media.charge_image('photos/' + val)
except:
print('manque la photo', val)
menu.EditeurElem.alafin(self)
def Editor(*items):
if EditFields(items[0]):
choix_champs = dict(nomJoueur_=['coco', 'mario'], surprise_=[None] + ListElements['Surprises'])
editeur = EditeurElem(items, fonte_h=10, choixPourChamps=choix_champs, filtre_=True)
editeur.boucle()
pygame.event.clear()
return editeur.modifie
| 2.40625 | 2 |
m5-joyc-mp/main.py | jessevl/m5-roverc-joyc-mp | 0 | 12794845 | from m5stack import *
from m5ui import *
import espnow
import wifiCfg
import hat
joy_pos = None
paired = False
addr = None
data = None
setScreenColor(0x000000)
axp.setLDO2Volt(2.8)
hat_joyc0 = hat.get(hat.JOYC)
label0 = M5TextBox(22, 48, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label1 = M5TextBox(22, 62, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label2 = M5TextBox(22, 76, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label3 = M5TextBox(22, 90, "Text", lcd.FONT_Default, 0xFFFFFF, rotate=0)
label4 = M5TextBox(22, 104, "Unpaired", lcd.FONT_Default, 0xFFFFFF, rotate=0)
titlebar = M5Title(title="text", x=3, fgcolor=0xFFFFFF, bgcolor=0x5b5b5b)
def main():
hat_joyc0.SetLedColor(0x3232ff)
wifiCfg.wlan_ap.active(True)
wifiCfg.wlan_sta.active(True)
espnow.init()
espnow.recv_cb(receive_msg)
timerSch.run('UpdatePosition', 10, 0x00)
timerSch.run('UpdateBattery', 1000, 0x00)
@timerSch.event('UpdatePosition')
def tUpdatePosition():
global joy_pos
joy_pos = [hat_joyc0.GetX(0), hat_joyc0.GetY(0), hat_joyc0.GetX(1), hat_joyc0.GetY(1)]
label0.setText(str(joy_pos[0]))
label1.setText(str(joy_pos[1]))
label2.setText(str(joy_pos[2]))
label3.setText(str(joy_pos[3]))
if paired == True:
#TODO: Add msg type code, and check at receiver.
espnow.send(id=1, data=bytes(joy_pos))
pass
@timerSch.event('UpdateBattery')
def tUpdateBattery():
titlebar.setTitle(str("%.1fv %.0fma"%(float(axp.getBatVoltage()), float(axp.getBatCurrent()))))
pass
def receive_msg(_):
global addr, data, paired
addr, _, data = espnow.recv_data(encoder='str')
label4.setText(str(data))
if paired == False:
#TODO: check if is this a mac address?
espnow.add_peer(str(data), id=1)
espnow.send(id=1, data=str('connected'))
paired = True
label4.setText(str('paired'))
pass
else:
pass
main() | 2.328125 | 2 |
atlas/foundations_contrib/src/test/helpers/test_lazy_redis.py | DeepLearnI/atlas | 296 | 12794846 | <filename>atlas/foundations_contrib/src/test/helpers/test_lazy_redis.py
import unittest
from mock import Mock
from foundations_contrib.helpers.lazy_redis import LazyRedis
class TestLazyRedis(unittest.TestCase):
class MockObject(object):
def __init__(self):
self.value = 5
self.name = 'mock'
def setUp(self):
pass
def test_get_attr_returns_attribute_value(self):
lazy_redis = LazyRedis(self._callback)
self.assertEqual(lazy_redis.value, 5)
def test_get_attr_returns_attribute_name(self):
lazy_redis = LazyRedis(self._callback)
self.assertEqual(lazy_redis.name, 'mock')
def test_get_attr_raises_attribute_error(self):
lazy_redis = LazyRedis(self._callback)
with self.assertRaises(AttributeError) as context:
lazy_redis.redis
self.assertIn("'MockObject' object has no attribute 'redis'",
context.exception.args)
def test_get_attr_raises_attribute_error_different_attribute(self):
lazy_redis = LazyRedis(self._callback)
with self.assertRaises(AttributeError) as context:
lazy_redis.potato
self.assertIn("'MockObject' object has no attribute 'potato'",
context.exception.args)
def _callback(self):
return self.MockObject()
| 2.6875 | 3 |
src/automotive/application/panel/panel.py | philosophy912/automotive | 0 | 12794847 | # -*- coding:utf-8 -*-
# --------------------------------------------------------
# Copyright (C), 2016-2021, lizhe, All rights reserved
# --------------------------------------------------------
# @Name: gui.py.py
# @Author: lizhe
# @Created: 2021/12/15 - 21:24
# --------------------------------------------------------
import copy
from time import sleep
from tkinter import Frame, Button, NORMAL, DISABLED, W, BooleanVar, Checkbutton, Entry, Label, Tk, messagebox, \
HORIZONTAL, E , PhotoImage, LEFT
from tkinter.ttk import Combobox, Notebook, Separator
from typing import List, Dict, Any, Union, Optional
from automotive.logger.logger import logger
from automotive.core.can.can_service import CANService
from automotive.core.can.common.enums import CanBoxDeviceEnum, BaudRateEnum
from .reader import ConfigReader
from .reader import check_buttons, thread_buttons, comboxs, entries, buttons, receive_buttons
from ..common.constants import OPEN_DEVICE, CLOSE_DEVICE, CLEAR_STACK, DEFAULT_MESSAGE, BUS_LOST, \
MESSAGE_LOST, TEXT, ON, OFF, VALUES, ACTIONS, COMMON, CHECK_MSGS, CHECK_MESSAGE, SIGNAL_NAME, \
SIGNAL_VALUE, SIGNAL_VALUES, SEARCH_COUNT, EXACT_SEARCH, YES_OR_NO, CHECK_SIGNAL, CHECK_SIGNAL_NAME
from ...utils.common.enums import ExcelEnum
class TabFrame(Frame):
def __init__(self, master, can_service: CANService, config: Dict[str, Any], filter_nodes: List[str],
common_panel: bool = False, max_line_count: int = None):
super().__init__(master)
self.can_service = can_service
self.thread_pool = can_service.can_bus.thread_pool
self.__filter_nodes = filter_nodes
# 单选框按钮配置
self.__check_buttons = config[check_buttons] if config[check_buttons] else dict()
logger.debug(f"check_buttons = {self.__check_buttons}")
# 闪烁单选框按钮配置
self.__thread_buttons = config[thread_buttons] if config[thread_buttons] else dict()
logger.debug(f"thread_buttons = {self.__thread_buttons}")
# 下拉框按钮配置
self.__comboxs = config[comboxs] if config[comboxs] else dict()
logger.debug(f"comboxs = {self.__comboxs}")
# 输入框按钮配置
self.__entries = config[entries] if config[entries] else dict()
logger.debug(f"entries = {self.__entries}")
# 按钮框配置
self.__buttons = config[buttons] if config[buttons] else dict()
logger.debug(f"buttons = {self.__buttons}")
# 接收按钮框配置
self.__receive_buttons = config[receive_buttons] if config[receive_buttons] else dict()
logger.debug(f"receive_buttons = {self.__receive_buttons}")
# 每行能够容纳的数量
self.__max_line_count = max_line_count # 36
# 双行能够容纳的数量
self.__max_double_line_count = int(self.__max_line_count / 2)
# 设置标签(label)默认宽度
self.__label_width = 25
# 设置下拉框(comboxs)默认宽度
self.__comboxs_width = 20
# 设置单选按钮(checkBut)默认宽度
self.__checkBut_width = 25
# 设置多线程按钮框(thread_buttons)默认宽度
self.__thread_buttons_width = 20
# 设置按钮(button)默认宽度
self.__buttons_width = 24
# 设置输入框(entrie)默认宽度
self.__entrie_width = 10
# 输入框支持的事件列表
self.support_event_keys = "<Return>",
# 单选框值
self.check_button_bool_vars = dict()
# 闪烁单选框值
self.thread_button_bool_vars = dict()
# 按钮框对象字典
self.buttons = dict()
# 单选框对象字典
self.check_buttons = dict()
# 闪烁单选框对象字典
self.thread_buttons = dict()
# 下拉框对象字典
self.comboxs = dict()
# 输入框对象字典
self.entries = dict()
# 闪烁事件Task
self.thread_task = dict()
        # bus-lost button
# 开始的行列
self.row = 0
self.column = 0
# 布局显示
self.pack()
# todo 64*64 3 3比较合适
# self.open_image = PhotoImage(file=rf"D:\Download\Chrome\打开 (1).png").subsample(3, 3)
# 创建公共按钮
if common_panel:
self.create_common_widget()
# 创建单选按钮
self.create_check_buttons()
# 创建下拉按钮
self.create_comboxs()
# 创建输入框
self.create_entries()
# 创建事件单选按钮
self.create_thread_buttons()
# 创建按钮框(多线程)
self.create_buttons()
# 创建接收检查按钮
self.create_receive_buttons()
def create_common_widget(self):
"""
创建 打开设备、关闭设备、清除数据(清除接收到的数据)、发送默认消息(通过初始化的filter_node过滤消息), 总线丢失、丢失部分信号等按键
"""
# ********** 创建打开设备按钮 check_button **********
text_name, show_name = OPEN_DEVICE
# 创建Button对象
self.buttons[text_name] = Button(self, text=show_name,
command=lambda x=OPEN_DEVICE: self.__special_button_event(x))
# 布局button
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
self.buttons[text_name]["state"] = NORMAL
self.column += 1
# ********** 创建关闭设备按钮 **********
text_name, show_name = CLOSE_DEVICE
# 创建Button对象
self.buttons[text_name] = Button(self, text=show_name,
command=lambda x=CLOSE_DEVICE: self.__special_button_event(x))
# 布局button
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
self.buttons[text_name]["state"] = DISABLED
self.column += 1
# ********** 创建清除接收到的CAN信号按钮 **********
text_name, show_name = CLEAR_STACK
# 创建Button对象
self.buttons[text_name] = Button(self, text=show_name,
command=lambda x=CLEAR_STACK: self.__special_button_event(x))
# 布局button
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
self.column += 1
# ********** 创建一个发送默认消息的按钮 button **********
text_name, show_name = DEFAULT_MESSAGE
# 创建Button对象
self.buttons[text_name] = Button(self, text=show_name,
command=lambda x=DEFAULT_MESSAGE: self.__special_button_event(x))
# 布局button
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
self.column += 1
# ********** 创建一个总线丢失的按钮 button **********
text_name, show_name = BUS_LOST
# 创建CheckButton对象并放到check_buttons中方便调用
self.buttons[text_name] = Button(self, text=show_name,
command=lambda x=BUS_LOST: self.__special_button_event(x))
# 布局checkbutton
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
self.column += 1
# ********** 创建一个信号丢失的输入框 entry **********
text_name, show_name = MESSAGE_LOST
# 获取输入框的名称
Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
self.column += 1
self.entries[text_name] = Entry(self, width=10)
self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)
self.entries[text_name].bind(self.support_event_keys[0],
lambda x, y=("", text_name): self.__entry_event(x, y))
self.row += 1
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
# ********** 创建信号检查部分 **********
self.__create_message_check()
# ********** 创建检测信号是否之前发送值部分 *******
self.row += 1
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
self.__create_message_signal_check()
def __create_message_check(self):
"""
创建信号检查部分
帧ID, 信号名称 信号值, 出现次数 精确查找等选中,用于在主机操作后的检查
"""
self.column = 0
text_name, show_name = SIGNAL_NAME
Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
self.column += 1
self.entries[text_name] = Entry(self, width=20) # 等同于signal_name = Entry
self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)
self.column += 2
text_name, show_name = SIGNAL_VALUE
Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
self.column += 1
self.entries[text_name] = Entry(self, width=8) # 等同于signal_value = Entry
self.entries[text_name].grid(row=self.row, column=self.column, sticky=W)
self.column += 1
text_name, show_name = SEARCH_COUNT
Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
self.column += 1
self.entries[text_name] = Entry(self, width=8)
self.entries[text_name].grid(row=self.row, column=self.column, sticky=W)
self.column += 1
text_name, show_name = EXACT_SEARCH
Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
self.column += 1
# 创建下拉框
self.comboxs[text_name] = Combobox(self, values=YES_OR_NO, state="readonly", width=5)
# 设置下拉框初始值为第一个值
self.comboxs[text_name].current(0)
# 布局下拉框
self.comboxs[text_name].grid(row=self.row, column=self.column, sticky=W)
self.column += 1
text_name, show_name = CHECK_MESSAGE
# 创建Button对象
self.buttons[text_name] = Button(self, text=show_name,
command=lambda x=CHECK_MESSAGE: self.__special_button_event(x))
# 布局button
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
self.buttons[text_name]["state"] = NORMAL
def __create_message_signal_check(self):
"""
创建信号之前发送过那些值检测
帧ID,信号名称 精确查找的等选择
:return:
"""
self.column = 0
text_name, show_name = CHECK_SIGNAL_NAME
Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
self.column += 1
self.entries[text_name] = Entry(self, width=20) # 等同于signal_name = Entry
self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=2)
self.column += 2
text_name, show_name = SIGNAL_VALUES
Label(self, text=show_name).grid(row=self.row, column=self.column, sticky=W)
self.column += 1
self.entries[text_name] = Entry(self, width=40, state=DISABLED) # 等同于signal_value = Entry
self.entries[text_name].grid(row=self.row, column=self.column, sticky=W, columnspan=5)
self.column += 5
text_name, show_name = CHECK_SIGNAL
# 创建Button对象
self.buttons[text_name] = Button(self, text=show_name,
command=lambda x=CHECK_SIGNAL: self.__special_button_event(x))
# 布局button
self.buttons[text_name].grid(row=self.row, column=self.column, sticky=W)
self.buttons[text_name]["state"] = NORMAL
logger.debug(f"entries are {entries}")
def __special_button_event(self, button_type: tuple):
text_name, show_name = button_type
self.buttons[text_name]["state"] = DISABLED
try:
self.__special_actions(button_type)
except RuntimeError as e:
messagebox.showerror("出错了", f"【{e}】")
logger.error(e)
self.buttons[text_name]["state"] = NORMAL
def __special_actions(self, button_type: tuple):
open_text_name = OPEN_DEVICE[0]
close_text_name = CLOSE_DEVICE[0]
signal_name_text_name = SIGNAL_NAME[0]
check_signal_name_text_name = CHECK_SIGNAL_NAME[0]
signal_value_text_name = SIGNAL_VALUE[0]
signal_values_text_name = SIGNAL_VALUES[0]
search_count_text_name = SEARCH_COUNT[0]
exact_search_text_name = EXACT_SEARCH[0]
text_name, show_name = button_type
if button_type == DEFAULT_MESSAGE:
self.can_service.send_default_messages(filter_sender=self.__filter_nodes)
self.buttons[text_name]["state"] = NORMAL
elif button_type == BUS_LOST:
self.can_service.stop_transmit()
self.buttons[text_name]["state"] = NORMAL
elif button_type == OPEN_DEVICE:
self.can_service.open_can()
self.buttons[open_text_name]["state"] = DISABLED
self.buttons[close_text_name]["state"] = NORMAL
elif button_type == CLOSE_DEVICE:
self.can_service.close_can()
self.buttons[open_text_name]["state"] = NORMAL
self.buttons[close_text_name]["state"] = DISABLED
elif button_type == CLEAR_STACK:
self.can_service.clear_stack_data()
self.buttons[text_name]["state"] = NORMAL
elif button_type == CHECK_MESSAGE:
# 获取signal name
signal_name = self.entries[signal_name_text_name].get().strip()
# 获取signal value
signal_value_text = self.entries[signal_value_text_name].get()
if signal_value_text != "":
signal_value = int(signal_value_text)
# 获取次数
search_count_text = self.entries[search_count_text_name].get()
if search_count_text != "":
search_count = int(search_count_text)
else:
search_count = None
# 获取是否精确查找
index = self.comboxs[exact_search_text_name].current()
# 选中第一个则表示是True
exact_search = (index == 0)
stack = self.can_service.get_stack()
result = self.can_service.check_signal_value(stack=stack, signal_name =signal_name, expect_value=signal_value, count=search_count,
exact=exact_search)
show_message = "成功" if result else "失败"
exact_message = "精确" if exact_search else "不精确"
message = f"检查信号【{signal_name}】值为【{signal_value}】收到次数" \
f"为【{search_count}】,匹配方式是【{exact_message}】检查结果是【{show_message}】"
if result:
messagebox.showinfo(title=show_message, message=message)
else:
messagebox.showerror(title=show_message, message=message)
self.buttons[text_name]["state"] = NORMAL
else:
messagebox.showerror(title="失败", message="请填写需要查询的信号值")
self.buttons[text_name]["state"] = NORMAL
elif button_type == CHECK_SIGNAL:
# 获取signal name
signal_name = self.entries[check_signal_name_text_name].get().strip()
# 检测信号值是否已经发送过,并返回检测到的信号值 result
stack = self.can_service.get_stack()
result = self.can_service.get_receive_signal_values(stack, signal_name)
if len(result) > 0:
self.entries[signal_values_text_name]["state"] = NORMAL
# 将之前的值先清空
self.entries[signal_values_text_name].delete(0, "end")
# 将返回的值插入到输入框中
self.entries[signal_values_text_name].insert(0, result)
self.entries[signal_values_text_name]["state"] = DISABLED
else:
messagebox.showerror(title="失败", message=f"{signal_name} is not received")
self.buttons[text_name]["state"] = NORMAL
def create_check_buttons(self):
"""
创建选中框,适用于单选发送消息的情况
"""
# 创建下拉框
if self.row != 0:
self.row += 1
# 创建单选框
index = 0
for key, value in self.__check_buttons.items():
function_name = key
text_name = value[TEXT]
if index == 0:
self.column = 0
elif index % self.__max_line_count == 0:
self.row += 1
self.column = 0
else:
self.column += 1
# 创建bool对象接收值
self.check_button_bool_vars[function_name] = BooleanVar()
# 创建CheckButton对象并放到check_buttons中方便调用
button = Checkbutton(self, text=text_name,
variable=self.check_button_bool_vars[function_name],
onvalue=True,
offvalue=False,
command=lambda x=function_name: self.__check_button_event(x),
width=self.__checkBut_width,
anchor="w",wraplength=150,justify="left"
)
self.check_buttons[function_name] = button
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
# 布局checkbutton
self.check_buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
index += 1
self.row += 1
if len(self.__check_buttons) != 0:
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
def __check_button_event(self, function_name):
values = self.__check_buttons[function_name]
text_name = values[TEXT]
on_actions = values[ON]
off_actions = values[OFF]
if self.check_button_bool_vars[function_name].get():
logger.debug(f"{text_name} ON")
self.__send_actions(on_actions)
else:
logger.debug(f"{text_name} OFF")
self.__send_actions(off_actions)
def create_comboxs(self):
"""
创建下拉框,选中的时候触发事件, 适用于枚举类型的选中框
"""
# 创建下拉框
if self.row != 0:
self.row += 1
index = 0
for key, value in self.__comboxs.items():
function_name = key
text_name = value[TEXT]
if index == 0:
self.column = 0
elif index % self.__max_double_line_count == 0:
self.row += 1
self.column = 0
else:
self.column += 1
# 获取下拉框的名称
values = list(value[VALUES].keys())
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
# 创建Label框
Label(self, text=text_name, width=self.__label_width, anchor="w",wraplength=180,justify="left").grid(row=self.row, column=self.column,
sticky=W)
# 创建下拉框
self.comboxs[function_name] = Combobox(self, values=values, state="readonly", width=self.__comboxs_width)
# 设置下拉框初始值为第一个值
self.comboxs[function_name].current(0)
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
# 布局下拉框
self.comboxs[function_name].grid(row=self.row, column=self.column + 1, sticky=W)
# 绑定下拉框事件
self.comboxs[function_name].bind("<<ComboboxSelected>>",
lambda x, y=("", function_name): self.__combox_event(x, y))
logger.debug(f"row = {self.row}, column = {self.column}")
self.column += 1
index += 1
self.row += 1
if len(self.__comboxs) != 0:
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
def __combox_event(self, event, function_name):
"""
能够找到下拉框,并根据下拉框的内容进行判断
后续能够根据内容进行消息的发送
"""
function_name = function_name[1]
combox_param = self.__comboxs[function_name]
# 字典中定义的值列表
values = combox_param[VALUES]
text_name = combox_param[TEXT]
actual_values = list(values.keys())
# 当前选中的是第几个
combox_index = self.comboxs[function_name].current()
select_name = actual_values[combox_index]
actions = values[select_name]
logger.debug(f"设置{text_name}为{select_name}")
self.__send_actions(actions)
logger.trace(event)
def create_entries(self):
"""
创建输入框,适用于车速类型的线性信号值
"""
# 创建输入框
if self.row != 0:
self.row += 1
index = 0
for key, value in self.__entries.items():
function_name = key
text_name = value[TEXT]
if index == 0:
self.column = 0
elif index % self.__max_double_line_count == 0:
self.row += 1
self.column = 0
else:
self.column += 1
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
# 获取输入框的名称
Label(self, text=text_name, width=self.__label_width, anchor="w",wraplength=180,justify="left").grid(row=self.row, column=self.column,
sticky=W)
# 创建输入框
self.entries[function_name] = Entry(self, width=self.__entrie_width)
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
self.entries[function_name].grid(row=self.row, column=self.column + 1, sticky=W)
# 绑定事件
for event_key in self.support_event_keys:
self.entries[function_name].bind(event_key,
lambda x, y=("", function_name): self.__entry_event(x, y))
self.column += 1
index += 1
self.row += 1
if len(self.__entries) != 0:
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
def __entry_event(self, event, params):
message_lost = MESSAGE_LOST[0]
logger.trace(event)
function_name = params[1]
if function_name == message_lost:
value = self.entries[function_name].get()
if value != "":
# 0x152,0x153, 0x154
value.replace(",", ",")
if "," in value:
values = value.split(",")
else:
# 0x164
values = [value]
for msg_id in values:
msg_id = msg_id.strip()
# 处理16进制
if "x" in msg_id or "X" in msg_id:
# 把16进制转换成10进制
message_id = int(msg_id, 16)
else:
message_id = int(f"0x{msg_id}", 16)
logger.debug(f"message_id = {message_id}")
try:
self.can_service.stop_transmit(message_id)
except RuntimeError as e:
logger.error(e)
messagebox.showerror("出错了", f"【{e}】")
else:
entry_value = self.entries[function_name].get()
params = self.__entries[function_name]
actions = params[ACTIONS]
text_name = params[TEXT]
logger.debug(f"设置{text_name}值为{entry_value}")
new_actions = copy.deepcopy(actions)
for action in new_actions:
if len(action) == 2:
msg_id, signals = action
for name, value in signals.items():
if value is None:
logger.debug(f"change {name} value to {entry_value}")
signals[name] = float(entry_value)
self.__send_actions(new_actions)
def create_thread_buttons(self):
"""
创建周期交替变化或者有时间延迟的信号发送, 如双闪灯
选中会发送,不选中则不发送
名字上以【】区别
"""
# 创建事件单选框
if self.row != 0:
self.row += 1
index = 0
for key, value in self.__thread_buttons.items():
function_name = key
text_name = value[TEXT]
if index == 0:
self.column = 0
elif index % self.__max_line_count == 0:
self.row += 1
self.column = 0
else:
self.column += 1
# 创建bool对象接收值
self.thread_button_bool_vars[text_name] = BooleanVar()
# 创建CheckButton对象并放到thread_buttons中方便调用
button = Checkbutton(self, text=f"【{text_name}】",
variable=self.thread_button_bool_vars[text_name],
onvalue=True,
offvalue=False,
command=lambda x=function_name: self.__thread_check_button_event(x),
width=self.__thread_buttons_width,
anchor="w",wraplength=180,justify="left"
)
self.thread_buttons[function_name] = button
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
self.thread_buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
index += 1
self.row += 1
if len(self.__thread_buttons) != 0:
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
def __thread_check_button_event(self, function_name):
if function_name == DEFAULT_MESSAGE:
logger.info(f"send default messages and filter nodes {self.__filter_nodes}")
if self.thread_button_bool_vars[DEFAULT_MESSAGE].get():
self.thread_pool.submit(self.__special_actions, 1)
elif function_name == BUS_LOST:
logger.info("can bus lost")
if self.thread_button_bool_vars[BUS_LOST].get():
self.thread_pool.submit(self.__special_actions, 2)
else:
param = self.__thread_buttons[function_name]
text_name = param[TEXT]
actions = param[ACTIONS]
if self.thread_button_bool_vars[text_name].get():
if function_name not in self.thread_task:
task = self.thread_pool.submit(self.__thread_method, text_name, actions)
self.thread_task[function_name] = task
else:
if function_name in self.thread_task:
self.thread_task.pop(function_name)
def __thread_method(self, name, actions):
logger.debug(actions)
while self.thread_button_bool_vars[name].get():
self.__send_actions(actions)
def __send_actions(self, actions: List):
for action in actions:
if len(action) == 2:
msg_id, signals = action
logger.info(f"{hex(msg_id)} = {signals}")
try:
self.can_service.send_can_signal_message(msg_id, signals)
except RuntimeError as e:
logger.error(e)
messagebox.showerror("出错了", f"【{e}】")
elif len(action) == 1:
logger.debug(f"sleep {action} seconds")
sleep_time = float(action[0])
sleep(sleep_time)
else:
raise RuntimeError(f"value[{action}] incorrect")
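    # A minimal sketch of the actions format handled above (message IDs and signal names are
    # hypothetical):
    #
    #     actions = [
    #         (0x123, {"Turn_Left": 1}),  # send message 0x123 with the given signal values
    #         (0.5,),                     # then sleep for 0.5 seconds
    #         (0x123, {"Turn_Left": 0}),
    #     ]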
def create_buttons(self):
"""
        Create event signal buttons, mainly for actions that involve time delays, e.g. long or short presses of steering-wheel keys, where switching between press and release states needs a wait.
"""
if self.row != 0:
self.row += 1
index = 0
for key, value in self.__buttons.items():
function_name = key
text_name = value[TEXT]
if index == 0:
self.column = 0
elif index % self.__max_line_count == 0:
self.row += 1
self.column = 0
else:
self.column += 1
            # create the Button object and keep it in self.buttons for later access
self.buttons[function_name] = Button(self, text=text_name,
command=lambda x=function_name: self.__thread_button_event(x),
width=self.__buttons_width,wraplength=170,justify="left",anchor="w")
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
index += 1
self.row += 1
if len(self.__buttons) != 0:
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
def __thread_button_event(self, function_name):
try:
self.buttons[function_name]["state"] = DISABLED
param = self.__buttons[function_name]
text_name = param[TEXT]
logger.debug(f"press {text_name} button")
actions = param[ACTIONS]
self.thread_pool.submit(self.__send_actions, actions)
except RuntimeError as e:
logger.error(e)
messagebox.showerror("出错了", f"【{e}】")
finally:
self.buttons[function_name]["state"] = NORMAL
def create_receive_buttons(self):
"""
        Create receive-check buttons that simulate another ECU receiving and checking messages.
"""
if self.row != 0:
self.row += 1
index = 0
for key, value in self.__receive_buttons.items():
function_name = key
text_name = value[TEXT]
if index == 0:
self.column = 0
elif index % self.__max_line_count == 0:
self.row += 1
self.column = 0
else:
self.column += 1
            # create the Button object and keep it in self.buttons for later access
logger.debug(f"add button {function_name} in buttons")
self.buttons[function_name] = Button(self, text=f"【{text_name}】",
command=lambda x=function_name: self.__receive_button_event(x))
logger.debug(f"row = {self.row}, column = {self.column}, index = {index}")
self.buttons[function_name].grid(row=self.row, column=self.column, sticky=W)
index += 1
self.row += 1
if len(self.__receive_buttons) != 0:
Separator(self, orient=HORIZONTAL).grid(row=self.row, column=0, pady=5, sticky=E + W,
columnspan=self.__max_line_count)
self.row += 1
def __receive_button_event(self, function_name):
self.buttons[function_name]["state"] = DISABLED
param = self.__receive_buttons[function_name]
text_name = param[TEXT]
logger.debug(f"press {text_name} button")
check_msgs = param[CHECK_MSGS]
msg_id, signal_name, signal_value, count, expect_value = check_msgs
try:
stack = self.can_service.get_stack()
result = self.can_service.check_signal_value(stack=stack, msg_id=msg_id, signal_name=signal_name, expect_value=signal_value, count=count, exact=expect_value)
show_message = "成功" if result else "失败"
exact_message = "精确" if expect_value else "不精确"
message = f"检查【{hex(msg_id)}】中信号【{signal_name}】值为【{signal_value}】收到次数" \
f"为【{count}】,匹配方式为【{exact_message}】的检查结果是【{show_message}】"
if result:
messagebox.showinfo(title=show_message, message=message)
else:
messagebox.showerror(title=show_message, message=message)
except RuntimeError as e:
logger.error(e)
messagebox.showerror(title="出错了", message=f"【{e}】")
finally:
self.can_service.clear_stack_data()
self.buttons[function_name]["state"] = NORMAL
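    # The CHECK_MSGS entry unpacked above is a 5-tuple; a hypothetical example:
    #
    #     check_msgs = (0x321, "Lock_Status", 1, 2, True)
    #
    # i.e. check that signal "Lock_Status" of message 0x321 took the value 1 and was received
    # 2 times, with the last flag selecting exact or inexact matching.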
class Gui(object):
def __init__(self, excel_file: str, dbc: str, can_box_device: Union[CanBoxDeviceEnum, str, None] = None,
baud_rate: Union[BaudRateEnum, int] = BaudRateEnum.HIGH,
data_rate: Union[BaudRateEnum, int] = BaudRateEnum.DATA,
channel_index: int = 1,
filter_nodes: Optional[List[str]] = None, can_fd: bool = False,
excel_type: ExcelEnum = ExcelEnum.OPENPYXL,
max_workers: int = 500,
max_line_count: int = 8):
"""
        :param excel_file: path to the Excel config file (required)
        :param dbc: path to the project DBC file (required)
        :param can_box_device: CAN box device to use (optional)
        :param baud_rate: arbitration-phase baud rate (optional)
        :param data_rate: data-phase baud rate for CAN FD (optional)
        :param channel_index: CAN channel index, defaults to 1 (optional)
        :param filter_nodes: nodes filtered out when sending the default signals (optional)
        :param can_fd: whether to use CAN FD (optional)
        :param excel_type: Excel parser backend (optional)
        :param max_workers: thread pool size; the default is usually fine (optional)
        :param max_line_count: maximum number of widgets shown per row in the panel, default 8; increase it if widgets do not fit
"""
self.tk = Tk()
self.tk.title("CAN面板")
        # initialize the CANService
self.can_service = CANService(dbc, can_box_device=can_box_device, baud_rate=baud_rate, data_rate=data_rate,
channel_index=channel_index, can_fd=can_fd, max_workers=max_workers)
        # nodes to filter out when sending default messages
self.__filter_nodes = filter_nodes
        # read the widget configuration from the Excel file
service = ConfigReader(can_service=self.can_service,type_=excel_type)
tab_configs = dict()
tab_configs[COMMON] = {check_buttons: {}, thread_buttons: {}, comboxs: {},
entries: {}, buttons: {}, receive_buttons: {}}
config = service.read_from_file(excel_file)
tab_configs.update(config)
self.tab_control = Notebook(self.tk)
        # list of tab frame objects
self.tabs = []
for key, value in tab_configs.items():
logger.info(f"handle tab {key}")
if key == COMMON:
common_panel = True
else:
common_panel = False
tab = TabFrame(self.tk, can_service=self.can_service, filter_nodes=filter_nodes,
config=value, common_panel=common_panel, max_line_count=max_line_count)
self.tab_control.add(tab, text=key)
self.tabs.append(tab)
self.tab_control.pack(expand=1, fill="both")
        # select the first tab
self.tab_control.select(self.tabs[0])
self.tk.protocol('WM_DELETE_WINDOW', self.exit_root)
self.tk.mainloop()
def exit_root(self):
self.can_service.close_can()
self.tk.destroy()
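# A minimal usage sketch; the file names and node list are hypothetical examples, not values
# prescribed by this module:
#
#     if __name__ == "__main__":
#         Gui(excel_file="can_panel.xlsx",
#             dbc="project.dbc",
#             filter_nodes=["HU"],
#             can_fd=False,
#             max_line_count=8)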
| 1.898438 | 2 |
subs2cia/subzipper.py | mdVNwyRbm/subs2cia | 53 | 12794848 | <gh_stars>10-100
from subs2cia.argparser import get_args_subzipper
from subs2cia.sources import is_language
import logging
from pathlib import Path
import pycountry
from pprint import pprint
def start():
args = get_args_subzipper()
args = vars(args)
if args['verbose']:
# if args['debug']:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
# elif args['debug']:
# logging.basicConfig(level=logging.DEBUG)
logging.debug(f"Start arguments: {args}")
subfiles = args['subfiles']
reffiles = args['reffiles']
if len(subfiles) != len(reffiles):
logging.warning(f"Mismatched number of subtitle and reference files! Got {len(subfiles)} subtitle files and "
f"{len(reffiles)} reference files.")
logging.warning(f"Will only process the first "
f"{len(subfiles) if len(subfiles) < len(reffiles) else len(reffiles)} "
f"reference-subtitle pairs.")
# exit(1)
# subfiles = [Path(s).absolute() for s in subfiles]
# reffiles = [Path(r).absolute() for r in reffiles]
subfiles = [Path(s) for s in subfiles]
reffiles = [Path(r) for r in reffiles]
lang = None
if args['lang'] is not None:
if is_language(args['lang']):
lang = pycountry.languages.lookup(args['lang'])
lang = lang.alpha_3
logging.info(f'Appending language code {lang}')
else:
logging.error(f"Language lookup failure: {args['lang']} is not a ISO recognized language")
if args['no_sort']:
logging.info("Not sorting inputs alphabetically, using as-is.")
else:
subfiles.sort(key=lambda x: str(x))
reffiles.sort(key=lambda x: str(x))
for s, r in zip(subfiles, reffiles):
newpath = r.parent / (r.stem + ('' if lang is None else f'.{lang}') + s.suffix)
logging.info(f"Will rename {s} to {newpath}")
if not s.exists():
logging.critical(f"Subtitle file doesn't exist: {s}")
exit(1)
if not r.exists():
logging.warning(f"Reference file doesn't exist: {r}")
if newpath == r:
logging.critical(f"Renaming subtitle to {newpath} will overwrite the reference file!")
exit(1)
if newpath.exists():
logging.critical(f"Renaming subtitle to {newpath} will overwrite an existing file!")
exit(1)
# todo: user-interactive question here
if args['dry_run']:
logging.info("Dry run mode, not writing changes.")
return
for s, r in zip(subfiles, reffiles):
newpath = r.parent / (r.stem + ('' if lang is None else f'.{lang}') + s.suffix)
logging.info(f"Renaming {s} to {newpath}...")
s.rename(newpath)
logging.info(f"...done")
if __name__ == '__main__':
start()
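# Example of the renaming rule implemented in start(), with hypothetical file names: given a
# subtitle "episode01.srt", a reference "Episode 01.mkv" and --lang ja (pycountry resolves "ja"
# to the alpha-3 code "jpn"), the subtitle is renamed to "Episode 01.jpn.srt" next to the
# reference file.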
| 2.578125 | 3 |
lichee/module/torch/layer/longformer_multi_headed_attn.py | Tencent/Lichee | 91 | 12794849 | <filename>lichee/module/torch/layer/longformer_multi_headed_attn.py
# -*- coding: utf-8 -*-
# _*_ coding:utf-8 _*_
# Author : Nick
# Time : 2020/9/15 3:21 PM
from typing import List, Tuple
import torch
import math
def nonzero_tuple(x):
if x.dim() == 0:
return x.unsqueeze(0).nonzero().unbind(1)
return x.nonzero().unbind(1)
class LongformerSelfAttention(torch.nn.Module):
def __init__(self, cfg, layer_id):
super().__init__()
if cfg["CONFIG"]["HIDDEN_SIZE"] % cfg["CONFIG"]["NUM_ATTENTION_HEADS"] != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (cfg["CONFIG"]["HIDDEN_SIZE"], cfg["CONFIG"]["NUM_ATTENTION_HEADS"])
)
self.num_heads = cfg["CONFIG"]["NUM_ATTENTION_HEADS"]
self.head_dim = int(cfg["CONFIG"]["HIDDEN_SIZE"] / cfg["CONFIG"]["NUM_ATTENTION_HEADS"])
self.embed_dim = cfg["CONFIG"]["HIDDEN_SIZE"]
self.query = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
self.key = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
self.value = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
# separate projection layers for tokens with global attention
self.query_global = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
self.key_global = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
self.value_global = torch.nn.Linear(cfg["CONFIG"]["HIDDEN_SIZE"], self.embed_dim)
self.dropout = cfg["CONFIG"]["ATTENTION_PROBS_DROPOUT_PROB"]
        self.layer_id = layer_id  # layer index; supplied as a hyperparameter
        attention_window = cfg["CONFIG"]["ATTENTION_WINDOW"][self.layer_id]  # per-layer window size from the config
assert (
attention_window % 2 == 0
), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
assert (
attention_window > 0
), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
self.one_sided_attn_window_size = attention_window // 2
def forward(
self, hidden_states, attention_mask
):
"""
LongformerSelfAttention expects `len(hidden_states)` to be multiple of `attention_window`.
Padding to `attention_window` happens in LongformerModel.forward to avoid redoing the padding on each layer.
The `attention_mask` is changed in `BertModel.forward` from 0, 1, 2 to
-ve: no attention
0: local attention
+ve: global attention
"""
attention_mask = attention_mask.squeeze(dim=2).squeeze(dim=1)
# is index masked or global attention
is_index_masked = attention_mask < 0
is_index_global_attn = attention_mask > 0
is_global_attn = is_index_global_attn.flatten().any().item()
hidden_states = hidden_states.transpose(0, 1)
# project hidden states
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
seq_len, batch_size, embed_dim = hidden_states.size()
assert (
embed_dim == self.embed_dim
), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}"
# normalize query
query_vectors /= math.sqrt(self.head_dim)
query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# attn_probs = (batch_size, seq_len, num_heads, window*2+1)
attn_scores = self._sliding_chunks_query_key_matmul(
query_vectors, key_vectors, self.one_sided_attn_window_size
)
# values to pad for attention probs
remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None]
# cast to fp32/fp16 then replace 1's with -inf
float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill(
remove_from_windowed_attention_mask, -10000.0
)
        # diagonal mask with zeros everywhere and -inf in place of padding
diagonal_mask = self._sliding_chunks_query_key_matmul(
torch.ones(size=float_mask.size(), dtype=float_mask.dtype, device=float_mask.device),
float_mask, self.one_sided_attn_window_size
)
# pad local attention probs
attn_scores += diagonal_mask
assert list(attn_scores.size()) == [
batch_size,
seq_len,
self.num_heads,
self.one_sided_attn_window_size * 2 + 1,
], f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, " \
f"{self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}"
max_num_global_attn_indices = torch.tensor(0)
is_index_global_attn_nonzero = [torch.tensor(0)]
is_local_index_global_attn_nonzero = [torch.tensor(0)]
is_local_index_no_global_attn_nonzero = [torch.tensor(0)]
# compute local attention probs from global attention keys and contact over window dim
if is_global_attn:
# compute global attn indices required through out forward fn
ret = self._get_global_attn_indices(is_index_global_attn)
max_num_global_attn_indices = ret[0]
is_index_global_attn_nonzero = ret[1]
is_local_index_global_attn_nonzero = ret[2]
is_local_index_no_global_attn_nonzero = ret[3]
# calculate global attn probs from global key
global_key_attn_scores = self._concat_with_global_key_attn_probs(
query_vectors=query_vectors,
key_vectors=key_vectors,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
)
# concat to attn_probs
# (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1)
# free memory
# if self.query.training:
# del global_key_attn_scores
attn_probs_fp32 = torch.nn.functional.softmax(attn_scores, dim=-1,
dtype=torch.float32) # use fp32 for numerical stability
attn_probs = attn_probs_fp32.type_as(attn_scores)
# free memory
# if self.query.training:
# del attn_probs_fp32
# softmax sometimes inserts NaN if all positions are masked, replace them with 0
attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0)
# apply dropout
attn_probs = torch.nn.functional.dropout(attn_probs, p=self.dropout, training=self.training)
value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1)
# compute local attention output with global attention value and add
if is_global_attn:
# compute sum of global and local attn
attn_output = self._compute_attn_output(
value_vectors=value_vectors,
attn_probs=attn_probs,
max_num_global_attn_indices=max_num_global_attn_indices,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
)
else:
# compute local attn only
attn_output = self._sliding_chunks_matmul_attn(
attn_probs, value_vectors, self.one_sided_attn_window_size
)
assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size"
attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous()
# compute value for global attention and overwrite to attention output
# TODO: remove the redundant computation
if is_global_attn:
global_attn_output = self._compute_global_attn_output(
hidden_states=hidden_states,
max_num_global_attn_indices=max_num_global_attn_indices,
is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
is_index_global_attn_nonzero=is_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
is_index_masked=is_index_masked,
)
# get only non zero global attn output
nonzero_global_attn_output = global_attn_output[
is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1]
]
# overwrite values with global attention
attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view(
len(is_local_index_global_attn_nonzero[0]), -1
)
attn_output = attn_output.transpose(0, 1)
return attn_output
def _pad_and_transpose_last_two_dims(self, hidden_states_padded, padding: Tuple[int, int, int, int]):
"""pads rows and then flips rows and columns"""
hidden_states_padded = torch.nn.functional.pad(
hidden_states_padded, padding
) # padding value is not important because it will be overwritten
hidden_states_padded = hidden_states_padded.view(
hidden_states_padded.size(0), hidden_states_padded.size(1), hidden_states_padded.size(3),
hidden_states_padded.size(2)
)
return hidden_states_padded
def _pad_and_diagonalize(self, chunked_hidden_states):
"""shift every row 1 step right, converting columns into diagonals.
Example:
chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492,
-1.8348, 0.7672, 0.2986, 0.0285,
-0.7584, 0.4206, -0.0405, 0.1599,
2.0514, -1.1600, 0.5372, 0.2629 ]
window_overlap = num_rows = 4
(pad & diagonilize) =>
[ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000
0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000
0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000
0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ]
"""
total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size()
chunked_hidden_states = torch.nn.functional.pad(
chunked_hidden_states, (0, window_overlap + 1)
) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1).
# Padding value is not important because it'll be overwritten
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, -1
        )  # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap + 1)
chunked_hidden_states = chunked_hidden_states[
:, :, :-window_overlap
        ]  # total_num_heads x num_chunks x window_overlap * (hidden_dim + window_overlap)
chunked_hidden_states = chunked_hidden_states.view(
total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim
        )  # total_num_heads x num_chunks x window_overlap x (hidden_dim + window_overlap)
chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
return chunked_hidden_states
def _chunk(self, hidden_states, window_overlap: int):
"""convert into overlapping chunkings. Chunk size = 2w, overlap size = w"""
# non-overlapping chunks of size = 2w
hidden_states = hidden_states.view(
hidden_states.size(0),
hidden_states.size(1) // (window_overlap * 2),
window_overlap * 2,
hidden_states.size(2),
)
# use `as_strided` to make the chunks overlap with an overlap size = window_overlap
chunk_size = list(hidden_states.size())
chunk_size[1] = chunk_size[1] * 2 - 1
chunk_stride = [hidden_states.stride(0), hidden_states.stride(1), hidden_states.stride(2),
hidden_states.stride(3)]
chunk_stride[1] = chunk_stride[1] // 2
return hidden_states.as_strided(size=chunk_size, stride=chunk_stride)
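    # Shape sketch for _chunk (the sizes are illustrative assumptions): for hidden_states of
    # shape (batch * heads, 512, head_dim) and window_overlap = 64, the non-overlapping view has
    # 512 // 128 = 4 chunks of length 128, and the strided view yields 4 * 2 - 1 = 7 chunks of
    # length 128 that overlap by 64.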
def _mask_invalid_locations(self, input_tensor, affected_seq_len: int):
beginning_mask_2d = torch.ones(affected_seq_len, affected_seq_len + 1, dtype=input_tensor.dtype,
device=input_tensor.device).tril().flip(dims=[0])
beginning_mask = beginning_mask_2d[None, :, None, :]
ending_mask = beginning_mask.flip(dims=(1, 3))
beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1]
beginning_mask = beginning_mask.expand(beginning_input.size())
beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1):]
ending_mask = ending_mask.expand(ending_input.size())
ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8
def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int):
"""Matrix multiplication of query and key tensors using with a sliding window attention pattern.
This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer)
with an overlap of size window_overlap"""
batch_size, seq_len, num_heads, head_dim = query.size()
assert (
seq_len % (window_overlap * 2) == 0
), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}"
assert query.size() == key.size()
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
chunked_query = self._chunk(query, window_overlap)
chunked_key = self._chunk(key, window_overlap)
        # matrix multiplication
# bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
# bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap
chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (chunked_query, chunked_key)) # multiply
# convert diagonals into columns
diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(
chunked_attention_scores, padding=(0, 0, 0, 1)
)
# allocate space for the overall attention matrix where the chunks are combined. The last dimension
# has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower
# triangles (attention from a word to window_overlap previous words). The following column is attention
# score from each word to itself, then followed by window_overlap columns for the upper triangle.
diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty(
(batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1)
)
# copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
# - copying the main diagonal and the upper triangle
diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, :, :window_overlap, : window_overlap + 1
]
diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[
:, -1, window_overlap:, : window_overlap + 1
]
# - copying the lower triangle
diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[
:, :, -(window_overlap + 1): -1, window_overlap + 1:
]
diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[
:, 0, : window_overlap - 1,
1 - window_overlap:
]
# separate batch_size and num_heads dimensions again
diagonal_attention_scores = diagonal_attention_scores.view(
batch_size, num_heads, seq_len, 2 * window_overlap + 1
).transpose(2, 1)
self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
return diagonal_attention_scores
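    # Resulting shape sketch (same illustrative sizes): for query/key tensors of shape
    # (batch, 512, heads, head_dim) and window_overlap = 64, the returned score tensor has shape
    # (batch, 512, heads, 2 * 64 + 1): each position attends to 64 tokens on either side plus
    # itself, with out-of-range positions masked to -inf.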
def _sliding_chunks_matmul_attn(
self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int
):
"""Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors.
Returned tensor will be of the same shape as `attn_probs`"""
batch_size, seq_len, num_heads, head_dim = value.size()
assert seq_len % (window_overlap * 2) == 0
assert attn_probs.size()[:3] == value.size()[:3]
assert attn_probs.size(3) == 2 * window_overlap + 1
chunks_count = seq_len // window_overlap - 1
# group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
chunked_attn_probs = attn_probs.transpose(1, 2).reshape(
batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1
)
# group batch_size and num_heads dimensions into one
value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim)
# pad seq_len with w at the beginning of the sequence and another window overlap at the end
padded_value = torch.nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1.0)
# chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim)
chunked_value_stride = [padded_value.stride(0), padded_value.stride(1), padded_value.stride(2)]
chunked_value_stride = (
chunked_value_stride[0],
window_overlap * chunked_value_stride[1],
chunked_value_stride[1],
chunked_value_stride[2],
)
chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride)
chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value))
return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2)
def _get_global_attn_indices(self, is_index_global_attn):
""" compute global attn indices required throughout forward pass """
# helper variable
num_global_attn_indices = is_index_global_attn.long().sum(dim=1)
# max number of global attn indices in batch
max_num_global_attn_indices = num_global_attn_indices.max()
# indices of global attn
is_index_global_attn_nonzero = nonzero_tuple(is_index_global_attn)
# helper variable
is_local_index_global_attn = torch.arange(
max_num_global_attn_indices, device=is_index_global_attn.device
) < num_global_attn_indices.unsqueeze(dim=-1)
# location of the non-padding values within global attention indices
is_local_index_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn)
# location of the padding values within global attention indices
is_local_index_no_global_attn_nonzero = nonzero_tuple(is_local_index_global_attn == 0)
return (
max_num_global_attn_indices,
is_index_global_attn_nonzero,
is_local_index_global_attn_nonzero,
is_local_index_no_global_attn_nonzero,
)
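    # Worked toy example (shapes chosen for illustration): for
    #     is_index_global_attn = [[True, False, True, False],
    #                             [True, False, False, False]]
    # the per-row counts are [2, 1], so max_num_global_attn_indices == 2,
    # is_index_global_attn_nonzero == (tensor([0, 0, 1]), tensor([0, 2, 0])),
    # is_local_index_global_attn_nonzero == (tensor([0, 0, 1]), tensor([0, 1, 0])) and
    # is_local_index_no_global_attn_nonzero == (tensor([1]), tensor([1])).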
def _concat_with_global_key_attn_probs(
self,
key_vectors,
query_vectors,
max_num_global_attn_indices,
is_index_global_attn_nonzero: List[torch.Tensor],
is_local_index_global_attn_nonzero: List[torch.Tensor],
is_local_index_no_global_attn_nonzero: List[torch.Tensor],
):
batch_size = key_vectors.shape[0]
# create only global key vectors
key_vectors_only_global = key_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero]
# (batch_size, seq_len, num_heads, max_num_global_attn_indices)
attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global))
attn_probs_from_global_key[
is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1]
] = -10000.0
return attn_probs_from_global_key
def _compute_attn_output(
self,
value_vectors,
attn_probs,
max_num_global_attn_indices,
is_index_global_attn_nonzero: List[torch.Tensor],
is_local_index_global_attn_nonzero: List[torch.Tensor],
):
batch_size = attn_probs.shape[0]
# cut local attn probs to global only
attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices)
# get value vectors for global only
value_vectors_only_global = value_vectors.new_zeros(
batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim
)
value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero]
# use `matmul` because `einsum` crashes sometimes with fp16
# attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v))
# compute attn output only global
attn_output_only_global = torch.matmul(
attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2)
).transpose(1, 2)
# reshape attn probs
attn_probs_without_global = attn_probs.narrow(
-1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices
).contiguous()
# compute attn output with global
attn_output_without_global = self._sliding_chunks_matmul_attn(
attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
)
return attn_output_only_global + attn_output_without_global
def _compute_global_attn_output(
self,
hidden_states,
max_num_global_attn_indices,
is_local_index_global_attn_nonzero: List[torch.Tensor],
is_index_global_attn_nonzero: List[torch.Tensor],
is_local_index_no_global_attn_nonzero: List[torch.Tensor],
is_index_masked,
):
seq_len, batch_size = hidden_states.shape[:2]
# prepare global hidden states
global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim)
global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[
is_index_global_attn_nonzero[::-1]
]
# global key, query, value
global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
global_key_vectors = self.key_global(hidden_states)
global_value_vectors = self.value_global(hidden_states)
# normalize
global_query_vectors_only_global /= math.sqrt(self.head_dim)
# reshape
global_query_vectors_only_global = (
global_query_vectors_only_global.contiguous()
.view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim)
.transpose(0, 1)
) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim)
global_key_vectors = (
global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)
global_value_vectors = (
global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1)
        )  # (batch_size * self.num_heads, seq_len, head_dim)
# compute attn scores
global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2))
assert global_attn_scores.size(0) == batch_size * self.num_heads, \
f"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, " \
f"but is {global_attn_scores.size(0)}."
assert global_attn_scores.size(1) == max_num_global_attn_indices, \
f"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices}, " \
f"but is {global_attn_scores.size(1)}."
assert global_attn_scores.size(2) == seq_len, \
f"global_attn_scores have the wrong size. size(2) should be {seq_len}, but is {global_attn_scores.size(2)}."
global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
global_attn_scores[
is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], :
] = -10000.0
global_attn_scores = global_attn_scores.masked_fill(is_index_masked[:, None, None, :], -10000.0, )
global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)
# compute global attn probs
global_attn_probs_float = torch.nn.functional.softmax(
global_attn_scores, dim=-1, dtype=torch.float32
) # use fp32 for numerical stability
global_attn_probs = torch.nn.functional.dropout(
global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training
)
# global attn output
global_attn_output = torch.bmm(global_attn_probs, global_value_vectors)
assert global_attn_output.size(0) == batch_size * self.num_heads, \
f"global_attn_scores have the wrong size. size(0) should be {batch_size * self.num_heads}, " \
f"but is {global_attn_output.size(0)}."
assert global_attn_output.size(1) == max_num_global_attn_indices, \
f"global_attn_scores have the wrong size. size(1) should be {max_num_global_attn_indices}, " \
f"but is {global_attn_output.size(1)}."
assert global_attn_output.size(2) == self.head_dim, \
f"global_attn_scores have the wrong size. size(2) should be {self.head_dim}, " \
f"but is {global_attn_output.size(2)}."
global_attn_output = global_attn_output.view(
batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim
)
return global_attn_output
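# A minimal usage sketch of this layer; all numbers below are illustrative assumptions, not
# values prescribed by this module:
#
#     cfg = {"CONFIG": {"HIDDEN_SIZE": 768,
#                       "NUM_ATTENTION_HEADS": 12,
#                       "ATTENTION_PROBS_DROPOUT_PROB": 0.1,
#                       "ATTENTION_WINDOW": [256] * 12}}
#     attn = LongformerSelfAttention(cfg, layer_id=0)
#     hidden = torch.randn(2, 512, 768)  # (batch, seq_len, hidden); seq_len must be a multiple
#                                        # of the attention window
#     mask = torch.zeros(2, 1, 1, 512)   # 0 = local attention, > 0 = global, < 0 = masked
#     out = attn(hidden, mask)           # -> (2, 512, 768)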
| 2.34375 | 2 |
tests/cupyx_tests/scipy_tests/special_tests/test_basic.py | amanchhaparia/cupy | 0 | 12794850 | <filename>tests/cupyx_tests/scipy_tests/special_tests/test_basic.py<gh_stars>0
import math
import cupy
import numpy
import pytest
import scipy.special # NOQA
import cupyx.scipy.special
from cupy import testing
from cupy.testing import (
assert_array_equal,
assert_array_almost_equal,
)
from cupy.testing import numpy_cupy_allclose
rtol = {'default': 1e-5, cupy.float64: 1e-12}
@testing.gpu
@testing.with_requires("scipy")
class TestLegendreFunctions:
def test_lpmv_basic(self):
# specific values tested in the SciPy test suite
scp = cupyx.scipy
lp = scp.special.lpmv(0, 2, 0.5)
assert_array_almost_equal(lp, -0.125, 7)
lp = scp.special.lpmv(0, 40, 0.001)
assert_array_almost_equal(lp, 0.1252678976534484, 7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
olderr = numpy.seterr(all="ignore")
try:
lp = scp.special.lpmv(-1, -1, 0.001)
finally:
numpy.seterr(**olderr)
assert lp != 0 or cupy.isnan(lp)
@pytest.mark.parametrize("order", [0, 1, 2, 3, 4])
@pytest.mark.parametrize("degree", [0, 1, 2, 3, 4, 5, 10, 20, 30, 40, 50])
@testing.for_dtypes(["e", "f", "d"])
@numpy_cupy_allclose(scipy_name="scp", atol=1e-12)
def test_lpmv(self, xp, scp, dtype, order, degree):
vals = xp.linspace(-1, 1, 100, dtype=dtype)
return scp.special.lpmv(order, degree, vals)
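    # Note on the decorator pattern used above: numpy_cupy_allclose(scipy_name="scp") runs the
    # decorated test once with (xp, scp) = (numpy, scipy) and once with (cupy, cupyx.scipy), and
    # asserts that the two returned arrays agree within the given tolerances.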
@testing.gpu
@testing.with_requires("scipy")
class TestBasic:
@testing.for_dtypes(["e", "f", "d"])
@numpy_cupy_allclose(scipy_name="scp")
def test_gammasgn(self, xp, scp, dtype):
vals = xp.linspace(-4, 4, 100, dtype=dtype)
return scp.special.gammasgn(vals)
@testing.for_dtypes(["e", "f", "d"])
@numpy_cupy_allclose(scipy_name="scp", rtol=rtol)
def test_log1p_(self, xp, scp, dtype):
# only test with values > 0 to avoid NaNs
vals = xp.logspace(-10, 10, 10000, dtype=dtype)
return scp.special.log1p(vals)
@testing.for_dtypes(["e", "f", "d"])
@numpy_cupy_allclose(scipy_name="scp", rtol=rtol)
def test_log1p_path2(self, xp, scp, dtype):
# test values for code path corresponding to range [1/sqrt(2), sqrt(2)]
vals = xp.linspace(1 / math.sqrt(2), math.sqrt(2), 1000, dtype=dtype)
return scp.special.log1p(vals)
def test_log1p_real(self):
log1p = cupyx.scipy.special.log1p
inf = cupy.inf
nan = cupy.nan
assert_array_equal(log1p(0), 0.0)
assert_array_equal(log1p(-1), -inf)
assert_array_equal(log1p(-2), nan)
assert_array_equal(log1p(inf), inf)
def test_log1p_complex(self):
# complex-valued log1p not yet implemented
with pytest.raises(TypeError):
cupyx.scipy.special.log1p(0 + 0j)
@pytest.mark.parametrize("function", ["xlogy", "xlog1py"])
@testing.for_dtypes('efdFD')
@numpy_cupy_allclose(scipy_name="scp", rtol={'default': 1e-3,
cupy.float64: 1e-12})
def test_xlogy(self, xp, scp, dtype, function):
# only test with values > 0 to avoid NaNs
x = xp.linspace(-100, 100, 1000, dtype=dtype)
y = xp.linspace(0.001, 100, 1000, dtype=dtype)
if x.dtype.kind == 'c':
x -= 1j * x
y += 1j * y
return getattr(scp.special, function)(x, y)
@pytest.mark.parametrize("function", ["xlogy", "xlog1py"])
@testing.for_dtypes('efdFD')
@numpy_cupy_allclose(scipy_name="scp", rtol={'default': 1e-3,
cupy.float64: 1e-12})
def test_xlogy_zeros(self, xp, scp, dtype, function):
# only test with values > 0 to avoid NaNs
x = xp.zeros((1, 100), dtype=dtype)
y = xp.linspace(-10, 10, 100, dtype=dtype)
if y.dtype.kind == 'c':
y += 1j * y
return getattr(scp.special, function)(x, y)
@pytest.mark.parametrize("function", ["xlogy", "xlog1py"])
@testing.for_all_dtypes()
def test_xlogy_nonfinite(self, dtype, function):
func = getattr(cupyx.scipy.special, function)
y = cupy.ones((5,), dtype=dtype)
assert cupy.all(cupy.isnan(func(cupy.nan, y)))
assert cupy.all(cupy.isnan(func(y, cupy.nan)))
| 1.890625 | 2 |