max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
drink_partners/partners/tests/views/test_search_partner_view.py | henriquebraga/drink-partners | 0 | 6000 | from drink_partners.contrib.samples import partner_bar_legal
class TestSearchPartner:
async def test_should_return_bad_request_for_str_coordinates(
self,
client,
partner_search_with_str_coordinates_url
):
async with client.get(partner_search_with_str_coordinates_url) as response: # noqa
assert response.status == 400
response_json = await response.json()
assert response_json['error_code'] == 'bad_request'
assert response_json['error_message'] == (
'Invalid coordinate longitude:a latitude:a'
)
async def test_should_return_nearest_partner_for_coordinate(
self,
client,
partner_search_coordinates_url,
save_partners
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 200
response_json = await response.json()
assert response_json == partner_bar_legal()
async def test_should_return_not_found_when_no_partner_covers_coordinate(
self,
client,
partner_search_coordinates_url
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 404
response_json = await response.json()
assert response_json['error_code'] == 'not_found'
assert response_json['error_message'] == (
'Partners not found covering area for '
'latitude:-43.36556 longitude:-22.99669'
)
| 2.71875 | 3 |
Titanic/class_create_model_of_logistic_regression.py | ysh329/Titanic-Machine-Learning-from-Disaster | 1 | 6001 | <filename>Titanic/class_create_model_of_logistic_regression.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_create_model_of_logistic_regression.py
# Description:
#
# Author: <NAME>
# E-mail: <EMAIL>
# Create: 2016-01-23 23:32:49
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import MySQLdb
import logging
import time
import pylab
from numpy import *
# note: rely on numpy's exp (from the wildcard import above) so that sigmoid_function works elementwise on matrices; math.exp would fail on a matrix argument
import csv
import decorator_of_function
################################### PART2 CLASS && FUNCTION ###########################
class CreateLogisticRegressionModel(object):
Decorator = decorator_of_function.CreateDecorator()
@Decorator.log_of_function
def __init__(self):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = 'main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateLogisticRegressionModel.__name__))
try:
self.con = MySQLdb.connect(host='localhost', user='root', passwd='<PASSWORD>', charset='utf8')
logging.info("Success in connecting MySQL.")
except MySQLdb.Error, e:
logging.error("Fail in connecting MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
@Decorator.log_of_function
def __del__(self):
try:
self.con.close()
logging.info("Success in quiting MySQL.")
except MySQLdb.Error, e:
self.con.rollback()
logging.error("Fail in quiting MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
logging.info("END CLASS {class_name}.".format(class_name = CreateLogisticRegressionModel.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateLogisticRegressionModel.__name__, delta_time = self.end - self.start))
@Decorator.log_of_function
def get_data_from_database(self, database_name, passenger_table_name):
cursor = self.con.cursor()
sql_list = []
# training set
sql_list.append("""SELECT PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch FROM {database_name}.{table_name} WHERE Is_train=1"""\
.format(database_name = database_name,\
table_name = passenger_table_name)\
)
# test set
sql_list.append("""SELECT PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch FROM {database_name}.{table_name} WHERE Is_train=0"""\
.format(database_name = database_name,\
table_name = passenger_table_name)\
)
for sql_idx in xrange(len(sql_list)):
sql = sql_list[sql_idx]
try:
cursor.execute(sql)
if sql_idx == 0:
train_data = cursor.fetchall()
logging.info("len(train_data):{0}".format(len(train_data)))
logging.info("train_data[0]:{0}".format(train_data[0]))
logging.info("type(train_data[0]):{0}".format(type(train_data[0])))
elif sql_idx == 1:
test_data = cursor.fetchall()
logging.info("len(test_data):{0}".format(len(test_data)))
logging.info("test_data[0]:{0}".format(test_data[0]))
logging.info("type(test_data[0]):{0}".format(type(test_data[0])))
except MySQLdb.Error, e:
self.con.rollback()
logging.error("Fail in fetch data from MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
train_data = map(lambda (PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch):\
(int(PassengerId),\
int(Survived),\
int(Pclass),\
Sex,\
int(Age),\
int(SibSp),\
int(Parch)\
),\
train_data)
logging.info("len(train_data):{0}".format(len(train_data)))
logging.info("train_data[0]:{0}".format(train_data[0]))
logging.info("type(train_data[0]):{0}".format(type(train_data[0])))
test_data = map(lambda (PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch):\
(int(PassengerId),\
int(Survived),\
int(Pclass),\
Sex,\
int(Age),\
int(SibSp),\
int(Parch)\
),\
test_data)
logging.info("len(test_data):{0}".format(len(test_data)))
logging.info("test_data[0]:{0}".format(test_data[0]))
logging.info("type(test_data[0]):{0}".format(type(test_data[0])))
return train_data, test_data
@Decorator.log_of_function
def add_intercept_term(self, train_feature_tuple_list, test_feature_tuple_list):
logging.info("len(train_feature_tuple_list[0]):{0}".format(len(train_feature_tuple_list[0])))
logging.info("len(train_feature_tuple_list):{0}".format(len(train_feature_tuple_list)))
logging.info("train_feature_tuple_list[0]:{0}".format(train_feature_tuple_list[0]))
logging.info("test_feature_tuple_list[0]:{0}".format(len(test_feature_tuple_list[0])))
logging.info("len(test_feature_tuple_list):{0}".format(len(test_feature_tuple_list)))
logging.info("test_feature_tuple_list[0]:{0}".format(test_feature_tuple_list[0]))
# len(train_feature_tuple_list[0]): 7
# PassengerId, Pclass, Sex, Age, SibSp, Parch, Fare
train_feature_intercept_term_added_tuple_list = map(lambda (PassengerId, Pclass, Sex, Age, SibSp, Parch, Fare): \
(PassengerId, 1.0, Pclass, Sex, Age, SibSp, Parch, Fare),\
train_feature_tuple_list)
test_feature_intercept_term_added_tuple_list = map(lambda (PassengerId, Pclass, Sex, Age, SibSp, Parch, Fare): \
(PassengerId, 1.0, Pclass, Sex, Age, SibSp, Parch, Fare),\
test_feature_tuple_list)
logging.info("len(train_feature_intercept_term_added_tuple_list):{0}".format(len(train_feature_intercept_term_added_tuple_list)))
logging.info("train_feature_intercept_term_added_tuple_list[0]:{0}".format(train_feature_intercept_term_added_tuple_list[0]))
logging.info("len(test_feature_intercept_term_added_tuple_list):{0}".format(len(test_feature_intercept_term_added_tuple_list)))
logging.info("test_feature_intercept_term_added_tuple_list[0]:{0}".format(test_feature_intercept_term_added_tuple_list[0]))
return train_feature_intercept_term_added_tuple_list,\
test_feature_intercept_term_added_tuple_list
@Decorator.log_of_function
def sigmoid_function(self, inX):
return 1.0 / (1.0 + exp(-inX))
@Decorator.log_of_function
def gradient_descent(self, train_feature_tuple_list, train_label_list, learning_rate = 0.01, max_iteration_time = 500, lambda_regularization = 0.1):
############################
# Initial parameters
# learning_rate = 0.01
# max_iteration_time = 500
############################
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare),\
train_feature_tuple_list)
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Sex, Fare),\
train_feature_tuple_list)
# [891, 7]
train_input_matrix = mat(train_feature_tuple_list_without_PassengerId)
# [891, 1]
train_label_matrix = mat(train_label_list).transpose()
train_sample_num, feature_num = shape(train_input_matrix)
weight_matrix = ones((feature_num, 1))
cost_list = []
error_list = []
optimal_solution = {}
for cur_iter in xrange(max_iteration_time):
# [891, 1] <- [891, 7]*[7, 1]
hypothesis = self.sigmoid_function(train_input_matrix * weight_matrix)
# real <- sum([891, 1]T*[891, 1] + [891, 1]T*[891, 1])
cost = -float(1) / (train_sample_num) * \
sum( train_label_matrix.transpose()*log(hypothesis) + (1-train_label_matrix.transpose())*log(1-hypothesis) ) + \
lambda_regularization / (2*train_sample_num) * (array(weight_matrix[1:]) * array(weight_matrix[1:])).sum()
cost_list.append(cost)
# [891, 1]
error = train_label_matrix - hypothesis
error_list.append(error)
logging.info("cur_iter:{0}, cost:{1}, sum(error):{2}".format(cur_iter+1, cost, sum(error)))
# 1 = 1 + 1 * [891, 1].T *[891, 1]
weight_matrix[0] = weight_matrix[0] + learning_rate * (float(1)/train_sample_num) * train_input_matrix[:, 0].transpose() * error
# [6, 1] = [6, 1] + 1 * \
# ( 1 / 1 * [891, 6].T * [891, 1]
# )
weight_matrix[1:] = weight_matrix[1:] + learning_rate * \
( (float(1)/train_sample_num) * train_input_matrix[:, 1::].transpose() * error - \
float(lambda_regularization) / train_sample_num * weight_matrix[1:] \
)
#weight_matrix = weight_matrix + learning_rate * train_input_matrix.transpose() * error
#"""
# find optimal solution
if cur_iter == 0:
optimal_solution['cur_iter'] = cur_iter
optimal_solution['cost'] = cost
optimal_solution['abs(error.sum())'] = abs(error.sum())
optimal_solution['weight_matrix'] = weight_matrix
elif cur_iter != 0 and optimal_solution['abs(error.sum())'] > abs(error.sum()):
optimal_solution['cur_iter'] = cur_iter
optimal_solution['cost'] = cost
optimal_solution['abs(error.sum())'] = abs(error.sum())
optimal_solution['weight_matrix'] = weight_matrix
logging.info("optimal_solution['cur_iter']:{0}".format(optimal_solution['cur_iter']))
logging.info("optimal_solution['cost':{0}".format(optimal_solution['cost']))
logging.info("optimal_solution['abs(error.sum())']:{0}".format(optimal_solution['abs(error.sum())']))
logging.info("optimal_solution['weight_matrix'].tolist():{0}".format(optimal_solution['weight_matrix'].tolist()))
#"""
pylab.plot(cost_list)
pylab.show()
return weight_matrix
#return optimal_solution['weight_matrix']
@Decorator.log_of_function
def predict(self, train_feature_tuple_list, weight_matrix):
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare),\
train_feature_tuple_list)
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Sex, Fare),\
train_feature_tuple_list)
train_input_matrix = mat(train_feature_tuple_list_without_PassengerId)
predict_prob_matrix = self.sigmoid_function(train_input_matrix * weight_matrix)
'''
row, col = shape(predict_label_matrix)
for i in xrange(row):
print i+1, predict_label_matrix[i][0]
'''
predict_prob_list = predict_prob_matrix.transpose().tolist()[0]
predict_label_list = []
for prob_idx in xrange(len(predict_prob_list)):
predict_prob = predict_prob_list[prob_idx]
if predict_prob > 0.5:
predict_label_list.append(1)
else:
predict_label_list.append(0)
return predict_label_list
@Decorator.log_of_function
def accuracy(self, train_label_list, predict_label_list):
logging.info("len(train_label_list):{0}".format(len(train_label_list)))
logging.info("len(predict_label_list):{0}".format(len(predict_label_list)))
# compute accuracy
def compute_accuracy(train_label_list, predict_label_list):
right_predict_num = 0
if len(train_label_list) == len(predict_label_list):
for idx in xrange(len(train_label_list)):
if train_label_list[idx] == predict_label_list[idx]:
right_predict_num = right_predict_num + 1
accuracy = float(right_predict_num)/len(train_label_list)
return right_predict_num, accuracy
def compute_precision_and_recall_and_F1(train_label_list, predict_label_list):
if len(train_label_list) == len(predict_label_list):
# compute precision and recall
true_positive_num = 10E-10
true_negative_num = 10E-10
predicted_positive_num = predict_label_list.count(1)
predicted_negative_num = predict_label_list.count(0)
for idx in xrange(len(train_label_list)):
if predict_label_list[idx] == train_label_list[idx] == 1:
true_positive_num = true_positive_num + 1
elif predict_label_list[idx] == train_label_list[idx] == 0:
true_negative_num = true_negative_num + 1
precision = float(true_positive_num) / (predicted_positive_num + 10E-10)
                # recall = TP / actual positives; the original divided TN by predicted negatives, which is the negative predictive value, not recall
                recall = float(true_positive_num) / (train_label_list.count(1) + 10E-10)
F1 = 2 * precision * recall / (precision + recall)
return precision, recall, F1
right_predict_num, accuracy = compute_accuracy(train_label_list = train_label_list,\
predict_label_list = predict_label_list)
logging.info("right_predict_num:{0}".format(right_predict_num))
logging.info("accuracy:{0}".format(accuracy))
precision, recall, F1 = compute_precision_and_recall_and_F1(train_label_list = train_label_list,\
predict_label_list = predict_label_list)
logging.info("precision:{0}".format(precision))
logging.info("recall:{0}".format(recall))
logging.info("F1:{0}".format(F1))
return accuracy, precision, recall, F1
@Decorator.log_of_function
def write_csv_file(self, start_id, predict_label_list, result_csv_dir):
# open csv file
try:
result_csv_handle = file(result_csv_dir, 'wb')
logging.info("Success in attaining file handle of {0}.".format(result_csv_dir))
except Exception as e:
logging.error("Fail in attaining file handle of {0}.".format(result_csv_dir))
logging.error(e)
return -1
# create csv writer
result_csv_writer = csv.writer(result_csv_handle)
# write csv file
result_csv_writer.writerow(["PassengerId", "Survived"])
for list_idx in xrange(len(predict_label_list)):
PassengerId = start_id + list_idx
predict_label = predict_label_list[list_idx]
result_csv_writer.writerow([PassengerId, predict_label])
# close csv file
try:
result_csv_handle.close()
logging.info("Success in closing file handle of {0}.".format(result_csv_dir))
except Exception as e:
logging.error("Fail in closing file handle of {0}.".format(result_csv_dir))
logging.error(e)
@Decorator.log_of_function
def plot_decision_bondary(self, weight_matrix):
pass
################################### PART3 CLASS TEST ##################################
"""
# Initial parameters
database_name = "TitanicDB"
passenger_table_name = "passenger_table"
LRModel = CreateLogisticRegressionModel()
""" | 2.578125 | 3 |
mjecv/io/base.py | mje-nz/mjecv | 0 | 6002 | import multiprocessing
from typing import List, Optional
import numpy as np
from ..util import dill_for_apply
class ImageSequenceWriter:
def __init__(self, pattern, writer, *, max_index=None):
if type(pattern) is not str:
raise ValueError("Pattern must be string")
if pattern.format(1, index="1") == pattern.format(2, index="2"):
raise ValueError("Pattern must use {} or {index}")
self._pattern = pattern
self._writer = writer
self._max_index = max_index
self._index = 1
@property
def next_filename(self):
index = str(self._index)
if self._max_index:
index = "{:0{}d}".format(self._index, len(str(self._max_index)))
return self._pattern.format(self._index, index=index)
def _save(self, filename: str, image: np.ndarray):
self._writer(filename, image)
def save(self, image: np.ndarray):
self._save(self.next_filename, image)
self._index += 1
def finish(self):
pass
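# Illustrative note (not part of the original module): with a zero-padded pattern the
# writer produces names like those below; the writer callable is assumed to accept
# (filename, image), e.g. imageio.imwrite or cv2.imwrite.
#
#   w = ImageSequenceWriter("frame_{index}.png", imageio.imwrite, max_index=100)
#   w.next_filename   # -> "frame_001.png"; each save() writes and then increments the index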
class MultiprocessingImageSequenceWriter(ImageSequenceWriter):
"""Image sequence writer that uses multiprocessing to save several images in
parallel.
This falls apart for large objects, as multiprocessing pickles them and pipes them
into the subprocesses.
"""
def __init__(self, *args, max_workers=None, max_waiting=None, **kwargs):
super().__init__(*args, **kwargs)
if max_workers is None:
max_workers = multiprocessing.cpu_count() - 1
ctx = multiprocessing.get_context("spawn")
self._pool = ctx.Pool(max_workers)
if max_waiting is not None:
# Semaphore's value is number of slots available for tasks to wait in
self._sem = ctx.Semaphore(
max_waiting
) # type: Optional[multiprocessing.synchronize.Semaphore]
else:
self._sem = None
self._results = [] # type: List[multiprocessing.pool.AsyncResult]
def __del__(self):
self.terminate()
def _save(self, filename: str, image: np.ndarray):
# Limit number of waiting tasks
if self._sem:
self._sem.acquire()
def callback(v):
assert self._sem is not None
self._sem.release()
else:
callback = None # type: ignore
args = (self._writer, (filename, image))
if dill_for_apply:
# Use dill instead of pickle, and make sure writer returns the filename
_writer = self._writer # Exclude self from capture to avoid dilling _pool
args = dill_for_apply(lambda f, i: _writer(f, i) or f, filename, image)
result = self._pool.apply_async(
*args, callback=callback, error_callback=callback,
)
self._results.append(result)
def terminate(self):
self._pool.terminate()
self._pool.join()
def finish(self, result_handler=None):
try:
# self._pool.close()
for result in self._results:
filename = result.get()
if result_handler is not None:
result_handler(filename)
self._pool.close()
except KeyboardInterrupt:
self._pool.terminate()
finally:
self._pool.join()
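# Hypothetical usage sketch (not in the original module; the frame source and the
# imageio.imwrite writer are assumptions):
#
#   writer = MultiprocessingImageSequenceWriter(
#       "out/frame_{index}.png", imageio.imwrite,
#       max_index=len(frames), max_workers=4, max_waiting=8)
#   for frame in frames:
#       writer.save(frame)                    # queued onto the worker pool
#   writer.finish(result_handler=print)       # blocks until every frame is written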
| 3.046875 | 3 |
377_combination_sum_iv.py | gengwg/leetcode | 2 | 6003 | # 377 Combination Sum IV
# Given an integer array with all positive numbers and no duplicates,
# find the number of possible combinations that add up to a positive integer target.
#
# Example:
#
# nums = [1, 2, 3]
# target = 4
#
# The possible combination ways are:
# (1, 1, 1, 1)
# (1, 1, 2)
# (1, 2, 1)
# (1, 3)
# (2, 1, 1)
# (2, 2)
# (3, 1)
#
# Note that different sequences are counted as different combinations.
#
# Therefore the output is 7.
#
# Follow up:
# What if negative numbers are allowed in the given array?
# How does it change the problem?
# What limitation we need to add to the question to allow negative numbers?
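# A minimal sketch of the reasoning used below (assuming the standard DP reading of the
# problem): let dp[i] be the number of ordered sequences summing to i, with dp[0] = 1;
# then dp[i] += dp[i - num] for every num <= i. For nums = [1, 2, 3]:
#   dp = [1, 1, 2, 4, 7]  ->  dp[4] == 7, matching the enumeration above.
# On the follow-up: if negative numbers were allowed, a positive/negative pair (e.g. 1
# and -1) could be inserted endlessly, giving infinitely many sequences, so a bound such
# as a maximum combination length would have to be added.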
class Solution:
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
res = [0] * (target + 1)
for i in range(1, len(res)):
for num in nums:
if num > i:
break
elif num == i:
res[i] += 1
else:
res[i] += res[i-num]
return res[target]
# https://www.hrwhisper.me/leetcode-combination-sum-iv/
# dp[i] += dp[i-num]
def combinationSum4(self, nums, target):
dp = [1] + [0] * target
for i in range(1, target+1):
for num in nums:
if i >= num:
dp[i] += dp[i-num]
return dp[target]
print(Solution().combinationSum4([1, 2, 3], 4))
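# Expected output: 7 (the seven ordered sequences are enumerated in the problem statement above).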
| 3.546875 | 4 |
nvidia-texture-tools/conanfile.py | koeleck/conan-packages | 0 | 6004 | <reponame>koeleck/conan-packages<gh_stars>0
from conans import ConanFile, CMake, tools
import os
STATIC_LIBS = ["nvtt", "squish", "rg_etc1", "nvimage", "bc6h", "posh",
"bc7", "nvmath", "nvthread", "nvcore"]
SHARED_LIBS = ["nvtt", "nvimage", "nvthread", "nvmath", "nvcore"]
class NvidiatexturetoolsConan(ConanFile):
name = "nvidia-texture-tools"
version = "662d223626185f7c6c7e0d822a4796a691acc05a"
license = "MIT"
author = "koeleck"
url = "<Package recipe repository url here, for issues about the package>"
description = "The NVIDIA Texture Tools is a collection of image processing and texture manipulation tools, designed to be integrated in game tools and asset processing pipelines."
settings = "os", "compiler", "build_type", "arch"
source_subfolder = "nvtt"
no_copy_source = True
options = {"shared": [True, False],
"fPIC": [True, False],
"use_OpenMP": [True, False]
}
default_options = "shared=False", "fPIC=True", "use_OpenMP=True"
generators = "cmake"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
url = "https://github.com/castano/nvidia-texture-tools/archive/{}.zip".format(self.version)
tools.get(url)
os.rename('nvidia-texture-tools-{}'.format(self.version), self.source_subfolder)
tools.replace_in_file(os.path.join(self.source_subfolder, "CMakeLists.txt"), "PROJECT(NV)",
'''PROJECT(NV)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self)
cmake.definitions["HAVE_CUDA"] = False
cmake.definitions["HAVE_OPENMP"] = self.options.use_OpenMP
cmake.configure(source_folder=self.source_subfolder)
cmake.build()
def package(self):
self.copy("license*", src=self.source_subfolder, ignore_case=True, keep_path=False)
self.copy("nvtt.h", dst="include/nvtt", src=os.path.join(self.source_subfolder, "src", "nvtt"), keep_path=False)
self.copy("nvtt_wrapper.h", dst="include/nvtt", src=os.path.join(self.source_subfolder, "src", "nvtt"), keep_path=False)
if self.options.shared:
for libname in SHARED_LIBS:
self.copy("*{}*.dll".format(libname), dst="bin", src=os.path.join(self.build_folder, "bin"), keep_path=False)
self.copy("*{}*.lib".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
self.copy("*{}*.so*".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
else:
for libname in STATIC_LIBS:
self.copy("*{}*.a".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
self.copy("*{}*.lib".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
def package_info(self):
all_libs = tools.collect_libs(self)
if self.options.shared:
libs = all_libs
else:
libs = []
for libname in STATIC_LIBS:
libs += [lib for lib in all_libs if libname in lib]
self.cpp_info.libs = libs
if self.settings.os == "Linux":
self.cpp_info.libs.extend(["dl", "pthread"])
if self.options.shared:
self.cpp_info.defines = ["NVTT_SHARED=1"]
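# Rough usage sketch (an assumption, not part of the recipe): with a Conan 1.x client the
# package would typically be built and cached locally via
#   conan create . <user>/<channel>
# and consumed by requiring nvidia-texture-tools/<version>@<user>/<channel>.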
| 1.648438 | 2 |
train_args.py | MyWay/Create-Your-Own-Image-Classifier | 0 | 6005 | <reponame>MyWay/Create-Your-Own-Image-Classifier
#!/usr/bin/env python3
""" train_args.py
train_args.py command-line args.
"""
import argparse
def get_args():
"""
"""
parser = argparse.ArgumentParser(
description="This script lets you train and save your model.",
usage="python3 train.py flowers/train --gpu --learning_rate 0.001 --epochs 11 --gpu --hidden_units 500",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('data_directory', action="store")
parser.add_argument('--arch',
action="store",
default="alexnet",
dest='arch',
type=str,
                        help='Model architecture to use (e.g. alexnet).',
)
parser.add_argument('--save_dir',
action="store",
default=".",
dest='save_dir',
type=str,
help='Directory to save the model file.',
)
parser.add_argument('--save_name',
action="store",
default="checkpoint",
dest='save_name',
type=str,
help='Checkpoint filename.',
)
parser.add_argument('--categories_json',
action="store",
default="cat_to_name.json",
dest='categories_json',
type=str,
help='Path to file containing the categories.',
)
parser.add_argument('--gpu',
action="store_true",
dest="use_gpu",
default=False,
help='Use the GPU to train instead of the CPU')
hp = parser.add_argument_group('hyperparameters')
hp.add_argument('--learning_rate',
action="store",
default=0.001,
type=float,
help='Learning rate')
hp.add_argument('--hidden_units', '-hu',
action="store",
dest="hidden_units",
default=[4096],
type=int,
nargs='+',
help='Hidden layer units')
hp.add_argument('--epochs',
action="store",
dest="epochs",
default=1,
type=int,
help='Epochs to train the model for')
parser.parse_args()
return parser
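# Hypothetical consumption sketch (how train.py uses this module is an assumption):
#
#   args = get_args().parse_args()
#   print(args.data_directory, args.arch, args.hidden_units, args.epochs)
#
# Note that get_args() already calls parse_args() once (discarding the result) before
# returning the parser itself.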
def main():
"""
Main Function
"""
print(f'Command line argument utility for train.py.\nTry "python train.py -h".')
if __name__ == '__main__':
main()
"""
main() is called if the script is executed on its own.
""" | 3.015625 | 3 |
apps/payment/views.py | canadiyaman/thetask | 0 | 6006 | <reponame>canadiyaman/thetask<filename>apps/payment/views.py<gh_stars>0
from django.http import HttpResponseRedirect
from django.conf import settings
from django.views.generic import TemplateView
from apps.payment.models import PaymentLog
from apps.payment.stripe import get_token, get_payment_charge
from apps.subscription.views import start_subscription
class ChargeView(TemplateView):
template_name = 'payment/charge.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['stripe_public_key'] = settings.STRIPE_PUBLISHABLE_KEY
context['amount'] = 100
context['currency'] = 'tl'
return context
def post(self, request):
name = request.POST.get('name')
card_number = request.POST.get('cardnumber')
exp_month = int(request.POST.get('exp-date').split('/')[0])
exp_year = int(request.POST.get('exp-date').split('/')[1])
cvc = request.POST.get('cvc')
card = {
"name": name,
"number": card_number,
"exp_month": exp_month,
"exp_year": exp_year,
"cvc": cvc
}
token = get_token(card)
charge = get_payment_charge(amount=100, currency="usd", description="test", token=token.stripe_id)
if charge.paid:
log_payment(user=request.user, data=charge)
start_subscription(request.user)
return HttpResponseRedirect('/')
def log_payment(user, data):
PaymentLog.objects.create(user=user, data=data)
| 2 | 2 |
users/apps.py | srinidhibhat/booknotes | 0 | 6007 | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
# below piece of code is needed for automatic profile creation for user
def ready(self):
import users.signals
| 1.921875 | 2 |
secure_data_store/cli.py | HumanBrainProject/secure-data-store | 1 | 6008 | <reponame>HumanBrainProject/secure-data-store
# -*- coding: utf-8 -*-
"""Console script for secure_data_store."""
import click
from . import secure_data_store as sds
CONFIG='~/.sdsrc'
@click.group()
def main():
"""Wrapper for GoCryptFS"""
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def create(name, config=None):
"""Create a new secure data container NAME."""
try:
config = sds.read_config(config)
sds.create(config, name)
except (sds.ContainerError, sds.GCFSError, FileExistsError, sds.ConfigError) as err:
print(err)
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def open(name, config=None):
"""Open an existing secure data container NAME.
Will print path to the opened, clear-text container."""
try:
config = sds.read_config(config)
sds.mount(config, name)
except (sds.ContainerError, sds.GCFSError, sds.ConfigError, sds.MountError) as err:
print(err)
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def close(name, config=None):
"""Close an opend data container NAME."""
try:
config = sds.read_config(config)
sds.unmount(config, name)
except (sds.ContainerError, sds.GCFSError, sds.ConfigError) as err:
print(err)
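# Example invocations (assuming the module is exposed as an `sds` console script; the
# entry-point name is an assumption):
#   sds create mydata --config ~/.sdsrc
#   sds open mydata     # prints the path of the mounted clear-text container
#   sds close mydata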
if __name__ == '__main__':
    main()
| 2.234375 | 2 |
Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py | marient/PelePhysics | 1 | 6009 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# <NAME>
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Drawable import Drawable
def nodeAttributes():
"""return a list of valid attributes for Node"""
return Node._validAttributes.keys()
class Node(Drawable):
def id(self): return self._id
def __init__(self, id):
Drawable.__init__(self)
self._id = id
return
_validAttributes = {
"color" : None,
"fontcolor" : None,
"fontname" : None,
"fontsize" : None,
"height" : None,
"label" : None,
"layer" : None,
"shape" : None,
"shapefile" : None,
"style" : None,
"width" : None
}
# version
__id__ = "$Id$"
#
# End of file
| 2.28125 | 2 |
cairis/gui/RiskScatterPanel.py | RachelLar/cairis_update | 0 | 6010 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pprint
import random
import wx
from cairis.core.armid import *
from cairis.core.Borg import Borg
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
def riskColourCode(riskScore):
if (riskScore <= 1):
return '#fef2ec'
elif (riskScore == 2):
return '#fcd9c8'
elif (riskScore == 3):
return '#f7ac91'
elif (riskScore == 4):
return '#f67e61'
elif (riskScore == 5):
return '#f2543d'
elif (riskScore == 6):
return '#e42626'
elif (riskScore == 7):
return '#b9051a'
elif (riskScore == 8):
return '#900014'
else:
return '#52000D'
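# Illustrative mapping, read directly off the branches above:
#   riskColourCode(1) -> '#fef2ec', riskColourCode(5) -> '#f2543d', riskColourCode(9) -> '#52000D'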
class RiskScatterPanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent,RISKSCATTER_ID)
b = Borg()
self.dbProxy = b.dbProxy
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigCanvas(self, -1, self.fig)
self.axes = self.fig.add_subplot(111,xlabel='Severity',ylabel='Likelihood',autoscale_on=False)
self.axes.set_xticklabels(['Marginal','Critical','Catastrophic'])
self.axes.set_yticks([0,1,2,3,4,5])
self.toolbar = NavigationToolbar(self.canvas)
envs = self.dbProxy.getDimensionNames('environment')
self.envCombo = wx.ComboBox(self,RISKSCATTER_COMBOENVIRONMENT_ID,envs[0],choices=envs,size=(300,-1),style=wx.CB_DROPDOWN)
self.envCombo.Bind(wx.EVT_COMBOBOX,self.onEnvironmentChange)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.Add(self.envCombo,0, wx.EXPAND)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.vbox)
self.vbox.Fit(self)
self.drawScatter(envs[0])
def drawScatter(self,envName):
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlabel('Severity')
self.axes.set_ylabel('Likelihood')
self.axes.set_xbound(0,4)
self.axes.set_ybound(0,5)
xs,ys,cs = self.dbProxy.riskScatter(envName)
ccs = []
for c in cs:
ccs.append(riskColourCode(c))
if ((len(xs) > 0) and (len(ys) > 0)):
self.axes.scatter(xs,ys,c=ccs,marker='d')
self.canvas.draw()
def onEnvironmentChange(self,evt):
envName = self.envCombo.GetStringSelection()
self.drawScatter(envName)
def on_save_plot(self, event):
fileChoices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(self,message="Save risk scatter",defaultDir=os.getcwd(),defaultFile="scatter.png",wildcard=fileChoices,style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
| 1.695313 | 2 |
sdk/python/pulumi_azure/containerservice/get_registry.py | aangelisc/pulumi-azure | 0 | 6011 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetRegistryResult',
'AwaitableGetRegistryResult',
'get_registry',
]
@pulumi.output_type
class GetRegistryResult:
"""
A collection of values returned by getRegistry.
"""
def __init__(__self__, admin_enabled=None, admin_password=None, admin_username=None, id=None, location=None, login_server=None, name=None, resource_group_name=None, sku=None, storage_account_id=None, tags=None):
if admin_enabled and not isinstance(admin_enabled, bool):
raise TypeError("Expected argument 'admin_enabled' to be a bool")
pulumi.set(__self__, "admin_enabled", admin_enabled)
if admin_password and not isinstance(admin_password, str):
raise TypeError("Expected argument 'admin_password' to be a str")
pulumi.set(__self__, "admin_password", <PASSWORD>)
if admin_username and not isinstance(admin_username, str):
raise TypeError("Expected argument 'admin_username' to be a str")
pulumi.set(__self__, "admin_username", admin_username)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if login_server and not isinstance(login_server, str):
raise TypeError("Expected argument 'login_server' to be a str")
pulumi.set(__self__, "login_server", login_server)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if sku and not isinstance(sku, str):
raise TypeError("Expected argument 'sku' to be a str")
pulumi.set(__self__, "sku", sku)
if storage_account_id and not isinstance(storage_account_id, str):
raise TypeError("Expected argument 'storage_account_id' to be a str")
pulumi.set(__self__, "storage_account_id", storage_account_id)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="adminEnabled")
def admin_enabled(self) -> bool:
"""
Is the Administrator account enabled for this Container Registry.
"""
return pulumi.get(self, "admin_enabled")
@property
@pulumi.getter(name="adminPassword")
def admin_password(self) -> str:
"""
The Password associated with the Container Registry Admin account - if the admin account is enabled.
"""
return pulumi.get(self, "admin_password")
@property
@pulumi.getter(name="adminUsername")
def admin_username(self) -> str:
"""
The Username associated with the Container Registry Admin account - if the admin account is enabled.
"""
return pulumi.get(self, "admin_username")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region in which this Container Registry exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="loginServer")
def login_server(self) -> str:
"""
The URL that can be used to log into the container registry.
"""
return pulumi.get(self, "login_server")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def sku(self) -> str:
"""
The SKU of this Container Registry, such as `Basic`.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> str:
"""
The ID of the Storage Account used for this Container Registry. This is only returned for `Classic` SKU's.
"""
return pulumi.get(self, "storage_account_id")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A map of tags assigned to the Container Registry.
"""
return pulumi.get(self, "tags")
class AwaitableGetRegistryResult(GetRegistryResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegistryResult(
admin_enabled=self.admin_enabled,
admin_password=<PASSWORD>,
admin_username=self.admin_username,
id=self.id,
location=self.location,
login_server=self.login_server,
name=self.name,
resource_group_name=self.resource_group_name,
sku=self.sku,
storage_account_id=self.storage_account_id,
tags=self.tags)
def get_registry(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistryResult:
"""
Use this data source to access information about an existing Container Registry.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.containerservice.get_registry(name="testacr",
resource_group_name="test")
pulumi.export("loginServer", example.login_server)
```
:param str name: The name of the Container Registry.
:param str resource_group_name: The Name of the Resource Group where this Container Registry exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:containerservice/getRegistry:getRegistry', __args__, opts=opts, typ=GetRegistryResult).value
return AwaitableGetRegistryResult(
admin_enabled=__ret__.admin_enabled,
admin_password=__ret__.admin_password,
admin_username=__ret__.admin_username,
id=__ret__.id,
location=__ret__.location,
login_server=__ret__.login_server,
name=__ret__.name,
resource_group_name=__ret__.resource_group_name,
sku=__ret__.sku,
storage_account_id=__ret__.storage_account_id,
tags=__ret__.tags)
| 1.867188 | 2 |
contrib/memcache_whisper.py | TimWhalen/graphite-web | 1 | 6012 | <filename>contrib/memcache_whisper.py
#!/usr/bin/env python
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
# Header = Metadata,ArchiveInfo+
# Metadata = lastUpdate,maxRetention,xFilesFactor,archiveCount
# ArchiveInfo = Offset,SecondsPerPoint,Points
# Data = Archive+
# Archive = Point+
# Point = timestamp,value
"""
NOTE: This is a modified version of whisper.py
For details on the modification, read https://bugs.launchpad.net/graphite/+bug/245835
"""
import os, struct, time
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK = False
CACHE_HEADERS = False
__headerCache = {}
longFormat = "!L"
longSize = struct.calcsize(longFormat)
floatFormat = "!f"
floatSize = struct.calcsize(floatFormat)
timestampFormat = "!L"
timestampSize = struct.calcsize(timestampFormat)
valueFormat = "!d"
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"
archiveInfoSize = struct.calcsize(archiveInfoFormat)
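# Resulting on-disk sizes for the formats above: the metadata block is 16 bytes
# (!2LfL = 4+4+4+4), each archive-info record is 12 bytes (!3L), and each data point is
# 12 bytes (!Ld = 4-byte timestamp + 8-byte double).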
debug = startBlock = endBlock = lambda *a,**k: None
def exists(path):
return os.path.exists(path)
def drop(path):
os.remove(path)
def enableMemcache(servers = ['127.0.0.1:11211'], min_compress_len = 0):
from StringIO import StringIO
import memcache
global open, exists, drop
MC = memcache.Client(servers)
class open(StringIO):
def __init__(self,*args,**kwargs):
self.name = args[0]
self.mode = args[1]
if self.mode == "r+b" or self.mode == "rb":
StringIO.__init__(self, MC.get(self.name))
else:
StringIO.__init__(self)
def close(self):
if self.mode == "r+b" or self.mode == "wb":
MC.set(self.name, self.getvalue(), min_compress_len = min_compress_len)
StringIO.close(self)
def exists(path):
return MC.get(path) != None
def drop(path):
MC.delete(path)
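# Optional: route all whisper I/O through memcached instead of the filesystem (the server
# address shown is just the default):
#   enableMemcache(servers=['127.0.0.1:11211'])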
def enableDebug():
global open, debug, startBlock, endBlock
class open(file):
def __init__(self,*args,**kwargs):
file.__init__(self,*args,**kwargs)
self.writeCount = 0
self.readCount = 0
def write(self,data):
self.writeCount += 1
debug('WRITE %d bytes #%d' % (len(data),self.writeCount))
return file.write(self,data)
def read(self,bytes):
self.readCount += 1
debug('READ %d bytes #%d' % (bytes,self.readCount))
return file.read(self,bytes)
def debug(message):
print('DEBUG :: %s' % message)
__timingBlocks = {}
def startBlock(name):
__timingBlocks[name] = time.time()
def endBlock(name):
debug("%s took %.5f seconds" % (name,time.time() - __timingBlocks.pop(name)))
def __readHeader(fh):
info = __headerCache.get(fh.name)
if info: return info
#startBlock('__readHeader')
originalOffset = fh.tell()
fh.seek(0)
packedMetadata = fh.read(metadataSize)
(lastUpdate,maxRetention,xff,archiveCount) = struct.unpack(metadataFormat,packedMetadata)
archives = []
for i in xrange(archiveCount):
packedArchiveInfo = fh.read(archiveInfoSize)
(offset,secondsPerPoint,points) = struct.unpack(archiveInfoFormat,packedArchiveInfo)
archiveInfo = {
'offset' : offset,
'secondsPerPoint' : secondsPerPoint,
'points' : points,
'retention' : secondsPerPoint * points,
'size' : points * pointSize,
}
archives.append(archiveInfo)
fh.seek(originalOffset)
info = {
'lastUpdate' : lastUpdate,
'maxRetention' : maxRetention,
'xFilesFactor' : xff,
'archives' : archives,
}
if CACHE_HEADERS:
__headerCache[fh.name] = info
#endBlock('__readHeader')
return info
def __changeLastUpdate(fh):
return #XXX Make this a NOP, use os.stat(filename).st_mtime instead
startBlock('__changeLastUpdate()')
originalOffset = fh.tell()
fh.seek(0) #Based on assumption that first field is lastUpdate
now = int( time.time() )
packedTime = struct.pack(timestampFormat,now)
fh.write(packedTime)
fh.seek(originalOffset)
endBlock('__changeLastUpdate()')
def create(path,archiveList,xFilesFactor=0.5):
"""create(path,archiveList,xFilesFactor=0.5)
path is a string
archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
"""
#Validate archive configurations...
assert archiveList, "You must specify at least one archive configuration!"
archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint)
for i,archive in enumerate(archiveList):
if i == len(archiveList) - 1: break
next = archiveList[i+1]
assert archive[0] < next[0],\
"You cannot configure two archives with the same precision %s,%s" % (archive,next)
assert (next[0] % archive[0]) == 0,\
"Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0])
retention = archive[0] * archive[1]
nextRetention = next[0] * next[1]
assert nextRetention > retention,\
"Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next)
#Looks good, now we create the file and write the header
assert not exists(path), "File %s already exists!" % path
fh = open(path,'wb')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
lastUpdate = struct.pack( timestampFormat, int(time.time()) )
oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1]
maxRetention = struct.pack( longFormat, oldest )
xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) )
archiveCount = struct.pack(longFormat, len(archiveList))
packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount
fh.write(packedMetadata)
headerSize = metadataSize + (archiveInfoSize * len(archiveList))
archiveOffsetPointer = headerSize
for secondsPerPoint,points in archiveList:
archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
fh.write(archiveInfo)
archiveOffsetPointer += (points * pointSize)
zeroes = '\x00' * (archiveOffsetPointer - headerSize)
fh.write(zeroes)
fh.close()
def __propagate(fh,timestamp,xff,higher,lower):
lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint'])
lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint']
fh.seek(higher['offset'])
packedPoint = fh.read(pointSize)
(higherBaseInterval,higherBaseValue) = struct.unpack(pointFormat,packedPoint)
if higherBaseInterval == 0:
higherFirstOffset = higher['offset']
else:
timeDistance = lowerIntervalStart - higherBaseInterval
pointDistance = timeDistance / higher['secondsPerPoint']
byteDistance = pointDistance * pointSize
higherFirstOffset = higher['offset'] + (byteDistance % higher['size'])
higherPoints = lower['secondsPerPoint'] / higher['secondsPerPoint']
higherSize = higherPoints * pointSize
higherLastOffset = higherFirstOffset + (higherSize % higher['size'])
fh.seek(higherFirstOffset)
if higherFirstOffset < higherLastOffset: #we don't wrap the archive
seriesString = fh.read(higherLastOffset - higherFirstOffset)
else: #We do wrap the archive
higherEnd = higher['offset'] + higher['size']
seriesString = fh.read(higherEnd - higherFirstOffset)
fh.seek(higher['offset'])
seriesString += fh.read(higherLastOffset - higher['offset'])
#Now we unpack the series data we just read
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
points = len(seriesString) / pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values
neighborValues = [None] * points
currentInterval = lowerIntervalStart
step = higher['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
neighborValues[i/2] = unpackedSeries[i+1]
currentInterval += step
#Propagate aggregateValue to propagate from neighborValues if we have enough known points
knownValues = [v for v in neighborValues if v is not None]
knownPercent = float(len(knownValues)) / float(len(neighborValues))
if knownPercent >= xff: #we have enough data to propagate a value!
aggregateValue = float(sum(knownValues)) / float(len(knownValues)) #TODO another CF besides average?
myPackedPoint = struct.pack(pointFormat,lowerIntervalStart,aggregateValue)
fh.seek(lower['offset'])
packedPoint = fh.read(pointSize)
(lowerBaseInterval,lowerBaseValue) = struct.unpack(pointFormat,packedPoint)
if lowerBaseInterval == 0: #First propagated update to this lower archive
fh.seek(lower['offset'])
fh.write(myPackedPoint)
else: #Not our first propagated update to this lower archive
timeDistance = lowerIntervalStart - lowerBaseInterval
pointDistance = timeDistance / lower['secondsPerPoint']
byteDistance = pointDistance * pointSize
lowerOffset = lower['offset'] + (byteDistance % lower['size'])
fh.seek(lowerOffset)
fh.write(myPackedPoint)
return True
else:
return False
def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
#startBlock('complete update')
value = float(value)
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
if timestamp is None: timestamp = now
timestamp = int(timestamp)
diff = now - timestamp
assert diff < header['maxRetention'] and diff >= 0, "Timestamp not covered by any archives in this database"
for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp
if archive['retention'] < diff: continue
lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later
break
#First we update the highest-precision archive
myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
myPackedPoint = struct.pack(pointFormat,myInterval,value)
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0: #This file's first update
fh.seek(archive['offset'])
fh.write(myPackedPoint)
baseInterval,baseValue = myInterval,value
else: #Not our first update
timeDistance = myInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
fh.write(myPackedPoint)
#Now we propagate the update to lower-precision archives
#startBlock('update propagation')
higher = archive
for lower in lowerArchives:
if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower): break
higher = lower
#endBlock('update propagation')
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update')
def update_many(path,points):
"""update_many(path,points)
path is a string
points is a list of (timestamp,value) points
"""
#startBlock('complete update_many path=%s points=%d' % (path,len(points)))
if not points: return
points = [ (int(t),float(v)) for (t,v) in points]
points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
archives = iter( header['archives'] )
currentArchive = next(archives)
#debug(' update_many currentArchive=%s' % str(currentArchive))
currentPoints = []
for point in points:
age = now - point[0]
#debug(' update_many iterating points, point=%s age=%d' % (str(point),age))
while currentArchive['retention'] < age: #we can't fit any more points in this archive
#debug(' update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))
if currentPoints: #commit all the points we've found that it can fit
currentPoints.reverse() #put points in chronological order
__archive_update_many(fh,header,currentArchive,currentPoints)
currentPoints = []
try:
currentArchive = next(archives)
#debug(' update_many using next archive %s' % str(currentArchive))
except StopIteration:
#debug(' update_many no more archives!')
currentArchive = None
break
if not currentArchive: break #drop remaining points that don't fit in the database
#debug(' update_many adding point=%s' % str(point))
currentPoints.append(point)
#debug(' update_many done iterating points')
if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
currentPoints.reverse()
__archive_update_many(fh,header,currentArchive,currentPoints)
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update_many path=%s points=%d' % (path,len(points)))
def __archive_update_many(fh,header,archive,points):
step = archive['secondsPerPoint']
#startBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
alignedPoints = [ (timestamp - (timestamp % step), value)
for (timestamp,value) in points ]
#Create a packed string for each contiguous sequence of points
#startBlock('__archive_update_many string packing')
packedStrings = []
previousInterval = None
currentString = ""
for (interval,value) in alignedPoints:
#debug('__archive_update_many iterating alignedPoint at %s' % interval)
if (not previousInterval) or (interval == previousInterval + step):
#debug('__archive_update_many was expected, packing onto currentString')
currentString += struct.pack(pointFormat,interval,value)
previousInterval = interval
else:
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many was NOT expected, appending to packedStrings startInterval=%s currentString=%d bytes' % (startInterval,len(currentString)))
packedStrings.append( (startInterval,currentString) )
currentString = struct.pack(pointFormat,interval,value)
previousInterval = interval
if currentString:
#startInterval = previousInterval - (step * len(currentString) / pointSize) + step
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many done iterating alignedPoints, remainder currentString of %d bytes, startInterval=%s' % (len(currentString),startInterval))
packedStrings.append( (startInterval,currentString) )
#endBlock('__archive_update_many string packing')
#Read base point and determine where our writes will start
fh.seek(archive['offset'])
packedBasePoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedBasePoint)
if baseInterval == 0: #This file's first update
#debug('__archive_update_many first update')
baseInterval = packedStrings[0][0] #use our first string as the base, so we start at the start
#debug('__archive_update_many baseInterval is %s' % baseInterval)
#Write all of our packed strings in locations determined by the baseInterval
#startBlock('__archive_update_many write() operations')
for (interval,packedString) in packedStrings:
timeDistance = interval - baseInterval
pointDistance = timeDistance / step
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
archiveEnd = archive['offset'] + archive['size']
bytesBeyond = (myOffset + len(packedString)) - archiveEnd
#debug(' __archive_update_many myOffset=%d packedString=%d archiveEnd=%d bytesBeyond=%d' % (myOffset,len(packedString),archiveEnd,bytesBeyond))
if bytesBeyond > 0:
fh.write( packedString[:-bytesBeyond] )
#debug('We wrapped an archive!')
assert fh.tell() == archiveEnd, "archiveEnd=%d fh.tell=%d bytesBeyond=%d len(packedString)=%d" % (archiveEnd,fh.tell(),bytesBeyond,len(packedString))
fh.seek( archive['offset'] )
fh.write( packedString[-bytesBeyond:] ) #safe because it can't exceed the archive (retention checking logic above)
else:
fh.write(packedString)
#endBlock('__archive_update_many write() operations')
#Now we propagate the updates to lower-precision archives
#startBlock('__archive_update_many propagation')
higher = archive
lowerArchives = [arc for arc in header['archives'] if arc['secondsPerPoint'] > archive['secondsPerPoint']]
#debug('__archive_update_many I have %d lower archives' % len(lowerArchives))
for lower in lowerArchives:
fit = lambda i: i - (i % lower['secondsPerPoint'])
lowerIntervals = [fit(p[0]) for p in alignedPoints]
uniqueLowerIntervals = set(lowerIntervals)
#debug(' __archive_update_many points=%d unique=%d' % (len(alignedPoints),len(uniqueLowerIntervals)))
propagateFurther = False
for interval in uniqueLowerIntervals:
#debug(' __archive_update_many propagating from %d to %d, interval=%d' % (higher['secondsPerPoint'],lower['secondsPerPoint'],interval))
if __propagate(fh,interval,header['xFilesFactor'],higher,lower):
propagateFurther = True
#debug(' __archive_update_many Successful propagation!')
#debug(' __archive_update_many propagateFurther=%s' % propagateFurther)
if not propagateFurther: break
higher = lower
#endBlock('__archive_update_many propagation')
#endBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
def info(path):
"""info(path)
path is a string
"""
fh = open(path,'rb')
info = __readHeader(fh)
fh.close()
return info
def fetch(path,fromTime,untilTime=None):
"""fetch(path,fromTime,untilTime=None)
path is a string
fromTime is an epoch time
untilTime is also an epoch time, but defaults to now
"""
fh = open(path,'rb')
header = __readHeader(fh)
now = int( time.time() )
if untilTime is None or untilTime > now:
untilTime = now
if fromTime < (now - header['maxRetention']):
fromTime = now - header['maxRetention']
assert fromTime < untilTime, "Invalid time interval"
diff = now - fromTime
for archive in header['archives']:
if archive['retention'] >= diff: break
fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) )
untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) )
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0:
step = archive['secondsPerPoint']
points = (untilInterval - fromInterval) / step
timeInfo = (fromInterval,untilInterval,step)
valueList = [None] * points
return (timeInfo,valueList)
#Determine fromOffset
timeDistance = fromInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
fromOffset = archive['offset'] + (byteDistance % archive['size'])
#Determine untilOffset
timeDistance = untilInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
untilOffset = archive['offset'] + (byteDistance % archive['size'])
#Read all the points in the interval
fh.seek(fromOffset)
if fromOffset < untilOffset: #If we don't wrap around the archive
seriesString = fh.read(untilOffset - fromOffset)
else: #We do wrap around the archive, so we need two reads
archiveEnd = archive['offset'] + archive['size']
seriesString = fh.read(archiveEnd - fromOffset)
fh.seek(archive['offset'])
seriesString += fh.read(untilOffset - archive['offset'])
#Now we unpack the series data we just read (anything faster than unpack?)
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
points = len(seriesString) / pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values (optimize this!)
valueList = [None] * points #pre-allocate entire list for speed
currentInterval = fromInterval
step = archive['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
pointValue = unpackedSeries[i+1]
valueList[i/2] = pointValue #in-place reassignment is faster than append()
currentInterval += step
fh.close()
timeInfo = (fromInterval,untilInterval,step)
return (timeInfo,valueList)
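#Illustrative usage of fetch() above (a sketch, not part of the original
#module; the whisper file path below is hypothetical):
#
#  (timeInfo, values) = fetch('/opt/graphite/storage/whisper/some.metric.wsp',
#                             fromTime=time.time() - 3600)
#  (fromInterval, untilInterval, step) = timeInfo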
| 2.015625 | 2 |
main.py | showtimesynergy/mojify | 0 | 6013 | <gh_stars>0
from PIL import Image
import csv
from ast import literal_eval as make_tuple
from math import sqrt
import argparse
import os.path
def load_img(image):
# load an image as a PIL object
im = Image.open(image).convert('RGBA')
return im
def color_distance(c_tuple1, c_tuple2):
    # calculate the perceptual distance between two RGB tuples using the
    # low-cost "redmean" approximation (channel weights depend on the mean red)
red_mean = (c_tuple1[0] + c_tuple2[0]) / 2
red = c_tuple1[0] - c_tuple2[0]
green = c_tuple1[1] - c_tuple2[1]
blue = c_tuple1[2] - c_tuple2[2]
delta = (2 + (red_mean / 256)) * (red ** 2)
delta += (4 * (green ** 2))
delta += (2 + ((255 - red_mean) / 256)) * (blue ** 2)
delta = sqrt(delta)
return delta
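# A quick illustration of the metric above (not part of the original script):
# identical colours give 0.0, while opposite primaries are far apart, e.g.
#   color_distance((255, 0, 0), (0, 255, 0))   # roughly 650
#   color_distance((10, 20, 30), (10, 20, 30)) # 0.0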
def write_out(text_matrix):
# write out emoji grid to txt file
with open('out.txt', '+w', encoding='utf-8') as out:
for line in text_matrix:
line_out = ''
for char in line:
# TODO: ZWJ support
                if char is None:
                    # blank cell: em quad + six-per-em space as a placeholder
                    line_out += '\u2001\u2006'
else:
char_code = '0x' + char
char_code = int(char_code, base=16)
line_out += chr(char_code)
out.writelines(line_out + '\n')
def gen_matrix(pix_data):
    # build a 2D grid of emoji code points by choosing, for each pixel,
    # the emoji whose representative colour is closest to that pixel
pix = pix_data.load()
emoji_grid = []
for y in range(0, size[1]):
emoji_grid.append([])
for x in range(0, size[0]):
pixel = pix[x, y]
best_delta = float('Inf')
for entry in emoji_list:
emoji_color = entry[1]
                if pixel[3] == 0:
                    # fully transparent pixel: leave this cell blank
                    best = None
else:
delta = color_distance(emoji_color, pixel)
if delta < best_delta:
best = entry[0]
best_delta = delta
emoji_grid[-1].append(best)
return emoji_grid
def handle_arguments():
parser = argparse.ArgumentParser(
description='Represent an image using emoji'
)
parser.add_argument('image', help='image to be processed')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = handle_arguments()
path = args.image
    emoji_list = []
    with open('proc.csv') as csv_file:
        reader = csv.reader(csv_file)
        for entry in reader:
            # each row pairs an emoji code point (hex string) with its
            # representative colour tuple, as consumed by gen_matrix() above
            emoji_list.append([entry[0], make_tuple(entry[1])])
image = load_img(path)
size = image.size
emoji_grid = gen_matrix(image)
write_out(emoji_grid)
print('Output in out.txt')
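# Typical invocation (illustrative only; assumes proc.csv sits next to this
# script and 'picture.png' is any image PIL can open in RGBA mode):
#
#   python main.py picture.png
#
# The emoji rendering is then written to out.txt in the working directory.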
| 3.015625 | 3 |
venv/lib/python3.7/site-packages/Xlib/ext/xinput.py | umr-bot/sliding-puzzle-solver-bot | 0 | 6014 | # Xlib.ext.xinput -- XInput extension module
#
# Copyright (C) 2012 Outpost Embedded, LLC
# <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
'''
A very incomplete implementation of the XInput extension.
'''
import sys
import array
import struct
# Python 2/3 compatibility.
from six import integer_types
from Xlib.protocol import rq
from Xlib import X
extname = 'XInputExtension'
PropertyDeleted = 0
PropertyCreated = 1
PropertyModified = 2
NotifyNormal = 0
NotifyGrab = 1
NotifyUngrab = 2
NotifyWhileGrabbed = 3
NotifyPassiveGrab = 4
NotifyPassiveUngrab = 5
NotifyAncestor = 0
NotifyVirtual = 1
NotifyInferior = 2
NotifyNonlinear = 3
NotifyNonlinearVirtual = 4
NotifyPointer = 5
NotifyPointerRoot = 6
NotifyDetailNone = 7
GrabtypeButton = 0
GrabtypeKeycode = 1
GrabtypeEnter = 2
GrabtypeFocusIn = 3
GrabtypeTouchBegin = 4
AnyModifier = (1 << 31)
AnyButton = 0
AnyKeycode = 0
AsyncDevice = 0
SyncDevice = 1
ReplayDevice = 2
AsyncPairedDevice = 3
AsyncPair = 4
SyncPair = 5
SlaveSwitch = 1
DeviceChange = 2
MasterAdded = (1 << 0)
MasterRemoved = (1 << 1)
SlaveAdded = (1 << 2)
SlaveRemoved = (1 << 3)
SlaveAttached = (1 << 4)
SlaveDetached = (1 << 5)
DeviceEnabled = (1 << 6)
DeviceDisabled = (1 << 7)
AddMaster = 1
RemoveMaster = 2
AttachSlave = 3
DetachSlave = 4
AttachToMaster = 1
Floating = 2
ModeRelative = 0
ModeAbsolute = 1
MasterPointer = 1
MasterKeyboard = 2
SlavePointer = 3
SlaveKeyboard = 4
FloatingSlave = 5
KeyClass = 0
ButtonClass = 1
ValuatorClass = 2
ScrollClass = 3
TouchClass = 8
KeyRepeat = (1 << 16)
AllDevices = 0
AllMasterDevices = 1
DeviceChanged = 1
KeyPress = 2
KeyRelease = 3
ButtonPress = 4
ButtonRelease = 5
Motion = 6
Enter = 7
Leave = 8
FocusIn = 9
FocusOut = 10
HierarchyChanged = 11
PropertyEvent = 12
RawKeyPress = 13
RawKeyRelease = 14
RawButtonPress = 15
RawButtonRelease = 16
RawMotion = 17
DeviceChangedMask = (1 << DeviceChanged)
KeyPressMask = (1 << KeyPress)
KeyReleaseMask = (1 << KeyRelease)
ButtonPressMask = (1 << ButtonPress)
ButtonReleaseMask = (1 << ButtonRelease)
MotionMask = (1 << Motion)
EnterMask = (1 << Enter)
LeaveMask = (1 << Leave)
FocusInMask = (1 << FocusIn)
FocusOutMask = (1 << FocusOut)
HierarchyChangedMask = (1 << HierarchyChanged)
PropertyEventMask = (1 << PropertyEvent)
RawKeyPressMask = (1 << RawKeyPress)
RawKeyReleaseMask = (1 << RawKeyRelease)
RawButtonPressMask = (1 << RawButtonPress)
RawButtonReleaseMask = (1 << RawButtonRelease)
RawMotionMask = (1 << RawMotion)
GrabModeSync = 0
GrabModeAsync = 1
GrabModeTouch = 2
DEVICEID = rq.Card16
DEVICE = rq.Card16
DEVICEUSE = rq.Card8
class FP1616(rq.Int32):
def check_value(self, value):
return int(value * 65536.0)
def parse_value(self, value, display):
return float(value) / float(1 << 16)
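# Illustrative round-trip for the 16.16 fixed-point field above (not part of
# the original module): a raw wire value of 0x00018000 (98304) parses to
# 98304 / 65536 = 1.5, and check_value(1.5) packs it back to 98304.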
class FP3232(rq.ValueField):
structcode = 'lL'
structvalues = 2
def check_value(self, value):
return value
def parse_value(self, value, display):
integral, frac = value
ret = float(integral)
# optimised math.ldexp(float(frac), -32)
ret += float(frac) * (1.0 / (1 << 32))
return ret
class XIQueryVersion(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(47),
rq.RequestLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
rq.Pad(20),
)
def query_version(self):
return XIQueryVersion(
display=self.display,
opcode=self.display.get_extension_major(extname),
major_version=2,
minor_version=0,
)
class Mask(rq.List):
def __init__(self, name):
rq.List.__init__(self, name, rq.Card32, pad=0)
def pack_value(self, val):
mask_seq = array.array(rq.struct_to_array_codes['L'])
if isinstance(val, integer_types):
# We need to build a "binary mask" that (as far as I can tell) is
# encoded in native byte order from end to end. The simple case is
# with a single unsigned 32-bit value, for which we construct an
# array with just one item. For values too big to fit inside 4
# bytes we build a longer array, being careful to maintain native
# byte order across the entire set of values.
if sys.byteorder == 'little':
def fun(val):
mask_seq.insert(0, val)
elif sys.byteorder == 'big':
fun = mask_seq.append
else:
raise AssertionError(sys.byteorder)
while val:
fun(val & 0xFFFFFFFF)
val = val >> 32
else:
mask_seq.extend(val)
return mask_seq.tostring(), len(mask_seq), None
EventMask = rq.Struct(
DEVICE('deviceid'),
rq.LengthOf('mask', 2),
Mask('mask'),
)
class XISelectEvents(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(46),
rq.RequestLength(),
rq.Window('window'),
rq.LengthOf('masks', 2),
rq.Pad(2),
rq.List('masks', EventMask),
)
def select_events(self, event_masks):
'''
select_events(event_masks)
event_masks:
        Sequence of (deviceid, mask) pairs, where deviceid is a numerical device
        ID, or AllDevices or AllMasterDevices, and mask is either an unsigned
        integer or a sequence of 32-bit unsigned values
'''
return XISelectEvents(
display=self.display,
opcode=self.display.get_extension_major(extname),
window=self,
masks=event_masks,
)
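# Illustrative usage (a sketch, not from the original module; assumes `root`
# is a window of a Display on which this extension has been set up via init()
# below, which registers select_events as a window method):
#
#   root.xinput_select_events([
#       (AllMasterDevices, ButtonPressMask | ButtonReleaseMask),
#   ])
#
# Each (deviceid, mask) pair is packed through the EventMask struct above.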
AnyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Pad(2),
)
class ButtonMask(object):
def __init__(self, value, length):
self._value = value
self._length = length
def __len__(self):
return self._length
def __getitem__(self, key):
return self._value & (1 << key)
def __str__(self):
return repr(self)
def __repr__(self):
return '0b{value:0{width}b}'.format(value=self._value,
width=self._length)
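# Illustrative behaviour of ButtonMask (not part of the original module):
#   m = ButtonMask(0b101, 5)
#   bool(m[0]), bool(m[1]), bool(m[2])  ->  (True, False, True)
#   len(m), repr(m)                     ->  (5, '0b00101')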
class ButtonState(rq.ValueField):
structcode = None
def __init__(self, name):
rq.ValueField.__init__(self, name)
def parse_binary_value(self, data, display, length, fmt):
# Mask: bitfield of <length> button states.
mask_len = 4 * ((((length + 7) >> 3) + 3) >> 2)
mask_data = data[:mask_len]
mask_value = 0
for byte in reversed(struct.unpack('={0:d}B'.format(mask_len), mask_data)):
mask_value <<= 8
mask_value |= byte
data = data[mask_len:]
assert (mask_value & 1) == 0
return ButtonMask(mask_value >> 1, length), data
ButtonInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf(('state', 'labels'), 2),
ButtonState('state'),
rq.List('labels', rq.Card32),
)
KeyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf('keycodes', 2),
rq.List('keycodes', rq.Card32),
)
ValuatorInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card32('label'),
FP3232('min'),
FP3232('max'),
FP3232('value'),
rq.Card32('resolution'),
rq.Card8('mode'),
rq.Pad(3),
)
ScrollInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card16('scroll_type'),
rq.Pad(2),
rq.Card32('flags'),
FP3232('increment'),
)
TouchInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card8('mode'),
rq.Card8('num_touches'),
)
INFO_CLASSES = {
KeyClass: KeyInfo,
ButtonClass: ButtonInfo,
ValuatorClass: ValuatorInfo,
ScrollClass: ScrollInfo,
TouchClass: TouchInfo,
}
class ClassInfoClass(object):
structcode = None
def parse_binary(self, data, display):
class_type, length = struct.unpack('=HH', data[:4])
class_struct = INFO_CLASSES.get(class_type, AnyInfo)
class_data, _ = class_struct.parse_binary(data, display)
data = data[length * 4:]
return class_data, data
ClassInfo = ClassInfoClass()
DeviceInfo = rq.Struct(
DEVICEID('deviceid'),
rq.Card16('use'),
rq.Card16('attachment'),
rq.LengthOf('classes', 2),
rq.LengthOf('name', 2),
rq.Bool('enabled'),
rq.Pad(1),
rq.String8('name', 4),
rq.List('classes', ClassInfo),
)
class XIQueryDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(48),
rq.RequestLength(),
DEVICEID('deviceid'),
rq.Pad(2),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('devices', 2),
rq.Pad(22),
rq.List('devices', DeviceInfo),
)
def query_device(self, deviceid):
return XIQueryDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
)
class XIGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(51),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('time'),
rq.Cursor('cursor', (X.NONE, )),
DEVICEID('deviceid'),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(1),
rq.LengthOf('mask', 2),
Mask('mask'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card8('status'),
rq.Pad(23),
)
def grab_device(self, deviceid, time, grab_mode, paired_device_mode, owner_events, event_mask):
return XIGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
)
class XIUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(52),
rq.RequestLength(),
rq.Card32('time'),
DEVICEID('deviceid'),
rq.Pad(2),
)
def ungrab_device(self, deviceid, time):
return XIUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
time=time,
deviceid=deviceid,
)
class XIPassiveGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(54),
rq.RequestLength(),
rq.Card32('time'),
rq.Window('grab_window'),
rq.Cursor('cursor', (X.NONE, )),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.LengthOf('mask', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,
GrabtypeFocusIn, GrabtypeTouchBegin)),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(2),
Mask('mask'),
rq.List('modifiers', rq.Card32),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('modifiers', 2),
rq.Pad(22),
rq.List('modifiers', rq.Card32),
)
def passive_grab_device(self, deviceid, time, detail,
grab_type, grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return XIPassiveGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
detail=detail,
grab_type=grab_type,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
modifiers=modifiers,
)
def grab_keycode(self, deviceid, time, keycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return passive_grab_device(self, deviceid, time, keycode,
GrabtypeKeycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers)
class XIPassiveUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(55),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode,
GrabtypeEnter, GrabtypeFocusIn,
GrabtypeTouchBegin)),
rq.Pad(3),
rq.List('modifiers', rq.Card32),
)
def passive_ungrab_device(self, deviceid, detail, grab_type, modifiers):
return XIPassiveUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
detail=detail,
grab_type=grab_type,
modifiers=modifiers,
)
def ungrab_keycode(self, deviceid, keycode, modifiers):
return passive_ungrab_device(self, deviceid, keycode,
GrabtypeKeycode, modifiers)
HierarchyInfo = rq.Struct(
DEVICEID('deviceid'),
DEVICEID('attachment'),
DEVICEUSE('type'),
rq.Bool('enabled'),
rq.Pad(2),
rq.Card32('flags'),
)
HierarchyEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('flags'),
rq.LengthOf('info', 2),
rq.Pad(10),
rq.List('info', HierarchyInfo),
)
ModifierInfo = rq.Struct(
rq.Card32('base_mods'),
rq.Card32('latched_mods'),
rq.Card32('locked_mods'),
rq.Card32('effective_mods'),
)
GroupInfo = rq.Struct(
rq.Card8('base_group'),
rq.Card8('latched_group'),
rq.Card8('locked_group'),
rq.Card8('effective_group'),
)
DeviceEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('detail'),
rq.Window('root'),
rq.Window('event'),
rq.Window('child'),
FP1616('root_x'),
FP1616('root_y'),
FP1616('event_x'),
FP1616('event_y'),
rq.LengthOf('buttons', 2),
rq.Card16('valulators_len'),
DEVICEID('sourceid'),
rq.Pad(2),
rq.Card32('flags'),
rq.Object('mods', ModifierInfo),
rq.Object('groups', GroupInfo),
ButtonState('buttons'),
)
DeviceChangedEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.LengthOf('classes', 2),
DEVICEID('sourceid'),
rq.Card8('reason'),
rq.Pad(11),
rq.List('classes', ClassInfo),
)
def init(disp, info):
disp.extension_add_method('display', 'xinput_query_version', query_version)
disp.extension_add_method('window', 'xinput_select_events', select_events)
disp.extension_add_method('display', 'xinput_query_device', query_device)
disp.extension_add_method('window', 'xinput_grab_device', grab_device)
disp.extension_add_method('display', 'xinput_ungrab_device', ungrab_device)
disp.extension_add_method('window', 'xinput_grab_keycode', grab_keycode)
disp.extension_add_method('window', 'xinput_ungrab_keycode', ungrab_keycode)
if hasattr(disp,"ge_add_event_data"):
for device_event in (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion):
disp.ge_add_event_data(info.major_opcode, device_event, DeviceEventData)
disp.ge_add_event_data(info.major_opcode, DeviceChanged, DeviceEventData)
disp.ge_add_event_data(info.major_opcode, HierarchyChanged, HierarchyEventData)
| 1.523438 | 2 |
vel/notebook/__init__.py | tigerwlin/vel | 273 | 6015 | from .loader import load | 1.125 | 1 |
YourJobAidApi/migrations/0019_remove_category_count_post.py | rayhanrock/django-yourjobaid-api | 1 | 6016 | <reponame>rayhanrock/django-yourjobaid-api
# Generated by Django 3.0.4 on 2020-04-16 23:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('YourJobAidApi', '0018_category_count_post'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='count_post',
),
]
| 1.460938 | 1 |
easyquant/login/__init__.py | CharlieZhao95/easy-quant | 0 | 6017 | # @Time : 2022/1/26 23:07
# @Author : zhaoyu
# @Site :
# @File : __init__.py.py
# @Software: PyCharm
# @Note : xx | 1.054688 | 1 |
tests/api/test_attributes.py | DowneyTung/saleor | 19 | 6018 | from typing import Union
from unittest import mock
import graphene
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.defaultfilters import slugify
from graphene.utils.str_converters import to_camel_case
from saleor.core.taxes import zero_money
from saleor.graphql.core.utils import snake_to_camel_case
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.filters import filter_attributes_by_product_types
from saleor.graphql.product.mutations.attributes import validate_value_is_unique
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.product import AttributeInputType
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import (
Attribute,
AttributeProduct,
AttributeValue,
AttributeVariant,
Category,
Collection,
Product,
ProductType,
ProductVariant,
)
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content
def test_validate_value_is_unique(color_attribute):
value = color_attribute.values.first()
# a new value but with existing slug should raise an error
with pytest.raises(ValidationError):
validate_value_is_unique(color_attribute, AttributeValue(slug=value.slug))
# a new value with a new slug should pass
validate_value_is_unique(
color_attribute, AttributeValue(slug="spanish-inquisition")
)
# value that already belongs to the attribute shouldn't be taken into account
validate_value_is_unique(color_attribute, value)
def test_get_single_attribute_by_pk(user_api_client, color_attribute_without_values):
attribute_gql_id = graphene.Node.to_global_id(
"Attribute", color_attribute_without_values.id
)
query = """
query($id: ID!) {
attribute(id: $id) {
id
slug
}
}
"""
content = get_graphql_content(
user_api_client.post_graphql(query, {"id": attribute_gql_id})
)
assert content["data"]["attribute"], "Should have found an attribute"
assert content["data"]["attribute"]["id"] == attribute_gql_id
assert content["data"]["attribute"]["slug"] == color_attribute_without_values.slug
QUERY_ATTRIBUTES = """
query {
attributes(first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
"""
def test_attributes_query(user_api_client, product):
attributes = Attribute.objects
query = QUERY_ATTRIBUTES
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert attributes_data
assert len(attributes_data) == attributes.count()
def test_attributes_query_hidden_attribute(user_api_client, product, color_attribute):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.get_visible_to_user(
user_api_client.user
).count()
assert attribute_count == 1
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
def test_attributes_query_hidden_attribute_as_staff_user(
staff_api_client, product, color_attribute, permission_manage_products
):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.all().count()
# The user doesn't have the permission yet to manage products,
# the user shouldn't be able to see the hidden attributes
assert Attribute.objects.get_visible_to_user(staff_api_client.user).count() == 1
# The user should now be able to see the attributes
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES = """
{
products(first: 1) {
edges {
node {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
variants {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
}
}
}
}
}
"""
@pytest.mark.parametrize("is_staff", (False, True))
def test_resolve_attributes_with_hidden(
user_api_client,
product,
color_attribute,
size_attribute,
staff_user,
is_staff,
permission_manage_products,
):
"""Ensure non-staff users don't see hidden attributes, and staff users having
the 'manage product' permission can.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_attribute = color_attribute
variant_attribute = size_attribute
expected_product_attribute_count = product.attributes.count() - 1
expected_variant_attribute_count = variant.attributes.count() - 1
if is_staff:
api_client.user = staff_user
expected_product_attribute_count += 1
expected_variant_attribute_count += 1
staff_user.user_permissions.add(permission_manage_products)
# Hide one product and variant attribute from the storefront
for attribute in (product_attribute, variant_attribute):
attribute.visible_in_storefront = False
attribute.save(update_fields=["visible_in_storefront"])
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
assert len(product["attributes"]) == expected_product_attribute_count
assert len(product["variants"][0]["attributes"]) == expected_variant_attribute_count
def test_resolve_attribute_values(user_api_client, product, staff_user):
"""Ensure the attribute values are properly resolved."""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product_attribute_values = list(
product.attributes.first().values.values_list("slug", flat=True)
)
variant_attribute_values = list(
variant.attributes.first().values.values_list("slug", flat=True)
)
assert len(product_attribute_values) == 1
assert len(variant_attribute_values) == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == len(product_attribute_values)
assert len(variant_attributes) == len(variant_attribute_values)
assert product_attributes[0]["attribute"]["slug"] == "color"
assert product_attributes[0]["values"][0]["slug"] == product_attribute_values[0]
assert product_attributes[0]["value"]["slug"] == product_attribute_values[0]
assert variant_attributes[0]["attribute"]["slug"] == "size"
assert variant_attributes[0]["values"][0]["slug"] == variant_attribute_values[0]
assert variant_attributes[0]["value"]["slug"] == variant_attribute_values[0]
def test_resolve_attribute_values_non_assigned_to_node(
    user_api_client, product, staff_user
):
    """Ensure the attribute values are properly resolved when an attribute is part
    of the product type but not of the node (product/variant); in that case no
    values should be resolved.
    """
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_type = product.product_type
# Create dummy attributes
unassigned_product_attribute = Attribute.objects.create(name="P", slug="product")
unassigned_variant_attribute = Attribute.objects.create(name="V", slug="variant")
# Create a value for each dummy attribute to ensure they are not returned
# by the product or variant as they are not associated to them
AttributeValue.objects.bulk_create(
[
AttributeValue(slug="a", name="A", attribute=unassigned_product_attribute),
AttributeValue(slug="b", name="B", attribute=unassigned_product_attribute),
]
)
# Assign the dummy attributes to the product type and push them at the top
# through a sort_order=0 as the other attributes have sort_order=null
AttributeProduct.objects.create(
attribute=unassigned_product_attribute, product_type=product_type, sort_order=0
)
AttributeVariant.objects.create(
attribute=unassigned_variant_attribute, product_type=product_type, sort_order=0
)
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert len(variant_attributes) == 2, "Non-assigned attr from the PT may be missing"
assert product_attributes[0]["attribute"]["slug"] == "product"
assert product_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
assert variant_attributes[0]["attribute"]["slug"] == "variant"
assert variant_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
def test_attributes_filter_by_product_type_with_empty_value():
"""Ensure passing an empty or null value is ignored and the queryset is simply
returned without any modification.
"""
qs = Attribute.objects.all()
assert filter_attributes_by_product_types(qs, "...", "") is qs
assert filter_attributes_by_product_types(qs, "...", None) is qs
def test_attributes_filter_by_product_type_with_unsupported_field():
"""Ensure using an unknown field to filter attributes by raises a NotImplemented
exception.
"""
qs = Attribute.objects.all()
with pytest.raises(NotImplementedError) as exc:
filter_attributes_by_product_types(qs, "in_space", "a-value")
assert exc.value.args == ("Filtering by in_space is unsupported",)
def test_attributes_filter_by_non_existing_category_id():
"""Ensure using a non-existing category ID returns an empty query set."""
category_id = graphene.Node.to_global_id("Category", -1)
mocked_qs = mock.MagicMock()
qs = filter_attributes_by_product_types(mocked_qs, "in_category", category_id)
assert qs == mocked_qs.none.return_value
@pytest.mark.parametrize("test_deprecated_filter", [True, False])
@pytest.mark.parametrize("tested_field", ["inCategory", "inCollection"])
def test_attributes_in_collection_query(
user_api_client,
product_type,
category,
collection,
collection_with_products,
test_deprecated_filter,
tested_field,
):
if "Collection" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Collection", collection.pk)
elif "Category" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Category", category.pk)
else:
raise AssertionError(tested_field)
expected_qs = Attribute.objects.filter(
Q(attributeproduct__product_type_id=product_type.pk)
| Q(attributevariant__product_type_id=product_type.pk)
)
# Create another product type and attribute that shouldn't get matched
other_category = Category.objects.create(name="Other Category", slug="other-cat")
other_attribute = Attribute.objects.create(name="Other", slug="other")
other_product_type = ProductType.objects.create(
name="Other type", has_variants=True, is_shipping_required=True
)
other_product_type.product_attributes.add(other_attribute)
other_product = Product.objects.create(
        name="Another Product",
product_type=other_product_type,
category=other_category,
price=zero_money(),
is_published=True,
)
# Create another collection with products but shouldn't get matched
# as we don't look for this other collection
other_collection = Collection.objects.create(
name="Other Collection",
slug="other-collection",
is_published=True,
description="Description",
)
other_collection.products.add(other_product)
query = """
query($nodeID: ID!) {
attributes(first: 20, %(filter_input)s) {
edges {
node {
id
name
slug
}
}
}
}
"""
if test_deprecated_filter:
query = query % {"filter_input": f"{tested_field}: $nodeID"}
else:
query = query % {"filter_input": "filter: { %s: $nodeID }" % tested_field}
variables = {"nodeID": filtered_by_node_id}
content = get_graphql_content(user_api_client.post_graphql(query, variables))
attributes_data = content["data"]["attributes"]["edges"]
flat_attributes_data = [attr["node"]["slug"] for attr in attributes_data]
expected_flat_attributes_data = list(expected_qs.values_list("slug", flat=True))
assert flat_attributes_data == expected_flat_attributes_data
CREATE_ATTRIBUTES_QUERY = """
mutation createAttribute($name: String!, $values: [AttributeValueCreateInput]) {
attributeCreate(input: {name: $name, values: $values}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_create_attribute_and_attribute_values(
staff_api_client, permission_manage_products
):
query = CREATE_ATTRIBUTES_QUERY
attribute_name = "<NAME>"
name = "Value name"
variables = {"name": attribute_name, "values": [{"name": name}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert not content["data"]["attributeCreate"]["errors"]
data = content["data"]["attributeCreate"]
# Check if the attribute was correctly created
assert data["attribute"]["name"] == attribute_name
assert data["attribute"]["slug"] == slugify(
attribute_name
), "The default slug should be the slugified name"
assert (
data["attribute"]["productTypes"]["edges"] == []
), "The attribute should not have been assigned to a product type"
# Check if the attribute values were correctly created
assert len(data["attribute"]["values"]) == 1
assert data["attribute"]["values"][0]["name"] == name
assert data["attribute"]["values"][0]["slug"] == slugify(name)
@pytest.mark.parametrize(
"input_slug, expected_slug, expected_error",
(
("my-slug", "my-slug", []),
(None, "my-name", []),
(
"",
None,
[{"field": "slug", "message": "The attribute's slug cannot be blank."}],
),
),
)
def test_create_attribute_with_given_slug(
staff_api_client,
permission_manage_products,
input_slug,
expected_slug,
expected_error,
):
staff_api_client.user.user_permissions.add(permission_manage_products)
query = """
mutation createAttribute(
$name: String!, $slug: String) {
attributeCreate(input: {name: $name, slug: $slug}) {
errors {
field
message
}
attribute {
slug
}
}
}
"""
attribute_name = "My Name"
variables = {"name": attribute_name, "slug": input_slug}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))
# Check if the error is as expected: null or something else
assert content["data"]["attributeCreate"]["errors"] == expected_error
# Check if the slug was correctly set if no error was expected
if expected_error is None:
assert content["data"]["attributeCreate"]["attribute"]["slug"] == expected_slug
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_create_attribute_and_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
permission_manage_products,
product_type,
):
query = CREATE_ATTRIBUTES_QUERY
variables = {"name": "Example name", "values": [{"name": name_1}, {"name": name_2}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeCreate"]["errors"]
assert errors
assert errors[0]["field"] == "values"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeCreate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
UPDATE_ATTRIBUTE_QUERY = """
mutation updateAttribute(
$id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
$removeValues: [ID]!) {
attributeUpdate(
id: $id,
input: {
name: $name, addValues: $addValues,
removeValues: $removeValues}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_update_attribute_name(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "<NAME>"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"name": name, "id": node_id, "addValues": [], "removeValues": []}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert data["attribute"]["name"] == name == attribute.name
assert data["attribute"]["productTypes"]["edges"] == []
def test_update_attribute_remove_and_add_values(
staff_api_client, color_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "<NAME>"
attribute_value_name = "Red Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
attribute_value_id = attribute.values.first().id
value_id = graphene.Node.to_global_id("AttributeValue", attribute_value_id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [value_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert not data["errors"]
assert data["attribute"]["name"] == name == attribute.name
assert not attribute.values.filter(pk=attribute_value_id).exists()
assert attribute.values.filter(name=attribute_value_name).exists()
def test_update_empty_attribute_and_add_values(
staff_api_client, color_attribute_without_values, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute_without_values
name = "<NAME>"
attribute_value_name = "Yellow Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
attribute.refresh_from_db()
assert attribute.values.count() == 1
assert attribute.values.filter(name=attribute_value_name).exists()
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_update_attribute_and_add_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
color_attribute,
permission_manage_products,
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": "Example name",
"id": node_id,
"removeValues": [],
"addValues": [{"name": name_1}, {"name": name_2}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "addValues"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
def test_update_attribute_and_remove_others_attribute_value(
staff_api_client, color_attribute, size_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
size_attribute = size_attribute.values.first()
attr_id = graphene.Node.to_global_id("AttributeValue", size_attribute.pk)
variables = {
"name": "Example name",
"id": node_id,
"slug": "example-slug",
"addValues": [],
"removeValues": [attr_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "removeValues"
err_msg = "Value %s does not belong to this attribute." % str(size_attribute)
assert errors[0]["message"] == err_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == ProductErrorCode.INVALID.name
def test_delete_attribute(
staff_api_client, color_attribute, permission_manage_products, product_type
):
attribute = color_attribute
query = """
mutation deleteAttribute($id: ID!) {
attributeDelete(id: $id) {
errors {
field
message
}
attribute {
id
}
}
}
"""
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeDelete"]
assert data["attribute"]["id"] == variables["id"]
with pytest.raises(attribute._meta.model.DoesNotExist):
attribute.refresh_from_db()
CREATE_ATTRIBUTE_VALUE_QUERY = """
mutation createAttributeValue(
$attributeId: ID!, $name: String!) {
attributeValueCreate(
attribute: $attributeId, input: {name: $name}) {
productErrors {
field
message
code
}
attribute {
values {
name
}
}
attributeValue {
name
type
slug
}
}
}
"""
def test_create_attribute_value(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
name = "<NAME>"
variables = {"name": name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert not data["productErrors"]
attr_data = data["attributeValue"]
assert attr_data["name"] == name
assert attr_data["slug"] == slugify(name)
assert attr_data["type"] == "STRING"
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_create_attribute_value_not_unique_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
def test_create_attribute_value_capitalized_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name.upper(), "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
UPDATE_ATTRIBUTE_VALUE_QUERY = """
mutation updateChoice(
$id: ID!, $name: String!) {
attributeValueUpdate(
id: $id, input: {name: $name}) {
errors {
field
message
}
attributeValue {
name
slug
}
attribute {
values {
name
}
}
}
}
"""
def test_update_attribute_value(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
name = "Crimson name"
variables = {"name": name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
value.refresh_from_db()
assert data["attributeValue"]["name"] == name == value.name
assert data["attributeValue"]["slug"] == slugify(name)
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_update_attribute_value_name_not_unique(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value.attribute.values.create(
name="<NAME>", slug="example-name", value="#RED"
)
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"name": pink_attribute_value.name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
assert data["errors"]
assert data["errors"][0]["message"]
assert data["errors"][0]["field"] == "name"
def test_delete_attribute_value(
staff_api_client, color_attribute, pink_attribute_value, permission_manage_products
):
value = color_attribute.values.get(name="Red")
query = """
mutation updateChoice($id: ID!) {
attributeValueDelete(id: $id) {
attributeValue {
name
slug
}
}
}
"""
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"id": node_id}
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
@pytest.mark.parametrize(
"raw_value, expected_type",
[
("#0000", AttributeValueType.COLOR),
("#FF69B4", AttributeValueType.COLOR),
("rgb(255, 0, 0)", AttributeValueType.COLOR),
("hsl(0, 100%, 50%)", AttributeValueType.COLOR),
("hsla(120, 60%, 70%, 0.3)", AttributeValueType.COLOR),
("rgba(100%, 255, 0, 0)", AttributeValueType.COLOR),
("http://example.com", AttributeValueType.URL),
("https://example.com", AttributeValueType.URL),
("ftp://example.com", AttributeValueType.URL),
("example.com", AttributeValueType.STRING),
("Foo", AttributeValueType.STRING),
("linear-gradient(red, yellow)", AttributeValueType.GRADIENT),
("radial-gradient(#0000, yellow)", AttributeValueType.GRADIENT),
],
)
def test_resolve_attribute_value_type(raw_value, expected_type):
assert resolve_attribute_value_type(raw_value) == expected_type
def test_resolve_assigned_attribute_without_values(api_client, product_type, product):
    """Ensure the attributes assigned to a product type are resolved even if
    the product doesn't provide any value for them or is not directly associated
    with them.
    """
# Retrieve the product's variant
variant = product.variants.get()
# Remove all attributes and values from the product and its variant
product.attributesrelated.clear()
variant.attributesrelated.clear()
# Retrieve the product and variant's attributes
products = get_graphql_content(
api_client.post_graphql(
"""
{
products(first: 10) {
edges {
node {
attributes {
attribute {
slug
}
values {
name
}
}
variants {
attributes {
attribute {
slug
}
values {
name
}
}
}
}
}
}
}
"""
)
)["data"]["products"]["edges"]
# Ensure we are only working on one product and variant, the ones we are testing
assert len(products) == 1
assert len(products[0]["node"]["variants"]) == 1
# Retrieve the nodes data
product = products[0]["node"]
variant = product["variants"][0]
# Ensure the product attributes values are all None
assert len(product["attributes"]) == 1
assert product["attributes"][0]["attribute"]["slug"] == "color"
assert product["attributes"][0]["values"] == []
# Ensure the variant attributes values are all None
assert variant["attributes"][0]["attribute"]["slug"] == "size"
assert variant["attributes"][0]["values"] == []
ASSIGN_ATTR_QUERY = """
mutation assign($productTypeId: ID!, $operations: [AttributeAssignInput]!) {
attributeAssign(productTypeId: $productTypeId, operations: $operations) {
errors {
field
message
}
productType {
id
productAttributes {
id
}
variantAttributes {
id
}
}
}
}
"""
def test_assign_attributes_to_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Default Type", has_variants=True)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = []
variables = {"productTypeId": product_type_global_id, "operations": operations}
product_attributes_ids = {attr.pk for attr in attribute_list[:2]}
variant_attributes_ids = {attr.pk for attr in attribute_list[2:]}
for attr_id in product_attributes_ids:
operations.append(
{"type": "PRODUCT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
for attr_id in variant_attributes_ids:
operations.append(
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeAssign"]
assert not content["errors"], "Should have succeeded"
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == len(
product_attributes_ids
)
assert len(content["productType"]["variantAttributes"]) == len(
variant_attributes_ids
)
found_product_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["productAttributes"]
}
found_variant_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["variantAttributes"]
}
assert found_product_attrs_ids == product_attributes_ids
assert found_variant_attrs_ids == variant_attributes_ids
def test_assign_variant_attribute_to_product_type_with_disabled_variants(
staff_api_client,
permission_manage_products,
product_type_without_variant,
color_attribute_without_values,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute as a variant attribute when
the product type doesn't support variants"""
product_type = product_type_without_variant
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Variants are disabled in this product type.",
}
]
def test_assign_variant_attribute_having_unsupported_input_type(
staff_api_client, permission_manage_products, product_type, size_attribute
):
"""The assignAttribute mutation should raise an error when trying
to use an attribute as a variant attribute when
the attribute's input type doesn't support variants"""
attribute = size_attribute
attribute.input_type = AttributeInputType.MULTISELECT
attribute.save(update_fields=["input_type"])
product_type.variant_attributes.clear()
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": (
"Attributes having for input types ['multiselect'] cannot be assigned "
"as variant attributes"
),
}
]
@pytest.mark.parametrize(
"product_type_attribute_type, gql_attribute_type",
(
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.VARIANT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.VARIANT),
),
)
def test_assign_attribute_to_product_type_having_already_that_attribute(
staff_api_client,
permission_manage_products,
color_attribute_without_values,
product_type_attribute_type,
gql_attribute_type,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute already contained in the product type."""
product_type = ProductType.objects.create(name="Type")
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
if product_type_attribute_type == AttributeTypeEnum.PRODUCT:
product_type.product_attributes.add(attribute)
elif product_type_attribute_type == AttributeTypeEnum.VARIANT:
product_type.variant_attributes.add(attribute)
else:
raise ValueError(f"Unknown: {product_type}")
query = ASSIGN_ATTR_QUERY
operations = [
{
"type": gql_attribute_type.value,
"id": graphene.Node.to_global_id("Attribute", attribute.pk),
}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Color (color) have already been assigned to this product type.",
}
]
UNASSIGN_ATTR_QUERY = """
mutation unAssignAttribute(
$productTypeId: ID!, $attributeIds: [ID]!
) {
attributeUnassign(productTypeId: $productTypeId, attributeIds: $attributeIds) {
errors {
field
message
}
productType {
id
variantAttributes {
id
}
productAttributes {
id
}
}
}
}
"""
def test_unassign_attributes_from_product_type(
staff_api_client, permission_manage_products, attribute_list
):
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
variant_attribute, *product_attributes = attribute_list
product_type.product_attributes.add(*product_attributes)
product_type.variant_attributes.add(variant_attribute)
remaining_attribute_global_id = graphene.Node.to_global_id(
"Attribute", product_attributes[1].pk
)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", product_attributes[0].pk)
],
}
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 1
assert len(content["productType"]["variantAttributes"]) == 1
assert (
content["productType"]["productAttributes"][0]["id"]
== remaining_attribute_global_id
)
def test_unassign_attributes_not_in_product_type(
staff_api_client, permission_manage_products, color_attribute_without_values
):
"""The unAssignAttribute mutation should not raise any error when trying
to remove an attribute that is not/no longer in the product type."""
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", color_attribute_without_values.pk)
],
}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 0
assert len(content["productType"]["variantAttributes"]) == 0
def test_retrieve_product_attributes_input_type(
staff_api_client, product, permission_manage_products
):
query = """
{
products(first: 10) {
edges {
node {
attributes {
values {
type
inputType
}
}
}
}
}
}
"""
found_products = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["products"]["edges"]
assert len(found_products) == 1
for gql_attr in found_products[0]["node"]["attributes"]:
assert len(gql_attr["values"]) == 1
assert gql_attr["values"][0]["type"] == "STRING"
assert gql_attr["values"][0]["inputType"] == "DROPDOWN"
@pytest.mark.parametrize(
"attribute, expected_value",
(
("filterable_in_storefront", True),
("filterable_in_dashboard", True),
("visible_in_storefront", True),
("available_in_grid", True),
("value_required", False),
("storefront_search_position", 0),
),
)
def test_retrieving_the_restricted_attributes_restricted(
staff_api_client,
color_attribute,
permission_manage_products,
attribute,
expected_value,
):
"""Checks if the attributes are restricted and if their default value
is the expected one."""
attribute = to_camel_case(attribute)
query = (
"""
{
attributes(first: 10) {
edges {
node {
%s
}
}
}
}
"""
% attribute
)
found_attributes = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["attributes"]["edges"]
assert len(found_attributes) == 1
assert found_attributes[0]["node"][attribute] == expected_value
ATTRIBUTES_RESORT_QUERY = """
mutation ProductTypeReorderAttributes(
$productTypeId: ID!
$moves: [ReorderInput]!
$type: AttributeTypeEnum!
) {
productTypeReorderAttributes(
productTypeId: $productTypeId
moves: $moves
type: $type
) {
productType {
id
variantAttributes {
id
slug
}
productAttributes {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_attributes_within_product_type_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid product type (invalid ID)."""
product_type_id = graphene.Node.to_global_id("ProductType", -1)
attribute_id = graphene.Node.to_global_id("Attribute", -1)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "productTypeId",
"message": f"Couldn't resolve to a product type: {product_type_id}",
}
]
def test_sort_attributes_within_product_type_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder an attribute not associated to the given product type."""
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
@pytest.mark.parametrize(
"attribute_type, relation_field, backref_field",
(
("VARIANT", "variant_attributes", "attributevariant"),
("PRODUCT", "product_attributes", "attributeproduct"),
),
)
def test_sort_attributes_within_product_type(
staff_api_client,
attribute_list,
permission_manage_products,
attribute_type,
relation_field,
backref_field,
):
attributes = attribute_list
assert len(attributes) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
m2m_attributes = getattr(product_type, relation_field)
m2m_attributes.set(attributes)
sort_method = getattr(m2m_attributes, f"{relation_field}_sorted")
attributes = list(sort_method())
assert len(attributes) == 3
variables = {
"type": attribute_type,
"productTypeId": product_type_id,
"moves": [
{
"id": graphene.Node.to_global_id("Attribute", attributes[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("Attribute", attributes[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [attributes[1].pk, attributes[2].pk, attributes[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTES_RESORT_QUERY, variables)
)["data"]["productTypeReorderAttributes"]
assert not content["errors"]
assert (
content["productType"]["id"] == product_type_id
), "Did not return the correct product type"
gql_attributes = content["productType"][snake_to_camel_case(relation_field)]
assert len(gql_attributes) == len(expected_order)
for attr, expected_pk in zip(gql_attributes, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "Attribute"
assert int(gql_attr_id) == expected_pk
ATTRIBUTE_VALUES_RESORT_QUERY = """
mutation attributeReorderValues($attributeId: ID!, $moves: [ReorderInput]!) {
attributeReorderValues(attributeId: $attributeId, moves: $moves) {
attribute {
id
values {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_values_within_attribute_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid attribute (invalid ID)."""
attribute_id = graphene.Node.to_global_id("Attribute", -1)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "attributeId",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
def test_sort_values_within_attribute_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder a value not associated to the given attribute."""
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"type": "VARIANT",
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute value: {value_id}",
}
]
def test_sort_values_within_attribute(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
AttributeValue.objects.create(attribute=attribute, name="Green", slug="green")
values = list(attribute.values.all())
assert len(values) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
m2m_values = attribute.values
m2m_values.set(values)
assert values == sorted(
values, key=lambda o: o.sort_order if o.sort_order is not None else o.pk
), "The values are not properly ordered"
variables = {
"attributeId": attribute_id,
"moves": [
{
"id": graphene.Node.to_global_id("AttributeValue", values[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("AttributeValue", values[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [values[1].pk, values[2].pk, values[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTE_VALUES_RESORT_QUERY, variables)
)["data"]["attributeReorderValues"]
assert not content["errors"]
assert content["attribute"]["id"] == attribute_id
gql_values = content["attribute"]["values"]
assert len(gql_values) == len(expected_order)
actual_order = []
for attr, expected_pk in zip(gql_values, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "AttributeValue"
actual_order.append(int(gql_attr_id))
assert actual_order == expected_order
ATTRIBUTES_FILTER_QUERY = """
query($filters: AttributeFilterInput!) {
attributes(first: 10, filter: $filters) {
edges {
node {
name
slug
}
}
}
}
"""
def test_search_attributes(api_client, color_attribute, size_attribute):
variables = {"filters": {"search": "color"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "color"
def test_filter_attributes_if_filterable_in_dashboard(
api_client, color_attribute, size_attribute
):
color_attribute.filterable_in_dashboard = False
color_attribute.save(update_fields=["filterable_in_dashboard"])
variables = {"filters": {"filterableInDashboard": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_if_available_in_grid(
api_client, color_attribute, size_attribute
):
color_attribute.available_in_grid = False
color_attribute.save(update_fields=["available_in_grid"])
variables = {"filters": {"availableInGrid": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_by_global_id_list(api_client, attribute_list):
global_ids = [
graphene.Node.to_global_id("Attribute", attribute.pk)
for attribute in attribute_list[:2]
]
variables = {"filters": {"ids": global_ids}}
expected_slugs = sorted([attribute_list[0].slug, attribute_list[1].slug])
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
received_slugs = sorted(
[attributes[0]["node"]["slug"], attributes[1]["node"]["slug"]]
)
assert received_slugs == expected_slugs
ATTRIBUTES_SORT_QUERY = """
query($sortBy: AttributeSortingInput) {
attributes(first: 10, sortBy: $sortBy) {
edges {
node {
slug
}
}
}
}
"""
def test_sort_attributes_by_slug(api_client):
Attribute.objects.bulk_create(
[
Attribute(name="MyAttribute", slug="b"),
Attribute(name="MyAttribute", slug="a"),
]
)
variables = {"sortBy": {"field": "SLUG", "direction": "ASC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "a"
assert attributes[1]["node"]["slug"] == "b"
@pytest.mark.parametrize(
"sort_field, m2m_model",
(
("DASHBOARD_VARIANT_POSITION", AttributeVariant),
("DASHBOARD_PRODUCT_POSITION", AttributeProduct),
),
)
def test_sort_attributes_by_position_in_product_type(
api_client,
color_attribute,
size_attribute,
sort_field: str,
m2m_model: Union[AttributeVariant, AttributeProduct],
):
"""Sorts attributes for dashboard custom ordering inside a given product type."""
product_type = ProductType.objects.create(name="My Product Type")
m2m_model.objects.create(
product_type=product_type, attribute=color_attribute, sort_order=0
)
m2m_model.objects.create(
product_type=product_type, attribute=size_attribute, sort_order=1
)
variables = {"sortBy": {"field": sort_field, "direction": "DESC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "size"
assert attributes[1]["node"]["slug"] == "color"
def test_sort_attributes_by_default_sorting(api_client):
"""Don't provide any sorting, this should sort by name by default."""
Attribute.objects.bulk_create(
[Attribute(name="A", slug="b"), Attribute(name="B", slug="a")]
)
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, {})
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "b"
assert attributes[1]["node"]["slug"] == "a"
@pytest.mark.parametrize("is_variant", (True, False))
def test_attributes_of_products_are_sorted(
staff_api_client, product, color_attribute, is_variant
):
"""Ensures the attributes of products and variants are sorted."""
variant = product.variants.first()
if is_variant:
query = """
query($id: ID!) {
productVariant(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
else:
query = """
query($id: ID!) {
product(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
# Create a dummy attribute with a higher ID
# This will allow us to make sure it is always the last attribute
# when sorted by ID. Thus, we are sure the query is actually passing the test.
other_attribute = Attribute.objects.create(name="Other", slug="other")
# Add the attribute to the product type
if is_variant:
product.product_type.variant_attributes.set([color_attribute, other_attribute])
else:
product.product_type.product_attributes.set([color_attribute, other_attribute])
# Retrieve the M2M object for the attribute vs the product type
if is_variant:
m2m_rel_other_attr = other_attribute.attributevariant.last()
else:
m2m_rel_other_attr = other_attribute.attributeproduct.last()
    # Push the last attribute to the top and leave the others at None
m2m_rel_other_attr.sort_order = 0
m2m_rel_other_attr.save(update_fields=["sort_order"])
# Assign attributes to the product
node = variant if is_variant else product # type: Union[Product, ProductVariant]
node.attributesrelated.clear()
associate_attribute_values_to_instance(
node, color_attribute, color_attribute.values.first()
)
# Sort the database attributes by their sort order and ID (when None)
expected_order = [other_attribute.pk, color_attribute.pk]
# Make the node ID
if is_variant:
node_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
else:
node_id = graphene.Node.to_global_id("Product", product.pk)
# Retrieve the attributes
data = get_graphql_content(staff_api_client.post_graphql(query, {"id": node_id}))[
"data"
]
attributes = data["productVariant" if is_variant else "product"]["attributes"]
actual_order = [
int(graphene.Node.from_global_id(attr["attribute"]["id"])[1])
for attr in attributes
]
# Compare the received data against our expectations
assert actual_order == expected_order
| 1.984375 | 2 |
3-photos/1-chromakey/app.py | rafacm/aws-serverless-workshop-innovator-island | 1 | 6019 | <gh_stars>1-10
import os
import json
import cv2
import logging
import boto3
import botocore
s3 = boto3.client('s3')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then same as file_name
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = s3
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except botocore.exceptions.ClientError as e:
logging.error(e)
return False
return True
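# Hypothetical usage sketch (the file path, bucket and key below are made-up
# examples, not values used by the workshop):
#
#   upload_file('/tmp/out_photo.png', 'my-output-bucket', 'processed/photo.png')
#
# returns True on success and False if boto3 raises a ClientError.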
def scale_image(image):
_image = image
target_height = 800
height, width, channels = _image.shape
logger.info('Original size: {}h x {}w'.format(height, width))
scale = height/target_height
if scale > 1:
_image = cv2.resize(image, (int(width/scale), int(height/scale)))
        height, width, channels = _image.shape
        logger.info('New size: {}h x {}w'.format(height, width))
return _image
def lambda_handler(event, context):
print ("Starting handler")
# get object metadata from event
input_bucket_name = event['Records'][0]['s3']['bucket']['name']
file_key = event['Records'][0]['s3']['object']['key']
output_bucket_name = os.environ['OUTPUT_BUCKET_NAME']
output_file_key = file_key.<KEY>')
print("Input bucket: ", input_bucket_name)
print("Output bucket: ", output_bucket_name)
if output_bucket_name is None:
print("Error: No OUTPUT_BUCKET_NAME environment variable specified.")
return
# set up local temp file names
local_input_temp_file = '/tmp/' + file_key
local_output_temp_file = '/tmp/out_' + file_key.replace('.jpg', '.png')
logger.info('Local input file: {}'.format(local_input_temp_file))
logger.info('Local output file: {}'.format(local_output_temp_file))
# get the object
s3.download_file(input_bucket_name, file_key, local_input_temp_file)
# HSV range
# (36, 25, 25) - most extreme
# (36, 50, 50) - average
# (36, 100, 100) - relaxed
lower_range = eval(os.environ["HSV_LOWER"])
# (70, 255, 255) - default
upper_range = eval(os.environ["HSV_UPPER"])
print('Lower HSV range: ', lower_range)
print('Upper HSV range: ', upper_range)
# Read in the file
image = cv2.imread(local_input_temp_file)
# Resize the image if larger than target size
image = scale_image(image)
    # Flip from OpenCV's BGR channel order to RGB
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Convert BGR to HSV color space
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# convert to RGBA
image_alpha = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
# Threshold the HSV image to only green colors
mask = cv2.inRange(hsv, lower_range, upper_range)
# Invert the mask (i.e. select everything not green)
mask = ~mask
# Extract the non-green parts of the image
result = cv2.bitwise_and(image_alpha, image_alpha, mask=mask)
#Save the result
cv2.imwrite(local_output_temp_file,result)
#Save to S3
if upload_file(local_output_temp_file, output_bucket_name, output_file_key):
print('Processed file uploaded.')
return True
| 2.5625 | 3 |
metrics/overflow.py | DEKHTIARJonathan/pyinstrument | 1 | 6020 | from pyinstrument import Profiler
p = Profiler(use_signal=False)
p.start()
def func(num):
if num == 0:
return
b = 0
for x in range(1,100000):
b += x
return func(num - 1)
func(900)
p.stop()
print(p.output_text())
with open('overflow_out.html', 'w') as f:
f.write(p.output_html())
| 2.859375 | 3 |
scripts/gen_tee_bin.py | wawang621/optee_os | 0 | 6021 | #!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from __future__ import print_function
from __future__ import division
import argparse
import sys
import struct
import re
import hashlib
try:
from elftools.elf.elffile import ELFFile
from elftools.elf.constants import SH_FLAGS
from elftools.elf.enums import ENUM_RELOC_TYPE_ARM
from elftools.elf.enums import ENUM_RELOC_TYPE_AARCH64
from elftools.elf.sections import SymbolTableSection
from elftools.elf.relocation import RelocationSection
except ImportError:
print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with
$ apt install python3-pyelftools
if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
raise
small_page_size = 4 * 1024
elffile_symbols = None
tee_pageable_bin = None
tee_pager_bin = None
tee_embdata_bin = None
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def round_up(n, m):
if n == 0:
return 0
else:
return (((n - 1) // m) + 1) * m
def get_arch_id(elffile):
e_machine = elffile.header['e_machine']
if e_machine == 'EM_ARM':
return 0
if e_machine == 'EM_AARCH64':
return 1
eprint('Unknown e_machine "%s"' % e_machine)
sys.exit(1)
def get_name(obj):
# Symbol or section .name might be a byte array or a string, we want a
# string
try:
name = obj.name.decode()
except (UnicodeDecodeError, AttributeError):
name = obj.name
return name
def get_symbol(elffile, name):
global elffile_symbols
global lsyms_def
if elffile_symbols is None:
elffile_symbols = dict()
lsyms_def = dict()
symbol_tables = [s for s in elffile.iter_sections()
if isinstance(s, SymbolTableSection)]
for section in symbol_tables:
for symbol in section.iter_symbols():
symbol_name = get_name(symbol)
if symbol['st_info']['bind'] == 'STB_GLOBAL':
elffile_symbols[symbol_name] = symbol
elif symbol['st_info']['bind'] == 'STB_LOCAL':
if symbol_name not in elffile_symbols.keys():
elffile_symbols[symbol_name] = symbol
if symbol_name not in lsyms_def.keys():
lsyms_def[symbol_name] = 1
else:
lsyms_def[symbol_name] += 1
if name in lsyms_def.keys() and lsyms_def[name] > 1:
eprint("Multiple definitions of local symbol %s" % name)
sys.exit(1)
if name not in elffile_symbols.keys():
eprint("Cannot find symbol %s" % name)
sys.exit(1)
return elffile_symbols[name]
def get_sections(elffile, pad_to, dump_names):
last_end = 0
bin_data = bytearray()
for section in elffile.iter_sections():
section_name = get_name(section)
if (section['sh_type'] == 'SHT_NOBITS' or
not (section['sh_flags'] & SH_FLAGS.SHF_ALLOC) or
not dump_names.match(section_name)):
continue
if last_end == 0:
bin_data = section.data()
else:
if section['sh_addr'] > last_end:
bin_data += bytearray(section['sh_addr'] - last_end)
bin_data += section.data()
last_end = section['sh_addr'] + section['sh_size']
if pad_to > last_end:
bin_data += bytearray(pad_to - last_end)
last_end = pad_to
return bin_data
def get_pageable_bin(elffile):
global tee_pageable_bin
if tee_pageable_bin is None:
pad_to = 0
dump_names = re.compile(r'^\..*_(pageable|init)$')
tee_pageable_bin = get_sections(elffile, pad_to, dump_names)
return tee_pageable_bin
def get_pager_bin(elffile):
global tee_pager_bin
if tee_pager_bin is None:
pad_to = get_symbol(elffile, '__data_end')['st_value']
dump_names = re.compile(
r'^\.(text|rodata|got|data|ARM\.exidx|ARM\.extab)$')
tee_pager_bin = get_sections(elffile, pad_to, dump_names)
return tee_pager_bin
def get_reloc_bin(elffile):
if get_arch_id(elffile) == 0:
exp_rel_type = ENUM_RELOC_TYPE_ARM['R_ARM_RELATIVE']
else:
exp_rel_type = ENUM_RELOC_TYPE_AARCH64['R_AARCH64_RELATIVE']
link_address = get_symbol(elffile, '__text_start')['st_value']
addrs = []
for section in elffile.iter_sections():
if not isinstance(section, RelocationSection):
continue
for rel in section.iter_relocations():
if rel['r_info_type'] == 0:
continue
if rel['r_info_type'] != exp_rel_type:
eprint("Unexpected relocation type 0x%x" %
rel['r_info_type'])
sys.exit(1)
addrs.append(rel['r_offset'] - link_address)
addrs.sort()
data = bytearray()
for a in addrs:
data += struct.pack('<I', a)
    # The relocations have been reduced to the relative type only, with the
    # addend stored at the relocation address (r_offset), i.e. the value there
    # is increased by load_offset. The addresses (r_offset) are also sorted.
    # The format is then:
# uint32_t: relocation #1
# uint32_t: relocation #2
# ...
# uint32_t: relocation #n
return data
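# Illustrative sketch only (not part of the build flow): a consumer could read
# the blob produced above back into its sorted list of offsets; 'blob' is
# assumed to hold the bytes returned by get_reloc_bin().
#
#   offsets = [struct.unpack_from('<I', blob, pos)[0]
#              for pos in range(0, len(blob), 4)]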
def get_hashes_bin(elffile):
pageable_bin = get_pageable_bin(elffile)
if len(pageable_bin) % small_page_size != 0:
eprint("pageable size not a multiple of 4K: "
"{}".format(paged_area_size))
sys.exit(1)
data = bytearray()
for n in range(0, len(pageable_bin), small_page_size):
page = pageable_bin[n:n + small_page_size]
data += hashlib.sha256(page).digest()
return data
def get_embdata_bin(elffile):
global tee_embdata_bin
if tee_embdata_bin is None:
hashes_bin = get_hashes_bin(elffile)
reloc_bin = get_reloc_bin(elffile)
num_entries = 2
hash_offs = 2 * 4 + num_entries * (2 * 4)
hash_pad = round_up(len(hashes_bin), 8) - len(hashes_bin)
reloc_offs = hash_offs + len(hashes_bin) + hash_pad
reloc_pad = round_up(len(reloc_bin), 8) - len(reloc_bin)
total_len = reloc_offs + len(reloc_bin) + reloc_pad
tee_embdata_bin = struct.pack('<IIIIII', total_len, num_entries,
hash_offs, len(hashes_bin),
reloc_offs, len(reloc_bin))
tee_embdata_bin += hashes_bin + bytearray(hash_pad)
tee_embdata_bin += reloc_bin + bytearray(reloc_pad)
# The embedded data region is designed to be easy to extend when
# needed, it's formatted as:
# +---------------------------------------------------------+
# | uint32_t: Length of entire area including this field |
# +---------------------------------------------------------+
# | uint32_t: Number of entries "2" |
# +---------------------------------------------------------+
# | uint32_t: Offset of hashes from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of hashes |
# +---------------------------------------------------------+
# | uint32_t: Offset of relocations from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of relocations |
# +---------------------------------------------------------+
# | Data of hashes + eventual padding |
# +---------------------------------------------------------+
# | Data of relocations + eventual padding |
# +---------------------------------------------------------+
return tee_embdata_bin
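# Illustrative sketch only (not used anywhere in this script): the fixed-size
# header of the embedded data area described above can be read back with
#
#   (total_len, num_entries, hash_offs, hash_len,
#    reloc_offs, reloc_len) = struct.unpack_from('<IIIIII', blob, 0)
#
# where 'blob' is assumed to be the bytes returned by get_embdata_bin().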
def output_pager_bin(elffile, outf):
outf.write(get_pager_bin(elffile))
def output_pageable_bin(elffile, outf):
outf.write(get_pageable_bin(elffile))
def get_init_load_addr(elffile):
init_load_addr = get_symbol(elffile, '_start')['st_value']
init_load_addr_hi = init_load_addr >> 32
init_load_addr_lo = init_load_addr & 0xffffffff
return init_load_addr_hi, init_load_addr_lo
def output_header_v1(elffile, outf):
arch_id = get_arch_id(elffile)
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(pager_bin)
paged_area_size = len(pageable_bin)
init_mem_usage = (get_symbol(elffile, '__get_tee_init_end')['st_value'] -
get_symbol(elffile, '__text_start')['st_value'] +
len(embdata_bin))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
len(embdata_bin))
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 1
flags = 0
outf.write(struct.pack('<IBBHIIIII', magic, version, arch_id, flags,
init_size, init_load_addr[0], init_load_addr[1],
init_mem_usage, paged_size))
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
outf.write(pageable_bin[init_bin_size:])
def output_header_v2(elffile, outf):
arch_id = get_arch_id(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(get_pager_bin(elffile))
paged_area_size = len(get_pageable_bin(elffile))
embdata_bin_size = len(get_embdata_bin(elffile))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
embdata_bin_size)
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 2
flags = 0
nb_images = 1 if paged_size == 0 else 2
outf.write(struct.pack('<IBBHI', magic, version, arch_id, flags,
nb_images))
outf.write(struct.pack('<IIII', init_load_addr[0], init_load_addr[1],
0, init_size))
if nb_images == 2:
outf.write(struct.pack('<IIII', 0xffffffff, 0xffffffff, 1, paged_size))
def output_pager_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
def output_pageable_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
outf.write(get_pageable_bin(elffile)[init_bin_size:])
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True, type=argparse.FileType('rb'),
help='The input tee.elf')
parser.add_argument('--out_tee_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee.bin')
parser.add_argument('--out_tee_pager_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager.bin')
parser.add_argument('--out_tee_pageable_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable.bin')
parser.add_argument('--out_header_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_header_v2.bin')
parser.add_argument('--out_pager_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager_v2.bin')
parser.add_argument('--out_pageable_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable_v2.bin')
return parser.parse_args()
def main():
args = get_args()
elffile = ELFFile(args.input)
if args.out_tee_bin:
output_header_v1(elffile, args.out_tee_bin)
if args.out_tee_pager_bin:
output_pager_bin(elffile, args.out_tee_pager_bin)
if args.out_tee_pageable_bin:
output_pageable_bin(elffile, args.out_tee_pageable_bin)
if args.out_header_v2:
output_header_v2(elffile, args.out_header_v2)
if args.out_pager_v2:
output_pager_v2(elffile, args.out_pager_v2)
if args.out_pageable_v2:
output_pageable_v2(elffile, args.out_pageable_v2)
if __name__ == "__main__":
main()
| 2.125 | 2 |
CircuitPython_JEplayer_mp3/repeat.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 6022 | <gh_stars>100-1000
# The MIT License (MIT)
#
# Copyright (c) 2020 <NAME> for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Make a key (button) repeat when held down
"""
import time
class KeyRepeat:
"""Track the state of a button and, while it is held, output a press every
'rate' seconds"""
def __init__(self, getter, rate=0.5):
self.getter = getter
self.rate_ns = round(rate * 1e9)
self.next = -1
@property
def value(self):
"""True when a button is first pressed, or once every 'rate' seconds
thereafter"""
state = self.getter()
if not state:
self.next = -1
return False
now = time.monotonic_ns()
if state and now > self.next:
self.next = now + self.rate_ns
return True
return False
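# Hedged usage sketch; the pin name and pull-up wiring below are assumptions
# for illustration, not taken from the JEplayer project:
#
#   import board, digitalio
#   pin = digitalio.DigitalInOut(board.D5)
#   pin.switch_to_input(pull=digitalio.Pull.UP)
#   repeater = KeyRepeat(lambda: not pin.value, rate=0.25)
#   while True:
#       if repeater.value:
#           print("pressed, or held long enough to repeat")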
| 2.15625 | 2 |
Kapitel_1/_1_public_private.py | Geralonx/Classes_Tutorial | 1 | 6023 | <reponame>Geralonx/Classes_Tutorial
# --- Class declaration with constructor --- #
class PC:
    def __init__(self, cpu, gpu, ram):
        self.cpu = cpu
        self.gpu = gpu
        self.__ram = ram
# --- Instantiation of a class --- #
# --- I prefer initialization with keyword arguments --- #
pc_instanz = PC(cpu='Ryzen 7', gpu='RTX2070Super', ram='GSkill')
# --- Access to normal _public_ attributes --- #
print(pc_instanz.cpu)
print(pc_instanz.gpu)
# --- Access to a _private_ attribute --- #
# Commented out because it raises an AttributeError.
# print(pc_instanz.__ram)
# --- Access to the instance dictionary to inspect the contents of that instance. --- #
print(pc_instanz.__dict__)
# --- Access to the actually _private_ attribute via name mangling. --- #
print(pc_instanz._PC__ram)
| 2.734375 | 3 |
algorithm/dynamic_programming/coin_change/solution.py | delaanthonio/hackerrank | 1 | 6024 | <gh_stars>1-10
#!/usr/bin/env python3
"""
The Coin Change Problem
:author: <NAME>
:hackerrank: https://hackerrank.com/delaanthonio
:problem: https://www.hackerrank.com/challenges/coin-change/problem
"""
from typing import List
def count_ways(amount: int, coins: List[int]) -> int:
"""Return the number of ways we can count to ``amount`` with values ``coins``."""
ways = [1] + [0] * amount
for coin in coins:
for val in range(coin, amount + 1):
ways[val] += ways[val - coin]
return ways[-1]
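# Worked example (illustrative, not part of the original submission):
# count_ways(4, [1, 2, 3]) == 4, counting 1+1+1+1, 1+1+2, 2+2 and 1+3.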
def main():
m, n = [int(x) for x in input().strip().split()]
coins = sorted({int(x) for x in input().strip().split()})
print(count_ways(m, coins))
if __name__ == '__main__':
main()
| 3.890625 | 4 |
climbproject/climbapp/admin.py | javawolfpack/ClimbProject | 0 | 6025 | from django.contrib import admin
#from .models import *
from . import models
# Register your models here.
admin.site.register(models.ClimbModel)
| 1.351563 | 1 |
setup.py | TheMagicNacho/artemis-nozzle | 0 | 6026 | <reponame>TheMagicNacho/artemis-nozzle<gh_stars>0
# coding: utf-8
from runpy import run_path
from setuptools import setup
# Get the version from the relevant file
d = run_path('skaero/version.py')
__version__ = d['__version__']
setup(
name="scikit-aero",
version=__version__,
description="Aeronautical engineering calculations in Python.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/Juanlu001/scikit-aero",
license="BSD",
keywords=[
"aero", "aeronautical", "aerospace",
"engineering", "atmosphere", "gas"
],
requires=["numpy", "scipy"],
packages=[
"skaero",
"skaero.atmosphere", "skaero.gasdynamics",
"skaero.util"
],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics"
],
long_description=open('README.rst').read()
)
| 1.695313 | 2 |
appendix/AI.by.Search/backtracking.search/3-1.eight.queens.py | royqh1979/programming_with_python | 5 | 6027 | """
The eight queens problem.
Backtracking implemented iteratively with an explicit stack (no recursion).
"""
def print_board(n,count):
print(f"------解.{count}------")
print(" ",end="")
    for j in range(1, n+1):
print(f"{j:<2}" ,end="")
print()
for i in range(1,n+1):
print(f"{i:<2}",end="")
for j in range(1,n+1):
if queens[i] == j:
print("Q ",end="")
else:
print(" ",end="")
print()
def set_flags(i,j,n):
col_flags[j]=1
diag_flags[i+j-1]=1
diag2_flags[n+i-j]=1
def clear_flags(i,j,n):
col_flags[j]=0
diag_flags[i+j-1]=0
diag2_flags[n+i-j]=0
def can_stay(i,j,n):
if col_flags[j]==1:
return False
if diag_flags[i+j-1]==1:
return False
if diag2_flags[n+i-j]==1:
return False
return True
def try_queen(n):
    global count
    i=1
while True:
queens[i]+=1
if queens[i]>n: # backtracking
i-=1
if i<1: # all possible solutions have been tried, quit searching
break
clear_flags(i,queens[i],n)
elif can_stay(i,queens[i],n):
if i==n:
count += 1
print_board(n, count)
else:
set_flags(i, queens[i], n)
i+=1
queens[i] = 0
def queen(n):
    try_queen(n)
n=int(input("请输入n:"))
queens = [0]*(n+1)
# 列标志
col_flags=[0]*(n+1)
# 主对角线标志
diag_flags = [0]*(2*n)
# 副对角线标志
diag2_flags = [0] * (2*n)
count = 0
queen(n)
print(f"共有{count}种解法\n")
| 3.5 | 4 |
multimodal_affinities/evaluation/analysis/plots_producer.py | amzn/multimodal-affinities | 6 | 6028 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-4.0
import os
import cv2
from collections import namedtuple
import imageio
from PIL import Image
from random import randrange
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import torch
import matplotlib
matplotlib.use('Agg') # Required for gif animations
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.patches as patches
from multimodal_affinities.visualization.vis_handler import VisHandler
from multimodal_affinities.visualization.image_utils import resize_image
from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple
class PlotsProducer:
def __init__(self, document, output_path):
# Load background image
self.image_path = document.image_path
self.img = plt.imread(self.image_path)
self.img_opencv = cv2.imread(self.image_path)
dpi = 120
mpl.rcParams['figure.dpi'] = dpi
height = self.img.shape[0]
width = self.img.shape[1]
self.figsize = width / float(dpi), height / float(dpi) # Fig size in inches
self.document = document
self.output_path = output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
def plot_word_boxes_on_image(self):
set_of_words = [[word] for word in self.document.get_words()] # list of singleton word lists
fig, ax = plt.subplots(1, figsize=self.figsize)
monochrome_colors_list = ['#5a5d8f' for _ in self.document.get_words()]
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='',
entity_sets=set_of_words,
colors_list=monochrome_colors_list)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_word_boxes.png'))
plt.close(fig)
def save_phrase_detection_results(self):
set_of_phrases = [[phrase] for phrase in self.document.get_phrases()] # list of singleton phrase lists
fig, ax = plt.subplots(1, figsize=self.figsize)
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='Phrase Detection', entity_sets=set_of_phrases)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_phrase_detection.png'))
plt.close(fig)
def save_clustering_results(self, with_title=True, colors_list=None):
set_of_clusters = [cluster.words for cluster in self.document.get_clusters()] # list of list of words (clusters)
self._save_set_of_clusters(set_of_clusters, with_title, colors_list)
def save_clustering_labels(self, clustering_labels, colors_list=None):
cluster_ids = np.unique(np.array(clustering_labels))
cluster_id_to_cluster_idx = {cluster_id: idx for idx, cluster_id in enumerate(cluster_ids)}
# Converts from list of labels to list of list of words (clusters)
set_of_clusters = [[] for _ in range(len(cluster_ids))]
for word_idx, word in enumerate(self.document.get_words()):
cluster_id = clustering_labels[word_idx]
if cluster_id == -1: # Ignore non-clustered words
continue
cluster_idx = cluster_id_to_cluster_idx[cluster_id]
set_of_clusters[cluster_idx].append(word)
        self._save_set_of_clusters(set_of_clusters, colors_list=colors_list)
def _save_set_of_clusters(self, set_of_clusters, with_title=True, colors_list=None):
"""
        :param set_of_clusters: list of list of words (clusters)
        :param with_title: currently unused by this OpenCV-based renderer
        :param colors_list: optional list of colors, one per cluster
        :return:
"""
output_img = self._draw_entity_bounding_boxes_opencv(bg_img=self.img_opencv,
entity_sets=set_of_clusters,
colors_list=colors_list)
cv2.imwrite(os.path.join(self.output_path, self.document.basename + '_clustering.png'), output_img)
@staticmethod
def _draw_entity_bounding_boxes_opencv(bg_img, entity_sets, colors_list=None):
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
output_img = bg_img.copy()
alpha = 0.8
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
# writing the text onto the image and returning it
rgb_color = rgb_hex_to_tuple(face_color)
cv2.rectangle(output_img, (int(x), int(y)), (int(x + width), int(y + height)),
(rgb_color[2], rgb_color[1], rgb_color[0]), cv2.FILLED)
output_img = cv2.addWeighted(output_img, alpha, bg_img, 1 - alpha, 0)
return output_img
@staticmethod
def _draw_entity_bounding_boxes(fig, ax, bg_img, title, entity_sets, colors_list=None):
ax.set_title(title)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
plt.imshow(bg_img)
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
rect = patches.Rectangle((x, y), width, height,
linewidth=2,
edgecolor=edge_color,
facecolor=face_color,
alpha=0.4)
ax.add_patch(rect)
@staticmethod
def plot_pca_embedding_space_for_clusters(document, output_path,
embedding_property='embedding',
title=''):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or getattr(words[0], embedding_property) is None:
return
if embedding_property == 'unprojected_embedding':
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
plot_title = embedding_property
if plot_title != '':
plot_title += ': ' + title
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=1, alpha=0.8)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
@staticmethod
def _find_k_furthest_words_per_cluster(document, embeddings_2d, k=3):
""" Greedy approximation algorithm for finding k furthest neighbour words per cluster.
k is expected to be relatively small (< 100)
"""
words = document.get_words()
word_to_embedding_2d_idx = {word: idx for idx, word in enumerate(words)}
clusters = document.get_clusters()
solution_per_cluster = {}
ClusterSolution = namedtuple('ClusterSolution', ['word_indices', 'words'])
for cluster in clusters:
# Generate cluster pairwise distances matrix
all_cluster_embeddings_indices = [word_to_embedding_2d_idx[word] for word in cluster.words]
all_cluster_embeddings = np.take(embeddings_2d, all_cluster_embeddings_indices, axis=0)
pairwise_distances = pdist(all_cluster_embeddings, metric='euclidean')
distances_matrix = squareform(pairwise_distances)
# Total distance from selected set so far
distances_accumulator = np.zeros(len(cluster.words))
# Sample first point
random_index = randrange(len(cluster.words))
# Indices of selected points
selected_points = [random_index]
# How many points we need to add
points_to_calc_count = min(k - 1, len(words) - 1)
for _ in range(points_to_calc_count):
last_point_selected = selected_points[-1]
# Update accumulator with distance collected from last point
distances_accumulator += distances_matrix[last_point_selected]
# Eliminate last point selected from distance matrix & accumulator
                distances_matrix[:, last_point_selected] = 0
                distances_matrix[last_point_selected, :] = 0
                furthest_point_from_set = np.argmax(distances_accumulator, axis=0)
                selected_points.append(furthest_point_from_set)
selected_words = [cluster.words[point] for point in selected_points]
selected_word_indices = [word_to_embedding_2d_idx[word] for word in selected_words]
solution_per_cluster[cluster] = ClusterSolution(word_indices=selected_word_indices, words=selected_words)
return solution_per_cluster
@staticmethod
def _extract_crops_per_cluster_solution(document, solution_per_cluster):
"""
Extracts crops for each selected word in k-furthest neighbours solution
:param document:
:param solution_per_cluster: Solution of k-furthest neighbours
:return:
"""
word_indices_to_crops = {}
for cluster, cluster_solution in solution_per_cluster.items():
for word_index, word in zip(cluster_solution.word_indices, cluster_solution.words):
bbox = word.get_bbox() # left, top, width, height
y_min = int(round(bbox[1] * document.height))
y_max = int(round((bbox[1] + bbox[3]) * document.height))
x_min = int(round(bbox[0] * document.width))
x_max = int(round((bbox[0] + bbox[2]) * document.width))
image_of_crop = document.image[max(0, y_min):min(y_max, document.height),
max(0, x_min):min(x_max, document.width), :]
pil_image = Image.fromarray(image_of_crop[...,::-1]) # BGR to RGB
pil_image = pil_image.convert('RGB')
word_indices_to_crops[word_index] = pil_image
return word_indices_to_crops
@staticmethod
def _space_out_crops(indices_to_crops, words, x_list, y_list, dist_from_pt=0.01, height=0.02):
"""
Calculates the positions and dimensions of crop images on the embedding space plot.
Makes sure crops don't overlay each other.
This method assumes a small number of crops (< 1000) and performs a naive linear comparison for each crop.
:param indices_to_crops: dict of word index (by order in doc) to PIL crop
:param words: List of words
:param x_list: List of corresponding pt x positions
:param y_list: List of corresponding pt y positions
:param dist_from_pt: How far in (x-y) coords the crop should be placed from the plot
:param height: Height of the crop, in figure axes dimensions (note: for normalized pca space: -1 to 1)
:return: indices_to_extents: dict of word index to extens describing position and dimensions of each crop.
Crops are shifted so they don't cover each other,
"""
indices_to_extents = {}
MatplotExtent = namedtuple('matplot_extent', ['left', 'right', 'bottom', 'top'])
is_extent_x_intersect = lambda e1, e2: not (e1.right < e2.left or e1.left > e2.right)
is_extent_y_intersect = lambda e1, e2: not (e1.top > e2.bottom or e1.bottom < e2.top)
is_extent_intersect = lambda e1, e2: is_extent_x_intersect(e1, e2) and is_extent_y_intersect(e1, e2)
min_x, max_x = min(x_list), max(x_list)
min_y, max_y = min(y_list), max(y_list)
height = (max_y - min_y) * height
dist_from_pt = min(max_y - min_y, max_x - min_x) * dist_from_pt
for point_index, crop in indices_to_crops.items():
word_aspect_ratio = words[point_index].geometry.width / words[point_index].geometry.height
axis_ratio = (max_x-min_x) / (max_y-min_y) / 2
width = height * word_aspect_ratio * axis_ratio
left, right = x_list[point_index] + dist_from_pt, x_list[point_index] + dist_from_pt + width
bottom, top = y_list[point_index] + dist_from_pt + height, y_list[point_index] + dist_from_pt
overlap = True
while overlap:
overlap = False
extent = MatplotExtent(left, right, bottom, top)
for other_crop_extent in indices_to_extents.values():
other_left, other_right, other_bottom, other_top = other_crop_extent
spaceout_margin = dist_from_pt / 2
if is_extent_intersect(extent, other_crop_extent):
overlap = True
# shift below
if other_bottom <= top <= other_top:
top = other_bottom + spaceout_margin
bottom = top + height
else: # shift above
bottom = other_top - spaceout_margin
top = bottom - height
continue
indices_to_extents[point_index] = extent
return indices_to_extents
def plot_clusters_and_embedding_space_with_crops(self, document, output_path, crops_per_cluster=3,
embedding_properties=['embedding', 'unprojected_embedding'],
unprojected_caption=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or \
all([getattr(words[0], embedding_property) is None for embedding_property in embedding_properties]):
return
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
# Initially empty, the first embedding property we process will set those for all figures
selected_word_crops_per_cluster = None
indices_to_crops = None
for embedding_property in embedding_properties:
if embedding_property == 'unprojected_embedding': # Can't handle tuples, concat them
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
if crops_per_cluster > 0:
if selected_word_crops_per_cluster is None and indices_to_crops is None: # Calculate per first attribute
selected_word_crops_per_cluster = PlotsProducer._find_k_furthest_words_per_cluster(document, embeddings_2d, k=crops_per_cluster)
indices_to_crops = PlotsProducer._extract_crops_per_cluster_solution(document, selected_word_crops_per_cluster)
indices_to_extents = PlotsProducer._space_out_crops(indices_to_crops, words,
x_list, y_list, dist_from_pt=0.02, height=0.04)
# Plot crop images
for point_index, crop in indices_to_crops.items():
extent = indices_to_extents[point_index]
rect = patches.Rectangle((extent.left, extent.top), extent.right-extent.left, extent.bottom-extent.top,
linewidth=0.5,
edgecolor="black",
facecolor="none",
zorder=5)
ax.imshow(crop, aspect='auto', alpha=0.65, extent=extent, zorder=4)
ax.add_patch(rect)
# Plot points
if embedding_property == 'unprojected_embedding':
plot_title = 'Initial unprojected embeddings, pre training (PCA)'
else:
if unprojected_caption is None:
plot_title = 'Projected embeddings, post training (PCA)'
else:
plot_title = unprojected_caption
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
# Finally plot clusters on original image
self.save_clustering_results(with_title=False, colors_list=colors_palette)
return colors_palette
@staticmethod
def animate_pca_embedding_space_for_clusters(document, output_path, embeddings_history, colors_palette=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or embeddings_history is None or len(embeddings_history) == 0:
return
if colors_palette is None:
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
scatter_data = []
for state_idx, embeddings_state in enumerate(embeddings_history):
epoch = state_idx + 1
normalized_embeddings_dict = embeddings_state['normalized']
unnormalized_embeddings_dict = embeddings_state['unnormalized']
if len(normalized_embeddings_dict) > 0:
normalized_embeddings = [normalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = normalized_embeddings
elif len(unnormalized_embeddings_dict) > 0:
unnormalized_embeddings = [unnormalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = unnormalized_embeddings
else:
return
embeddings_array = np.array(chosen_embedding).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
push_pull_ratio = embeddings_state['push_pull_ratio']
scatter_data.append((epoch, x_list, y_list, push_pull_ratio))
min_x = min(min(scatter_data, key=lambda entry: min(entry[1]))[1])
max_x = max(max(scatter_data, key=lambda entry: max(entry[1]))[1])
min_y = min(min(scatter_data, key=lambda entry: min(entry[2]))[2])
max_y = max(max(scatter_data, key=lambda entry: max(entry[2]))[2])
        padding_factor = 0.1
        x_range = max_x - min_x
        y_range = max_y - min_y
        min_x -= x_range * padding_factor
        max_x += x_range * padding_factor
        min_y -= y_range * padding_factor
        max_y += y_range * padding_factor
frames = []
for epoch, x_list, y_list, push_pull_ratio in scatter_data:
fig, ax = plt.subplots(1)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
plot_title = 'Projected embeddings at epoch #' + str(epoch) + ' (PCA)'
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
            # Used to return the plot as an image array
fig.tight_layout()
fig.canvas.draw() # draw the canvas, cache the renderer
output_frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
output_frame = output_frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))
frames.append(output_frame)
imageio.mimsave(os.path.join(output_path, document.basename + '_embeddings_history.gif'), frames, fps=2)
| 2.265625 | 2 |
openstates/openstates-master/openstates/de/legislators.py | Jgorsick/Advocacy_Angular | 0 | 6029 | import re
import lxml.html
from openstates.utils import LXMLMixin
from billy.scrape.legislators import LegislatorScraper, Legislator
class DELegislatorScraper(LegislatorScraper,LXMLMixin):
jurisdiction = 'de'
def scrape(self, chamber, term):
url = {
'upper': 'http://legis.delaware.gov/legislature.nsf/sen?openview',
'lower': 'http://legis.delaware.gov/Legislature.nsf/Reps?openview',
}[chamber]
doc = self.lxmlize(url)
if chamber == "upper":
#for the senate, it's the same table
#but the html is hard-coded in js.
table_js = doc.xpath('.//script')[-1].text_content()
table = None
for line in table_js.split("\n"):
if line.strip().startswith("var") and "sen=" in line:
table = line.replace("var","")
table = table.replace('sen="<','<')
table = table.replace('>";','>')
break
assert table is not None, "Senate table could not be found"
table = lxml.html.fromstring(table)
table.make_links_absolute(url)
trs = table.xpath('//tr')
else:
#same table for the house, but kindly in actual html
trs = doc.xpath('//tr')
base_url = "http://legis.delaware.gov"
for tr in trs:
name_and_url = tr.xpath('.//a')[0]
bio_url = name_and_url.attrib["href"]
bio_url = bio_url.replace("JavaScript:window.top.location.href=","")
bio_url = bio_url.replace('"','')
name = name_and_url.text_content()
if name.strip() == "." or name.strip() == "":
continue
if name.strip().lower().startswith("vacant"):
continue
re_spaces=re.compile(r'\s{1,5}')
name = ' '.join(re_spaces.split(name))
district = tr.xpath('.//td')[2].text_content()
district = district.replace("District:","").strip()
leg = self.scrape_bio(term, chamber, district, name, bio_url)
leg.add_source(bio_url, page="legislator detail page")
leg.add_source(url, page="legislator list page")
self.save_legislator(leg)
def scrape_bio(self, term, chamber, district, name, url):
# this opens the committee section without having to do another request
url += '&TableRow=1.5.5'
frame_doc = self.lxmlize(url)
actual_url = frame_doc.xpath("//frame[@name='right']/@src")[0]
doc = self.lxmlize(actual_url)
# party is in one of these
party = doc.xpath('//div[@id="page_header"]')[0].text.strip()[-3:]
if '(D)' in party:
party = 'Democratic'
elif '(R)' in party:
party = 'Republican'
else:
raise AssertionError("No party found for {name}".format(name=name))
leg = Legislator(term, chamber, district, name, party=party)
photo_url = doc.xpath('//img[contains(@src, "jpg")]/@src')
if photo_url:
leg['photo_url'] = photo_url[0]
contact_info = self.scrape_contact_info(doc)
leg.update(contact_info)
return leg
def scrape_contact_info(self, doc):
# Email
email = doc.xpath(".//a[contains(@href,'mailto')]")
email = email[0].text_content().strip()
leg_email = None
dist_email = None
try:
emails = email.split(";")
except AttributeError:
pass
else:
for e in emails:
e = e.strip()
if e:
if "state.de.us" in e:
leg_email = e
else:
dist_email = e
# Offices
leg_office = dict(name="Capitol Office", type="capitol",
phone=None, fax=None, email=leg_email, address=None)
dist_office = dict(name="Outside Office", type="capitol",
phone=None,fax=None, email=dist_email, address=None)
#this is enormously painful, DE.
office_list = doc.xpath("//tr")
for office in office_list:
title_td = 0
#in some trs the photo is the first td
if len(office.xpath("./td/img")) > 0:
title_td = 1
try:
title_text = office.xpath("./td")[title_td].text_content().lower()
content = office.xpath("./td")[title_td+1].text_content()
except IndexError:
continue
leg_office = self.add_contact("legislative",
title_text,content,leg_office)
dist_office = self.add_contact("outside",
title_text,content,dist_office)
offices = [o for o in [leg_office,dist_office] if o["address"]]
        assert len(offices) > 0, "No offices with addresses found; "\
            "make sure we're not losing any data."
return {"offices":offices}
def add_contact(self,office_type,
title_text,content,office):
#office type is the name of the office
#either "legislative" or "outside"
if "{} office".format(office_type) in title_text:
office["address"] = content.strip()
if "{} phone".format(office_type) in title_text:
phones = content.lower().split("\n")
if len(phones) == 1:
phone = self.clean_phone(phones[0])
if phone:
office["phone"] = phone
else:
for line in phones:
if "phone" in line:
phone = self.clean_phone(line)
if phone:
office["phone"] = phone
elif "fax" in line:
phone = self.clean_phone(line)
if phone:
office["fax"] = phone
return office
def clean_phone(self,phone):
if not phone.strip():
return
if not re.search("\d",phone):
return
if not ":" in phone:
return phone
return phone.split(":")[1].strip()
| 2.890625 | 3 |
simpleredial/dataloader/fine_grained_test_dataloader.py | gmftbyGMFTBY/SimpleReDial-v1 | 36 | 6030 | <gh_stars>10-100
from header import *
from .utils import *
from .util_func import *
'''Only for Testing'''
class FineGrainedTestDataset(Dataset):
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.splitext(path)[0]}_fg_test_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
self.data = []
for fix in ['brandenwang', 'lt', 'lt2']:
path = f'{args["root_dir"]}/data/{args["dataset"]}/fg-{fix}-test.txt'
data = read_text_data_utterances(path, lang=self.args['lang'])
for i in tqdm(range(0, len(data), 7)):
batch = data[i:i+7]
rids = []
for label, utterances in batch:
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
cids, rids_ = item[:-1], item[-1]
ids = []
for u in cids:
ids.extend(u + [self.sep])
ids.pop()
ids = ids[-(self.args['max_len']-2):] # ignore [CLS] and [SEP]
rids_ = rids_[:(self.args['res_max_len']-2)]
ids = [self.cls] + ids + [self.sep]
rids_ = [self.cls] + rids_ + [self.sep]
rids.append(rids_)
self.data.append({
'label': [b[0] for b in batch],
'ids': ids,
'rids': rids,
'text': ['\t'.join(b[1]) for b in batch],
'owner': fix,
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = torch.LongTensor(bundle['ids'])
rids = [torch.LongTensor(i) for i in bundle['rids']]
return ids, rids, bundle['label'], bundle['text'], bundle['owner']
def save(self):
data = torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
assert len(batch) == 1
ids, rids, label, text, owner = batch[0]
rids = pad_sequence(rids, batch_first=True, padding_value=self.pad)
rids_mask = generate_mask(rids)
label = torch.LongTensor(label)
ids, rids, rids_mask, label = to_cuda(ids, rids, rids_mask, label)
return {
'ids': ids,
'rids': rids,
'rids_mask': rids_mask,
'label': label,
'text': text,
'owner': owner,
}
class FineGrainedTestPositionWeightDataset(Dataset):
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
self.special_tokens = set([self.unk, self.cls, self.sep])
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.splitext(path)[0]}_fg_test_pw_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
self.data = []
for fix in ['brandenwang', 'lt', 'lt2']:
path = f'{args["root_dir"]}/data/{args["dataset"]}/fg-{fix}-test.txt'
data = read_text_data_utterances(path, lang=self.args['lang'])
for i in tqdm(range(0, len(data), 7)):
batch = data[i:i+7]
rids = []
for label, utterances in batch:
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
cids, rids_ = item[:-1], item[-1]
ids = []
position_w, w = [], self.args['min_w']
for u in cids:
ids.extend(u + [self.sep])
for token in u + [self.sep]:
if token not in self.special_tokens:
position_w.append(w)
else:
position_w.append(self.args['w_sp_token'])
w += self.args['w_delta']
ids.pop()
position_w.pop()
ids = ids[-(self.args['max_len']-2):] # ignore [CLS] and [SEP]
position_w = position_w[-(self.args['max_len']-2):]
rids_ = rids_[:(self.args['res_max_len']-2)]
ids = [self.cls] + ids + [self.sep]
position_w = [w-self.args['w_delta']] + position_w + [self.args['w_sp_token']]
rids_ = [self.cls] + rids_ + [self.sep]
rids.append(rids_)
self.data.append({
'label': [b[0] for b in batch],
'ids': ids,
'rids': rids,
'text': ['\t'.join(b[1]) for b in batch],
'position_w': position_w,
'owner': fix,
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = torch.LongTensor(bundle['ids'])
rids = [torch.LongTensor(i) for i in bundle['rids']]
position_w = torch.tensor(bundle['position_w'])
return ids, rids, position_w, bundle['label'], bundle['text'], bundle['owner']
def save(self):
data = torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
assert len(batch) == 1
ids, rids, pos_w, label, text, owner = batch[0]
rids = pad_sequence(rids, batch_first=True, padding_value=self.pad)
rids_mask = generate_mask(rids)
label = torch.LongTensor(label)
ids, rids, pos_w, rids_mask, label = to_cuda(ids, rids, pos_w, rids_mask, label)
return {
'ids': ids,
'rids': rids,
'rids_mask': rids_mask,
'pos_w': pos_w,
'label': label,
'text': text,
'owner': owner,
}
class FineGrainedTestInteractionDataset(Dataset):
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.splitext(path)[0]}_fg_interaction_test_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
self.data = []
for fix in ['brandenwang', 'lt', 'lt2']:
path = f'{args["root_dir"]}/data/{args["dataset"]}/fg-{fix}-test.txt'
data = read_text_data_utterances(path, lang=self.args['lang'])
for i in tqdm(range(0, len(data), 7)):
batch = data[i:i+7]
rids = []
ids, tids = [], []
context, responses = [], []
for _, utterances in batch:
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
cids = []
for u in item[:-1]:
cids.extend(u + [self.eos])
cids.pop()
rids = item[-1]
truncate_pair(cids, rids, self.args['max_len'])
ids_ = [self.cls] + cids + [self.sep] + rids + [self.sep]
tids_ = [0] * (len(cids) + 2) + [1] * (len(rids) + 1)
ids.append(ids_)
tids.append(tids_)
responses.append(utterances[-1])
context = ' [SEP] '.join(utterances[:-1])
self.data.append({
'label': [b[0] for b in batch],
'ids': ids,
'tids': tids,
'context': context,
'responses': responses,
'owner': fix,
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = [torch.LongTensor(i) for i in bundle['ids']]
tids = [torch.LongTensor(i) for i in bundle['tids']]
context, responses = bundle['context'], bundle['responses']
return ids, tids, bundle['label'], context, responses, bundle['owner']
def save(self):
data = torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
assert len(batch) == 1
ids, tids, label, context, responses, owner = batch[0]
ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
tids = pad_sequence(tids, batch_first=True, padding_value=self.pad)
label = torch.LongTensor(label)
mask = generate_mask(ids)
ids, tids, mask, label = to_cuda(ids, tids, mask, label)
return {
'ids': ids,
'tids': tids,
'mask': mask,
'label': label,
'owner': owner,
}
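
# Illustrative wiring of the datasets above (added sketch; the tokenizer class
# and the `path`/`args` values are assumptions -- the real configuration comes
# from the project's config files).  `collate` asserts exactly one bundle per
# batch, so the DataLoader must use batch_size=1:
#
#     from torch.utils.data import DataLoader
#     from transformers import BertTokenizer
#
#     vocab = BertTokenizer.from_pretrained('bert-base-chinese')
#     dataset = FineGrainedTestDataset(vocab, path, **args)
#     loader = DataLoader(dataset, batch_size=1, shuffle=False,
#                         collate_fn=dataset.collate)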
| 2.28125 | 2 |
dabing/DABING-MIB.py | SvajkaJ/dabing | 0 | 6031 | <filename>dabing/DABING-MIB.py
#
# PySNMP MIB module DABING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file://..\DABING-MIB.mib
# Produced by pysmi-0.3.4 at Tue Mar 22 12:53:47 2022
# On host ? platform ? version ? by user ?
# Using Python version 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 22:45:29) [MSC v.1916 32 bit (Intel)]
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, IpAddress, ObjectIdentity, iso, Counter32, Unsigned32, Bits, NotificationType, TimeTicks, Counter64, enterprises, MibIdentifier, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "IpAddress", "ObjectIdentity", "iso", "Counter32", "Unsigned32", "Bits", "NotificationType", "TimeTicks", "Counter64", "enterprises", "MibIdentifier", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
dabing = ModuleIdentity((1, 3, 6, 1, 4, 1, 55532))
dabing.setRevisions(('2022-03-17 00:00',))
if mibBuilder.loadTexts: dabing.setLastUpdated('202203170000Z')
if mibBuilder.loadTexts: dabing.setOrganization('www.stuba.sk')
Parameters = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 1))
Agent = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 2))
Manager = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 3))
Notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4))
NotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 1))
NotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 2))
channel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 1), OctetString().clone('12C')).setMaxAccess("readonly")
if mibBuilder.loadTexts: channel.setStatus('current')
interval = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 2), Integer32().clone(960)).setMaxAccess("readonly")
if mibBuilder.loadTexts: interval.setStatus('current')
trapEnabled = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapEnabled.setStatus('current')
agentIdentifier = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIdentifier.setStatus('current')
agentLabel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 2), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentLabel.setStatus('current')
agentStatus = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentStatus.setStatus('current')
managerHostname = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerHostname.setStatus('current')
managerPort = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 2), Integer32().clone(162)).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerPort.setStatus('current')
genericPayload = MibScalar((1, 3, 6, 1, 4, 1, 55532, 4, 2, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: genericPayload.setStatus('current')
malfunctionTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 1)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: malfunctionTrap.setStatus('current')
testTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 2)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: testTrap.setStatus('current')
mibBuilder.exportSymbols("DABING-MIB", Notifications=Notifications, channel=channel, PYSNMP_MODULE_ID=dabing, testTrap=testTrap, malfunctionTrap=malfunctionTrap, Parameters=Parameters, agentLabel=agentLabel, managerPort=managerPort, trapEnabled=trapEnabled, managerHostname=managerHostname, Manager=Manager, NotificationPrefix=NotificationPrefix, Agent=Agent, genericPayload=genericPayload, NotificationObjects=NotificationObjects, agentIdentifier=agentIdentifier, dabing=dabing, agentStatus=agentStatus, interval=interval)
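# Illustrative usage (added comment; not part of the generated file): a MIB
# module like this is normally consumed through pysnmp's MIB machinery rather
# than imported directly, assuming the file sits on the builder's MIB path:
#
#     from pysnmp.smi import builder
#
#     mibBuilder = builder.MibBuilder()
#     mibBuilder.loadModules('DABING-MIB')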
| 1.609375 | 2 |
parameter_setup/run_setup_extra_vis.py | kharris/allen-voxel-network | 7 | 6032 | import os
import numpy as np
save_stem='extra_vis_friday_harbor'
data_dir='../../data/sdk_new_100'
resolution=100
cre=False
source_acronyms=['VISal','VISam','VISl','VISp','VISpl','VISpm',
'VISli','VISpor','VISrl','VISa']
lambda_list = np.logspace(3,12,10)
scale_lambda=True
min_vox=0
# save_file_name='visual_output.hdf5'
#source_coverage=0.90
source_coverage=0.95
#source_shell = 1
source_shell=None
save_dir=os.path.join('../../data/connectivities',save_stem)
experiments_fn=None
target_acronyms=source_acronyms
solver=os.path.abspath('../smoothness_c/solve')
cmdfile=os.path.join(save_dir,'model_fitting_cmds')
selected_fit_cmds=os.path.join(save_dir,'model_fitting_after_selection_cmds')
save_mtx=True
cross_val_matrices=True
cross_val=5
fit_gaussian=False
select_one_lambda=False
if select_one_lambda:
lambda_fn='lambda_opt'
else:
lambda_fn='lambda_ipsi_contra_opt'
laplacian='free'
shuffle_seed=666
max_injection_volume=0.7
| 1.75 | 2 |
examples/runall.py | GNiklas/MOSSEPy | 0 | 6033 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 09:42:39 2020
@author: niklas
"""
from mossepy.mosse_tracker import MOSSE
# choose position of object in first frame
# that should be done by mouse click
objPos = [256, 256]
# choose tracker type
tracker = MOSSE()
# initialize object position in first frame
tracker.setObjPos(objPos)
# start tracking
tracker.trackImg() | 2.734375 | 3 |
core/formulas.py | mike006322/PolynomialCalculator | 0 | 6034 | def solve(polynomial):
"""
input is polynomial
if more than one variable, returns 'too many variables'
looks for formula to apply to coefficients
returns solution or 'I cannot solve yet...'
"""
if len(polynomial.term_matrix[0]) > 2:
return 'too many variables'
elif len(polynomial.term_matrix[0]) == 1:
return polynomial.term_matrix[1][0]
elif len(polynomial.term_matrix[0]) == 2:
degree = polynomial.term_matrix[1][1]
if degree == 1:
if len(polynomial.term_matrix) == 2:
return 0
else:
return -polynomial.term_matrix[2][0]/polynomial.term_matrix[1][0]
if degree == 2:
ans = quadratic_formula(polynomial)
return ans
if degree > 2:
return Durand_Kerner(polynomial)
def quadratic_formula(polynomial):
"""
input is single-variable polynomial of degree 2
returns zeros
"""
if len(polynomial.term_matrix) == 3:
if polynomial.term_matrix[2][1] == 1:
a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return 0, -b/a
a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return (-c/a)**.5, -(-c/a)**.5
if len(polynomial.term_matrix) == 2:
a, b, c, = polynomial.term_matrix[1][0], 0, 0
elif len(polynomial.term_matrix) == 3:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], 0
else:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], polynomial.term_matrix[3][0]
    ans1 = (-b + (b**2 - 4*a*c)**.5)/(2*a)
    ans2 = (-b - (b**2 - 4*a*c)**.5)/(2*a)
if ans1 == ans2:
return ans1
return ans1, ans2
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0001):
"""
returns boolean whether abs(a-b) is less than abs_total or rel_total*max(a, b)
"""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def Durand_Kerner(f):
"""
input polynomial
returns numerical approximation of all complex roots
"""
roots = []
for i in range(f.degree()):
roots.append((0.4 + 0.9j)**i)
diff = 1
diff_temp = 0
def iterate():
nonlocal roots
new_roots = roots[:]
for i in range(len(roots)):
q = 1
for j, root in enumerate(roots):
if j != i:
q *= roots[i] - root
new_roots[i] = roots[i] - f(roots[i])/q
nonlocal diff
nonlocal diff_temp
diff_temp = diff
diff = 0
for i in range(len(roots)):
diff += abs(roots[i] - new_roots[i])
roots = new_roots
while diff > .00000001 and not isclose(diff_temp, diff):
iterate()
for i in range(len(roots)):
if isclose(roots[i].real, round(roots[i].real)):
temp = round(roots[i].real)
roots[i] -= roots[i].real
roots[i] += temp
if isclose(roots[i].imag, round(roots[i].imag)):
temp = round(roots[i].imag)
roots[i] -= roots[i].imag*1j
roots[i] += temp*1j
return roots
if __name__ == '__main__':
pass
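    # --- illustrative demo (added); `_DemoCubic` is a hypothetical stand-in,
    # not part of this package.  Durand_Kerner only needs `degree()` and
    # `__call__`, so a real Polynomial instance works the same way.
    class _DemoCubic:
        def degree(self):
            return 3

        def __call__(self, x):
            return x**3 - 1

    print(Durand_Kerner(_DemoCubic()))  # approximates the three cube roots of 1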
| 3.828125 | 4 |
manim/mobject/svg/style_utils.py | 5Points7Edges/manim | 0 | 6035 | <reponame>5Points7Edges/manim
"""Utility functions for parsing SVG styles."""
__all__ = ["cascade_element_style", "parse_style", "parse_color_string"]
from xml.dom.minidom import Element as MinidomElement
from colour import web2hex
from ...utils.color import rgb_to_hex
from typing import Dict, List
CASCADING_STYLING_ATTRIBUTES: List[str] = [
"fill",
"stroke",
"fill-opacity",
"stroke-opacity",
]
# The default styling specifications for SVG images,
# according to https://www.w3.org/TR/SVG/painting.html
# (ctrl-F for "initial")
SVG_DEFAULT_ATTRIBUTES: Dict[str, str] = {
"fill": "black",
"fill-opacity": "1",
"stroke": "none",
"stroke-opacity": "1",
}
def cascade_element_style(
element: MinidomElement, inherited: Dict[str, str]
) -> Dict[str, str]:
"""Collect the element's style attributes based upon both its inheritance and its own attributes.
SVG uses cascading element styles. A closer ancestor's style takes precedence over a more distant ancestor's
style. In order to correctly calculate the styles, the attributes are passed down through the inheritance tree,
updating where necessary.
Note that this method only copies the values and does not parse them. See :meth:`parse_color_string` for converting
from SVG attributes to manim keyword arguments.
Parameters
----------
element : :class:`MinidomElement`
Element of the SVG parse tree
inherited : :class:`dict`
Dictionary of SVG attributes inherited from the parent element.
Returns
-------
:class:`dict`
Dictionary mapping svg attributes to values with `element`'s values overriding inherited values.
"""
style = inherited.copy()
# cascade the regular elements.
for attr in CASCADING_STYLING_ATTRIBUTES:
entry = element.getAttribute(attr)
if entry:
style[attr] = entry
# the style attribute should be handled separately in order to
# break it up nicely. furthermore, style takes priority over other
# attributes in the same element.
style_specs = element.getAttribute("style")
if style_specs:
for style_spec in style_specs.split(";"):
try:
key, value = style_spec.split(":")
except ValueError as e:
if not style_spec.strip():
# there was just a stray semicolon at the end, producing an emptystring
pass
else:
raise e
else:
style[key.strip()] = value.strip()
return style
def parse_color_string(color_spec: str) -> str:
"""Handle the SVG-specific color strings and convert them to HTML #rrggbb format.
Parameters
----------
color_spec : :class:`str`
String in any web-compatible format
Returns
-------
:class:`str`
Hexadecimal color string in the format `#rrggbb`
"""
if color_spec[0:3] == "rgb":
# these are only in integer form, but the Colour module wants them in floats.
splits = color_spec[4:-1].split(",")
if splits[0][-1] == "%":
# if the last character of the first number is a percentage,
# then interpret the number as a percentage
parsed_rgbs = [float(i[:-1]) / 100.0 for i in splits]
else:
parsed_rgbs = [int(i) / 255.0 for i in splits]
hex_color = rgb_to_hex(parsed_rgbs)
elif color_spec[0] == "#":
# its OK, parse as hex color standard.
hex_color = color_spec
else:
# attempt to convert color names like "red" to hex color
hex_color = web2hex(color_spec, force_long=True)
return hex_color
def fill_default_values(svg_style: Dict) -> None:
"""
Fill in the default values for properties of SVG elements,
if they are not currently set in the style dictionary.
Parameters
----------
svg_style : :class:`dict`
Style dictionary with SVG property names. Some may be missing.
    Returns
    -------
    None
        ``svg_style`` is updated in place so that none of the default attributes are missing.
"""
for key in SVG_DEFAULT_ATTRIBUTES:
if key not in svg_style:
svg_style[key] = SVG_DEFAULT_ATTRIBUTES[key]
def parse_style(svg_style: Dict[str, str]) -> Dict:
"""Convert a dictionary of SVG attributes to Manim VMobject keyword arguments.
Parameters
----------
svg_style : :class:`dict`
Style attributes as a string-to-string dictionary. Keys are valid SVG element attributes (fill, stroke, etc)
Returns
-------
:class:`dict`
Style attributes, but in manim kwargs form, e.g., keys are fill_color, stroke_color
"""
manim_style = {}
fill_default_values(svg_style)
if "fill-opacity" in svg_style:
manim_style["fill_opacity"] = float(svg_style["fill-opacity"])
if "stroke-opacity" in svg_style:
manim_style["stroke_opacity"] = float(svg_style["stroke-opacity"])
# nones need to be handled specially
if "fill" in svg_style:
if svg_style["fill"] == "none":
manim_style["fill_opacity"] = 0
else:
manim_style["fill_color"] = parse_color_string(svg_style["fill"])
if "stroke" in svg_style:
if svg_style["stroke"] == "none":
# In order to not break animations.creation.Write,
# we interpret no stroke as stroke-width of zero and
# color the same as the fill color, if it exists.
manim_style["stroke_width"] = 0
if "fill_color" in manim_style:
manim_style["stroke_color"] = manim_style["fill_color"]
else:
manim_style["stroke_color"] = parse_color_string(svg_style["stroke"])
return manim_style
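

if __name__ == "__main__":
    # Quick illustrative check (added; not part of the manim API).  A red fill
    # with no stroke collapses to a zero-width stroke of the fill colour:
    print(parse_style({"fill": "red", "stroke": "none", "fill-opacity": "0.5"}))
    # -> {'fill_opacity': 0.5, 'stroke_opacity': 1.0, 'fill_color': '#ff0000',
    #     'stroke_width': 0, 'stroke_color': '#ff0000'}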
| 2.65625 | 3 |
iotrigger.py | mm011106/iotrigger | 0 | 6036 | #!/usr/bin/env python
#coding:utf-8
import os
import RPi.GPIO as GPIO #
import json
from time import sleep #
from twython import Twython
f=open("tw_config.json",'r')
config=json.load(f)
f.close()
CONSUMER_KEY =config['consumer_key']
CONSUMER_SECRET =config['consumer_secret']
ACCESS_TOKEN =config['access_token']
ACCESS_SECRET =config['access_secret']
dist=config['dist']
def on_positive_edge(channel):
#time stamp
timestamp = 'date +%F_%H:%M:%S'
current_time=os.popen(timestamp).readline().strip()
# get CPU temperature
cmd = '/opt/vc/bin/vcgencmd measure_temp'
line = os.popen(cmd).readline().strip()
temp = line.split('=')[1].split("'")[0]
direct_message='CPU:'+temp+'deg @'+current_time+' : by Python script'
global ledstate
if channel == trigger_input:
ledstate = not ledstate
GPIO.output(25, ledstate)
api.send_direct_message(text=direct_message ,screen_name=dist)
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)
trigger_input=21
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)
GPIO.setup(trigger_input, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(trigger_input, GPIO.RISING, callback=on_positive_edge, bouncetime=1000)
ledstate = GPIO.LOW
try:
while True:
sleep(0.01)
except KeyboardInterrupt: #
pass
GPIO.cleanup() #
| 2.578125 | 3 |
src/wheezy/template/tests/test_utils.py | nxsofsys/wheezy.template | 2 | 6037 |
""" Unit tests for ``wheezy.templates.utils``.
"""
import unittest
class FindAllBalancedTestCase(unittest.TestCase):
""" Test the ``find_all_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``([`` return.
"""
from wheezy.template.utils import find_all_balanced
assert 0 == find_all_balanced('test([', 0)
assert 3 == find_all_balanced('test([', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 4 == find_all_balanced('test(a, b', 4)
assert 4 == find_all_balanced('test[a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test(a, b)', 4)
assert 13 == find_all_balanced('test(a, b)[0]', 4)
assert 12 == find_all_balanced('test(a, b())', 4)
assert 17 == find_all_balanced('test(a, b())[0]()', 4)
class FindBalancedTestCase(unittest.TestCase):
""" Test the ``find_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``start_sep`` return.
"""
from wheezy.template.utils import find_balanced
assert 0 == find_balanced('test(', 0)
assert 3 == find_balanced('test(', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_balanced
assert 4 == find_balanced('test(a, b', 4)
assert 4 == find_balanced('test(a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test(a, b)', 4)
assert 12 == find_balanced('test(a, b())', 4)
| 3.296875 | 3 |
akshare/economic/macro_constitute.py | peterrosetu/akshare | 1 | 6038 | <gh_stars>1-10
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/10/21 12:08
Desc: Fetches Jin10 Data Center - Major Institutions - Macroeconomics data
"""
import json
import time
import pandas as pd
import requests
from tqdm import tqdm
from akshare.economic.cons import (
JS_CONS_GOLD_ETF_URL,
JS_CONS_SLIVER_ETF_URL,
JS_CONS_OPEC_URL,
)
def macro_cons_gold_volume():
"""
    Holdings report of the world's largest gold ETF, SPDR Gold Trust; data available from 20041118 onwards
:return: pandas.Series
2004-11-18 8.09
2004-11-19 57.85
2004-11-22 87.09
2004-11-23 87.09
2004-11-24 96.42
...
2019-10-20 924.64
2019-10-21 924.64
2019-10-22 919.66
2019-10-23 918.48
2019-10-24 918.48
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总库存(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_volume"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_gold_change():
"""
    Holdings report of the world's largest gold ETF, SPDR Gold Trust; data available from 20041118 onwards
:return: pandas.Series
2004-11-18 0
2004-11-19 49.76
2004-11-22 29.24
2004-11-23 0.00
2004-11-24 9.33
...
2019-10-20 0.00
2019-10-21 0.00
2019-10-22 -4.98
2019-10-23 -1.18
2019-10-24 0.00
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["增持/减持(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_change"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_gold_amount():
"""
    Holdings report of the world's largest gold ETF, SPDR Gold Trust; data available from 20041118 onwards
:return: pandas.Series
2004-11-18 114920000.00
2004-11-19 828806907.20
2004-11-22 1253785205.50
2004-11-23 1254751438.19
2004-11-24 1390568824.08
...
2019-10-20 44286078486.23
2019-10-21 44333677232.68
2019-10-22 43907962483.56
2019-10-23 44120217405.82
2019-10-24 44120217405.82
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_amount"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_volume():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data available from 20060429 onwards
:return: pandas.Series
2006-04-29 653.17
2006-05-02 653.17
2006-05-03 995.28
2006-05-04 1197.43
2006-05-05 1306.29
...
2019-10-17 11847.91
2019-10-18 11847.91
2019-10-21 11813.02
2019-10-22 11751.96
2019-10-23 11751.96
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总库存(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 1]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_volume"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总库存"]
temp_append_df.name = "silver_volume"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_change():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data available from 20060429 onwards
:return: pandas.Series
2006-04-29 0
2006-05-02 0.00
2006-05-03 342.11
2006-05-04 202.15
2006-05-05 108.86
...
2019-10-17 -58.16
2019-10-18 0.00
2019-10-21 -34.89
2019-10-22 -61.06
2019-10-23 0.00
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["增持/减持(吨)"]
temp_df.name = "silver_change"
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_change"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["增持/减持"]
temp_append_df.name = "silver_change"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_amount():
"""
    Holdings report of the world's largest silver ETF, iShares Silver Trust; data available from 20060429 onwards
:return: pandas.Series
2006-04-29 263651152
2006-05-02 263651152
2006-05-03 445408550
2006-05-04 555123947
2006-05-05 574713264
...
2019-10-17 Show All
2019-10-18 Show All
2019-10-21 Show All
2019-10-22 Show All
2019-10-23 Show All
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_amount"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总价值"]
temp_append_df.name = "silver_amount"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_opec_near_change():
"""
    OPEC report - changes; data available from 20170118 onwards
:return: pandas.Series
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 -0.87 3.56 -0.25 -0.87 0.95 4.26 0.20 3.13 -11.35
2017-02-13 -4.17 -2.32 -1.67 -1.00 5.02 -16.57 -14.12 6.47 10.18
2017-03-14 -0.02 -1.82 -0.44 -0.69 3.61 -6.20 -0.93 -1.11 5.80
2017-04-12 0.45 -1.87 -0.28 0.19 -2.87 -0.85 -0.95 -6.08 -2.98
2017-05-11 -0.75 9.71 -0.06 0.88 -3.47 -3.91 0.03 -6.16 5.08
2017-06-13 0.96 -5.42 0.22 -0.13 0.45 4.44 0.00 17.82 17.42
2017-07-12 -0.09 6.60 -0.21 -0.77 1.67 6.06 -0.02 12.70 9.67
2017-08-10 -0.10 -1.93 0.85 0.71 0.69 -3.31 -0.74 15.43 3.43
2017-09-12 0.41 0.83 -0.03 -3.23 -0.23 -2.31 0.01 -11.23 13.83
2017-10-11 -0.85 -0.29 -0.05 1.44 0.09 3.16 -0.17 5.39 5.08
2017-11-13 -3.84 6.98 0.71 0.18 -1.13 -13.10 -0.37 4.23 -5.44
2017-12-13 1.41 -10.87 -0.51 -0.47 -0.22 0.10 -0.53 0.61 9.58
2018-01-18 3.03 4.48 -0.72 -0.01 1.32 0.79 -0.25 -0.70 7.57
2018-04-12 -4.95 -8.17 0.26 -0.91 0.33 -1.31 0.23 -3.72 1.82
2018-05-14 1.77 -0.78 0.31 -0.93 1.00 -0.07 0.08 0.69 -0.83
2018-06-12 3.90 1.40 0.06 0.18 0.56 2.77 -0.57 -2.43 -5.35
2018-07-11 0.46 -8.83 -0.09 0.35 -2.27 7.15 2.73 -25.43 2.78
2018-08-13 1.38 1.17 0.42 -0.34 -5.63 2.41 7.85 -5.67 7.05
2018-09-12 -1.40 -0.80 0.40 18.80 -15.00 9.00 0.80 25.60 7.40
2018-10-11 -0.80 5.70 53.10 -0.10 -15.00 0.80 0.60 10.30 2.60
2018-11-13 -0.40 2.20 -0.30 0.30 -15.60 465.30 -3.30 6.00 -1.70
2018-12-12 -0.50 0.30 0.10 -1.10 -38.00 -2.30 4.50 -1.10 -3.00
2019-03-14 0.20 2.20 0.50 0.70 1.20 -7.00 -1.40 2.30 1.00
2019-04-10 -0.70 0.70 52.40 0.90 -2.80 -12.60 -0.10 19.60 1.10
2019-06-13 0.60 7.40 -0.10 2.30 -22.70 9.40 1.30 -0.30 -9.20
沙特 阿联酋 委内瑞拉 欧佩克产量
2017-01-18 -14.93 -0.63 -4.52 -22.09
2017-02-13 -49.62 -15.93 -3.05 -89.02
2017-03-14 -6.81 -3.69 -1.60 -13.95
2017-04-12 4.16 -3.27 -2.59 -15.27
2017-05-11 4.92 -6.23 -2.60 -1.82
2017-06-13 0.23 -1.80 -0.77 33.61
2017-07-12 5.13 -0.07 -1.36 39.35
2017-08-10 3.18 -0.67 -1.58 17.26
2017-09-12 -1.03 -2.02 -3.19 -7.91
2017-10-11 -0.07 -0.84 -5.19 8.85
2017-11-13 1.69 -0.60 -4.36 -15.09
2017-12-13 -4.54 -3.55 -4.16 -13.35
2018-01-18 -1.09 -0.70 -8.22 4.24
2018-04-12 -4.69 4.49 -5.53 -20.14
2018-05-14 4.65 0.61 -4.17 1.21
2018-06-12 8.55 -0.63 -4.25 3.54
2018-07-11 40.54 3.51 -4.75 17.34
2018-08-13 -5.28 6.92 -4.77 4.07
2018-09-12 3.80 1.20 -3.60 27.80
2018-10-11 10.80 3.00 -4.20 13.20
2018-11-13 12.70 14.20 -4.00 12.70
2018-12-12 37.70 7.10 -5.20 -1.10
2019-03-14 -8.60 -0.40 -14.20 -22.10
2019-04-10 -32.40 -0.90 -28.90 -53.40
2019-06-13 -7.60 0.30 -3.50 -23.60
"""
t = time.time()
big_df = pd.DataFrame()
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
                       headers=headers)  # list of report dates
all_date_list = res.json()["data"]
bar = tqdm(reversed(all_date_list[:-1]))
for item in bar:
bar.set_description(f"Please wait for a moment, now downing {item}'s data")
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
try:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-1, :]
except:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-1, :]
big_df[temp_df.name] = temp_df
big_df = big_df.T
big_df.columns.name = "日期"
big_df = big_df.astype(float)
return big_df
def _macro_cons_opec_month():
"""
    OPEC report - monthly; data available from 20170118 onwards
    The value at each index date refers to the previous month; because some countries
    have missing data, only countries with data are returned
:return: pandas.Series
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6
2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8
2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5
2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8
2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0
2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3
2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8
2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1
2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5
2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8
2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0
2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1
2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0
2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1
2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1
2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0
2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7
2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5
2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8
2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1
2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6
2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1
2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3
2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3
沙特 阿联酋 委内瑞拉 欧佩克产量
2017-01-18 1047.4 307.1 202.1 3308.5
2017-02-13 994.6 293.1 200.4 3213.9
2017-03-14 979.7 292.5 198.7 3195.8
2017-04-12 999.4 289.5 197.2 3192.8
2017-05-11 995.4 284.2 195.6 3173.2
2017-06-13 994.0 288.5 196.3 3213.9
2017-07-12 995.0 289.8 193.8 3261.1
2017-08-10 1006.7 290.5 193.2 3286.9
2017-09-12 1002.2 290.1 191.8 3275.5
2017-10-11 997.5 290.5 189.0 3274.8
2017-11-13 1000.0 291.1 186.3 3258.9
2017-12-13 999.6 288.3 183.4 3244.8
2018-01-18 991.8 287.8 174.5 3241.6
2018-04-12 993.4 286.4 148.8 3195.8
2018-05-14 995.9 287.2 143.6 3193.0
2018-06-12 998.7 286.5 139.2 3186.9
2018-07-11 1042.0 289.7 134.0 3232.7
2018-08-13 1038.7 295.9 127.8 3232.3
2018-09-12 1040.1 297.2 123.5 3256.5
2018-10-11 1051.2 300.4 119.7 3276.1
2018-11-13 1063.0 316.0 117.1 3290.0
2018-12-12 1101.6 324.6 113.7 3296.5
2019-03-14 1008.7 307.2 100.8 3054.9
2019-04-10 979.4 305.9 73.2 3002.2
2019-06-13 969.0 306.1 74.1 2987.6
"""
t = time.time()
res = requests.get(
JS_CONS_OPEC_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
big_df = pd.DataFrame()
for country in [item["datas"] for item in json_data["list"]][0].keys():
try:
value_list = [item["datas"][country] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["上个月"]
temp_df.name = country
big_df = big_df.append(temp_df)
except:
continue
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
                       headers=headers)  # list of report dates
all_date_list = res.json()["data"]
need_date_list = [item for item in all_date_list if
item.split("-")[0] + item.split("-")[1] + item.split("-")[2] not in date_list]
for item in reversed(need_date_list):
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df[['阿尔及利亚', '安哥拉', '厄瓜多尔', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-2, :]
big_df[item] = temp_df
return big_df.T
def macro_cons_opec_month():
"""
    OPEC report - monthly; data available from 20170118 onwards
    The value at each index date refers to the previous month; because some countries
    have missing data, only countries with data are returned
    20200312 fix: '厄瓜多尔' (Ecuador) has not updated its data for several months and is excluded here
https://datacenter.jin10.com/reportType/dc_opec_report
:return: pandas.Series
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6
2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8
2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5
2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8
2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0
2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3
2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8
2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1
2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5
2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8
2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0
2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1
2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0
2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1
2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1
2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0
2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7
2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5
2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8
2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1
2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6
2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1
2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3
2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3
沙特 阿联酋 委内瑞拉 欧佩克产量
2017-01-18 1047.4 307.1 202.1 3308.5
2017-02-13 994.6 293.1 200.4 3213.9
2017-03-14 979.7 292.5 198.7 3195.8
2017-04-12 999.4 289.5 197.2 3192.8
2017-05-11 995.4 284.2 195.6 3173.2
2017-06-13 994.0 288.5 196.3 3213.9
2017-07-12 995.0 289.8 193.8 3261.1
2017-08-10 1006.7 290.5 193.2 3286.9
2017-09-12 1002.2 290.1 191.8 3275.5
2017-10-11 997.5 290.5 189.0 3274.8
2017-11-13 1000.0 291.1 186.3 3258.9
2017-12-13 999.6 288.3 183.4 3244.8
2018-01-18 991.8 287.8 174.5 3241.6
2018-04-12 993.4 286.4 148.8 3195.8
2018-05-14 995.9 287.2 143.6 3193.0
2018-06-12 998.7 286.5 139.2 3186.9
2018-07-11 1042.0 289.7 134.0 3232.7
2018-08-13 1038.7 295.9 127.8 3232.3
2018-09-12 1040.1 297.2 123.5 3256.5
2018-10-11 1051.2 300.4 119.7 3276.1
2018-11-13 1063.0 316.0 117.1 3290.0
2018-12-12 1101.6 324.6 113.7 3296.5
2019-03-14 1008.7 307.2 100.8 3054.9
2019-04-10 979.4 305.9 73.2 3002.2
2019-06-13 969.0 306.1 74.1 2987.6
"""
t = time.time()
big_df = pd.DataFrame()
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
                       headers=headers)  # list of report dates
all_date_list = res.json()["data"]
bar = tqdm(reversed(all_date_list))
for item in bar:
bar.set_description(f"Please wait for a moment, now downing {item}'s data")
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
try:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-2, :]
except:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-1, :]
big_df[temp_df.name] = temp_df
big_df = big_df.T
big_df.columns.name = "日期"
big_df = big_df.astype(float)
return big_df
if __name__ == "__main__":
macro_cons_gold_volume_df = macro_cons_gold_volume()
print(macro_cons_gold_volume_df)
macro_cons_gold_change_df = macro_cons_gold_change()
print(macro_cons_gold_change_df)
macro_cons_gold_amount_df = macro_cons_gold_amount()
print(macro_cons_gold_amount_df)
print(pd.concat([macro_cons_gold_volume_df, macro_cons_gold_change_df, macro_cons_gold_amount_df], axis=1))
macro_cons_silver_volume_df = macro_cons_silver_volume()
print(macro_cons_silver_volume_df)
macro_cons_silver_change_df = macro_cons_silver_change()
print(macro_cons_silver_change_df)
macro_cons_silver_amount_df = macro_cons_silver_amount()
print(macro_cons_silver_amount_df)
print(pd.concat([macro_cons_silver_volume_df, macro_cons_silver_change_df, macro_cons_silver_amount_df], axis=1))
macro_cons_opec_near_change_df = macro_cons_opec_near_change()
print(macro_cons_opec_near_change_df)
macro_cons_opec_month_df = macro_cons_opec_month()
print(macro_cons_opec_month_df)
| 2.34375 | 2 |
test/testers/winforms/scrollbar/__init__.py | ABEMBARKA/monoUI | 1 | 6039 |
##############################################################################
# Written by: <NAME> <<EMAIL>>
# Date: 08/06/2008
# Description: Application wrapper for scrollbar.py
# Used by the scrollbar-*.py tests
##############################################################################
'Application wrapper for scrollbar'
from strongwind import *
from os.path import exists
from sys import path
def launchScrollBar(exe=None):
'Launch ScrollBar with accessibility enabled and return a scrollbar object. Log an error and return None if something goes wrong'
if exe is None:
# make sure we can find the sample application
harness_dir = path[0]
i = harness_dir.rfind("/")
j = harness_dir[:i].rfind("/")
uiaqa_path = harness_dir[:j]
if uiaqa_path is None:
raise IOError, "When launching an application you must provide the "\
"full path or set the\nUIAQA_HOME environment "\
"variable."
exe = '%s/samples/winforms/scrollbar.py' % uiaqa_path
if not os.path.exists(exe):
raise IOError, "%s does not exist" % exe
args = [exe]
(app, subproc) = cache.launchApplication(args=args, name='ipy', wait=config.LONG_DELAY)
scrollbar = ScrollBar(app, subproc)
cache.addApplication(scrollbar)
scrollbar.scrollBarFrame.app = scrollbar
return scrollbar
# class to represent the application
class ScrollBar(accessibles.Application):
#checkShowing=False
def __init__(self, accessible, subproc=None):
'Get a reference to the scrollBar window'
super(ScrollBar, self).__init__(accessible, subproc)
self.findFrame(re.compile('^ScrollBar control'), logName='Scroll Bar')
| 2.34375 | 2 |
save_tweets.py | iglesiasmanu/data_analysis | 0 | 6040 | import json
from os import path
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
from sqlalchemy.orm.exc import NoResultFound
from database import session, Tweet, Hashtag, User
consumer_key = "0qFf4T2xPWVIycLmAwk3rDQ55"
consumer_secret = "<KEY>"
access_token = "<KEY>"
acces_token_secret = "<KEY>"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, acces_token_secret)
def save_tweets():
directory = _get_dir_absolute_path()
filepath = path.join(directory, "tweets.json")
listener = DatabaseListener(number_tweets_to_save = 1000, filepath=filepath)
stream = Stream(auth, listener)
languages = ("en",)
try:
stream.sample(languages = languages)
except KeyboardInterrupt:
listener.file.close()
class DatabaseListener(StreamListener):
def __init__(self, number_tweets_to_save, filepath = None):
self._final_count = number_tweets_to_save
self._current_count = 0
if filepath is None:
filepath = "tweets.txt"
self.file = open(filepath,"w")
    # Slightly dangerous due to circular references.
def __del__(self):
self.file.close()
def on_data(self, raw_data):
data = json.loads(raw_data)
json.dump(raw_data, self.file)
self.file.write("\n")
if "in_reply_to_status_id" in data:
return self.on_status(data)
def on_status(self, data):
#this method is define in this file
save_to_database(data)
self._current_count += 1
print("status count: {}".format(self._current_count))
if self._current_count >= self._final_count:
return False
def create_user_helper(user_data):
#alias to shorten calls
u = user_data
    user = User(uid = u["id_str"],
name = u["name"],
screen_name = u["screen_name"],
created_at = u["created_at"],
description = u.get("description"),
followers_count = u["followers_count"],
statuses_count = u["statuses_count"],
favourites_count = u["favourites_count"],
listed_count = u["listed_count"],
geo_enabled = u["geo_enabled"],
lang = u.get("lang"))
return user
def create_tweet_helper(tweet_data, user):
#alias for shorten calls
t = tweet_data
retweet = True if t["text"][:3] == "RT " else False
coordinates = json.dumps(t["coordinates"])
tweet = Tweet(tid=t["id_str"],
tweet=t["text"],
user=user,
coordinates=coordinates,
created_at = t["created_at"],
favorite_count = t["favorite_count"],
in_reply_to_screen_name = t["in_reply_to_screen_name"],
in_reply_to_status_id = t["in_reply_to_status_id"],
in_reply_to_user_id = t["in_reply_to_user_id"],
lang = t.get("lang"),
quoted_status_id = t.get("quoted_status_id"),
retweet_count = t["retweet_count"],
source = t["source"],
is_retweet = retweet)
return tweet
def save_to_database(data):
try:
user = session.query(User).filter_by(id=str(data["user"]["id"])).one()
except NoResultFound:
user = create_user_helper(data["user"])
session.add(user)
hashtag_results = []
hashtags = data["entities"]["hashtags"]
for hashtag in hashtags:
hashtag = hashtag["text"].lower()
try:
            hashtag_obj = session.query(Hashtag).filter_by(text = hashtag).one()
        except NoResultFound:
hashtag_obj = Hashtag(text = hashtag)
session.add(hashtag_obj)
hashtag_results.append(hashtag_obj)
tweet = create_tweet_helper(data, user)
for hashtag in hashtag_results:
tweet.hashtags.append(hashtag)
session.add(tweet)
session.commit()
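
# Minimal entry point (added sketch, not in the original module): running this file
# directly starts the stream with the credentials above and stores tweets until the
# listener reaches its target count or the user interrupts with Ctrl-C.
if __name__ == "__main__":
    save_tweets()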
| 2.84375 | 3 |
app/views/main.py | chrisjws-harness/flaskSaaS | 0 | 6041 | from flask import render_template, jsonify
from app import app
import random
@app.route('/')
@app.route('/index')
def index():
# Feature flags init goes here!
#
# noinspection PyDictCreation
flags = {
"welcome_text": "welcome to my python FF tutorial!"
}
# Flag goes here!
#
flags["alternate_homescreen"] = False
return render_template(
'index.html',
**flags,
title='Home'
)
@app.route('/map')
def map():
return render_template('map.html', title='Map')
@app.route('/map/refresh', methods=['POST'])
def map_refresh():
points = [(random.uniform(48.8434100, 48.8634100),
random.uniform(2.3388000, 2.3588000))
for _ in range(random.randint(2, 9))]
return jsonify({'points': points})
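
# Illustrative response shape returned to the client:
#   {"points": [[48.8512, 2.3421], [48.8577, 2.3465]]}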
@app.route('/contact')
def contact():
return render_template('contact.html', title='Contact')
| 2.734375 | 3 |
base_sample/numpy_mat.py | keepangry/ai_algorithm | 2 | 6042 | # encoding: utf-8
'''
@author: yangsen
@license:
@contact:
@software:
@file: numpy_mat.py
@time: 18-8-25 下午9:56
@desc:
'''
import numpy as np
a = np.arange(9).reshape(3,3)
# rows
a[1]
a[[1,2]]
a[np.array([1,2])]
# columns
a[:,1]
a[:,[1,2]]
a[:,np.array([1,2])] | 2.46875 | 2 |
ai_traineree/agents/rainbow.py | laszukdawid/ai-traineree | 22 | 6043 | import copy
from typing import Callable, Dict, List, Optional
import torch
import torch.nn as nn
import torch.optim as optim
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import soft_update
from ai_traineree.buffers import NStepBuffer, PERBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.heads import RainbowNet
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
class RainbowAgent(AgentBase):
"""Rainbow agent as described in [1].
    Rainbow is a DQN agent with several improvements that were suggested before 2017.
    As the authors note, it is not an exhaustive set of improvements, but all the changes touch
    relatively separate areas, so combining them makes sense. These improvements are:
* Priority Experience Replay
* Multi-step
* Double Q net
* Dueling nets
* NoisyNet
* CategoricalNet for Q estimate
Consider this class as a particular version of the DQN agent.
[1] "Rainbow: Combining Improvements in Deep Reinforcement Learning" by Hessel et al. (DeepMind team)
https://arxiv.org/abs/1710.02298
"""
model = "Rainbow"
def __init__(
self,
obs_space: DataSpace,
action_space: DataSpace,
state_transform: Optional[Callable]=None,
reward_transform: Optional[Callable]=None,
**kwargs
):
"""
        A wrapper over the DQN agent, thus the majority of the logic is in the DQNAgent.
Special treatment is required because the Rainbow agent uses categorical nets
which operate on probability distributions. Each action is taken as the estimate
from such distributions.
Parameters:
obs_space (DataSpace): Dataspace describing the input.
action_space (DataSpace): Dataspace describing the output.
state_transform (optional func):
reward_transform (optional func):
Keyword parameters:
pre_network_fn (function that takes input_shape and returns network):
Used to preprocess state before it is used in the value- and advantage-function in the dueling nets.
hidden_layers (tuple of ints): Shape of the hidden layers in fully connected network. Default: (100, 100).
lr (default: 1e-3): Learning rate value.
gamma (float): Discount factor. Default: 0.99.
tau (float): Soft-copy factor. Default: 0.002.
update_freq (int): Number of steps between each learning step. Default 1.
batch_size (int): Number of samples to use at each learning step. Default: 80.
buffer_size (int): Number of most recent samples to keep in memory for learning. Default: 1e5.
warm_up (int): Number of samples to observe before starting any learning step. Default: 0.
number_updates (int): How many times to use learning step in the learning phase. Default: 1.
max_grad_norm (float): Maximum norm of the gradient used in learning. Default: 10.
using_double_q (bool): Whether to use Double Q Learning network. Default: True.
n_steps (int): Number of lookahead steps when estimating reward. See :ref:`NStepBuffer`. Default: 3.
v_min (float): Lower bound for distributional value V. Default: -10.
v_max (float): Upper bound for distributional value V. Default: 10.
num_atoms (int): Number of atoms (discrete states) in the value V distribution. Default: 21.
"""
super().__init__(**kwargs)
self.device = self._register_param(kwargs, "device", DEVICE, update=True)
self.obs_space = obs_space
self.action_space = action_space
self._config['obs_space'] = self.obs_space
self._config['action_space'] = self.action_space
self.action_size = action_space.to_feature()
self.lr = float(self._register_param(kwargs, 'lr', 3e-4))
self.gamma = float(self._register_param(kwargs, 'gamma', 0.99))
self.tau = float(self._register_param(kwargs, 'tau', 0.002))
self.update_freq = int(self._register_param(kwargs, 'update_freq', 1))
self.batch_size = int(self._register_param(kwargs, 'batch_size', 80, update=True))
self.buffer_size = int(self._register_param(kwargs, 'buffer_size', int(1e5), update=True))
self.warm_up = int(self._register_param(kwargs, 'warm_up', 0))
self.number_updates = int(self._register_param(kwargs, 'number_updates', 1))
self.max_grad_norm = float(self._register_param(kwargs, 'max_grad_norm', 10))
self.iteration: int = 0
self.using_double_q = bool(self._register_param(kwargs, "using_double_q", True))
self.state_transform = state_transform if state_transform is not None else lambda x: x
self.reward_transform = reward_transform if reward_transform is not None else lambda x: x
v_min = float(self._register_param(kwargs, "v_min", -10))
v_max = float(self._register_param(kwargs, "v_max", 10))
self.num_atoms = int(self._register_param(kwargs, "num_atoms", 21, drop=True))
self.z_atoms = torch.linspace(v_min, v_max, self.num_atoms, device=self.device)
self.z_delta = self.z_atoms[1] - self.z_atoms[0]
self.buffer = PERBuffer(**kwargs)
self.__batch_indices = torch.arange(self.batch_size, device=self.device)
self.n_steps = int(self._register_param(kwargs, "n_steps", 3))
self.n_buffer = NStepBuffer(n_steps=self.n_steps, gamma=self.gamma)
# Note that in case a pre_network is provided, e.g. a shared net that extracts pixels values,
# it should be explicitly passed in kwargs
kwargs["hidden_layers"] = to_numbers_seq(self._register_param(kwargs, "hidden_layers", (100, 100)))
self.net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.target_net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
self.dist_probs = None
self._loss = float('nan')
@property
def loss(self):
return {'loss': self._loss}
@loss.setter
def loss(self, value):
if isinstance(value, dict):
value = value['loss']
self._loss = value
def step(self, obs: ObsType, action: ActionType, reward: RewardType, next_obs: ObsType, done: DoneType) -> None:
"""Letting the agent to take a step.
On some steps the agent will initiate learning step. This is dependent on
the `update_freq` value.
Parameters:
obs (ObservationType): Observation.
action (int): Discrete action associated with observation.
reward (float): Reward obtained for taking action at state.
next_obs (ObservationType): Observation in a state where the action took.
done: (bool) Whether in terminal (end of episode) state.
"""
assert isinstance(action, int), "Rainbow expects discrete action (int)"
self.iteration += 1
t_obs = to_tensor(self.state_transform(obs)).float().to("cpu")
t_next_obs = to_tensor(self.state_transform(next_obs)).float().to("cpu")
reward = self.reward_transform(reward)
# Delay adding to buffer to account for n_steps (particularly the reward)
self.n_buffer.add(
state=t_obs.numpy(), action=[int(action)], reward=[reward], done=[done], next_state=t_next_obs.numpy()
)
if not self.n_buffer.available:
return
self.buffer.add(**self.n_buffer.get().get_dict())
if self.iteration < self.warm_up:
return
if len(self.buffer) >= self.batch_size and (self.iteration % self.update_freq) == 0:
for _ in range(self.number_updates):
self.learn(self.buffer.sample())
# Update networks only once - sync local & target
soft_update(self.target_net, self.net, self.tau)
def act(self, obs: ObsType, eps: float = 0.) -> int:
"""
Returns actions for given state as per current policy.
Parameters:
            obs: Currently available observation from the environment.
            eps: Epsilon value used in the epsilon-greedy policy.
"""
# Epsilon-greedy action selection
if self._rng.random() < eps:
# TODO: Update with action_space.sample() once implemented
assert len(self.action_space.shape) == 1, "Only 1D is supported right now"
return self._rng.randint(self.action_space.low, self.action_space.high)
t_obs = to_tensor(self.state_transform(obs)).float().unsqueeze(0).to(self.device)
self.dist_probs = self.net.act(t_obs)
q_values = (self.dist_probs * self.z_atoms).sum(-1)
return int(q_values.argmax(-1)) # Action maximizes state-action value Q(s, a)
def learn(self, experiences: Dict[str, List]) -> None:
"""
Parameters:
experiences: Contains all experiences for the agent. Typically sampled from the memory buffer.
Five keys are expected, i.e. `state`, `action`, `reward`, `next_state`, `done`.
                Each key contains an array, and all arrays have to have the same length.
"""
rewards = to_tensor(experiences['reward']).float().to(self.device)
dones = to_tensor(experiences['done']).type(torch.int).to(self.device)
states = to_tensor(experiences['state']).float().to(self.device)
next_states = to_tensor(experiences['next_state']).float().to(self.device)
actions = to_tensor(experiences['action']).type(torch.long).to(self.device)
assert rewards.shape == dones.shape == (self.batch_size, 1)
assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape
assert actions.shape == (self.batch_size, 1) # Discrete domain
with torch.no_grad():
prob_next = self.target_net.act(next_states)
q_next = (prob_next * self.z_atoms).sum(-1) * self.z_delta
if self.using_double_q:
duel_prob_next = self.net.act(next_states)
a_next = torch.argmax((duel_prob_next * self.z_atoms).sum(-1), dim=-1)
else:
a_next = torch.argmax(q_next, dim=-1)
prob_next = prob_next[self.__batch_indices, a_next, :]
m = self.net.dist_projection(rewards, 1 - dones, self.gamma ** self.n_steps, prob_next)
assert m.shape == (self.batch_size, self.num_atoms)
log_prob = self.net(states, log_prob=True)
assert log_prob.shape == (self.batch_size,) + self.action_size + (self.num_atoms,)
log_prob = log_prob[self.__batch_indices, actions.squeeze(), :]
assert log_prob.shape == m.shape == (self.batch_size, self.num_atoms)
# Cross-entropy loss error and the loss is batch mean
error = -torch.sum(m * log_prob, 1)
assert error.shape == (self.batch_size,)
loss = error.mean()
assert loss >= 0
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)
self.optimizer.step()
self._loss = float(loss.item())
if hasattr(self.buffer, 'priority_update'):
assert (~torch.isnan(error)).any()
self.buffer.priority_update(experiences['index'], error.detach().cpu().numpy())
# Update networks - sync local & target
soft_update(self.target_net, self.net, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Returns agent's state dictionary.
Returns:
            State dictionary for the internal networks.
"""
return {"net": self.net.state_dict(), "target_net": self.target_net.state_dict()}
def log_metrics(self, data_logger: DataLogger, step: int, full_log: bool=False):
data_logger.log_value("loss/agent", self._loss, step)
if full_log and self.dist_probs is not None:
assert len(self.action_space.shape) == 1, "Only 1D actions currently supported"
action_size = self.action_size[0]
for action_idx in range(action_size):
dist = self.dist_probs[0, action_idx]
data_logger.log_value(f'dist/expected_{action_idx}', (dist*self.z_atoms).sum().item(), step)
data_logger.add_histogram(
f'dist/Q_{action_idx}', min=self.z_atoms[0], max=self.z_atoms[-1], num=len(self.z_atoms),
sum=dist.sum(), sum_squares=dist.pow(2).sum(), bucket_limits=self.z_atoms+self.z_delta,
bucket_counts=dist, global_step=step
)
# This method, `log_metrics`, isn't executed on every iteration but just in case we delay plotting weights.
# It simply might be quite costly. Thread wisely.
if full_log:
for idx, layer in enumerate(self.net.value_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"value_net/layer_weights_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"value_net/layer_bias_{idx}", layer.bias.cpu(), step)
for idx, layer in enumerate(self.net.advantage_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"advantage_net/layer_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"advantage_net/layer_bias_{idx}", layer.bias.cpu(), step)
def get_state(self) -> AgentState:
"""Provides agent's internal state."""
return AgentState(
model=self.model,
obs_space=self.obs_space,
action_space=self.action_space,
config=self._config,
buffer=copy.deepcopy(self.buffer.get_state()),
network=copy.deepcopy(self.get_network_state()),
)
def get_network_state(self) -> NetworkState:
return NetworkState(net=dict(net=self.net.state_dict(), target_net=self.target_net.state_dict()))
@staticmethod
def from_state(state: AgentState) -> AgentBase:
config = copy.copy(state.config)
config.update({'obs_space': state.obs_space, 'action_space': state.action_space})
agent = RainbowAgent(**config)
if state.network is not None:
agent.set_network(state.network)
if state.buffer is not None:
agent.set_buffer(state.buffer)
return agent
def set_network(self, network_state: NetworkState) -> None:
self.net.load_state_dict(network_state.net['net'])
self.target_net.load_state_dict(network_state.net['target_net'])
def set_buffer(self, buffer_state: BufferState) -> None:
self.buffer = BufferFactory.from_state(buffer_state)
def save_state(self, path: str) -> None:
"""Saves agent's state into a file.
Parameters:
path: String path where to write the state.
"""
agent_state = self.get_state()
torch.save(agent_state, path)
def load_state(self, path: str) -> None:
"""Loads state from a file under provided path.
Parameters:
path: String path indicating where the state is stored.
"""
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.net.load_state_dict(agent_state['net'])
self.target_net.load_state_dict(agent_state['target_net'])
def save_buffer(self, path: str) -> None:
"""Saves data from the buffer into a file under provided path.
Parameters:
path: String path where to write the buffer.
"""
import json
dump = self.buffer.dump_buffer(serialize=True)
with open(path, 'w') as f:
json.dump(dump, f)
def load_buffer(self, path: str) -> None:
"""Loads data into the buffer from provided file path.
Parameters:
path: String path indicating where the buffer is stored.
"""
import json
with open(path, 'r') as f:
buffer_dump = json.load(f)
self.buffer.load_buffer(buffer_dump)
def __eq__(self, o: object) -> bool:
return super().__eq__(o) \
and isinstance(o, type(self)) \
and self._config == o._config \
and self.buffer == o.buffer \
and self.get_network_state() == o.get_network_state()
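

# Minimal usage sketch (illustrative only; the DataSpace constructor arguments below are
# assumptions, not taken from this module):
#
#   obs_space = DataSpace(dtype="float32", shape=(4,), low=-1, high=1)
#   action_space = DataSpace(dtype="int32", shape=(1,), low=0, high=1)
#   agent = RainbowAgent(obs_space, action_space, n_steps=3, num_atoms=21)
#   action = agent.act(obs, eps=0.1)
#   agent.step(obs, action, reward, next_obs, done)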
| 2.28125 | 2 |
nvvm/core/nvvm.py | uchytilc/PyCu | 0 | 6044 | <filename>nvvm/core/nvvm.py
from pycu.nvvm import (get_libdevice, ir_version, version, add_module_to_program, compile_program,
create_program, destroy_program, get_compiled_result, get_compiled_result_size,
get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program)
import os
import sys
from ctypes import c_char_p
import weakref
class NVVMPtr:
# #key: arch associated with libdevice (None indicates libdevice is not arch specific)
# #value: libdevice source
# libdevice = {}
# #key:given arch
# #value: closest available arch found
# searched_arch = {}
def __init__(self, handle, arch = 20):
self.get_libdevice(arch)
self.handle = handle
def get_libdevice(self, arch = 20):
return get_libdevice(arch)
# libdevice = self.libdevice.get(arch, None)
# if libdevice is None:
# #note: use False instead of None in searched_arch.get when indicating failure to prevent getting None key from libdevice (libdevice with no "compute_" is stored under None key)
# libdevice = self.libdevice.get(self.searched_arch.get(arch, False), None)
# if libdevice is None:
# found_arch, libdevice = next(iter(get_libdevice(arch).items()))
# self.searched_arch[arch] = found_arch
# self.libdevice[arch] = libdevice
# return libdevice
def get_version(self):
return version()
def get_ir_version(self):
return ir_version()
def add_module(self, buff, name = "<unnamed>"):
if isinstance(buff, str):
buff = buff.encode('utf8')
if isinstance(name, str):
name = name.encode('utf8')
size = len(buff)
add_module_to_program(self.handle, buff, size, name)
def compile(self, options = {}):
"""
https://docs.nvidia.com/cuda/libnvvm-api/group__compilation.html#group__compilation_1g76ac1e23f5d0e2240e78be0e63450346
Valid compiler options are
-g (enable generation of debugging information, valid only with -opt=0)
-generate-line-info (generate line number information)
-opt=
0 (disable optimizations)
3 (default, enable optimizations)
-arch=
compute_35
compute_37
compute_50
compute_52 (default)
compute_53
compute_60
compute_61
compute_62
compute_70
compute_72
compute_75
compute_80
-ftz=
0 (default, preserve denormal values, when performing single-precision floating-point operations)
1 (flush denormal values to zero, when performing single-precision floating-point operations)
-prec-sqrt=
0 (use a faster approximation for single-precision floating-point square root)
1 (default, use IEEE round-to-nearest mode for single-precision floating-point square root)
-prec-div=
0 (use a faster approximation for single-precision floating-point division and reciprocals)
1 (default, use IEEE round-to-nearest mode for single-precision floating-point division and reciprocals)
-fma=
0 (disable FMA contraction)
1 (default, enable FMA contraction)
-g (enable generation of debugging information, valid only with -opt=0)
-generate-line-info (generate line number information)
"""
opt = options.get("opt", 3)
arch = options.get("arch", 52)
ftz = options.get("ftz", 0)
prec_sqrt = options.get("prec_sqrt", 1)
prec_div = options.get("prec_div", 1)
fma = options.get("fma", 0)
opts = [f"-opt={opt}",
f"-arch=compute_{arch}",
f"-ftz={ftz}",
f"-prec-sqrt={prec_sqrt}",
f"-prec-div={prec_div}",
f"-fma={fma}",]
if options.get("g", False) and opt == 0:
if opt == 0:
opts.append("-g")
else:
#raise warning (g is only valid when -opt=0)
pass
if options.get("generate-line-info", True):
opts.append("-generate-line-info")
options = (c_char_p * len(opts))(*[c_char_p(opt.encode('utf8')) for opt in opts])
compile_program(self.handle, options)
ptx = get_compiled_result(self.handle)
#TO DO
#Apply Numba's debug patch to ptx
return ptx
def verify_program(self, options = {}):
pass
# verify_program(self.handle, )
class NVVM(NVVMPtr):
def __init__(self, arch = 20):
# self.handle = handle = create_program()
handle = create_program()
weakref.finalize(self, destroy_program, handle)
super().__init__(handle, arch)
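

# Minimal usage sketch (illustrative; `llvm_ir_source` stands for an NVVM IR string and
# is not defined in this module):
#
#   nvvm = NVVM(arch=52)
#   nvvm.add_module(llvm_ir_source, name="my_module")
#   ptx = nvvm.compile({"arch": 52, "opt": 3, "fma": 1})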
| 2.109375 | 2 |
fieldservice/fieldservice/doctype/fieldservice_settings/test_fieldservice_settings.py | itsdaveit/fieldservice | 0 | 6045 | <gh_stars>0
# Copyright (c) 2022, itsdve GmbH and Contributors
# See license.txt
# import frappe
import unittest
class TestFieldserviceSettings(unittest.TestCase):
pass
| 1.460938 | 1 |
Codes/Data Processing.py | BrickerP/Investment- | 0 | 6046 | <reponame>BrickerP/Investment-
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 14:51:01 2021
@author: 75638
"""
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 10000)
def process_data(path1,path2):
'''
1.path1: file path of different factor
2.path2:file path of SP500members
3.remove anomalies
4.normalized data
5.fill NaN with 0
'''
#read factor.xlsx
factor=pd.read_excel(path1,index_col=0)
    # remove anomalies that are greater than median+5*std or less than median-5*std
for date in factor:
median=factor[date].quantile(0.5)
std=factor[date].std()
        lower = median - 5 * std
        upper = median + 5 * std
        factor[date] = factor[date].clip(lower, upper)
#normalize data
for date in factor:
mean=factor[date].mean()
std=factor[date].std()
factor[date]=(factor[date]-mean)/std
# fill NAN
for date in factor:
median=factor[date].quantile(0.5)
        factor[date] = factor[date].fillna(median)  # fill each column's NaN with that column's median
#read SP500 member datas
member=pd.read_excel(path2,index_col=0)
#merge industry data
factor=pd.merge(member,factor,left_index=True,right_index=True)
# save processed data
factor.to_csv('C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\data\\volatility.csv')
return factor
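
# Illustration of the clipping rule used above (a sketch, not part of the pipeline):
#   s = pd.Series([1.0, 2.0, 3.0, 100.0])
#   bound = 5 * s.std()
#   s.clip(s.quantile(0.5) - bound, s.quantile(0.5) + bound)  # caps the outlier at median + 5*std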
def remove_dates(data):
columns = []
for i in data:
if '20' in i:
columns.append(i[:7])
else:
columns.append(i)
data.columns = columns
return data
def Seasonal_data_fill(path):
data = pd.read_csv('{}'.format(path))
order = 2
for j in data:
if '20' in j:
year = j.split('/')[2]
month = j.split('/')[0]
            month = int(month)
            time_1 = year + '-' + str(month + 1)
            time_2 = year + '-' + str(month + 2)
data.insert(order+1, '{}'.format(time_1), np.nan)
data.insert(order+2, '{}'.format(time_2), np.nan)
order += 3
temp = data.iloc[:,:2]
data = data.iloc[:,2:]
data = data.ffill(axis = 1)
data = pd.concat([temp, data], axis = 1)
data.columns = remove_dates(pd.read_csv('PE.csv')).columns
data = data.set_index(data.columns[0])
return data.to_csv('New {}'.format(path))
if __name__ == '__main__':
path1='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\original_data\\volatility.xlsx'
path2='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\SP500\\SP500members.xlsx'
data=process_data(path1,path2) | 2.75 | 3 |
tests/test_env.py | Majanao/pytorch-blender | 381 | 6047 | import pytest
from pathlib import Path
from blendtorch import btt
BLENDDIR = Path(__file__).parent/'blender'
class MyEnv(btt.env.OpenAIRemoteEnv):
def __init__(self, background=True, **kwargs):
super().__init__(version='1.0.0')
self.launch(scene=BLENDDIR/'env.blend', script=BLENDDIR /
'env.blend.py', background=background, **kwargs)
# For Blender 2.9 if we pass scene='', the tests below fail since
        # _env_post_step() is not called. It's currently unclear why this happens.
def _run_remote_env(background):
env = MyEnv(background=background)
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2 # 1 is already set by reset()
obs, reward, done, info = env.step(0.6)
assert obs == pytest.approx(0.6)
assert reward == 1.
assert not done
assert info['count'] == 3
for _ in range(8):
obs, reward, done, info = env.step(0.6)
assert done
obs = env.reset()
assert obs == 0.
obs, reward, done, info = env.step(0.1)
assert obs == pytest.approx(0.1)
assert reward == 0.
assert not done
assert info['count'] == 2
env.close()
@pytest.mark.background
def test_remote_env():
_run_remote_env(background=True)
def test_remote_env_ui():
_run_remote_env(background=False)
| 1.9375 | 2 |
sitetree/__init__.py | sitkatech/django-sitetree | 3 | 6048 | VERSION = (0, 9, 5)
| 1.195313 | 1 |
deepvariant/runtime_by_region_vis.py | tahashmi/deepvariant | 4 | 6049 | <filename>deepvariant/runtime_by_region_vis.py
# Copyright 2020 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
r"""Create a visual report of make_examples runtime by region.
Use this script to visualize the runtime-by-region data generated by running
make_examples with --runtime_by_region.
"""
from typing import Dict, Sequence, List, Tuple, Text, Any, Union
from absl import app
from absl import flags
import altair as alt
import pandas as pd
import tensorflow as tf
from third_party.nucleus.io import sharded_file_utils
# Altair uses a lot of method chaining, such as
# chart.mark_bar().encode(...).properties(...), so using backslash
# continuation to break this into separate lines makes the code more readable.
# pylint: disable=g-backslash-continuation
VEGA_URL = 'https://storage.googleapis.com/deepvariant/lib/vega'
FLAGS = flags.FLAGS
flags.DEFINE_string(
'input', None, 'TSV file that was produced when running make_examples '
'with --runtime_by_region. Can be sharded, e.g. /path/[email protected].')
flags.DEFINE_string(
'title', None, 'Title will be shown at the top of the report and will '
'be used as a prefix for downloaded image files.')
flags.DEFINE_string('output', 'runtime_by_region_report.html',
'Path for the output report, which will be an html file.')
RUNTIME_COLUMNS = [
'get reads', 'find candidates', 'make pileup images', 'write outputs'
]
COUNT_COLUMNS = ['num reads', 'num candidates', 'num examples']
CSS_STYLES = """
<style>
body {
font-family: sans-serif;
}
.chart-container {
padding: 30px;
}
</style>
"""
def read_sharded_runtime_tsvs(path_string: str) -> pd.DataFrame:
"""Imports data from a single or sharded path into a pandas dataframe.
Args:
path_string: The path to the input file, which may be sharded.
Returns:
A dataframe matching the TSV file(s) but with added Task column.
"""
if sharded_file_utils.is_sharded_file_spec(path_string):
paths = sharded_file_utils.generate_sharded_filenames(path_string)
else:
paths = [path_string]
list_of_dataframes = []
for i, path in enumerate(paths):
if path.startswith('gs://'):
# Once pandas is updated to 0.24+, pd.read_csv will work for gs://
# without this workaround.
with tf.io.gfile.GFile(path) as f:
d = pd.read_csv(f, sep='\t')
else:
d = pd.read_csv(path, sep='\t')
d['Task'] = i
list_of_dataframes.append(d)
return pd.concat(list_of_dataframes, axis=0, ignore_index=True)
def format_runtime_string(raw_seconds: float) -> str:
"""Creates a nice format string from a potentially large number of seconds.
Args:
raw_seconds: A number of seconds.
Returns:
The seconds divided into hours, minutes, and remaining seconds, formatted
nicely. For example, 2h3m5.012s.
"""
minutes, seconds = divmod(raw_seconds, 60)
hours, minutes = divmod(minutes, 60)
seconds = round(seconds, 3)
output = ''
if hours > 0:
output += f'{int(hours)}h'
if minutes > 0:
output += f'{int(minutes)}m'
if seconds > 0 or not output:
output += f'{seconds}s'
return output
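
# For example, format_runtime_string(7385.012) returns '2h3m5.012s'.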
def calculate_totals(df: pd.DataFrame) -> pd.DataFrame:
"""Calculates total runtime, formats it nicely, and sorts by it.
Args:
df: A dataframe of runtime profiling numbers.
Returns:
The same dataframe with some additional summary columns.
"""
# 'total runtime' is a simple sum of the runtime columns.
df['total runtime'] = df[RUNTIME_COLUMNS].sum(axis=1)
# Create a formatted runtime string for tooltips.
df['Runtime'] = df['total runtime'].apply(format_runtime_string)
# Sort by descending total region runtime.
df.sort_values(by='total runtime', inplace=True, ascending=False)
return df
def summarize_by_task(df: pd.DataFrame) -> pd.DataFrame:
"""Groups regions to get the total runtime for each task.
Args:
df: A dataframe of runtime profiling numbers.
Returns:
The dataframe grouped by task.
"""
by_task = df.groupby(by=['Task']).sum()
return by_task.reset_index()
def stage_histogram(d: pd.DataFrame, title: str = '') -> alt.Chart:
"""Plots a histogram of runtimes stacked by stage.
Args:
d: A dataframe of runtimes, either by region or by task.
title: A title for the plot.
Returns:
An altair chart.
"""
columns_used = RUNTIME_COLUMNS
d = d[columns_used]
return alt.Chart(d).transform_fold(
RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \
.mark_bar(opacity=0.3) \
.encode(
x=alt.X('runtime_by_stage:Q', bin=alt.Bin(maxbins=100),
title='Runtime (seconds)'),
y=alt.Y('count()', title='Count of regions', stack=None),
color=alt.Color('Stage:N', sort=None)
).properties(title=title)
def correlation_scatter_charts(d: pd.DataFrame, title: str = '') -> alt.Chart:
"""Produces a grid of scatter plots of runtimes of stages versus covariates.
Args:
d: A pandas dataframe of runtime by regions.
title: A title for the plot.
Returns:
An altair chart
"""
columns_used = ['region', 'total runtime'] + RUNTIME_COLUMNS + COUNT_COLUMNS
d = d[columns_used]
return alt.Chart(d).mark_circle(opacity=0.1).encode(
x=alt.X(alt.repeat('column'), type='quantitative',
axis=alt.Axis(labelExpr="datum.value + 's'")),
y=alt.Y(alt.repeat('row'), type='quantitative'),
tooltip='region'
).properties(width=100, height=100) \
.repeat(
column=['total runtime'] + RUNTIME_COLUMNS,
row=COUNT_COLUMNS,
).properties(title=title)
def totals_by_stage(d: pd.DataFrame) -> alt.Chart:
"""Plots total runtimes for each stage.
Args:
d: A dataframe of runtimes.
Returns:
An altair chart.
"""
stage_totals_series = d.sum()[RUNTIME_COLUMNS]
stage_totals = pd.DataFrame(
stage_totals_series, columns=['Runtime (seconds)'])
stage_totals.reset_index(inplace=True)
stage_totals = stage_totals.rename(columns={'index': 'Stage'})
stage_totals['Runtime'] = stage_totals['Runtime (seconds)'].apply(
format_runtime_string)
return alt.Chart(stage_totals).mark_bar().encode(
x='Runtime (seconds)',
y=alt.Y('Stage', sort=None),
tooltip=['Runtime'],
fill=alt.Fill('Stage',
sort=None)).properties(title='Overall runtime by stage')
def pareto_by_task_tooltip(row: pd.Series) -> str:
"""For one row of a dataframe, computes a tooltip description.
Args:
row: A Pandas Series, one row of a dataframe containing some specific
cumulative sum columns.
Returns:
A string to show as the tooltip for a pareto curve.
"""
return (f"{row['task cumsum order'] * 100:.2f}% of regions "
f"account for {row['task cumsum fraction'] * 100:.2f}% of "
f"the runtime in task {row['Task']}")
def calculate_pareto_metrics(df_subset: pd.DataFrame) -> pd.DataFrame:
"""Calculates cumulative sums for a subset of a dataframe.
Args:
df_subset: A dataframe subset of one task.
Returns:
The same dataframe subset with some additional columns.
"""
# These are the same for all regions in the same task, for the scatter plot:
df_subset['task total runtime'] = df_subset['total runtime'].sum()
df_subset['Runtime for task'] = df_subset['task total runtime'].apply(
format_runtime_string)
df_subset['task num examples'] = df_subset['num examples'].sum()
# These are cumulative sums for the pareto curves:
df_subset['task cumsum fraction'] = df_subset['total runtime'].cumsum(
) / df_subset['total runtime'].sum()
n = len(df_subset)
df_subset['task cumsum order'] = list(map(lambda x: x / n, range(0, n)))
df_subset['tooltip'] = df_subset.apply(pareto_by_task_tooltip, axis=1)
return df_subset
def pareto_and_runtimes_by_task(df: pd.DataFrame) -> alt.Chart:
"""Creates an interactive Pareto curve and scatter plot of task runtimes.
Tracing each curve shows to what extent a small proportion of long-running
regions contribute disproportionately to the overall runtime. That is,
"The longest-running X% of regions account for Y% of the total runtime."
There is a curve for each task.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
grouped = df.groupby(df['Task'], sort=False)
df = grouped.apply(calculate_pareto_metrics)
# Sample along the Pareto curve, ensuring the longest regions are shown.
if len(df) > 5000:
x = 1000
df = pd.concat([df.nlargest(x, 'total runtime'), df.sample(5000 - x)])
# Limit columns to greatly reduce the size of the html report.
columns_used = [
'task cumsum order', 'task cumsum fraction', 'tooltip', 'Task',
'task total runtime', 'task num examples', 'Runtime for task'
]
df = df[columns_used]
# Brushing on the task_scatter plot highlights the same tasks in the Pareto
# curve.
brush = alt.selection_interval()
pareto_by_task = alt.Chart(df).mark_line(size=2).encode(
x=alt.X(
'task cumsum order',
title='The longest-runtime X% of regions',
axis=alt.Axis(format='%')),
y=alt.Y(
'task cumsum fraction',
title='Account for Y% of the total runtime',
axis=alt.Axis(format='%')),
tooltip='tooltip',
color=alt.condition(brush, 'Task:N', alt.value('lightgray'))).properties(
title='Pareto curve for each task').interactive()
# This chart needs to use the same dataframe as the first chart to enable the
# brushing on one to affect the other. Using max(task) for 'text' is a
# trick that causes bundling by task to avoid showing multiple overlapping
# points which otherwise make the text look funky.
task_scatter = alt.Chart(df).mark_point(size=10).encode(
x=alt.X('max(task total runtime)', title='Runtime (seconds)'),
y=alt.Y('task num examples:Q', title='Number of examples'),
color=alt.condition(brush, 'Task:N', alt.value('lightgray')),
tooltip=['Task', 'Runtime for task']
) \
.properties(title='Total runtime for each task (drag to highlight)') \
.add_selection(brush)
return pareto_by_task | task_scatter
def individual_region_bars(small_df: pd.DataFrame,
title: Union[str, Dict[str, str]] = '') -> alt.Chart:
"""Makes a stacked bar chart with runtime of each stage for individual regions.
Args:
small_df: A dataframe of regions, each of which will be shown as a bar.
title: A title for the plot. If a dict, it should contain 'title' and/or
'subtitle'.
Returns:
An altair chart.
"""
columns_used = ['region', 'Runtime'] + RUNTIME_COLUMNS
d = small_df[columns_used]
return alt.Chart(d).transform_fold(
RUNTIME_COLUMNS, as_=['Stage', 'runtime_by_stage']) \
.mark_bar().encode(
x=alt.X('region:N', sort=None),
y=alt.Y('runtime_by_stage:Q', scale=alt.Scale(type='linear'), title='Runtime (seconds)'),
fill=alt.Fill('Stage:N', sort=None),
tooltip='Runtime:N'
).properties(title=title)
def selected_longest_and_median_regions(df: pd.DataFrame) -> alt.Chart:
"""Creates a stacked bar charts of the top 20 and median 20 regions.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
num_rows = len(df)
mid = round(num_rows / 2)
return individual_region_bars(df.iloc[0:20], 'Top runtime regions') \
| individual_region_bars(df.iloc[mid-10:mid+11], 'Median runtime regions')
def top_regions_producing_zero_examples(df: pd.DataFrame) -> alt.Chart:
"""Creates a chart of the top regions that produced zero examples.
Args:
df: A dataframe of all regions.
Returns:
An altair chart.
"""
regions_with_zero_examples = df[df['num examples'] == 0]
runtime_of_zeros = regions_with_zero_examples['total runtime'].sum() / 3600
total_runtime = df['total runtime'].sum() / 3600
subtitle = (
f'Spent {runtime_of_zeros:.2f} hours processing the '
f'{len(regions_with_zero_examples)} regions that produced no examples, '
f'which is {runtime_of_zeros / total_runtime * 100:.2f}% of the total '
f'runtime of {total_runtime:.2f} hours.')
return individual_region_bars(
regions_with_zero_examples.nlargest(50, 'total runtime'),
title={
'text': 'The longest-running regions that produced no examples',
'subtitle': subtitle
})
def write_to_html_report(charts: List[Dict[Text, alt.Chart]], title: str,
subtitle: str, html_output: Any) -> None:
"""Makes the html report with all the charts inserted.
Args:
charts: A list of altair chart objects.
title: The title to show at the top of the report.
subtitle: The subtitle to show just below the title on the report.
html_output: a writable file object.
Returns:
None. Writes into the html_output file object.
"""
# Start the HTML document.
html_output.write('<!DOCTYPE html>\n<html>\n<head>')
# Add dependencies vega and vega-lite, which render the altair charts.
html_output.write('<script type="text/javascript" src="{}/vega@5"></script>'
'\n'.format(VEGA_URL))
html_output.write(
'<script type="text/javascript" src="{}/[email protected]"></script>'
'\n'.format(VEGA_URL))
html_output.write(
'<script type="text/javascript" src="{}/vega-embed@6"></script>'
'\n'.format(VEGA_URL))
# Add styles (CSS).
html_output.write(CSS_STYLES)
html_output.write('</head>\n<body>')
html_output.write('<h1>{}</h1>\n'.format(title))
html_output.write('<h2>{}</h2>\n'.format(subtitle))
# Make a div containing all the charts.
html_output.write('<div>')
for chart in charts:
html_output.write(
'<div class="chart-container" id="vis_{}"></div>\n'.format(chart['id']))
html_output.write('</div>')
# Add JSON vega specs and hook them up to the divs with VegaEmbed.
html_output.write('<script>\n')
for chart in charts:
html_output.write('var spec_{} = {};\n'.format(chart['id'],
chart['chart'].to_json()))
download_filename = '{}_{}'.format(title.replace(' ', '_'), chart['id'])
embed_options = {'mode': 'vega-lite', 'downloadFileName': download_filename}
html_output.write('vegaEmbed("#vis_{}", spec_{}, {})\n'.format(
chart['id'], chart['id'], embed_options))
html_output.write('</script>\n')
# Close HTML document.
html_output.write('</body></html>')
def read_data_and_make_dataframes(
input_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Loads data from a file into one dataframe as-is and one by task.
Args:
input_path: str, path of the input TSV file (may be sharded).
Returns:
df: A dataframe with one row per region.
by_task: A dataframe with one row per task.
"""
df = read_sharded_runtime_tsvs(input_path)
df = calculate_totals(df)
by_task = summarize_by_task(df)
return df, by_task
def make_all_charts(
df: pd.DataFrame,
by_task: pd.DataFrame) -> List[Dict[Text, Union[str, alt.Chart]]]:
"""Creates charts and puts them in a list with their ID names.
Args:
df: A dataframe with one row per region.
by_task: A dataframe with one row per task.
Returns:
list of dicts, each containing a chart and a descriptive ID.
"""
charts = [{
'id': 'total_by_stage',
'chart': totals_by_stage(by_task)
}, {
'id': 'pareto_and_runtimes_by_task',
'chart': pareto_and_runtimes_by_task(df)
}, {
'id': 'histogram_by_task',
'chart': stage_histogram(by_task, title='Stage runtimes for each task')
}, {
'id': 'selected_longest_and_median_regions',
'chart': selected_longest_and_median_regions(df)
}, {
'id': 'zero_examples',
'chart': top_regions_producing_zero_examples(df)
}]
# Altair shows a max of 5000 data points.
if len(df) <= 5000:
# With up to 5000 points, just show them all.
charts.extend([{
'id': 'histogram',
'chart': stage_histogram(df, title='Runtime by stage for all regions')
}, {
'id': 'scatter_grid',
'chart': correlation_scatter_charts(df, title='Trends for all regions')
}])
else:
# With too many points, make different subsets to show trends better.
top_100 = df.nlargest(100, 'total runtime')
top_5000 = df.nlargest(5000, 'total runtime')
# Sample the bottom 99% to avoid outliers that obscure general trends.
bottom_99_percent = df.nsmallest(int(len(df) * .99), 'total runtime')
if len(bottom_99_percent) > 5000:
bottom_99_percent = bottom_99_percent.sample(5000)
charts.extend([{
'id':
'histogram_bottom_99_percent',
'chart':
stage_histogram(
bottom_99_percent,
title='Runtime by stage for regions in the bottom 99%')
}, {
'id':
'histogram_top_100',
'chart':
stage_histogram(
top_100, title='Runtime by stage for regions in the top 100')
}, {
'id':
'scatter_grid_top_5000',
'chart':
correlation_scatter_charts(
top_5000, title='Trends for regions in the top 5000')
}, {
'id':
'scatter_grid_bottom_99_percent',
'chart':
correlation_scatter_charts(
bottom_99_percent, title='Trends for regions in the bottom 99%')
}])
return charts
def make_report(input_path: str, title: str,
html_output: tf.io.gfile.GFile) -> None:
"""Reads data, creates charts, and composes the charts into an HTML report.
Args:
input_path: Path of the input TSV file (or sharded files).
title: Title to put at the top of the report.
html_output: Writable file object where output will be written.
"""
# Load data into pandas dataframes and add summary columns.
df, by_task = read_data_and_make_dataframes(input_path)
# Build all the charts.
charts = make_all_charts(df, by_task)
# Write a subtitle with some top-level stats.
subtitle = (f'Runtime profiling for make_examples on {len(df)} regions '
f'across {len(by_task)} task{"(s)" if len(by_task) > 1 else ""}')
# Write the HTML report with all the charts.
write_to_html_report(
charts=charts, title=title, subtitle=subtitle, html_output=html_output)
def main(argv: Sequence[str]):
if len(argv) > 1:
raise app.UsageError(
'Command line parsing failure: this script does not accept '
'positional arguments, but found these extra arguments: "{}".'
''.format(str(argv[1:])))
# Add html to the output path if that is not already the suffix.
if FLAGS.output.endswith('html'):
output_filename = FLAGS.output
else:
output_filename = f'{FLAGS.output}.html'
# Start HTML document. Using GFile enables writing to GCS too.
html_output = tf.io.gfile.GFile(output_filename, 'w')
make_report(
input_path=FLAGS.input, title=FLAGS.title, html_output=html_output)
html_output.close() # Abstracted out the file open/close to enable testing.
print('Output written to:', output_filename)
if __name__ == '__main__':
flags.mark_flags_as_required(['input', 'title'])
app.run(main)
| 1.429688 | 1 |
pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py | wangyum/anaconda | 0 | 6050 | <filename>pkgs/dynd-python-0.7.2-py27_0/lib/python2.7/site-packages/dynd/tests/test_nd_fields.py
import sys
import unittest
from dynd import nd, ndt
"""
class TestFields(unittest.TestCase):
def test_simple(self):
a = nd.array([
(1, 2, 'a', 'b'),
(3, 4, 'ab', 'cd'),
(5, 6, 'def', 'ghi')],
type='3 * {x: int32, y: int32, z: string, w: string}')
# Selecting a single field
b = nd.fields(a, 'x')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.int32],
['x']))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
# Selecting two fields
b = nd.fields(a, 'z', 'y')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.string, ndt.int32],
['z', 'y']))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
# Selecting three fields
b = nd.fields(a, 'w', 'y', 'z')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.string, ndt.int32, ndt.string],
['w', 'y', 'z']))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
# Reordering all four fields
b = nd.fields(a, 'w', 'y', 'x', 'z')
self.assertEqual(nd.dtype_of(b), ndt.make_struct(
[ndt.string, ndt.int32, ndt.int32, ndt.string],
['w', 'y', 'x', 'z']))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
def test_fixed_var(self):
a = nd.array([
[(1, 2, 'a', 'b'),
(3, 4, 'ab', 'cd')],
[(5, 6, 'def', 'ghi')],
[(7, 8, 'alpha', 'beta'),
(9, 10, 'X', 'Y'),
(11, 12, 'the', 'end')]],
type='3 * var * {x: int32, y: int32, z: string, w: string}')
# Selecting a single field
b = nd.fields(a, 'x')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.int32],
['x']))))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
# Selecting two fields
b = nd.fields(a, 'z', 'y')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.string, ndt.int32],
['z', 'y']))))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
# Selecting three fields
b = nd.fields(a, 'w', 'y', 'z')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.string, ndt.int32, ndt.string],
['w', 'y', 'z']))))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
# Reordering all four fields
b = nd.fields(a, 'w', 'y', 'x', 'z')
self.assertEqual(nd.type_of(b), ndt.make_fixed_dim(3,
ndt.make_var_dim(ndt.make_struct(
[ndt.string, ndt.int32, ndt.int32, ndt.string],
['w', 'y', 'x', 'z']))))
self.assertEqual(nd.as_py(b.w), nd.as_py(a.w))
self.assertEqual(nd.as_py(b.y), nd.as_py(a.y))
self.assertEqual(nd.as_py(b.x), nd.as_py(a.x))
self.assertEqual(nd.as_py(b.z), nd.as_py(a.z))
def test_bad_field_name(self):
a = nd.array([
(1, 2, 'a', 'b'),
(3, 4, 'ab', 'cd'),
(5, 6, 'def', 'ghi')],
type='3 * {x: int32, y: int32, z: string, w: string}')
self.assertRaises(RuntimeError, nd.fields, a, 'y', 'v')
"""
if __name__ == '__main__':
unittest.main()
| 3.015625 | 3 |
xeofs/pandas/_transformer.py | nicrie/xeofs | 3 | 6051 | from typing import Union, Iterable, List
import numpy as np
import pandas as pd
from ..models._transformer import _ArrayTransformer, _MultiArrayTransformer
class _DataFrameTransformer(_ArrayTransformer):
'''`_ArrayTransformer` wrapper for `pandas.DataFrame`.
'''
def __init__(self):
super().__init__()
def fit(self, X : pd.DataFrame, axis : Union[int, Iterable[int]] = 0):
if not isinstance(X, pd.DataFrame):
raise ValueError('This interface is for `pandas.DataFrame` only')
if isinstance(axis, list):
axis = axis[0]
# Set sample and feature index
if axis == 0:
self.index_samples = X.index
self.index_features = X.columns
elif axis == 1:
self.index_samples = X.columns
self.index_features = X.index
else:
raise ValueError('axis must be either 0 or 1')
# Fit the data
try:
super().fit(X=X.values, axis=axis)
except AttributeError:
err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
return self
def transform(self, X : pd.DataFrame) -> np.ndarray:
try:
return super().transform(X.values)
except AttributeError:
err_msg = 'weights must be of type {:}.'.format(repr(pd.DataFrame))
raise TypeError(err_msg)
def fit_transform(self, X : pd.DataFrame, axis : int = 0) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def transform_weights(self, weights : pd.DataFrame) -> np.ndarray:
try:
return super().transform_weights(weights.values)
except AttributeError:
return super().transform_weights(weights)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
df = super().back_transform(X)
return pd.DataFrame(
df,
index=self.index_samples,
columns=self.index_features
)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
eofs = super().back_transform_eofs(X)
return pd.DataFrame(
eofs,
index=self.index_features,
columns=range(1, eofs.shape[-1] + 1)
)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
pcs = super().back_transform_pcs(X)
return pd.DataFrame(
pcs,
index=self.index_samples,
columns=range(1, pcs.shape[-1] + 1)
)
class _MultiDataFrameTransformer(_MultiArrayTransformer):
    'Transform multiple 2D ``pd.DataFrame`` to a single 2D ``np.ndarray``.'
def __init__(self):
super().__init__()
def fit(self, X : Union[pd.DataFrame, List[pd.DataFrame]], axis : Union[int, Iterable[int]] = 0):
X = self._convert2list(X)
self.tfs = [_DataFrameTransformer().fit(x, axis=axis) for x in X]
if len(set([tf.n_valid_samples for tf in self.tfs])) > 1:
err_msg = 'All individual arrays must have same number of samples.'
raise ValueError(err_msg)
self.idx_array_sep = np.cumsum([tf.n_valid_features for tf in self.tfs])
self.axis_samples = self.tfs[0].axis_samples
return self
def transform(self, X : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform(X=X)
def transform_weights(self, weights : Union[pd.DataFrame, List[pd.DataFrame]]) -> np.ndarray:
return super().transform_weights(weights=weights)
def fit_transform(
self, X : Union[pd.DataFrame, List[pd.DataFrame]],
axis : Union[int, Iterable[int]] = 0
) -> np.ndarray:
return self.fit(X=X, axis=axis).transform(X)
def back_transform(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform(X=X)
def back_transform_eofs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_eofs(X=X)
def back_transform_pcs(self, X : np.ndarray) -> pd.DataFrame:
return super().back_transform_pcs(X=X)
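

# Minimal usage sketch (illustrative; these transformers are internal helpers):
#
#   tf = _DataFrameTransformer()
#   arr = tf.fit_transform(df, axis=0)    # df: a 2D pandas DataFrame
#   df_back = tf.back_transform(arr)      # round-trip back to a DataFrame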
| 2.859375 | 3 |
tests/bogus_python_model.py | FossilizedContainers/fossilized-controller | 1 | 6052 | <filename>tests/bogus_python_model.py
import os
import sys
import lipd
# import pythonAdapter, assumes in ../python-adapter/
tests_dir = os.path.dirname(os.path.realpath(__file__))
fc_dir = os.path.dirname(tests_dir)
python_adapter_dir = os.path.join(fc_dir, "python-adapter")
sys.path.append(python_adapter_dir)
import adapter
def fake_model(adapter):
# check to see inside function
print("\n---\nStart of the fake_model function\n---\n")
# the parameters are handed to you by the adapter
files = adapter.get_files()
# use the parameters given by the adapter to get the binary data of the LiPD file
lipd.readLipd(files['weldeab'])
# get the binary data of the NetCDF file
net_cdf_path = files['net_cdf']
# mark the NetCDF file as an output file
adapter.set_output_files(net_cdf_path)
adapter.set_output_files("lipd-files\\")
return
# have to call adapter in the adapter.py file as adapter.adapter
adapter = adapter.global_adapter
adapter.register(fake_model)
adapter.start_server()
| 2.46875 | 2 |
tello_control_ui.py | banne2266/UAV-autopilot-NCTU-2021 | 0 | 6053 | from PIL import Image
from PIL import ImageTk
import tkinter as tki
from tkinter import Toplevel, Scale
import threading
import datetime
import cv2
import os
import time
import platform
class TelloUI:
"""Wrapper class to enable the GUI."""
def __init__(self,tello,outputpath):
"""
        Initialize all the elements of the GUI, supported by Tkinter
:param tello: class interacts with the Tello drone.
Raises:
RuntimeError: If the Tello rejects the attempt to enter command mode.
"""
self.tello = tello # videostream device
        self.outputPath = outputpath # the path where pictures created by clicking the takeSnapshot button are saved
self.frame = None # frame read from h264decoder and used for pose recognition
self.thread = None # thread of the Tkinter mainloop
self.stopEvent = None
# control variables
self.distance = 0.1 # default distance for 'move' cmd
self.degree = 30 # default degree for 'cw' or 'ccw' cmd
# if the flag is TRUE,the auto-takeoff thread will stop waiting for the response from tello
self.quit_waiting_flag = False
# initialize the root window and image panel
self.root = tki.Tk()
self.panel = None
# create buttons
self.btn_snapshot = tki.Button(self.root, text="Snapshot!",
command=self.takeSnapshot)
self.btn_snapshot.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_pause = tki.Button(self.root, text="Pause", relief="raised", command=self.pauseVideo)
self.btn_pause.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_landing = tki.Button(
self.root, text="Open Command Panel", relief="raised", command=self.openCmdWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
        # start a thread that constantly polls the video sensor for
# the most recently read frame
self.stopEvent = threading.Event()
self.thread = threading.Thread(target=self.videoLoop, args=())
self.thread.start()
# set a callback to handle when the window is closed
self.root.wm_title("TELLO Controller")
self.root.wm_protocol("WM_DELETE_WINDOW", self.onClose)
# the sending_command will send command to tello every 5 seconds
self.sending_command_thread = threading.Thread(target = self._sendingCommand)
def videoLoop(self):
"""
The mainloop thread of Tkinter
Raises:
RuntimeError: To get around a RunTime error that Tkinter throws due to threading.
"""
try:
            # start the thread that gets the GUI image and draws the skeleton
time.sleep(0.5)
self.sending_command_thread.start()
while not self.stopEvent.is_set():
system = platform.system()
# read the frame for GUI show
self.frame = self.tello.read()
if self.frame is None or self.frame.size == 0:
continue
# transfer the format from frame to image
image = Image.fromarray(self.frame)
                # we found a compatibility problem between Tkinter, PIL and macOS that can
                # make the "ImageTk.PhotoImage" call take a very long time, so on macOS we
                # start a new thread to execute the _updateGUIImage function.
if system =="Windows" or system =="Linux":
self._updateGUIImage(image)
else:
thread_tmp = threading.Thread(target=self._updateGUIImage,args=(image,))
thread_tmp.start()
time.sleep(0.03)
except RuntimeError as e:
print("[INFO] caught a RuntimeError")
def _updateGUIImage(self,image):
"""
        Main operation to initialize the image object and update the GUI panel
"""
image = ImageTk.PhotoImage(image)
# if the panel none ,we need to initial it
if self.panel is None:
self.panel = tki.Label(image=image)
self.panel.image = image
self.panel.pack(side="left", padx=10, pady=10)
# otherwise, simply update the panel
else:
self.panel.configure(image=image)
self.panel.image = image
def _sendingCommand(self):
"""
        start a while loop that sends 'command' to tello every 5 seconds
"""
while True:
self.tello.send_command('command')
time.sleep(5)
def _setQuitWaitingFlag(self):
"""
        set the variable to TRUE; it stops the computer from waiting for a response from tello
"""
self.quit_waiting_flag = True
def openCmdWindow(self):
"""
        open the command window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Command Panel")
# create text input entry
text0 = tki.Label(panel,
text='This Controller map keyboard inputs to Tello control commands\n'
'Adjust the trackbar to reset distance and degree parameter',
font='Helvetica 10 bold'
)
text0.pack(side='top')
text1 = tki.Label(panel, text=
'W - Move Tello Up\t\t\tArrow Up - Move Tello Forward\n'
'S - Move Tello Down\t\t\tArrow Down - Move Tello Backward\n'
'A - Rotate Tello Counter-Clockwise\tArrow Left - Move Tello Left\n'
'D - Rotate Tello Clockwise\t\tArrow Right - Move Tello Right',
justify="left")
text1.pack(side="top")
self.btn_landing = tki.Button(
panel, text="Land", relief="raised", command=self.telloLanding)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_takeoff = tki.Button(
panel, text="Takeoff", relief="raised", command=self.telloTakeOff)
self.btn_takeoff.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
# binding arrow keys to drone control
self.tmp_f = tki.Frame(panel, width=100, height=2)
self.tmp_f.bind('<KeyPress-w>', self.on_keypress_w)
self.tmp_f.bind('<KeyPress-s>', self.on_keypress_s)
self.tmp_f.bind('<KeyPress-a>', self.on_keypress_a)
self.tmp_f.bind('<KeyPress-d>', self.on_keypress_d)
self.tmp_f.bind('<KeyPress-Up>', self.on_keypress_up)
self.tmp_f.bind('<KeyPress-Down>', self.on_keypress_down)
self.tmp_f.bind('<KeyPress-Left>', self.on_keypress_left)
self.tmp_f.bind('<KeyPress-Right>', self.on_keypress_right)
self.tmp_f.pack(side="bottom")
self.tmp_f.focus_set()
self.btn_landing = tki.Button(
panel, text="Flip", relief="raised", command=self.openFlipWindow)
self.btn_landing.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.distance_bar = Scale(panel, from_=0.02, to=5, tickinterval=0.01, digits=3, label='Distance(m)',
resolution=0.01)
self.distance_bar.set(0.2)
self.distance_bar.pack(side="left")
self.btn_distance = tki.Button(panel, text="Reset Distance", relief="raised",
command=self.updateDistancebar,
)
self.btn_distance.pack(side="left", fill="both",
expand="yes", padx=10, pady=5)
self.degree_bar = Scale(panel, from_=1, to=360, tickinterval=10, label='Degree')
self.degree_bar.set(30)
self.degree_bar.pack(side="right")
self.btn_distance = tki.Button(panel, text="Reset Degree", relief="raised", command=self.updateDegreebar)
self.btn_distance.pack(side="right", fill="both",
expand="yes", padx=10, pady=5)
def openFlipWindow(self):
"""
        open the flip window and initialize all the buttons and text
"""
panel = Toplevel(self.root)
panel.wm_title("Gesture Recognition")
self.btn_flipl = tki.Button(
panel, text="Flip Left", relief="raised", command=self.telloFlip_l)
self.btn_flipl.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipr = tki.Button(
panel, text="Flip Right", relief="raised", command=self.telloFlip_r)
self.btn_flipr.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipf = tki.Button(
panel, text="Flip Forward", relief="raised", command=self.telloFlip_f)
self.btn_flipf.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
self.btn_flipb = tki.Button(
panel, text="Flip Backward", relief="raised", command=self.telloFlip_b)
self.btn_flipb.pack(side="bottom", fill="both",
expand="yes", padx=10, pady=5)
def takeSnapshot(self):
"""
save the current frame of the video as a jpg file and put it into outputpath
"""
# grab the current timestamp and use it to construct the filename
ts = datetime.datetime.now()
filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
p = os.path.sep.join((self.outputPath, filename))
# save the file
cv2.imwrite(p, cv2.cvtColor(self.frame, cv2.COLOR_RGB2BGR))
print("[INFO] saved {}".format(filename))
def pauseVideo(self):
"""
        Toggle the freeze/unfreeze of the video
"""
if self.btn_pause.config('relief')[-1] == 'sunken':
self.btn_pause.config(relief="raised")
self.tello.video_freeze(False)
else:
self.btn_pause.config(relief="sunken")
self.tello.video_freeze(True)
def telloTakeOff(self):
return self.tello.takeoff()
def telloLanding(self):
return self.tello.land()
def telloFlip_l(self):
return self.tello.flip('l')
def telloFlip_r(self):
return self.tello.flip('r')
def telloFlip_f(self):
return self.tello.flip('f')
def telloFlip_b(self):
return self.tello.flip('b')
def telloCW(self, degree):
return self.tello.rotate_cw(degree)
def telloCCW(self, degree):
return self.tello.rotate_ccw(degree)
def telloMoveForward(self, distance):
return self.tello.move_forward(distance)
def telloMoveBackward(self, distance):
return self.tello.move_backward(distance)
def telloMoveLeft(self, distance):
return self.tello.move_left(distance)
def telloMoveRight(self, distance):
return self.tello.move_right(distance)
def telloUp(self, dist):
return self.tello.move_up(dist)
def telloDown(self, dist):
return self.tello.move_down(dist)
def updateTrackBar(self):
self.my_tello_hand.setThr(self.hand_thr_bar.get())
def updateDistancebar(self):
self.distance = self.distance_bar.get()
print ('reset distance to %.1f' % self.distance)
def updateDegreebar(self):
self.degree = self.degree_bar.get()
        print ('reset degree to %d' % self.degree)
def on_keypress_w(self, event):
print ("up %d m" % self.distance)
self.telloUp(self.distance)
def on_keypress_s(self, event):
print ("down %d m" % self.distance)
self.telloDown(self.distance)
def on_keypress_a(self, event):
print ("ccw %d degree" % self.degree)
self.tello.rotate_ccw(self.degree)
def on_keypress_d(self, event):
print ("cw %d m" % self.degree)
self.tello.rotate_cw(self.degree)
def on_keypress_up(self, event):
print ("forward %d m" % self.distance)
self.telloMoveForward(self.distance)
def on_keypress_down(self, event):
print ("backward %d m" % self.distance)
self.telloMoveBackward(self.distance)
def on_keypress_left(self, event):
print ("left %d m" % self.distance)
self.telloMoveLeft(self.distance)
def on_keypress_right(self, event):
print ("right %d m" % self.distance)
self.telloMoveRight(self.distance)
def on_keypress_enter(self, event):
if self.frame is not None:
self.registerFace()
self.tmp_f.focus_set()
def onClose(self):
"""
set the stop event, cleanup the camera, and allow the rest of
the quit process to continue
"""
print("[INFO] closing...")
self.stopEvent.set()
del self.tello
self.root.quit()
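# A minimal usage sketch (not from the original repo): it assumes this GUI class is
# named TelloUI and is constructed with a tello wrapper object plus an output path,
# matching the self.tello / self.outputPath attributes used above.
#
#   from tello import Tello        # hypothetical import of the drone wrapper
#   drone = Tello('', 8889)
#   ui = TelloUI(drone, './img/')
#   ui.root.mainloop()             # hand control to the Tkinter main loop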
| 2.9375 | 3 |
__temp/examples/rhino/mesh-stanford-dragon.py | robin-gdwl/examples_topop-desc | 0 | 6054 | <reponame>robin-gdwl/examples_topop-desc
import compas
import compas_rhino
from compas.datastructures import Mesh
mesh = Mesh.from_ply(compas.get('stanford_dragon.ply'))
compas_rhino.mesh_draw(mesh)
| 1.6875 | 2 |
Python/ML_DL/DL/Neural-Networks-Demystified-master/partOne.py | vbsteja/code | 3 | 6055 | # Neural Networks Demystified
# Part 1: Data + Architecture
#
# Supporting code for short YouTube series on artificial neural networks.
#
# <NAME>
# @stephencwelch
import numpy as np
# X = (hours sleeping, hours studying), y = Score on test
X = np.array(([3,5], [5,1], [10,2]), dtype=float)
y = np.array(([75], [82], [93]), dtype=float)
# Normalize
X = X/np.amax(X, axis=0)
y = y/100 #Max test score is 100
| 3.78125 | 4 |
manubot/process/util.py | benstear/manubot | 0 | 6056 | <gh_stars>0
import json
import logging
import os
import pathlib
import re
import textwrap
import warnings
from typing import List, Optional
import jinja2
import pandas
import requests
import requests_cache
import yaml
from manubot.util import read_serialized_data, read_serialized_dict
from manubot.process.bibliography import load_manual_references
from manubot.process.ci import get_continuous_integration_parameters
from manubot.process.metadata import (
get_header_includes,
get_thumbnail_url,
get_manuscript_urls,
get_software_versions,
)
from manubot.process.manuscript import (
datetime_now,
get_manuscript_stats,
get_text,
)
from manubot.cite.citekey import (
citekey_to_csl_item,
shorten_citekey,
is_valid_citekey,
standardize_citekey,
)
def check_collisions(citekeys_df):
"""
Check for short_citekey hash collisions
"""
collision_df = citekeys_df[["standard_citekey", "short_citekey"]].drop_duplicates()
collision_df = collision_df[collision_df.short_citekey.duplicated(keep=False)]
if not collision_df.empty:
logging.error(f"OMF! Hash collision. Congratulations.\n{collision_df}")
return collision_df
def check_multiple_citation_strings(citekeys_df):
"""
    Identify different citation strings referring to the same reference.
"""
message = textwrap.dedent(
f"""\
{len(citekeys_df)} unique citations strings extracted from text
{citekeys_df.standard_citekey.nunique()} unique standard citations\
"""
)
logging.info(message)
multi_df = citekeys_df[citekeys_df.standard_citekey.duplicated(keep=False)]
if not multi_df.empty:
table = multi_df.to_string(
index=False, columns=["standard_citekey", "manuscript_citekey"]
)
logging.warning(f"Multiple citekeys detected for the same reference:\n{table}")
return multi_df
def read_variable_files(paths: List[str], variables: Optional[dict] = None) -> dict:
"""
Read multiple serialized data files into a user_variables dictionary.
Provide `paths` (a list of URLs or local file paths).
Paths can optionally have a namespace prepended.
For example:
```python
paths = [
'https://git.io/vbkqm', # update the dictionary's top-level
'namespace_1=https://git.io/vbkqm', # store under 'namespace_1' key
'namespace_2=some_local_path.json', # store under 'namespace_2' key
]
```
If a namespace is not provided, the JSON must contain a dictionary as its
top level. Namespaces should consist only of ASCII alphanumeric characters
(includes underscores, first character cannot be numeric).
Pass a dictionary to `variables` to update an existing dictionary rather
than create a new dictionary.
"""
if variables is None:
variables = {}
for path in paths:
logging.info(f"Reading user-provided templating variables at {path!r}")
# Match only namespaces that are valid jinja2 variable names
# http://jinja.pocoo.org/docs/2.10/api/#identifier-naming
match = re.match(r"([a-zA-Z_][a-zA-Z0-9_]*)=(.+)", path)
if match:
namespace, path = match.groups()
logging.info(
f"Using the {namespace!r} namespace for template variables from {path!r}"
)
try:
if match:
obj = {namespace: read_serialized_data(path)}
else:
obj = read_serialized_dict(path)
except Exception:
logging.exception(f"Error reading template variables from {path!r}")
continue
assert isinstance(obj, dict)
conflicts = variables.keys() & obj.keys()
if conflicts:
logging.warning(
f"Template variables in {path!r} overwrite existing "
"values for the following keys:\n" + "\n".join(conflicts)
)
variables.update(obj)
logging.debug(
f"Reading user-provided templating variables complete:\n"
f"{json.dumps(variables, indent=2, ensure_ascii=False)}"
)
return variables
def add_author_affiliations(variables: dict) -> dict:
"""
Edit variables to contain numbered author affiliations. Specifically,
add a list of affiliation_numbers for each author and add a list of
affiliations to the top-level of variables. If no authors have any
affiliations, variables is left unmodified.
"""
rows = list()
for author in variables["authors"]:
if "affiliations" not in author:
continue
if not isinstance(author["affiliations"], list):
warnings.warn(
f"Expected list for {author['name']}'s affiliations. "
f"Assuming multiple affiliations are `; ` separated. "
f"Please switch affiliations to a list.",
category=DeprecationWarning,
)
author["affiliations"] = author["affiliations"].split("; ")
for affiliation in author["affiliations"]:
rows.append((author["name"], affiliation))
if not rows:
return variables
affil_map_df = pandas.DataFrame(rows, columns=["name", "affiliation"])
affiliation_df = affil_map_df[["affiliation"]].drop_duplicates()
affiliation_df["affiliation_number"] = range(1, 1 + len(affiliation_df))
affil_map_df = affil_map_df.merge(affiliation_df)
name_to_numbers = {
name: sorted(df.affiliation_number) for name, df in affil_map_df.groupby("name")
}
for author in variables["authors"]:
author["affiliation_numbers"] = name_to_numbers.get(author["name"], [])
variables["affiliations"] = affiliation_df.to_dict(orient="records")
return variables
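# Illustration of the numbering performed above (a sketch, not part of the original
# module): given variables = {"authors": [{"name": "A", "affiliations": ["X"]},
# {"name": "B", "affiliations": ["X", "Y"]}]}, the call assigns
# affiliation_numbers [1] to A and [1, 2] to B, and adds a top-level
# variables["affiliations"] list with one numbered record per unique affiliation.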
def load_variables(args) -> dict:
"""
Read `metadata.yaml` and files specified by `--template-variables-path` to generate
manuscript variables available for jinja2 templating.
    Returns a dictionary, referred to as `variables`, with the following keys:
- `pandoc`: a dictionary for passing options to Pandoc via the `yaml_metadata_block`.
Fields in `pandoc` are either generated by Manubot or hard-coded by the user if `metadata.yaml`
includes a `pandoc` dictionary.
- `manubot`: a dictionary for manubot-related information and metadata.
Fields in `manubot` are either generated by Manubot or hard-coded by the user if `metadata.yaml`
includes a `manubot` dictionary.
- All fields from a manuscript's `metadata.yaml` that are not interpreted by Manubot are
copied to `variables`. Interpreted fields include `pandoc`, `manubot`, `title`,
`keywords`, `authors` (formerly `author_info`, now deprecated), `lang`, and `thumbnail`.
- User-specified fields inserted according to the `--template-variables-path` option.
User-specified variables take highest precedence and can overwrite values for existing
keys like `pandoc` or `manubot` (dangerous).
"""
# Generated manuscript variables
variables = {"pandoc": {}, "manubot": {}}
# Read metadata which contains pandoc_yaml_metadata
# as well as authors information.
if args.meta_yaml_path.is_file():
metadata = read_serialized_dict(args.meta_yaml_path)
else:
metadata = {}
logging.warning(
f"missing {args.meta_yaml_path} file with yaml_metadata_block for pandoc"
)
# Interpreted keys that are intended for pandoc
move_to_pandoc = "title", "keywords", "lang"
for key in move_to_pandoc:
if key in metadata:
variables["pandoc"][key] = metadata.pop(key)
# Add date to metadata
now = datetime_now()
logging.info(
f"Using {now:%Z} timezone.\n"
f"Dating manuscript with the current datetime: {now.isoformat()}"
)
variables["pandoc"]["date-meta"] = now.date().isoformat()
variables["manubot"]["date"] = f"{now:%B} {now.day}, {now.year}"
# Process authors metadata
if "author_info" in metadata:
authors = metadata.pop("author_info", [])
warnings.warn(
"metadata.yaml: 'author_info' is deprecated. Use 'authors' instead.",
category=DeprecationWarning,
)
else:
authors = metadata.pop("authors", [])
if authors is None:
authors = []
variables["pandoc"]["author-meta"] = [author["name"] for author in authors]
variables["manubot"]["authors"] = authors
add_author_affiliations(variables["manubot"])
# Set repository version metadata for CI builds
ci_params = get_continuous_integration_parameters()
if ci_params:
variables["manubot"]["ci_source"] = ci_params
# Add manuscript URLs
variables["manubot"].update(get_manuscript_urls(metadata.pop("html_url", None)))
# Add software versions
variables["manubot"].update(get_software_versions())
# Add thumbnail URL if present
thumbnail_url = get_thumbnail_url(metadata.pop("thumbnail", None))
if thumbnail_url:
variables["manubot"]["thumbnail_url"] = thumbnail_url
# Update variables with metadata.yaml pandoc/manubot dicts
for key in "pandoc", "manubot":
dict_ = metadata.pop(key, {})
if not isinstance(dict_, dict):
logging.warning(
f"load_variables expected metadata.yaml field {key!r} to be a dict."
f"Received a {dict_.__class__.__name__!r} instead."
)
continue
variables[key].update(dict_)
# Update variables with uninterpreted metadata.yaml fields
variables.update(metadata)
# Update variables with user-provided variables here
variables = read_variable_files(args.template_variables_path, variables)
# Add header-includes metadata with <meta> information for the HTML output's <head>
variables["pandoc"]["header-includes"] = get_header_includes(variables)
assert args.skip_citations
# Extend Pandoc's metadata.bibliography field with manual references paths
bibliographies = variables["pandoc"].get("bibliography", [])
if isinstance(bibliographies, str):
bibliographies = [bibliographies]
assert isinstance(bibliographies, list)
bibliographies.extend(args.manual_references_paths)
bibliographies = list(map(os.fspath, bibliographies))
variables["pandoc"]["bibliography"] = bibliographies
# enable pandoc-manubot-cite option to write bibliography to a file
variables["pandoc"]["manubot-output-bibliography"] = os.fspath(args.references_path)
variables["pandoc"]["manubot-output-citekeys"] = os.fspath(args.citations_path)
variables["pandoc"]["manubot-requests-cache-path"] = os.fspath(
args.requests_cache_path
)
variables["pandoc"]["manubot-clear-requests-cache"] = args.clear_requests_cache
return variables
def get_citekeys_df(citekeys: list, citekey_aliases: dict = {}):
"""
Generate and return citekeys_df.
citekeys_df is a pandas.DataFrame with the following columns:
- manuscript_citekey: citation keys extracted from the manuscript content files.
- detagged_citekey: manuscript_citekey but with tag citekeys dereferenced
- standard_citekey: detagged_citekey standardized
- short_citekey: standard_citekey hashed to create a shortened citekey
"""
citekeys_df = pandas.DataFrame(
{"manuscript_citekey": list(citekeys)}
).drop_duplicates()
citekeys_df["detagged_citekey"] = citekeys_df.manuscript_citekey.map(
lambda citekey: citekey_aliases.get(citekey, citekey)
)
for citation in citekeys_df.detagged_citekey:
is_valid_citekey(citation, allow_raw=True)
citekeys_df["standard_citekey"] = citekeys_df.detagged_citekey.map(
standardize_citekey
)
citekeys_df["short_citekey"] = citekeys_df.standard_citekey.map(shorten_citekey)
citekeys_df = citekeys_df.sort_values(["standard_citekey", "detagged_citekey"])
check_collisions(citekeys_df)
check_multiple_citation_strings(citekeys_df)
return citekeys_df
def read_citations_tsv(path) -> dict:
"""
Read citekey aliases from a citation-tags.tsv file.
"""
if not path.is_file():
logging.info(
f"no citation tags file at {path} "
"Not reading citekey_aliases from citation-tags.tsv."
)
return {}
tag_df = pandas.read_csv(path, sep="\t")
na_rows_df = tag_df[tag_df.isnull().any(axis="columns")]
if not na_rows_df.empty:
logging.error(
f"{path} contains rows with missing values:\n"
f"{na_rows_df}\n"
"This error can be caused by using spaces rather than tabs to delimit fields.\n"
"Proceeding to reread TSV with delim_whitespace=True."
)
tag_df = pandas.read_csv(path, delim_whitespace=True)
tag_df["manuscript_citekey"] = "tag:" + tag_df.tag
tag_df = tag_df.rename(columns={"citation": "detagged_citekey"})
citekey_aliases = dict(
zip(tag_df["manuscript_citekey"], tag_df["detagged_citekey"])
)
return citekey_aliases
def write_citekeys_tsv(citekeys_df, path):
if not path:
return
citekeys_df.to_csv(path, sep="\t", index=False)
def _citation_tags_to_reference_links(args) -> str:
"""
Convert citation-tags.tsv to markdown reference link syntax
"""
citekey_aliases = read_citations_tsv(args.citation_tags_path)
if not citekey_aliases:
return ""
text = "\n\n"
for key, value in citekey_aliases.items():
text += f"[@{key}]: {value}\n"
logging.warning(
"citation-tags.tsv is deprecated. "
f"Consider deleting citation-tags.tsv and inserting the following paragraph into your Markdown content:{text}"
)
return text
def generate_csl_items(
citekeys: list,
manual_refs: dict = {},
requests_cache_path: Optional[str] = None,
clear_requests_cache: Optional[bool] = False,
) -> list:
"""
    Generate CSL (citeproc) items for the given list of standard citekeys.
Parameters:
- citekeys: list of standard_citekeys
- manual_refs: mapping from standard_citekey to csl_item for manual references
- requests_cache_path: path for the requests cache database.
Passed as cache_name to `requests_cache.install_cache`.
requests_cache may append an extension to this path, so it is not always the exact
path to the cache. If None, do not use requests_cache.
- clear_requests_cache: If True, clear the requests cache before generating citekey metadata.
"""
# Deduplicate citations
citekeys = list(dict.fromkeys(citekeys))
# Install cache
if requests_cache_path is not None:
requests # require `import requests` in case this is essential for monkey patching by requests_cache.
requests_cache.install_cache(requests_cache_path, include_get_headers=True)
cache = requests_cache.get_cache()
if clear_requests_cache:
logging.info("Clearing requests-cache")
requests_cache.clear()
logging.info(
f"requests-cache starting with {len(cache.responses)} cached responses"
)
csl_items = list()
failures = list()
for standard_citekey in citekeys:
if standard_citekey in manual_refs:
csl_items.append(manual_refs[standard_citekey])
continue
elif standard_citekey.startswith("raw:"):
logging.error(
f"CSL JSON Data with a standard_citekey of {standard_citekey!r} not found in manual-references.json. "
"Metadata must be provided for raw citekeys."
)
failures.append(standard_citekey)
try:
csl_item = citekey_to_csl_item(standard_citekey)
csl_items.append(csl_item)
except Exception:
logging.exception(f"Citeproc retrieval failure for {standard_citekey!r}")
failures.append(standard_citekey)
# Uninstall cache
if requests_cache_path is not None:
logging.info(
f"requests-cache finished with {len(cache.responses)} cached responses"
)
requests_cache.uninstall_cache()
if failures:
message = "CSL JSON Data retrieval failed for the following standardized citation keys:\n{}".format(
"\n".join(failures)
)
logging.error(message)
return csl_items
def _generate_csl_items(args, citekeys_df):
"""
    Generate CSL (citeproc) items for standard_citekeys in citekeys_df.
Writes references.json to disk and logs warnings for potential problems.
"""
# Read manual references (overrides) in JSON CSL
manual_refs = load_manual_references(args.manual_references_paths)
# Retrieve CSL Items
csl_items = generate_csl_items(
citekeys=citekeys_df.standard_citekey.unique(),
manual_refs=manual_refs,
requests_cache_path=args.requests_cache_path,
clear_requests_cache=args.clear_requests_cache,
)
# Write CSL JSON bibliography for Pandoc.
write_csl_json(csl_items, args.references_path)
return csl_items
def write_csl_json(csl_items, path):
"""
Write CSL Items to a JSON file at `path`.
If `path` evaluates as False, do nothing.
"""
if not path:
return
path = pathlib.Path(path)
with path.open("w", encoding="utf-8") as write_file:
json.dump(csl_items, write_file, indent=2, ensure_ascii=False)
write_file.write("\n")
def template_with_jinja2(text, variables):
"""
Template using jinja2 with the variables dictionary unpacked as keyword
arguments.
"""
jinja_environment = jinja2.Environment(
loader=jinja2.BaseLoader(),
undefined=jinja2.make_logging_undefined(logging.getLogger()),
autoescape=False,
comment_start_string="{##",
comment_end_string="##}",
extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
)
template = jinja_environment.from_string(text)
return template.render(**variables)
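# A small usage sketch with hypothetical values (not from the original test suite):
# template_with_jinja2("Hello {{ pandoc.title }}!", {"pandoc": {"title": "Manuscript"}})
# returns "Hello Manuscript!"; undefined variables are logged by the
# logging-undefined handler configured above rather than raising an error.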
def prepare_manuscript(args):
"""
Compile manuscript, creating manuscript.md and references.json as inputs
for pandoc.
"""
text = get_text(args.content_directory)
assert args.skip_citations
text += _citation_tags_to_reference_links(args)
variables = load_variables(args)
variables["manubot"]["manuscript_stats"] = get_manuscript_stats(text)
with args.variables_path.open("w", encoding="utf-8") as write_file:
json.dump(variables, write_file, ensure_ascii=False, indent=2)
write_file.write("\n")
text = template_with_jinja2(text, variables)
# Write manuscript for pandoc
with args.manuscript_path.open("w", encoding="utf-8") as write_file:
yaml.dump(
variables["pandoc"],
write_file,
default_flow_style=False,
explicit_start=True,
explicit_end=True,
width=float("inf"),
)
write_file.write("\n")
write_file.write(text)
| 2.25 | 2 |
iba_scrape.py | wmwilcox/mix-mind | 1 | 6057 | <gh_stars>1-10
#! /usr/bin/env python
# scrape the IBA pages for cocktail lists
import sys
import xml.etree.ElementTree as ET
from lxml import html
import requests
from pprint import pprint
from collections import OrderedDict
import json
url = 'http://iba-world.com/new-era-drinks/'
jsonfile = 'IBA_new_era_drinks.json'
url = 'http://iba-world.com/iba-cocktails/'
jsonfile = 'IBA_unforgettables.json'
url = 'http://iba-world.com/contemporary-classics/'
jsonfile = 'IBA_contemporary_classics.json'
jsonfile = 'IBA_.json'
recipes = OrderedDict()
page = requests.get(url)
tree = html.fromstring(page.content)
items = tree.findall(".//div[@class='blog_list_item_lists']")
for item in items:
name = item.find(".//h3").text
name = ' '.join([word.capitalize() for word in name.split()])
body = item.find(".//div[@class='blog_text']")
recipes[name] = {'unit': 'cL'}
print name
children = [c for c in body.iterchildren()]
n = 0
if children[1].tag == 'ul':
n = -1
style = children[n+1].text
if style is None:
try:
style = children[n+1].find('span').text
except:
pass
recipes[name]['style'] = style
recipes[name]['ingredients'] = OrderedDict()
if not children[n+2].tag == 'ul':
print "adapting <p> ingredients:", children[n+2].text
ing_list = ET.tostring(children[n+2]).lstrip('<p>').rstrip('</p>\n').split('<br />\n')
else:
ing_list = [i.text for i in children[n+2].iterchildren()]
for ingredient in ing_list:
if len(ingredient.split()) == 1:
recipes[name]['ingredients'][ingredient.lower()] = ''
continue
unit = ingredient.split()[1].lower()
if unit == 'cl':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = float(ingredient.split()[0])
elif unit == 'bar' or unit == 'to': # bar spoon
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[3:]])] = ' '.join(ingredient.split()[:3])
elif unit == 'dashes' or unit == 'drops' or unit == 'with':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = ' '.join(ingredient.split()[:2])
elif unit == 'dash':
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[2:]])] = 'dash'
else:
print "using literal: ", ingredient
literal = {'1': 'one', '2': 'two', 'A': 'one'}
try:
recipes[name]['ingredients'][' '.join([w.lower() for w in ingredient.split()[1:]])] = literal[ingredient.split()[0]]
except:
recipes[name]['ingredients'][ingredient.lower()] = ''
# Get full description from the link
ref_url = item.find(".//a[@class='top_hover_image']").attrib.get('href')
detail_page = requests.get(ref_url)
detail_tree = html.fromstring(detail_page.content)
use_next = False
for child in detail_tree.find(".//div[@class='col-sm-9']").iterchildren():
if use_next and child.tag == 'p':
recipes[name]['IBA_description'] = child.text
break
if child.tag =='ul':
use_next = True
with open(jsonfile, 'w') as fp:
json.dump(recipes, fp, indent=4, separators=(',', ': '))
print "Wrote out as {}".format(jsonfile)
sys.exit(0)
raw = sys.argv[1]
with open(raw) as fp:
for line in fp.readlines():
if line.lstrip().startswith(r'<h3>'):
print line.lstrip()
# super hax
if line.startswith(r'<p>'):
print line
if line.startswith(r'<li>'):
print line
if not line.lstrip().startswith('<'):
print line
| 2.84375 | 3 |
Data Structures/Tree.py | Royals-Aeo-Gamer/MyPyMods | 0 | 6058 | class TreeNode:
def __init__(self, name, data, parent=None):
self.name = name
self.parent = parent
self.data = data
self.childs = {}
def add_child(self, name, data):
self.childs.update({name:(type(self))(name, data, self)})
    def rm_branch(self, name, ancestors_n: list = None):
        focus = self.childs
        while True:
            if ancestors_n is None or ancestors_n == self.name:
                del focus[name]
                break
            elif ancestors_n[0] in focus:
                focus = (focus[ancestors_n[0]]).childs
                del ancestors_n[0]
            elif name in focus and ancestors_n is None:
                del focus[name]
                break
            else:
                print(focus)
                raise NameError(f"couldn't find branch {ancestors_n[0]}")
def __getitem__(self, item):
return self.childs[item]
def __setitem__(self, key, value):
self.childs[key] = value
    def __delitem__(self, key, ancestors_n: list = None):
        self.rm_branch(key, ancestors_n)
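# A short usage sketch of the structure above (illustrative only):
if __name__ == "__main__":
    root = TreeNode("root", data=0)
    root.add_child("a", data=1)
    root["a"].add_child("b", data=2)
    print(root["a"]["b"].data)  # -> 2
    root.rm_branch("a")         # removes the whole "a" branch from the root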
| 3.140625 | 3 |
config.py | ggiaquin16/GroupProject19 | 0 | 6059 | api_key = "<KEY>"
mongo_url = 'mongodb://localhost:27017'
mongo_db = 'CarPopularity'
mongo_collections = ['CarSalesByYear', 'PopularCarsByRegion']
years_data = ['2019', '2018', '2017', '2016', '2015']
test_mode = True | 1.710938 | 2 |
pytorch_ares/pytorch_ares/attack_torch/mim.py | thu-ml/realsafe | 107 | 6060 | <gh_stars>100-1000
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from pytorch_ares.attack_torch.utils import loss_adv
class MIM(object):
    '''Momentum Iterative Method (MIM) attack'''
def __init__(self, net, epsilon, p, stepsize, steps, decay_factor, data_name,target, loss, device):
self.epsilon = epsilon
self.p = p
self.net = net
self.decay_factor = decay_factor
self.stepsize = stepsize
self.target = target
self.steps = steps
self.loss = loss
self.data_name = data_name
self.device = device
if self.data_name=="cifar10" and self.target:
raise AssertionError('cifar10 dont support targeted attack')
def forward(self, image, label, target_labels):
image, label = image.to(self.device), label.to(self.device)
if target_labels is not None:
target_labels = target_labels.to(self.device)
batchsize = image.shape[0]
advimage = image
momentum = torch.zeros_like(image).detach()
        # iterative FGSM with momentum to get the adversarial example
for i in range(self.steps):
advimage = advimage.clone().detach().requires_grad_(True) # clone the advimage as the next iteration input
netOut = self.net(advimage)
loss = loss_adv(self.loss, netOut, label, target_labels, self.target, self.device)
grad = torch.autograd.grad(loss, [advimage])[0].detach()
grad_norm = torch.norm(nn.Flatten()(grad), p=1, dim=1)
grad = grad / grad_norm.view([-1]+[1]*(len(grad.shape)-1))
grad = grad + momentum*self.decay_factor
momentum = grad
if self.p==np.inf:
updates = grad.sign()
else:
normVal = torch.norm(grad.view(batchsize, -1), self.p, 1)
updates = grad/normVal.view(batchsize, 1, 1, 1)
updates = updates*self.stepsize
advimage = advimage+updates
# project the disturbed image to feasible set if needed
delta = advimage-image
if self.p==np.inf:
delta = torch.clamp(delta, -self.epsilon, self.epsilon)
else:
normVal = torch.norm(delta.view(batchsize, -1), self.p, 1)
mask = normVal<=self.epsilon
scaling = self.epsilon/normVal
scaling[mask] = 1
delta = delta*scaling.view(batchsize, 1, 1, 1)
advimage = image+delta
advimage = torch.clamp(advimage, 0, 1)#cifar10(-1,1)
        return advimage
| 2.375 | 2 |
src/utils/templatetags/menubutton.py | pwelzel/bornhack-website | 0 | 6061 | from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def menubuttonclass(context, appname):
if appname == context['request'].resolver_match.func.view_class.__module__.split(".")[0]:
return "btn-primary"
else:
return "btn-default"
| 1.929688 | 2 |
wiki/tests.py | Prones94/Make_Wiki | 0 | 6062 | <reponame>Prones94/Make_Wiki
from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
from django.utils.text import slugify
# Create your tests here.
class WikiPageTest(TestCase):
def test_edit(self):
user = User.objects.create_user(username='admin', password='<PASSWORD>')
self.client.login(username='admin', password='<PASSWORD>')
page = Page.objects.create(title="My Test Page", content="test", author=user)
page.save()
edit = {
'title': 'testing title',
'content': 'testing content'
}
response = self.client.post('/%s/' %slugify(page.title), edit)
updated = Page.objects.get(title = edit['title'])
self.assertEqual(response.status_code, 302)
self.assertEqual(updated.title, edit['title'])
def test_page(self):
user = User.objects.create_user(username='admin', password='<PASSWORD>')
self.client.login(username='admin', password='<PASSWORD>')
page = Page.objects.create(title="My Test Page", content="test", author=user)
page.save()
response = self.client.get('/%s/' %slugify(page.title))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'test')
def test_create(self):
user = User.objects.create_user(username='admin', password='<PASSWORD>')
self.client.login(username='admin', password='<PASSWORD>')
new = {
'title': 'testing title',
'content': 'testing content'
}
response = self.client.post('/wiki/new/', new)
updated = Page.objects.get(title = new['title'])
self.assertEqual(response.status_code, 302)
self.assertEqual(updated.title, new['title'])
'''
Steps to writing a test
1. Set up your test data
2. Make a request (GET, POST)
3a. Check if response matches what we expect
3b. Check if database matches what we expect
''' | 2.734375 | 3 |
birdy/__init__.py | tkiapril/birdy | 1 | 6063 | __author__ = '<NAME> <<EMAIL>>'
__version__ = '0.2'
| 1.023438 | 1 |
ares/defense/randomization.py | KuanKuanQAQ/ares | 206 | 6064 | <reponame>KuanKuanQAQ/ares<gh_stars>100-1000
''' The randomization defense method, which applies random rescaling and padding to the input. '''
import tensorflow as tf
from ares.defense.input_transformation import input_transformation
def randomize(xs, scale_min=0.875, pad_value=0.0):
''' Apply random rescaling and padding to xs.
:param xs: A batch of inputs for some classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
:return: A new tensor with same shape and dtype as xs.
'''
ratio = tf.random.uniform((), minval=scale_min, maxval=1.0)
height, width = tf.cast(xs.shape[1].value * ratio, tf.int32), tf.cast(xs.shape[2].value * ratio, tf.int32)
xs_rescaled = tf.image.resize(xs, (height, width), method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True, preserve_aspect_ratio=False)
height_rem, width_rem = xs.shape[1].value - height, xs.shape[2].value - width
pad_left = tf.random_uniform((), 0, width_rem, dtype=tf.int32)
pad_right = width_rem - pad_left
pad_top = tf.random_uniform((), 0, height_rem, dtype=tf.int32)
pad_bottom = height_rem - pad_top
xs_padded = tf.pad(xs_rescaled, [[0, 0], [pad_top, pad_bottom], [pad_left, pad_right], [0, 0]],
constant_values=pad_value)
xs_padded.set_shape(xs.shape)
return xs_padded
def randomization(scale_min=0.875, pad_value=0.0):
''' A decorator to apply randomize rescaling and padding to input of the classifier.
:param scale_min: The random rescaling rate would be chosen between ``scale_min`` and 1.0.
:param pad_value: ``constant_values`` parameter for the ``tf.pad`` method.
'''
def args_fn(_):
return (scale_min, pad_value)
def kwargs_fn(_):
return {}
return lambda rs_class: input_transformation(rs_class, randomize, args_fn, kwargs_fn)
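# A minimal sketch of using the transform directly (assumes a TF1-style 4-D image
# placeholder with static height/width; the decorator form instead wraps an ares
# classifier class via input_transformation):
#
#   xs_ph = tf.placeholder(tf.float32, shape=(None, 32, 32, 3))
#   xs_rand = randomize(xs_ph, scale_min=0.875, pad_value=0.0)
#   # xs_rand keeps the shape and dtype of xs_ph, with a random rescale + pad applied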
| 2.921875 | 3 |
annotate/backend/admin.py | hopeogbons/image-annotation | 0 | 6065 | <gh_stars>0
from django.contrib import admin
from annotate.backend.models import Image, Annotation
admin.site.register(Image)
admin.site.register(Annotation)
| 1.367188 | 1 |
34. Find First and Last Position of Element in Sorted Array/main.py | Competitive-Programmers-Community/LeetCode | 2 | 6066 | class Solution:
def searchRange(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
if not nums:
return [-1, -1]
low = 0
high = len(nums) - 1
f = 0
while low<=high:
mid = (low+high)//2
if nums[mid] == target:
f = 1
break
elif nums[mid] < target:
low = mid + 1
elif nums[mid] > target:
high = mid - 1
i, j = mid, mid
while i>=1 and nums[i-1] == target:
i = i-1
while j<len(nums)-1 and nums[j+1] == target:
j = j+1
if f == 1:
return [i, j]
else:
return [-1, -1]
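# Quick illustrative checks for the method above:
# Solution().searchRange([5, 7, 7, 8, 8, 10], 8) -> [3, 4]
# Solution().searchRange([5, 7, 7, 8, 8, 10], 6) -> [-1, -1]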
| 3.34375 | 3 |
c/create.py | LMS57/domato | 0 | 6067 | <gh_stars>0
data = open('./original').readlines()
alphabet = {
"<":"lt",
">":"gt",
"=":"=",
"-":'-',
"+":"+",
"-":"-",
"~":"~",
"!":"ex",
"%":"%",
"^":"^",
"&":"&",
"*":"*",
"(":"(",
")":"right_paran",
"[":"[",
"]":"]",
"{":"{",
"}":"}",
"[":"[",
"]":"]",
"|":"|",
";":";",
":":":",
",":",",
".":".",
"?":"?",
"/":"/",
}
def item(y):
if "'" in y:
tmp = y.split("'")[1]
test = 0
for x in alphabet:
if x in tmp:
test = 1
if test:
final = ''
for x in tmp:
final += item(alphabet[x])
return final
else:
return item(tmp)
else:
return "<"+y+">"
start = 0
current = ""
space = "<space>"
declared = []
referenced = []
for x in data:
x = x.strip()
if x == "":
continue
if '%%' == x:
start = 1
continue
elif start != 1:
continue
if x == "test":
break;
x = x.split(' ')
if len(x) == 1:#item declaration or end
if x[0] == ';':
current = ""
else:
current = x[0]
declared.append(item(x[0]))
print ""
else:
x = x[1:]
tmp = item(current)+'\t=\t'
for y in range(len(x)):
referenced.append(item(x[y]))
tmp += item(x[y])
if y != len(x)-1 and "'" not in x[y+1] and "'" not in x[y]:
tmp+=space
print tmp
referenced = set(referenced)
final = []
for x in referenced:
if x not in declared:
final.append(x)
print ""
for x in final:
tmp = x+'\t=\t'
x = x[1:-1]
print tmp + x.lower()
| 2.984375 | 3 |
AppServer/google/appengine/api/memcache/memcache_distributed.py | isabella232/scale-safe | 3 | 6068 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Non-stub version of the memcache API, keeping all data in memcached.
Uses the python-memcached library to interface with memcached.
"""
import base64
import cPickle
import logging
import memcache
import os
import time
from google.appengine.api import apiproxy_stub
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.runtime import apiproxy_errors
MemcacheSetResponse = memcache_service_pb.MemcacheSetResponse
MemcacheSetRequest = memcache_service_pb.MemcacheSetRequest
MemcacheIncrementRequest = memcache_service_pb.MemcacheIncrementRequest
MemcacheIncrementResponse = memcache_service_pb.MemcacheIncrementResponse
MemcacheDeleteResponse = memcache_service_pb.MemcacheDeleteResponse
from google.appengine.api.memcache import TYPE_INT
from google.appengine.api.memcache import TYPE_LONG
class MemcacheService(apiproxy_stub.APIProxyStub):
"""Python only memcache service.
This service keeps all data in any external servers running memcached.
"""
# The memcached default port.
MEMCACHE_PORT = "11211"
# An AppScale file which has a list of IPs running memcached.
APPSCALE_MEMCACHE_FILE = "/etc/appscale/memcache_ips"
# The minimum frequency by which memcache clients will update their list of
  # servers that they connect to (which can change if AppScale scales up or
# down).
UPDATE_WINDOW = 60 # seconds
def __init__(self, gettime=time.time, service_name='memcache'):
"""Initializer.
Args:
gettime: time.time()-like function used for testing.
service_name: Service name expected for all calls.
"""
super(MemcacheService, self).__init__(service_name)
self._gettime = gettime
self._memcache = None
self.setupMemcacheClient()
def setupMemcacheClient(self):
""" Sets up the memcache client. """
if os.path.exists(self.APPSCALE_MEMCACHE_FILE):
memcache_file = open(self.APPSCALE_MEMCACHE_FILE, "r")
all_ips = memcache_file.read().split("\n")
memcache_file.close()
else:
all_ips = ['localhost']
memcaches = [ip + ":" + self.MEMCACHE_PORT for ip in all_ips if ip != '']
memcaches.sort()
self._memcache = memcache.Client(memcaches, debug=0)
def _Dynamic_Get(self, request, response):
"""Implementation of gets for memcache.
Args:
request: A MemcacheGetRequest protocol buffer.
response: A MemcacheGetResponse protocol buffer.
"""
for key in set(request.key_list()):
internal_key = self._GetKey(request.name_space(), key)
value = self._memcache.get(internal_key)
if value is None:
continue
flags = 0
stored_flags, cas_id, stored_value = cPickle.loads(value)
flags |= stored_flags
item = response.add_item()
item.set_key(key)
item.set_value(stored_value)
item.set_flags(flags)
if request.for_cas():
item.set_cas_id(cas_id)
def _Dynamic_Set(self, request, response):
"""Implementation of sets for memcache.
Args:
request: A MemcacheSetRequest.
response: A MemcacheSetResponse.
"""
for item in request.item_list():
key = self._GetKey(request.name_space(), item.key())
set_policy = item.set_policy()
old_entry = self._memcache.get(key)
cas_id = 0
if old_entry:
_, cas_id, _ = cPickle.loads(old_entry)
set_status = MemcacheSetResponse.NOT_STORED
if ((set_policy == MemcacheSetRequest.SET) or
(set_policy == MemcacheSetRequest.ADD and old_entry is None) or
(set_policy == MemcacheSetRequest.REPLACE and
old_entry is not None)):
if (old_entry is None or set_policy == MemcacheSetRequest.SET):
set_status = MemcacheSetResponse.STORED
elif (set_policy == MemcacheSetRequest.CAS and item.for_cas() and
item.has_cas_id()):
if old_entry is None:
set_status = MemcacheSetResponse.NOT_STORED
elif cas_id != item.cas_id():
set_status = MemcacheSetResponse.EXISTS
else:
set_status = MemcacheSetResponse.STORED
if (set_status == MemcacheSetResponse.STORED
or set_policy == MemcacheSetRequest.REPLACE):
set_value = cPickle.dumps(
[item.flags(), cas_id + 1, item.value()])
if set_policy == MemcacheSetRequest.REPLACE:
self._memcache.replace(key, set_value)
else:
self._memcache.set(key, set_value, item.expiration_time())
response.add_set_status(set_status)
def _Dynamic_Delete(self, request, response):
"""Implementation of delete in memcache.
Args:
request: A MemcacheDeleteRequest protocol buffer.
response: A MemcacheDeleteResponse protocol buffer.
"""
for item in request.item_list():
key = self._GetKey(request.name_space(), item.key())
entry = self._memcache.get(key)
delete_status = MemcacheDeleteResponse.DELETED
if entry is None:
delete_status = MemcacheDeleteResponse.NOT_FOUND
else:
self._memcache.delete(key)
response.add_delete_status(delete_status)
def _Increment(self, namespace, request):
"""Internal function for incrementing from a MemcacheIncrementRequest.
Args:
namespace: A string containing the namespace for the request,
if any. Pass an empty string if there is no namespace.
request: A MemcacheIncrementRequest instance.
Returns:
An integer or long if the offset was successful, None on error.
"""
if not request.delta():
return None
cas_id = 0
key = self._GetKey(namespace, request.key())
value = self._memcache.get(key)
if value is None:
if not request.has_initial_value():
return None
flags, cas_id, stored_value = (
TYPE_INT, cas_id, str(request.initial_value()))
else:
flags, cas_id, stored_value = cPickle.loads(value)
if flags == TYPE_INT:
new_value = int(stored_value)
elif flags == TYPE_LONG:
new_value = long(stored_value)
if request.direction() == MemcacheIncrementRequest.INCREMENT:
new_value += request.delta()
elif request.direction() == MemcacheIncrementRequest.DECREMENT:
new_value -= request.delta()
new_stored_value = cPickle.dumps([flags, cas_id + 1, str(new_value)])
try:
self._memcache.cas(key, new_stored_value)
except Exception, e:
logging.error(str(e))
return None
return new_value
def _Dynamic_Increment(self, request, response):
"""Implementation of increment for memcache.
Args:
request: A MemcacheIncrementRequest protocol buffer.
response: A MemcacheIncrementResponse protocol buffer.
"""
new_value = self._Increment(request.name_space(), request)
if new_value is None:
raise apiproxy_errors.ApplicationError(
memcache_service_pb.MemcacheServiceError.UNSPECIFIED_ERROR)
response.set_new_value(new_value)
def _Dynamic_BatchIncrement(self, request, response):
"""Implementation of batch increment for memcache.
Args:
request: A MemcacheBatchIncrementRequest protocol buffer.
response: A MemcacheBatchIncrementResponse protocol buffer.
"""
namespace = request.name_space()
for request_item in request.item_list():
new_value = self._Increment(namespace, request_item)
item = response.add_item()
if new_value is None:
item.set_increment_status(MemcacheIncrementResponse.NOT_CHANGED)
else:
item.set_increment_status(MemcacheIncrementResponse.OK)
item.set_new_value(new_value)
def _Dynamic_FlushAll(self, request, response):
"""Implementation of MemcacheService::FlushAll().
Args:
request: A MemcacheFlushRequest.
response: A MemcacheFlushResponse.
"""
self._memcache.flush_all()
def _Dynamic_Stats(self, request, response):
"""Implementation of MemcacheService::Stats().
Args:
request: A MemcacheStatsRequest.
response: A MemcacheStatsResponse.
"""
stats = response.mutable_stats()
num_servers = 0
hits_total = 0
misses_total = 0
byte_hits_total = 0
items_total = 0
bytes_total = 0
time_total = 0
def get_stats_value(stats_dict, key, _type=int):
""" Gets statisical values and makes sure the key is in the dict. """
if key not in stats_dict:
logging.warn("No stats for key '%s'." % key)
return _type(stats_dict.get(key, '0'))
for server, server_stats in self._memcache.get_stats():
num_servers += 1
hits_total += get_stats_value(server_stats, 'get_hits')
misses_total += get_stats_value(server_stats, 'get_misses')
byte_hits_total += get_stats_value(server_stats, 'bytes_read')
items_total += get_stats_value(server_stats, 'curr_items')
bytes_total += get_stats_value(server_stats, 'bytes')
time_total += get_stats_value(server_stats, 'time', float)
stats.set_hits(hits_total)
stats.set_misses(misses_total)
stats.set_byte_hits(byte_hits_total)
stats.set_items(items_total)
stats.set_bytes(bytes_total)
# With the Python 2.7 GAE runtime, it expects all fields here to be ints.
# Python 2.5 was fine with this being a float, so callers in that runtime
# may not be expecting an int.
stats.set_oldest_item_age(int(time.time() - time_total / num_servers))
def _GetKey(self, namespace, key):
"""Used to get the Memcache key. It is encoded because the sdk
allows special characters but the Memcache client does not.
Args:
namespace: The namespace as provided by the application.
key: The key as provided by the application.
Returns:
A base64 string __{appname}__{namespace}__{key}
"""
appname = os.environ['APPNAME']
internal_key = appname + "__" + namespace + "__" + key
return base64.b64encode(internal_key)
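# Illustrative example (hypothetical app name): with APPNAME=guestbook, namespace
# "ns" and key "counter", the internal key is base64.b64encode("guestbook__ns__counter"),
# which keeps special characters in app-supplied keys safe for the memcached client.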
| 1.984375 | 2 |
inflateutils/exportmesh.py | arpruss/inflatemesh | 8 | 6069 | <filename>inflateutils/exportmesh.py
from struct import pack
from .vector import *
from .formatdecimal import decimal
from numbers import Number
import os
import sys
try:
basestring
except:
basestring = str
def isColorTriangleList(polys):
return isinstance(polys[0][1][0][0], Number)
def toPolyhedra(polys):
if isColorTriangleList(polys):
return [ (polys[0][0], list(face for rgb,face in polys)) ]
else:
return polys
def toMesh(polys):
if isColorTriangleList(polys):
return polys
else:
output = []
for rgb,polyhedron in polys:
for face in polyhedron:
output.append((rgb,face))
return output
def describeColor(c):
if c is None:
return "undef";
elif isinstance(c, str):
return c
else:
return "[%s,%s,%s]" % tuple(decimal(component) for component in c)
def toSCADModule(polys, moduleName, digitsAfterDecimal=9, colorOverride=None):
"""
INPUT:
polys: list of (color,polyhedra) pairs (counterclockwise triangles), or a list of (color,triangle) pairs (TODO: currently uses first color for all in latter case)
moduleName: OpenSCAD module name
OUTPUT: string with OpenSCAD code implementing the polys
"""
polys = toPolyhedra(polys)
scad = []
scad.append("module " +moduleName+ "() {")
for rgb,poly in polys:
if colorOverride != "" and (colorOverride or rgb):
line = " color(%s) " % describeColor(colorOverride if colorOverride else tuple(min(max(c,0.),1.0) for c in rgb))
else:
line = " "
pointsDict = {}
i = 0
line += "polyhedron(points=["
points = []
for face in poly:
for v in reversed(face):
if tuple(v) not in pointsDict:
pointsDict[tuple(v)] = i
points.append( ("[%s,%s,%s]") % tuple(decimal(x,digitsAfterDecimal) for x in v) )
i += 1
line += ",".join(points)
line += "], faces=["
line += ",".join( "[" + ",".join(str(pointsDict[tuple(v)]) for v in reversed(face)) + "]" for face in poly ) + "]"
line += ");"
scad.append(line)
scad.append("}\n")
return "\n".join(scad)
def saveSCAD(filename, polys, moduleName="object1", quiet=False):
"""
filename: filename to write OpenSCAD file
polys: list of (color,polyhedra) pairs (counterclockwise triangles)
moduleName: OpenSCAD module name
quiet: give no status message if set
"""
if not quiet: sys.stderr.write("Saving %s\n" % filename)
if filename:
with open(filename, "w") as f:
f.write(toSCADModule(polys, moduleName))
f.write("\n" + moduleName + "();\n")
else:
sys.stdout.write(toSCADModule(polys, moduleName))
sys.stdout.write("\n" + moduleName + "();\n")
def saveSTL(filename, mesh, swapYZ=False, quiet=False):
"""
filename: filename to save STL file
mesh: list of (color,triangle) pairs (counterclockwise)
swapYZ: should Y/Z axes be swapped?
quiet: give no status message if set
"""
mesh = toMesh(mesh)
if not quiet: sys.stderr.write("Saving %s\n" % filename)
minY = float("inf")
minVector = Vector(float("inf"),float("inf"),float("inf"))
numTriangles = 0
if swapYZ:
matrix = Matrix( (1,0,0), (0,0,-1), (0,1,0) )
else:
matrix = Matrix.identity(3)
mono = True
for rgb,triangle in mesh:
if rgb is not None:
mono = False
numTriangles += 1
for vertex in triangle:
vertex = matrix*vertex
minVector = Vector(min(minVector[i], vertex[i]) for i in range(3))
minVector -= Vector(0.001,0.001,0.001) # make sure all STL coordinates are strictly positive as per Wikipedia
def writeSTL(write):
write(pack("80s",b''))
write(pack("<I",numTriangles))
for rgb,tri in mesh:
if mono:
color = 0
else:
if rgb is None:
rgb = (255,255,255)
else:
rgb = tuple(min(255,max(0,int(0.5 + 255 * comp))) for comp in rgb)
color = 0x8000 | ( (rgb[0] >> 3) << 10 ) | ( (rgb[1] >> 3) << 5 ) | ( (rgb[2] >> 3) << 0 )
normal = (Vector(tri[1])-Vector(tri[0])).cross(Vector(tri[2])-Vector(tri[0])).normalize()
write(pack("<3f", *(matrix*normal)))
for vertex in tri:
write(pack("<3f", *(matrix*(vertex-minVector))))
write(pack("<H", color))
if filename:
with open(filename, "wb") as f:
writeSTL(f.write)
else:
if sys.platform == "win32":
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
writeSTL(lambda data : os.write(sys.stdout.fileno(), data))
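# A tiny end-to-end sketch (illustrative values only; Vector comes from .vector above):
#   tri = [Vector(0, 0, 0), Vector(1, 0, 0), Vector(0, 1, 0)]
#   mesh = [((1.0, 0.0, 0.0), tri)]        # one red counterclockwise triangle
#   saveSTL("triangle.stl", mesh)
#   saveSCAD("triangle.scad", mesh, moduleName="object1")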
| 2.796875 | 3 |
Assignment1/Identification/match_module.py | arywatt/FDS_2020_2021 | 0 | 6070 | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import histogram_module
import dist_module
def rgb2gray(rgb):
r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
# model_images - list of file names of model images
# query_images - list of file names of query images
#
# dist_type - string which specifies distance type: 'chi2', 'l2', 'intersect'
# hist_type - string which specifies histogram type: 'grayvalue', 'dxdy', 'rgb', 'rg'
#
# note: use functions 'get_dist_by_name', 'get_hist_by_name' and 'is_grayvalue_hist' to obtain
# handles to distance and histogram functions, and to find out whether histogram function
# expects grayvalue or color image
def find_best_match(model_images, query_images, dist_type, hist_type, num_bins):
hist_isgray = histogram_module.is_grayvalue_hist(hist_type)
model_hists = compute_histograms(model_images, hist_type, hist_isgray, num_bins)
query_hists = compute_histograms(query_images, hist_type, hist_isgray, num_bins)
D = np.zeros((len(model_images), len(query_images)))
# compute distance for each couple of query - image
for j, query in enumerate(query_hists):
for i, model in enumerate(model_hists):
D[i, j] = dist_module.get_dist_by_name(model, query, dist_type)
best_match = [] # to save best matches
# for each query , find best model
for j in range(len(query_images)):
query_matches = D[:, j] # get query columns from matrix
argmin = np.argmin(query_matches) # get index with minimum distance
best_match.append(argmin) # save index for query
best_match = np.array(best_match) # array of best match for each query
return best_match, D
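# Example call (a sketch; the file lists are placeholders):
#   best_match, D = find_best_match(model_images=['model/obj1.png', 'model/obj2.png'],
#                                   query_images=['query/q1.png'],
#                                   dist_type='intersect', hist_type='rg', num_bins=5)
#   # best_match[j] is the index of the model image closest to query image j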
def compute_histograms(image_list, hist_type, hist_isgray, num_bins):
image_hist = []
    # Compute a histogram for each image and append it to image_hist
# ... (your code here)
for img in image_list:
img_color = np.array(Image.open(img))
# if hist is gray type we use gray image
        # otherwise rgb image
img_to_process = rgb2gray(img_color) if hist_isgray else img_color.astype('double')
# We compute histogram for image
hist = histogram_module.get_hist_by_name(img=img_to_process,
num_bins_gray=num_bins,
hist_name=hist_type
)
image_hist.append(hist)
return image_hist
# For each image file from 'query_images' find and visualize the 5 nearest images from 'model_image'.
#
# Note: use the previously implemented function 'find_best_match'
# Note: use subplot command to show all the images in the same Python figure, one row per query image
def show_neighbors(model_images, query_images, dist_type, hist_type, num_bins):
plt.figure()
num_nearest = 5 # show the top-5 neighbors
# ... (your code here)
_, D = find_best_match(model_images=model_images,
query_images=query_images,
dist_type=dist_type,
hist_type=hist_type,
num_bins=num_bins
)
Q = len(query_images)
pos = 0
for j in range(Q):
query_matches = D[:, j]
best_args = np.argsort(query_matches)[:num_nearest]
query_img = query_images[j]
pos += 1
plt.subplot(Q, 6, pos);
plt.imshow(np.array(Image.open(query_img)), vmin=0, vmax=255);
plt.title(f'Q{j}')
for ind in range(len(best_args)):
pos += 1
model_ind = best_args[ind]
model_img = model_images[model_ind]
plt.subplot(Q, 6, pos);
plt.imshow(np.array(Image.open(model_img)), vmin=0, vmax=255);
plt.title(f'MO.{model_ind}')
plt.show()
| 2.984375 | 3 |
dycco/__main__.py | rojalator/dycco | 0 | 6071 | <reponame>rojalator/dycco<filename>dycco/__main__.py
import argparse
import logging
import sys
from .dycco import document
def main(paths, output_dir, use_ascii:bool, escape_html:bool, single_file:bool):
try:
document(paths, output_dir, use_ascii, escape_html, single_file)
except IOError as e:
logging.error('Unable to open file: %s', e)
return 1
except Exception as e:
logging.error('An error occurred: %s', e)
return 1
else:
return 0
if __name__ == '__main__':
arg_parser = argparse.ArgumentParser(prog='dycco', description='Literate-style documentation generator.')
arg_parser.add_argument('source_file', nargs='+', default=sys.stdin, help='Source files to document')
arg_parser.add_argument('-o', '--output-dir', default='docs', help='Output directory (will be created if necessary)')
arg_parser.add_argument('-a', '--asciidoc3', action='store_true', default=False, dest='use_ascii',
help='Process with asciidoc3 instead of markdown (you will have to install asciidoc3, of course)')
arg_parser.add_argument('-e', '--escape-html', action='store_true', default=False, dest='escape_html',
help='Run the documentation through html.escape() before markdown or asciidoc3')
arg_parser.add_argument('-f', '--single-file', action='store_true', default=False, dest='single_file',
help='Just produce a .md or .adoc file in single-column to be processed externally')
args = arg_parser.parse_args()
sys.exit(main(args.source_file, args.output_dir, args.use_ascii, args.escape_html, args.single_file))
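# Typical invocation (illustrative paths):
#   python -m dycco src/*.py -o docs --escape-html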
| 2.734375 | 3 |
jumpy/jumpy/ndarray.py | rghwer/testdocs | 13,006 | 6072 | ################################################################################
# Copyright (c) 2015-2018 Skymind, Inc.
#
# This program and the accompanying materials are made available under the
# terms of the Apache License, Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# SPDX-License-Identifier: Apache-2.0
################################################################################
from .java_classes import *
import numpy as np
import ctypes
import warnings
native_ops = NativeOpsHolder.getInstance().getDeviceNativeOps()
# DATA TYPE MANAGEMENT
DOUBLE = DataType.DOUBLE
FLOAT = DataType.FLOAT
HALF = DataType.HALF
LONG = DataType.LONG
INT = DataType.INT
SHORT = DataType.SHORT
UBYTE = DataType.UBYTE
BYTE = DataType.BYTE
BOOL = DataType.BOOL
UTF8 = DataType.UTF8
COMPRESSED = DataType.COMPRESSED
UNKNOWN = DataType.UNKNOWN
SUPPORTED_JAVA_DTYPES = [
DOUBLE,
FLOAT,
HALF,
LONG,
INT,
SHORT,
BOOL
#UTF8
]
SUPPORTED_PYTHON_DTYPES = [
np.float64,
np.float32,
np.float16,
np.int64,
np.int32,
np.int16,
np.bool_
#np.str_
]
_PY2J = {SUPPORTED_PYTHON_DTYPES[i] : SUPPORTED_JAVA_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
_J2PY = {SUPPORTED_JAVA_DTYPES[i] : SUPPORTED_PYTHON_DTYPES[i] for i in range(len(SUPPORTED_JAVA_DTYPES))}
def _dtype_py2j(dtype):
if isinstance(dtype, str):
dtype = np.dtype(dtype).type
elif isinstance(dtype, np.dtype):
dtype = dtype.type
jtype = _PY2J.get(dtype)
if jtype is None:
raise NotImplementedError("Unsupported type: " + dtype.name)
return jtype
def _dtype_j2py(dtype):
pytype = _J2PY.get(dtype)
if pytype is None:
raise NotImplementedError("Unsupported type: " + (str(dtype)))
return pytype
def set_context_dtype(dtype):
'''
Sets the dtype for nd4j
# Arguments
dtype: 'float' or 'double'
'''
dtype_map = {
'float32': 'float',
'float64': 'double'
}
dtype = dtype_map.get(dtype, dtype)
if dtype not in ['float', 'double']:
raise ValueError("Invalid dtype '{}'. Available dtypes are 'float' and 'double'.".format(dtype))
dtype_ = DataTypeUtil.getDtypeFromContext(dtype)
DataTypeUtil.setDTypeForContext(dtype_)
if get_context_dtype() != dtype:
warnings.warn("Can not set context dtype now. Set it at the beginning of your program.")
def get_context_dtype():
'''
Returns the nd4j dtype
'''
dtype = DataTypeUtil.getDtypeFromContext()
return DataTypeUtil.getDTypeForName(dtype)
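# Example (illustrative): pin the nd4j context to single precision early in the
# program, before any arrays are created.
#   set_context_dtype('float32')   # 'float32'/'float64' are mapped to 'float'/'double'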
_refs = []
def _from_numpy(np_array):
'''
Convert numpy array to nd4j array
'''
pointer_address, _ = np_array.__array_interface__['data']
_refs.append(np_array)
pointer = native_ops.pointerForAddress(pointer_address)
size = np_array.size
pointer.limit(size)
jdtype = _dtype_py2j(np_array.dtype)
'''
mapping = {
DOUBLE: DoublePointer,
FLOAT: FloatPointer,
HALF: HalfPointer,
LONG: LongPointer,
INT: IntPointer,
SHORT: ShortPointer,
BOOL: BoolPointer
}
pc = mapping[jdtype]
#pointer = pc(pointer)
'''
buff = Nd4j.createBuffer(pointer, size, jdtype)
assert buff.address() == pointer_address
_refs.append(buff)
elem_size = buff.getElementSize()
assert elem_size == np_array.dtype.itemsize
strides = np_array.strides
    strides = [dim // elem_size for dim in strides]  # element strides (integers) from byte strides
shape = np_array.shape
nd4j_array = Nd4j.create(buff, shape, strides, 0)
assert buff.address() == nd4j_array.data().address()
return nd4j_array
def _to_numpy(nd4j_array):
'''
Convert nd4j array to numpy array
'''
buff = nd4j_array.data()
address = buff.pointer().address()
dtype = nd4j_array.dataType().toString()
mapping = {
'DOUBLE': ctypes.c_double,
'FLOAT': ctypes.c_float,
'HALF': ctypes.c_short,
'LONG': ctypes.c_long,
'INT': ctypes.c_int,
'SHORT': ctypes.c_short,
'BOOL': ctypes.c_bool
}
Pointer = ctypes.POINTER(mapping[dtype])
pointer = ctypes.cast(address, Pointer)
np_array = np.ctypeslib.as_array(pointer, tuple(nd4j_array.shape()))
return np_array
def _indarray(x):
typ = type(x)
if typ is INDArray:
return x
elif typ is ndarray:
return x.array
elif 'numpy' in str(typ):
return _from_numpy(x)
elif typ in (list, tuple):
return _from_numpy(np.array(x))
elif typ in (int, float):
return Nd4j.scalar(x)
else:
raise Exception('Data type not understood :' + str(typ))
def _nparray(x):
typ = type(x)
if typ is INDArray:
return ndarray(x).numpy()
elif typ is ndarray:
return x.numpy()
elif 'numpy' in str(typ):
return x
elif typ in (list, tuple):
return np.array(x)
elif typ in (int, float):
return np.array(x)
else:
raise Exception('Data type not understood :' + str(typ))
def broadcast_like(y, x):
xs = x.shape()
ys = y.shape()
if xs == ys:
return y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(ys)
ny = nx
elif ny > nx:
        raise Exception('Unable to broadcast shapes ' + str(_xs) + ' and ' + str(_ys))
yt = []
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
yt.append(1)
elif xd == 1:
            raise Exception('Unable to broadcast shapes ' + str(_xs) + ' and ' + str(_ys))
elif yd == 1:
yt.append(xd)
rep_y = True
else:
            raise Exception('Unable to broadcast shapes ' + str(_xs) + ' and ' + str(_ys))
if rep_y:
y = y.repmat(*yt)
return y
def broadcast(x, y):
xs = x.shape()
ys = y.shape()
if xs == ys:
return x, y
_xs = tuple(xs)
_ys = tuple(ys)
nx = len(xs)
ny = len(ys)
if nx > ny:
diff = nx - ny
ys = ([1] * diff) + ys
y = y.reshape(*ys)
ny = nx
elif ny > nx:
diff = ny - nx
xs = ([1] * diff) + xs
x = x.reshape(*xs)
nx = ny
xt = []
yt = []
rep_x = False
rep_y = False
for xd, yd in zip(xs, ys):
if xd == yd:
xt.append(1)
yt.append(1)
elif xd == 1:
xt.append(yd)
yt.append(1)
rep_x = True
elif yd == 1:
xt.append(1)
yt.append(xd)
rep_y = True
else:
            raise Exception('Unable to broadcast shapes ' + str(_xs) + ' and ' + str(_ys))
if rep_x:
x = Nd4j.tile(x, *xt)
if rep_y:
try:
y = Nd4j.tile(y, *yt)
except:
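            # retry once, unchanged: kept from the original code as a workaround for sporadic Nd4j.tile failures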
y = Nd4j.tile(y, *yt)
return x, y
class ndarray(object):
def __init__(self, data, dtype=None):
# we ignore dtype for now
typ = type(data)
if 'nd4j' in typ.__name__:
# Note that we don't make a copy here
self.array = data
elif typ is ndarray:
self.array = data.array.dup()
else:
if typ is not np.ndarray:
data = np.array(data)
self.array = _from_numpy(data)
def numpy(self):
try:
return self.np_array
except AttributeError:
self.np_array = _to_numpy(self.array)
return self.np_array
@property
def size(self):
return self.array.length()
@property
def shape(self):
return tuple(self.array.shape())
@shape.setter
def shape(self, value):
arr = self.reshape(value)
self.array = arr.array
@property
def ndim(self):
return len(self.array.shape())
def __getitem__(self, key):
return ndarray(self.numpy()[key])
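        # NOTE: indexing is delegated to numpy above; the nd4j-based code below is unreachable
        # (kept from an earlier implementation).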
if type(key) is int:
return ndarray(self.array.get(NDArrayIndex.point(key)))
if type(key) is slice:
start = key.start
stop = key.stop
step = key.step
if start is None:
start = 0
if stop is None:
shape = self.array.shape()
if shape[0] == 1:
stop = shape[1]
else:
stop = shape[0]
if stop - start <= 0:
return None
if step is None or step == 1:
return ndarray(self.array.get(NDArrayIndex.interval(start, stop)))
else:
return ndarray(self.array.get(NDArrayIndex.interval(start, step, stop)))
if type(key) is list:
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
if type(key) is tuple:
key = list(key)
shape = self.array.shape()
ndim = len(shape)
nk = len(key)
key += [slice(None)] * (ndim - nk)
args = []
for i, dim in enumerate(key):
if type(dim) is int:
args.append(NDArrayIndex.point(dim))
elif type(dim) is slice:
if dim == slice(None):
args.append(NDArrayIndex.all())
else:
start = dim.start
stop = dim.stop
step = dim.step
if start is None:
start = 0
if stop is None:
stop = shape[i]
if stop - start <= 0:
return None
if step is None or step == 1:
args.append(NDArrayIndex.interval(start, stop))
else:
args.append(NDArrayIndex.interval(
start, step, stop))
elif type(dim) in (list, tuple):
raise NotImplementedError(
'Sorry, this type of indexing is not supported yet.')
return ndarray(self.array.get(*args))
def __setitem__(self, key, other):
self.numpy()[key] = _nparray(other)
return
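        # NOTE: assignment is handled through numpy above; the nd4j-based code below is unreachable.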
other = _indarray(other)
view = self[key]
if view is None:
return
view = view.array
other = broadcast_like(other, view)
view.assign(other)
def __add__(self, other):
return ndarray(self.numpy() + _nparray(other))
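        # NOTE: this and the operators below delegate to numpy via the early return;
        # the nd4j code after each return is unreachable.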
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.add(y))
def __sub__(self, other):
return ndarray(self.numpy() - _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.sub(y))
def __mul__(self, other):
return ndarray(self.numpy() * _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.mul(y))
def __div__(self, other):
return ndarray(self.numpy() / _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(x.div(y))
def __pow__(self, other):
return ndarray(self.numpy() ** _nparray(other))
other = _indarray(other)
x, y = broadcast(self.array, other)
return ndarray(Transforms.pow(x, y))
def __iadd__(self, other):
self.numpy().__iadd__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.addi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.add(y)
return self
def __isub__(self, other):
self.numpy().__isub__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.subi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.sub(y)
return self
def __imul__(self, other):
self.numpy().__imul__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.muli(other)
else:
x, y = broadcast(self.array, other)
self.array = x.mul(y)
return self
def __idiv__(self, other):
self.numpy().__idiv__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = x.div(y)
return self
def __ipow__(self, other):
self.numpy().__ipow__(_nparray(other))
return self
other = _indarray(other)
if self.array.shape() == other.shape():
self.array = self.array.divi(other)
else:
x, y = broadcast(self.array, other)
self.array = Transforms.pow(x, y)
return self
def __getattr__(self, attr):
import ops
f = getattr(ops, attr)
setattr(ndarray, attr, f)
return getattr(self, attr)
def __int__(self):
if self.array.length() == 1:
return self.array.getInt(0)
raise Exception('Applicable only for scalars')
def __float__(self):
if self.array.length() == 1:
return self.array.getDouble(0)
raise Exception('Applicable only for scalars')
@property
def T(self):
return self.transpose()
def array(*args, **kwargs):
return ndarray(*args, **kwargs)
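

# Usage sketch (illustrative, not part of the original module): requires a working
# nd4j/JVM backend via jumpy's java_classes. The shapes and values are arbitrary examples.
if __name__ == '__main__':
    a = array(np.arange(12.0).reshape(3, 4))   # wrap a float64 numpy array
    b = array(np.ones((3, 4)))
    c = a + b * 2                              # arithmetic delegates to numpy under the hood
    print(c.shape)
    print(c.numpy())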
| 1.984375 | 2 |
Python/usec_mode.py | hanayik/StimSync | 6 | 6073 | import serial
ser = serial.Serial('/dev/tty.usbmodem7071', 115200, timeout=10)
ser.write("\xb1\xa3\xb5\xb5") #set usec mode 177,163,181,181
ser.flush()
ser.flushInput()
obs = ser.read(8)
if len(obs) != 8:
    print('Error: no button presses detected')
print 'Observed data (as hex): '+ obs.encode('hex')
obsBin = [ord(c) for c in obs]
usec = (obsBin[3] << 24)+ (obsBin[4] << 16)+ (obsBin[5] << 8)+obsBin[6]
keys = (obsBin[1] << 8)+obsBin[2]
print 'keys pressed %d at %d usec' % (keys, usec)
ser.write("\xb1\xa3\xa9\xa9") #turn off oscilloscope: set keyboard mode 177,163,169,169
ser.close() | 2.546875 | 3 |
torchaudio/datasets/libritts.py | hahaxun/audio | 1 | 6074 | import os
from typing import Tuple
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
walk_files,
)
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriTTS"
_CHECKSUMS = {
"http://www.openslr.org/60/dev-clean.tar.gz": "0c3076c1e5245bb3f0af7d82087ee207",
"http://www.openslr.org/60/dev-other.tar.gz": "815555d8d75995782ac3ccd7f047213d",
"http://www.openslr.org/60/test-clean.tar.gz": "7bed3bdb047c4c197f1ad3bc412db59f",
"http://www.openslr.org/60/test-other.tar.gz": "ae3258249472a13b5abef2a816f733e4",
"http://www.openslr.org/60/train-clean-100.tar.gz": "4a8c202b78fe1bc0c47916a98f3a2ea8",
"http://www.openslr.org/60/train-clean-360.tar.gz": "a84ef10ddade5fd25df69596a2767b2d",
"http://www.openslr.org/60/train-other-500.tar.gz": "7b181dd5ace343a5f38427999684aa6f",
}
def load_libritts_item(
fileid: str,
path: str,
ext_audio: str,
ext_original_txt: str,
ext_normalized_txt: str,
) -> Tuple[Tensor, int, str, str, int, int, str]:
speaker_id, chapter_id, segment_id, utterance_id = fileid.split("_")
utterance_id = fileid
normalized_text = utterance_id + ext_normalized_txt
normalized_text = os.path.join(path, speaker_id, chapter_id, normalized_text)
original_text = utterance_id + ext_original_txt
original_text = os.path.join(path, speaker_id, chapter_id, original_text)
file_audio = utterance_id + ext_audio
file_audio = os.path.join(path, speaker_id, chapter_id, file_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
# Load original text
with open(original_text) as ft:
original_text = ft.readline()
# Load normalized text
with open(normalized_text, "r") as ft:
normalized_text = ft.readline()
return (
waveform,
sample_rate,
original_text,
normalized_text,
int(speaker_id),
int(chapter_id),
utterance_id,
)
class LIBRITTS(Dataset):
"""Create a Dataset for LibriTTS.
Args:
root (str): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
            or the type of the dataset to download.
Allowed type values are ``"dev-clean"``, ``"dev-other"``, ``"test-clean"``,
``"test-other"``, ``"train-clean-100"``, ``"train-clean-360"`` and
``"train-other-500"``. (default: ``"train-clean-100"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"LibriTTS"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_ext_original_txt = ".original.txt"
_ext_normalized_txt = ".normalized.txt"
_ext_audio = ".wav"
def __init__(
self,
root: str,
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
) -> None:
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
ext_archive = ".tar.gz"
base_url = "http://www.openslr.org/resources/60/"
url = os.path.join(base_url, url + ext_archive)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum)
extract_archive(archive)
walker = walk_files(
self._path, suffix=self._ext_audio, prefix=False, remove_suffix=True
)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, original_text, normalized_text, speaker_id,
chapter_id, utterance_id)``
"""
fileid = self._walker[n]
return load_libritts_item(
fileid,
self._path,
self._ext_audio,
self._ext_original_txt,
self._ext_normalized_txt,
)
def __len__(self) -> int:
return len(self._walker)
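

# Usage sketch (illustrative, not part of the original file): shows how the dataset class
# above might be driven. The root directory, subset choice, and download flag are
# assumptions made only for this example.
if __name__ == "__main__":
    dataset = LIBRITTS(root="./data", url="dev-clean", download=True)
    waveform, sample_rate, original_text, normalized_text, speaker_id, chapter_id, utterance_id = dataset[0]
    print(sample_rate, speaker_id, normalized_text)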
| 2.34375 | 2 |
Others/Source/19/19.2/barh_test.py | silence0201/Learn-Python | 1 | 6075 | # coding: utf-8
#########################################################################
#   Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>                         #
# author yeeku.H.lee <EMAIL> #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import matplotlib.pyplot as plt
import numpy as np
# Build the data
x_data = ['2011', '2012', '2013', '2014', '2015', '2016', '2017']
y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
y_data2 = [52000, 54200, 51500,58300, 56800, 59500, 62700]
bar_width=0.3
# Use range(len(x_data)) for the Y-axis positions, i.e. 0, 1, 2...
plt.barh(y=range(len(x_data)), width=y_data, label='疯狂Java讲义',
color='steelblue', alpha=0.8, height=bar_width)
# Use np.arange(len(x_data)) + bar_width for the Y-axis positions,
# i.e. bar_width, 1+bar_width, 2+bar_width..., so this series sits beside the first one
plt.barh(y=np.arange(len(x_data))+bar_width, width=y_data2,
label='疯狂Android讲义', color='indianred', alpha=0.8, height=bar_width)
# Show the exact values on the bars; ha sets horizontal alignment, va sets vertical alignment
for y, x in enumerate(y_data):
plt.text(x+5000, y-bar_width/2, '%s' % x, ha='center', va='bottom')
for y, x in enumerate(y_data2):
plt.text(x+5000, y+bar_width/2, '%s' % x, ha='center', va='bottom')
# Set the tick labels for the Y axis
plt.yticks(np.arange(len(x_data))+bar_width/2, x_data)
# Set the title
plt.title("Java与Android图书对比")
# Label both axes
plt.xlabel("销量")
plt.ylabel("年份")
# Show the legend
plt.legend()
plt.show()
| 2.578125 | 3 |
03_docker_compose/03_c_simple_case_with_mongodb_orm/app/app.py | kendny/study_docker | 2 | 6076 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 18 20:13:57 2018
@author: allen
"""
import random, os, json, datetime, time
from flask import Flask, Response
from pymongo import MongoClient
from bson import json_util
app = Flask(__name__)
MONGO_URI = "mongodb://mongodb:27017"  # i.e. "mongodb://<container_name>:27017"
mongdb_client= MongoClient(MONGO_URI)
random_numbers = mongdb_client.demo.random_numbers
time.sleep(5)  # hack: give the MongoDB container time to become ready
######################
##
##########################
from pymodm.connection import connect
from pymongo.write_concern import WriteConcern
from pymodm import MongoModel, fields
# Connect to MongoDB and call the connection "my-app".
connect("mongodb://mongodb:27017/myDatabase", alias="my-app")
class User(MongoModel):
email = fields.EmailField(primary_key=True)
first_name = fields.CharField()
last_name = fields.CharField()
class Meta:
write_concern = WriteConcern(j=True)
connection_alias = 'my-app'
@app.route("/")
def hello():
html = "<h3> Hello world...</h3>"
#User('<EMAIL>', name, 'Ross').save()
return html
@app.route("/add_user/<name>")
def add_user(name):
#User('<EMAIL>', name, 'Ross').save()
html = "<h3> Hello </h3>"
User('<EMAIL>', name, 'Ross').save()
    return "name {} saved to database".format(name)
@app.route("/random/<int:lower>/<int:upper>")
def random_generator(lower, upper):
number = str(random.randint(lower, upper))
random_numbers.update(
{"_id" : "lasts"},
{"$push" : {
"items" : {
"$each": [{"value" : number, "date": datetime.datetime.utcnow()}],
"$sort" : {"date" : -1},
"$slice" : 5
}
}},
upsert=True
)
return Response(number, status=200, mimetype='application/json')
@app.route("/random-list")
def last_number_list():
last_numbers = list(random_numbers.find({"_id" : "lasts"}))
extracted = [d['value'] for d in last_numbers[0]['items']]
return Response(json.dumps(extracted, default=json_util.default), status=200, mimetype='application/json')
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.config['DEBUG'] = True
app.run(host='0.0.0.0', port=port)
| 3.203125 | 3 |
goethe/eval/analogy_space.py | HPI-DeepLearning/wort2vek | 4 | 6077 | #! /usr/bin/Python
from gensim.models.keyedvectors import KeyedVectors
from scipy import spatial
from numpy import linalg
import argparse
import sys
if len(sys.argv) != 6:
    print('arguments wrong!')
    print(len(sys.argv))
    exit()

vector_file = sys.argv[1]
words = [sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]]
print(words)
wvs = KeyedVectors.load_word2vec_format(vector_file, binary=True)
print('WVs loaded.')
for w in words:
if w not in wvs.vocab:
print('out of vocab!')
exit()
#print(wvs.most_similar(positive=[words[1], words[2]], negative=[words[0]], topn=3))
w1 = wvs[words[0]]
w2 = wvs[words[1]]
w3 = wvs[words[2]]
w4 = wvs[words[3]]
m1 = w1 / linalg.norm(w1)
m2 = w2 / linalg.norm(w2)
m3 = w3 / linalg.norm(w3)
m4 = w4 / linalg.norm(w4)
diff1 = w1 - w2
diff2 = w3 - w4
miff1 = m1 - m2
miff2 = m3 - m4
print('-------Word Space---------')
print('to word-4: ', 1-spatial.distance.cosine(m2+m3-m1, m4))
print('to word-3: ', 1-spatial.distance.cosine(m1+m4-m2, m3))
print('to word-2: ', 1-spatial.distance.cosine(m4+m1-m3, m2))
print('to word-1: ', 1-spatial.distance.cosine(m2+m3-m4, m1))
print('------Analogy Space-------')
print(' cosine: ', 1-spatial.distance.cosine(diff1, diff2))
print(' Euclidean: ', 1-linalg.norm(diff1-diff2)/(linalg.norm(diff1)+linalg.norm(diff2)))
print(' M-cosine: ', 1-spatial.distance.cosine(miff1, miff2))
print('M-Euclidean: ', 1-linalg.norm(miff1-miff2)/(linalg.norm(miff1)+linalg.norm(miff2)))
| 2.578125 | 3 |
localgraphclustering/algorithms/eig2_nL.py | vishalbelsare/LocalGraphClustering | 106 | 6078 | import numpy as np
import scipy as sp
import scipy.sparse.linalg as splinalg
def eig2_nL(g, tol_eigs = 1.0e-6, normalize:bool = True, dim:int=1):
"""
DESCRIPTION
-----------
Computes the eigenvector that corresponds to the second smallest eigenvalue
of the normalized Laplacian matrix then it uses sweep cut to round the solution.
PARAMETERS (mandatory)
----------------------
g: graph object
PARAMETERS (optional)
---------------------
dim: positive, int
default == 1
The number of eigenvectors or dimensions to compute.
tol_eigs: positive float, double
default == 1.0e-6
Tolerance for computation of the eigenvector that corresponds to
the second smallest eigenvalue of the normalized Laplacian matrix.
normalize: bool,
default == True
True if we should return the eigenvectors of the generalized
eigenvalue problem associated with the normalized Laplacian.
This should be on unless you know what you are doing.
RETURNS
------
    p: Eigenvector or eigenvector matrix that
       corresponds to the second smallest eigenvalue of the
       normalized Laplacian matrix, plus additional eigenvectors if dim > 1.
"""
n = g.adjacency_matrix.shape[0]
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt.transpose(), 0, n, n)
L = sp.sparse.identity(n) - D_sqrt_neg.dot((g.adjacency_matrix.dot(D_sqrt_neg)))
emb_eig_val, p = splinalg.eigsh(L, which='SM', k=1+dim, tol = tol_eigs)
F = np.real(p[:,1:])
if normalize:
F *= g.dn_sqrt[:,np.newaxis]
return F, emb_eig_val
"""
Random walks and local cuts in graphs, Chung, LAA 2007
We just form the sub-matrix of the Laplacian and use the eigenvector there.
"""
def eig2nL_subgraph(g, ref_nodes, tol_eigs = 1.0e-6, normalize: bool = True):
A_sub = g.adjacency_matrix.tocsr()[ref_nodes, :].tocsc()[:, ref_nodes]
nref = len(ref_nodes)
D_sqrt_neg = sp.sparse.spdiags(g.dn_sqrt[ref_nodes].transpose(), 0, nref, nref)
L_sub = sp.sparse.identity(nref) - D_sqrt_neg.dot((A_sub.dot(D_sqrt_neg)))
emb_eig_val, emb_eig = splinalg.eigsh(L_sub, which='SM', k=1, tol=tol_eigs)
emb_eig *= -1 if max(emb_eig) < 0 else 1
f = emb_eig[:,0]
if normalize:
f *= g.dn_sqrt[ref_nodes]
return ((ref_nodes,f), emb_eig_val)
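

# Minimal usage sketch (illustrative only, not part of the original module). It feeds
# eig2_nL a small stand-in object exposing just the attributes the function reads above
# (adjacency_matrix, dn_sqrt); real callers pass a localgraphclustering graph object.
if __name__ == "__main__":
    class _ToyGraph:
        def __init__(self, adjacency):
            self.adjacency_matrix = sp.sparse.csr_matrix(adjacency)
            degrees = np.asarray(adjacency.sum(axis=1)).ravel()
            self.dn_sqrt = 1.0 / np.sqrt(degrees)

    n_nodes = 8
    A = np.zeros((n_nodes, n_nodes))
    for u in range(n_nodes):  # ring graph: u -- (u + 1) mod n_nodes
        v = (u + 1) % n_nodes
        A[u, v] = A[v, u] = 1.0

    F, eig_vals = eig2_nL(_ToyGraph(A))
    print(F.ravel())
    print(eig_vals)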
| 2.6875 | 3 |
build/common/hex2carray.py | isabella232/nanos-nonsecure-firmware | 16 | 6079 | <gh_stars>10-100
"""
*******************************************************************************
* Ledger Blue
* (c) 2016 Ledger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
********************************************************************************
"""
from ledgerblue.hexParser import IntelHexParser
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--hex", help="Hex file to be converted as a C array")
args = parser.parse_args()
if args.hex == None:
raise Exception("Missing hex filename to sign")
parser = IntelHexParser(args.hex)
def hexU8(value):
return hex(0x100|(value & 0xFF))[3:]
for a in parser.getAreas():
if (len(a.data) > 0x10000):
        raise BaseException("data must be split into chunks of 64k")
print "0x" + hexU8(a.start >> 24) + ", 0x" + hexU8(a.start >> 16) + ", 0x" + hexU8(a.start >> 8) + ", 0x" + hexU8(a.start) + ", "
print "0x" + hexU8(len(a.data) >> 24) + ", 0x" + hexU8(len(a.data) >> 16) + ", 0x" + hexU8(len(a.data) >> 8) + ", 0x" + hexU8(len(a.data)) + ", "
# low @ to high @
offset = 0
while offset < len(a.data):
string = ""
for i in range(8):
if offset+i < len(a.data):
string += " 0x" + hexU8(a.data[offset+i]) + ","
print string
offset+=8
| 2.484375 | 2 |
setup.py | xames3/vdoxa | 1 | 6080 | # Copyright 2020 XAMES3. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
"""
vdoXA is an open-source python package for trimming the videos.
It is built as a subsystem for < XXXXX Not to be named XXXXX > project.
Originally inspired by my colleague's work, I thought of improving the
concept and building a tool to simplify the process. I hope it comes with
strong support for continuous updates, reliable functions and overall
ease of use.
Read complete documentation at: <https://github.com/xames3/vdoxa>.
"""
from setuptools import find_packages, setup
from vdoxa.vars import dev
doclines = __doc__.split('\n')
def use_readme() -> str:
"""Use `README.md` for parsing long description."""
with open('README.md') as file:
return file.read()
with open('requirements.txt', 'r') as requirements:
required_packages = [package.rstrip() for package in requirements]
setup(
name=dev.PROJECT_NAME,
version=dev.PROJECT_VERSION,
url=dev.PROJECT_LINK,
download_url=dev.PROJECT_LINK,
author=dev.AUTHOR,
author_email=dev.AUTHOR_EMAIL,
maintainer=dev.AUTHOR,
maintainer_email=dev.AUTHOR_EMAIL,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
],
license=dev.PROJECT_LICENSE,
description=f'{doclines[1]}',
long_description=use_readme(),
long_description_content_type='text/markdown',
keywords='opencv2 cv2 moviepy',
zip_safe=False,
install_requires=required_packages,
python_requires='~=3.6',
include_package_data=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'vdoxa = vdoxa.parser:main',
],
}
)
| 1.695313 | 2 |
application/modules/login.py | BaggerFast/Simple_votings | 0 | 6081 | <gh_stars>0
from django.contrib import messages
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from django.urls import reverse
from django.views import View
from application.forms import AuthenticateForm
from application.views import get_navbar, Page
class LoginView(View):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.context = {}
def get(self, request):
self.context['navbar'] = get_navbar(request)
self.context['form'] = AuthenticateForm()
return render(request, Page.login, self.context)
def post(self, request):
self.context['navbar'] = get_navbar(request)
data = request.POST
form = AuthenticateForm(data)
if form.is_valid():
user = authenticate(
username=data['username'],
password=data['password'],
)
if user:
login(request, user)
messages.success(request, 'You have successfully logged in!')
return redirect(reverse('main'))
messages.error(request, 'Invalid username and password pair.', extra_tags='danger')
else:
messages.error(request, 'Invalid username and password pair.', extra_tags='danger')
self.context['form'] = AuthenticateForm(data)
return render(request, Page.login, self.context)
| 2.28125 | 2 |
little_questions/utils/log.py | HelloChatterbox/little_questions | 0 | 6082 | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import inspect
import logging
import sys
class LOG:
"""
Custom logger class that acts like logging.Logger
The logger name is automatically generated by the module of the caller
Usage:
>>> LOG.debug('My message: %s', debug_str)
13:12:43.673 - :<module>:1 - DEBUG - My message: hi
>>> LOG('custom_name').debug('Another message')
13:13:10.462 - custom_name - DEBUG - Another message
"""
base_path = "stdout"
fmt = '%(asctime)s.%(msecs)03d - ' \
'%(name)s - %(levelname)s - %(message)s'
datefmt = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(fmt, datefmt)
name = 'little_questions'
level = "DEBUG"
_loggers = {}
@classmethod
def set_level(cls, level="INFO"):
cls.level = level
for n in cls._loggers:
cls._loggers[n].setLevel(cls.level)
@classmethod
def create_logger(cls, name):
if name in cls._loggers:
return cls._loggers[name]
logger = logging.getLogger(name)
logger.propagate = False
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(cls.formatter)
logger.addHandler(stdout_handler)
logger.setLevel(cls.level)
cls._loggers[name] = logger
return logger
@classmethod
def _log(cls):
name = ""
if cls.name is not None:
name = cls.name + " - "
# Stack:
# [0] - _log()
# [1] - debug(), info(), warning(), or error()
# [2] - caller
stack = inspect.stack()
# Record:
# [0] - frame object
# [1] - filename
# [2] - line number
# [3] - function
# ...
record = stack[2]
name += record[3] + ':' + str(record[2])
logger = cls.create_logger(name)
return logger
@classmethod
def info(cls, *args, **kwargs):
cls._log().info(*args, **kwargs)
@classmethod
def debug(cls, *args, **kwargs):
cls._log().debug(*args, **kwargs)
@classmethod
def warning(cls, *args, **kwargs):
cls._log().warning(*args, **kwargs)
@classmethod
def error(cls, *args, **kwargs):
cls._log().error(*args, **kwargs)
@classmethod
def exception(cls, *args, **kwargs):
cls._log().exception(*args, **kwargs)
| 2.453125 | 2 |
alipay/aop/api/response/AlipayOpenMiniVersionAuditApplyResponse.py | antopen/alipay-sdk-python-all | 0 | 6083 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniVersionAuditApplyResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniVersionAuditApplyResponse, self).__init__()
self._speed_up = None
self._speed_up_memo = None
@property
def speed_up(self):
return self._speed_up
@speed_up.setter
def speed_up(self, value):
self._speed_up = value
@property
def speed_up_memo(self):
return self._speed_up_memo
@speed_up_memo.setter
def speed_up_memo(self, value):
self._speed_up_memo = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniVersionAuditApplyResponse, self).parse_response_content(response_content)
if 'speed_up' in response:
self.speed_up = response['speed_up']
if 'speed_up_memo' in response:
self.speed_up_memo = response['speed_up_memo']
| 2.03125 | 2 |
cvstudio/view/widgets/loading_dialog/loading_dialog.py | haruiz/PytorchCvStudio | 32 | 6084 | <filename>cvstudio/view/widgets/loading_dialog/loading_dialog.py
import os
from PyQt5 import QtCore
from PyQt5.QtCore import QRect, QPoint
from PyQt5.QtGui import QMovie, QCloseEvent, QShowEvent
from PyQt5.QtWidgets import QDialog, QLabel, QVBoxLayout, QApplication, QWidget
class QLoadingDialog(QDialog):
def __init__(self, parent=None):
super(QLoadingDialog, self).__init__()
self.setFixedSize(100, 100)
# self.setWindowOpacity(0.8)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
app = QApplication.instance()
curr_theme = "light"
if app:
curr_theme = app.property("theme")
gif_file = os.path.abspath("./assets/icons/{}/loading.gif".format(curr_theme))
self.movie = QMovie(gif_file)
self.label = QLabel()
self.label.setMovie(self.movie)
self.layout = QVBoxLayout(self)
self.layout.addWidget(self.label)
def center(self, host: QWidget = None):
if host:
hostGeometry: QRect = host.geometry()
# dialogGeometry : QRect = self.geometry()
centerPoint: QPoint = hostGeometry.center()
centerPoint = host.mapToGlobal(centerPoint)
offset = 30
targetPoint = QPoint(centerPoint.x() - offset, centerPoint.y() - offset)
self.move(targetPoint)
else:
screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())
centerPoint = QApplication.desktop().screenGeometry(screen).center()
self.move(centerPoint)
return self
def showEvent(self, e: QShowEvent):
if self.movie.state() == QMovie.NotRunning:
self.movie.start()
def closeEvent(self, e: QCloseEvent):
if self.movie.state() == QMovie.Running:
self.movie.stop()
def exec_(self):
self.center()
return QDialog.exec_(self)
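

# Usage sketch (illustrative): runs the dialog inside a minimal Qt event loop.
# The theme/gif lookup above is reused as-is; no extra assets are assumed here.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    dlg = QLoadingDialog()
    dlg.center()
    dlg.show()
    sys.exit(app.exec_())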
| 2.34375 | 2 |
pysnmp-with-texts/MWORKS-MIB.py | agustinhenze/mibs.snmplabs.com | 8 | 6085 | #
# PySNMP MIB module MWORKS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MWORKS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:16:04 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, Unsigned32, ObjectIdentity, IpAddress, Bits, MibIdentifier, Integer32, enterprises, ModuleIdentity, TimeTicks, Counter32, NotificationType, iso, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Unsigned32", "ObjectIdentity", "IpAddress", "Bits", "MibIdentifier", "Integer32", "enterprises", "ModuleIdentity", "TimeTicks", "Counter32", "NotificationType", "iso", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tecElite = MibIdentifier((1, 3, 6, 1, 4, 1, 217))
meterWorks = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16))
mw501 = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1))
mwMem = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 1))
mwHeap = MibIdentifier((1, 3, 6, 1, 4, 1, 217, 16, 1, 2))
mwMemCeiling = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemCeiling.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemCeiling.setDescription('bytes of memory the agent memory manager will allow the agent to use.')
mwMemUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwMemUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwMemUsed.setDescription("bytes of memory that meterworks has malloc'ed. some of this may be in free pools.")
mwHeapTotal = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapTotal.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapTotal.setDescription('bytes of memory given to the heap manager.')
mwHeapUsed = MibScalar((1, 3, 6, 1, 4, 1, 217, 16, 1, 2, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: mwHeapUsed.setStatus('mandatory')
if mibBuilder.loadTexts: mwHeapUsed.setDescription('bytes of available memory in the heap.')
mibBuilder.exportSymbols("MWORKS-MIB", mwHeap=mwHeap, mwHeapUsed=mwHeapUsed, mwMemCeiling=mwMemCeiling, meterWorks=meterWorks, tecElite=tecElite, mwMem=mwMem, mw501=mw501, mwHeapTotal=mwHeapTotal, mwMemUsed=mwMemUsed)
| 1.546875 | 2 |
Assets/Editor/PostprocessBuildPlayer_MpireNxusMeasurementPostBuildiOS.py | mpire-nxus/nxus_unity_sdk | 1 | 6086 | #!/usr/bin/env python
import sys
import re
from subprocess import Popen, PIPE
import argparse
from pbxproj import XcodeProject, TreeType
from pbxproj import FileOptions
def main():
parser = argparse.ArgumentParser(description="MpireNxusMeasurement post build iOS script")
parser.add_argument('ios_project_path', help="path to the folder of the iOS project generated by unity3d")
with open('MpireNxusMeasurementPostBuildiOSLog.txt', 'w') as fileLog:
# Log function with file injected.
LogFunc = LogInput(fileLog)
# Path of the Xcode SDK on the system.
xcode_sdk_path = get_xcode_sdk_path(LogFunc)
# Path for unity iOS Xcode project and framework on the system.
unity_xcode_project_path, framework_path = get_paths(LogFunc, parser, xcode_sdk_path)
        # Edit the Xcode project using mod_pbxproj:
        # - Add the Security framework (weak-linked, not embedded).
        # - Add -ObjC to "Other Linker Flags".
edit_unity_xcode_project(LogFunc, unity_xcode_project_path, framework_path)
# Removed.
# Change the Xcode project directly:
# - Allow objective-c exceptions
# rewrite_unity_xcode_project(LogFunc, unity_xcode_project_path)
sys.exit(0)
def LogInput(writeObject):
def Log(message, *args):
messageNLine = (message if message else "None") + "\n"
writeObject.write(messageNLine.format(*args))
return Log
def get_paths(Log, parser, xcode_sdk_path):
args, ignored_args = parser.parse_known_args()
ios_project_path = args.ios_project_path
unity_xcode_project_path = ios_project_path + "/Unity-iPhone.xcodeproj/project.pbxproj"
Log("Unity3d Xcode project path: {0}", unity_xcode_project_path)
framework_path = xcode_sdk_path + "/System/Library/Frameworks/"
Log("framework path: {0}", framework_path)
return unity_xcode_project_path, framework_path
def edit_unity_xcode_project(Log, unity_xcode_project_path, framework_path):
# load unity iOS pbxproj project file
unity_XcodeProject = XcodeProject.load(unity_xcode_project_path)
frameworks = unity_XcodeProject.get_or_create_group('Frameworks')
file_options_security_framework = FileOptions(embed_framework=False, weak=True)
unity_XcodeProject.add_file(framework_path + "Security.framework", parent=frameworks, tree='SDKROOT', force=False, file_options=file_options_security_framework)
Log("added Security framework")
# Add -ObjC to "Other Linker Flags" project settings.
unity_XcodeProject.add_other_ldflags('-ObjC')
# Save changes.
unity_XcodeProject.save()
def rewrite_unity_xcode_project(Log, unity_xcode_project_path):
unity_xcode_lines = []
# Allow objective-c exceptions
re_objc_excep = re.compile(r"\s*GCC_ENABLE_OBJC_EXCEPTIONS *= *NO.*")
with open(unity_xcode_project_path) as upf:
for line in upf:
if re_objc_excep.match(line):
#Log("matched line: {0}", re_objc_excep.match(line).group())
line = line.replace("NO","YES")
Log("Objective-c exceptions enabled")
unity_xcode_lines.append(line)
with open(unity_xcode_project_path, "w+") as upf:
upf.writelines(unity_xcode_lines)
def get_xcode_sdk_path(Log):
# Output all info from Xcode.
proc = Popen(["xcodebuild", "-version", "-sdk"], stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
if proc.returncode not in [0, 66]:
Log("Could not retrieve Xcode sdk path. code: {0}, err: {1}", proc.returncode, err)
return None
match = re.search("iPhoneOS.*?Path: (?P<sdk_path>.*?)\n", out, re.DOTALL)
xcode_sdk_path = match.group('sdk_path') if match else None
Log("Xcode sdk path: {0}", xcode_sdk_path)
return xcode_sdk_path
if __name__ == "__main__":
main()
| 2.1875 | 2 |
vars_in_python.py | klyusba/python-quiz | 0 | 6087 | <filename>vars_in_python.py<gh_stars>0
# == 1 ==
bar = [1, 2]
def foo(bar):
bar = sum(bar)
return bar
print(foo(bar))
# == 2 ==
bar = [1, 2]
def foo(bar):
bar[0] = 1
return sum(bar)
print(foo(bar))
# == 3 ==
bar = [1, 2]
def foo():
bar = sum(bar)
return bar
print(foo())
# == 4 ==
bar = [1, 2]
def foo(bar):
bar = [1, 2, 3, ]
return sum(bar)
print(foo(bar), bar)
# == 5 ==
bar = [1, 2]
def foo(bar):
bar[:] = [1, 2, 3, ]
return sum(bar)
print(foo(bar), bar)
# == 6 ==
try:
bar = 1 / 0
print(bar)
except ZeroDivisionError as bar:
print(bar)
print(bar)
# == 7 ==
bar = [1, 2]
print(list(bar for bar in bar))
print(bar)
# == 8 ==
bar = [1, 2]
f = lambda: sum(bar)
print(f())
bar = [1, 2, 3, ]
print(f())
# == 9 ==
bar = [1, 2]
def foo(bar):
return lambda: sum(bar)
f = foo(bar)
print(f())
bar = [1, 2, 3, ]
print(f())
# == 10 ==
bar = [1, 2]
foo = []
for i in bar:
foo.append(lambda: i)
print([f() for f in foo])
# == 11 ==
bar = [1, 2]
foo = [
lambda: i
for i in bar
]
print(list(f() for f in foo))
# == 12 ==
bar = [1, 2]
foo = [
lambda: i
for i in bar
]
print(list(f() for f in foo))
bar = [1, 2, 3, ]
print(list(f() for f in foo))
bar[:] = [1, 2, 3, ]
print(list(f() for f in foo))
# == 13 ==
bar = [1, 2]
foo = [
lambda i=i: i
for i in bar
]
print(list(f() for f in foo))
| 3.625 | 4 |
hack/dev/gh-replay-events.py | sm43/pipelines-as-code | 0 | 6088 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# See README.md for documentation
import typing
import argparse
import base64
import hashlib
import hmac
import json
import os
import subprocess
import sys
import time
import requests
import ghapp_token
NAMESPACE = "pipelines-as-code"
SECRET_NAME = "pipelines-as-code-secret"
ELNAME = "pipelines-as-code"
EXPIRE_MINUTES_AS_SECONDS = (
int(os.environ.get("GITHUBAPP_TOKEN_EXPIRATION_MINUTES", 10)) * 60
)
def get_controller_route():
elroute = subprocess.run(
f"kubectl get route -n {NAMESPACE} -l pipelines-as-code/route=controller -o json",
shell=True,
check=True,
capture_output=True,
)
return (
"https://"
+ json.loads(elroute.stdout)["items"][0]["status"]["ingress"][0]["host"]
)
def get_controller_ingress():
elroute = subprocess.run(
f"kubectl get ingress -n {NAMESPACE} -l pipelines-as-code/route=controller -o json",
shell=True,
check=True,
capture_output=True,
)
return (
"http://" + json.loads(elroute.stdout)["items"][0]["spec"]["rules"][0]["host"]
)
def get_token_secret(
github_api_url=ghapp_token.GITHUB_API_URL, expiration_time=EXPIRE_MINUTES_AS_SECONDS
):
secret = subprocess.run(
f"kubectl get secret {SECRET_NAME} -n{NAMESPACE} -o json",
shell=True,
check=True,
capture_output=True,
)
jeez = json.loads(secret.stdout)
private_key = base64.b64decode(jeez["data"]["github-private-key"])
app_id = base64.b64decode(jeez["data"]["github-application-id"])
webhook_secret = base64.b64decode(jeez["data"]["webhook.secret"]).decode()
if not private_key or not app_id or not webhook_secret:
print(
f"private_key={private_key[1:10]} or app_id={app_id} or webhook_secret={webhook_secret} are empty"
)
sys.exit(1)
gh = ghapp_token.GitHub(
private_key,
app_id,
expiration_time,
github_api_url,
)
return gh.token, webhook_secret, app_id
def _request_app_delivery(token, iid=None, api_url=ghapp_token.GITHUB_API_URL):
url = f"{api_url}/app/hook/deliveries"
if iid:
url += f"/{iid}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("GET", url, headers=headers)
def _request_webhooks_installed(
token: str,
owner_repo: str,
iid: typing.Union[int, None] = None,
api_url: str = ghapp_token.GITHUB_API_URL,
):
url = f"{api_url}/repos/{owner_repo}/hooks"
if iid:
url += f"/{iid}/deliveries"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("GET", url, headers=headers)
def _request_webhooks_reattempt(
token: str,
owner_repo: str,
iid: int,
delivery_id: int,
api_url: str = ghapp_token.GITHUB_API_URL,
):
url = f"{api_url}/repos/{owner_repo}/hooks/{iid}/deliveries/{delivery_id}/attempts"
print(url)
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization": f"Bearer {token}",
}
return requests.request("POST", url, headers=headers)
def ask_which(token: str, api_url: str, last: bool, deliveries: dict) -> int:
dico = []
i = 1
if "message" in deliveries:
print(deliveries)
sys.exit(0)
for delivery in deliveries:
print(
f"{i}) Action={delivery['action']} Event={delivery['event']} Delivered at {delivery['delivered_at']}"
)
dico.append(delivery["id"])
if i == 10:
break
i += 1
chosen = input("Choose a delivery: ")
# return _request_app_delivery(token, dico[int(chosen) - 1], api_url=api_url).json()
return int(chosen) - 1
def webhook_get_delivery(
token: str,
owner_repo: str,
last: bool = False,
api_url: str = ghapp_token.GITHUB_API_URL,
) -> str:
r = _request_webhooks_installed(token, api_url=api_url, owner_repo=owner_repo)
r.raise_for_status()
webhooks = r.json()
if len(webhooks) == 1:
webhook_id = int(webhooks[0]["id"])
elif len(webhooks) > 1:
cnt = 1
for wh in webhooks:
print(f"{cnt}) {wh['name']} - {wh['config']['url']} ")
cnt += 1
chosen = input("Choose a delivery: ")
webhook_id = int(webhooks[int(chosen) - 1]["id"])
else:
        print(f"could not find any webhook configuration on your repo {owner_repo}")
sys.exit(1)
r = _request_webhooks_installed(
token, api_url=api_url, owner_repo=owner_repo, iid=webhook_id
)
r.raise_for_status()
deliveries = r.json()
if not deliveries:
print("no deliveries has been set ")
sys.exit(1)
if last:
delivery_id = deliveries[0]["id"]
else:
chosen = ask_which(token, api_url, last, r.json())
delivery_id = deliveries[chosen]["id"]
r = _request_webhooks_reattempt(
token=token,
owner_repo=owner_repo,
iid=webhook_id,
api_url=api_url,
delivery_id=delivery_id,
)
r.raise_for_status()
    print("Delivery has been replayed, you can replay it directly with:")
s = f"http POST {api_url}/repos/{owner_repo}/hooks/{webhook_id}/deliveries/{delivery_id}/attempts"
s += f' Authorization:"Bearer { os.environ.get("PASS_TOKEN", "$TOKEN") }"'
s += " Accept:application/vnd.github.v3+json"
print(s)
return s
def app_get_delivery(
token: str, last: bool = False, api_url: str = ghapp_token.GITHUB_API_URL
) -> dict:
r = _request_app_delivery(token, api_url=api_url)
r.raise_for_status()
deliveries = r.json()
if not deliveries:
print("no deliveries has been set ")
sys.exit(1)
if last:
return _request_app_delivery(token, deliveries[0]["id"], api_url=api_url).json()
chosen = ask_which(token, api_url, last, deliveries)
return _request_app_delivery(
token, deliveries[chosen]["id"], api_url=api_url
).json()
def save_script(target: str, el_route: str, headers: dict, payload: str):
s = f"""#!/usr/bin/env python3
import requests
import sys
payload = \"\"\"{json.dumps(payload)}\"\"\"
headers={headers}
el_route = "http://localhost:8080" if (len(sys.argv) > 1 and sys.argv[1] == "-l") else "{el_route}"
r = requests.request("POST",el_route,data=payload.encode("utf-8"),headers=headers)
r.raise_for_status()
print("Request has been replayed on " + el_route)
"""
with open(target, "w") as fp:
fp.write(s)
os.chmod(target, 0o755)
print(f"Request saved to {target}")
def main(args):
el = args.eroute
if not el:
try:
el = get_controller_route()
except subprocess.CalledProcessError:
try:
el = get_controller_ingress()
except subprocess.CalledProcessError:
print("Could not find an ingress or route")
sys.exit(1)
if args.webhook_repo:
token, webhook_secret = args.webhook_token, args.webhook_secret
replays = webhook_get_delivery(
token,
last=args.last_event,
api_url=args.api_url,
owner_repo=args.webhook_repo,
)
if args.save:
open(args.save, "w").write(f"""#!/usr/bin/env bash\n{replays}\n""")
os.chmod(args.save, 0o755)
print(f"Saved to {args.save}")
sys.exit(0)
else:
token, webhook_secret, app_id = get_token_secret(github_api_url=args.api_url)
delivery = app_get_delivery(token, args.last_event, args.api_url)
jeez = delivery["request"]["payload"]
headers = delivery["request"]["headers"]
payload = json.dumps(jeez)
esha256 = hmac.new(
webhook_secret.encode("utf-8"),
msg=payload.encode("utf-8"),
digestmod=hashlib.sha256,
).hexdigest()
esha1 = hmac.new(
webhook_secret.encode("utf-8"),
msg=payload.encode("utf-8"),
digestmod=hashlib.sha1,
).hexdigest()
print("Replay event for repo " + jeez["repository"]["full_name"])
headers.update(
{
"X-Hub-Signature": "sha1=" + esha1,
"X-Hub-Signature-256": "sha256=" + esha256,
}
)
if args.save:
save_script(args.save, el, headers, jeez)
sys.exit(0)
for _ in range(args.retry):
try:
r = requests.request(
"POST", el, data=payload.encode("utf-8"), headers=headers
)
except requests.exceptions.ConnectionError:
print(f"sleeping until {el} is up")
time.sleep(5)
continue
print(f"Payload has been replayed on {el}: {r}")
return
print("You have reached the maximum number of retries")
def parse_args():
parser = argparse.ArgumentParser(description="Replay a webhook")
parser.add_argument(
"--installation-id",
"-i",
default=os.environ.get("INSTALLATION_ID"),
help="Installation ID",
)
parser.add_argument(
"--controller-route",
"-e",
dest="eroute",
help="Route hostname (default to detect on openshift/ingress)",
default=os.environ.get("EL_ROUTE"),
)
parser.add_argument("--last-event", "-L", action="store_true")
parser.add_argument(
"--webhook-repo", "-w", help="Use a webhook-repo instead of app"
)
parser.add_argument("--webhook-token", "-t", help="Use this token")
parser.add_argument("--webhook-secret", "-S", help="Use this webhook secret")
parser.add_argument(
"--save", "-s", help="save the request to a shell script to replay easily"
)
parser.add_argument(
"-a",
"--api-url",
help="Github API URL",
default=os.environ.get("GITHUB_API_URL", ghapp_token.GITHUB_API_URL),
)
parser.add_argument(
"--retry",
type=int,
default=1,
help="how many time to try to contact the el route",
)
return parser.parse_args()
if __name__ == "__main__":
main(parse_args())
| 2.140625 | 2 |
nintendeals/noa/api/__init__.py | Pooroomoo/nintendeals | 37 | 6089 | <gh_stars>10-100
from .algolia import search_by_nsuid
from .algolia import search_by_platform
from .algolia import search_by_query
| 1.179688 | 1 |
076_Minimum_Window_Substring.py | joshlyman/Josh-LeetCode | 0 | 6090 |
# Other solution
# V2
import collections


def minWindow(s, t):
need = collections.Counter(t) #hash table to store char frequency
missing = len(t) #total number of chars we care
start, end = 0, 0
i = 0
for j, char in enumerate(s, 1): #index j from 1
if need[char] > 0:
missing -= 1
need[char] -= 1
if missing == 0: #match all chars
while i < j and need[s[i]] < 0: #remove chars to find the real start
need[s[i]] += 1
i += 1
need[s[i]] += 1 #make sure the first appearing char satisfies need[char]>0
missing += 1 #we missed this first char, so add missing by 1
if end == 0 or j-i < end-start: #update window
start, end = i, j
i += 1 #update i to start+1 for next window
return s[start:end]
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Refer from:
# https://leetcode.com/problems/minimum-window-substring/solution/
# Sliding Window
# We start with two pointers, leftleft and rightright initially pointing to the first element of the string S.
# We use the rightright pointer to expand the window until we get a desirable window i.e. a window that contains all of the characters of T.
# Once we have a window with all the characters, we can move the left pointer ahead one by one. If the window is still a desirable one we keep on updating the minimum window size.
# If the window is not desirable any more, we repeat step 2 onwards.
# The current window is s[i:j] and the result window is s[I:J]. In need[c] I store how many times I
# need character c (can be negative) and missing tells how many characters are still missing.
# In the loop, first add the new character to the window. Then, if nothing is missing,
# remove as much as possible from the window start and then update the result.
class Solution:
def minWindow(self, s: str, t: str) -> str:
m = len(s)
n = len(t)
if m < n:
return ''
lt = {}
        # put t into dict (lt) and count how many times each char occurs
for i in t:
if i not in lt:
lt[i] = 1
else:
lt[i] += 1
        # `missing` counts how many required characters are still missing;
        # when it reaches 0 the current window covers every character of t
missing = n
i = I = J = 0
for j, c in enumerate(s, 1):
if c in lt and lt[c] > 0:
missing -= 1
if c in lt:
# lt can be negative
lt[c] -= 1
# i is index of candidate substring, remove as many as char from candidate
while i < j and not missing:
if not J or j-i < J-I:
I, J = i, j
if s[i] not in lt:
i += 1
continue
else:
# if lt contains s[i], then # of s[i] +1, might reach to 0
lt[s[i]] += 1
# if > 0, means we need more, then missing +1
if lt[s[i]] > 0:
missing += 1
i += 1
return s[I:J]
# Time: O(|S|+|T|)
# Space:O(|S|+|T|)
# Optimized Sliding Window
# A small improvement to the above approach can reduce the time complexity of the algorithm to O(2*∣filtered_S∣+∣S∣+∣T∣),
# where filtered(S) is the string formed from S by removing all the elements not present in T
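

# Quick sanity check (example input taken from the classic LeetCode statement,
# not from the original file):
if __name__ == "__main__":
    print(minWindow("ADOBECODEBANC", "ABC"))              # expected: "BANC"
    print(Solution().minWindow("ADOBECODEBANC", "ABC"))   # expected: "BANC"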
| 3.375 | 3 |
home/migrations/0002_auto_20171017_0412.py | Taywee/amberherbert.com | 0 | 6091 | <filename>home/migrations/0002_auto_20171017_0412.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-17 04:12
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='navigation',
field=wagtail.core.fields.StreamField((('item', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.CharBlock(help_text='If this is left blank, the title of the linked page will be used instead', max_length=16, required=False)), ('page', wagtail.core.blocks.PageChooserBlock(required=True))))),), blank=True, help_text='The list of navigation items', null=True),
),
]
| 1.757813 | 2 |
lib/adv_model.py | chawins/entangle-rep | 15 | 6092 | import torch
import torch.nn as nn
import torch.nn.functional as F
class PGDModel(nn.Module):
"""
code adapted from
https://github.com/karandwivedi42/adversarial/blob/master/main.py
"""
def __init__(self, basic_net, config):
super(PGDModel, self).__init__()
self.basic_net = basic_net
self.rand = config['random_start']
self.step_size = config['step_size']
self.epsilon = config['epsilon']
self.num_steps = config['num_steps']
assert config['loss_func'] == 'xent', 'Only xent supported for now.'
def forward(self, inputs, targets, attack=False):
if not attack:
return self.basic_net(inputs)
x = inputs.clone()
if self.rand:
x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)
for _ in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
logits = self.basic_net(x)
loss = F.cross_entropy(logits, targets, reduction='sum')
grad = torch.autograd.grad(loss, x)[0]
x = x.detach() + self.step_size * torch.sign(grad.detach())
x = torch.min(torch.max(x, inputs.detach() - self.epsilon),
inputs.detach() + self.epsilon)
x = torch.clamp(x, 0, 1)
return self.basic_net(x)
class PGDL2Model(nn.Module):
"""
code adapted from
https://github.com/karandwivedi42/adversarial/blob/master/main.py
"""
def __init__(self, basic_net, config):
super(PGDL2Model, self).__init__()
self.basic_net = basic_net
self.epsilon = config['epsilon']
self.rand = config['random_start']
self.step_size = config['step_size']
self.num_steps = config['num_steps']
assert config['loss_func'] == 'xent', 'Only xent supported for now.'
def forward(self, inputs, targets, attack=False):
if not attack:
return self.basic_net(inputs)
x = inputs.clone()
if self.rand:
x = x + torch.zeros_like(x).normal_(0, self.step_size)
for _ in range(self.num_steps):
x.requires_grad_()
with torch.enable_grad():
logits = self.basic_net(x)
loss = F.cross_entropy(logits, targets, reduction='sum')
grad = torch.autograd.grad(loss, x)[0].detach()
grad_norm = grad.view(x.size(0), -1).norm(2, 1)
delta = self.step_size * grad / grad_norm.view(x.size(0), 1, 1, 1)
x = x.detach() + delta
diff = (x - inputs).view(x.size(0), -1).renorm(2, 0, self.epsilon)
x = diff.view(x.size()) + inputs
x.clamp_(0, 1)
return self.basic_net(x)
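

# Illustrative usage sketch (not part of the original repo). The config keys mirror the
# ones read in __init__ above; the tiny network, input shape, and epsilon/step values
# are assumptions made only for this example.
if __name__ == '__main__':
    class _TinyNet(nn.Module):
        def __init__(self):
            super(_TinyNet, self).__init__()
            self.fc = nn.Linear(3 * 32 * 32, 10)

        def forward(self, x):
            return self.fc(x.view(x.size(0), -1))

    config = {
        'random_start': True,
        'step_size': 2.0 / 255,
        'epsilon': 8.0 / 255,
        'num_steps': 10,
        'loss_func': 'xent',
    }
    net = PGDModel(_TinyNet(), config)
    inputs = torch.rand(4, 3, 32, 32)
    targets = torch.randint(0, 10, (4,))
    print(net(inputs, targets, attack=False).shape)  # clean forward pass
    print(net(inputs, targets, attack=True).shape)   # forward pass on PGD examples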
| 2.734375 | 3 |
fs_image/rpm/storage/tests/storage_base_test.py | singhaditya28/fs_image | 0 | 6093 | <filename>fs_image/rpm/storage/tests/storage_base_test.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import patch, MagicMock
from typing import List, Tuple
from .. import Storage # Module import to ensure we get plugins
class StorageBaseTestCase(unittest.TestCase):
'A tiny test suite that can be used to check any Storage implementation.'
def _check_write_and_read(self, storage: Storage, writes: List[bytes]):
with storage.writer() as output:
for piece in writes:
output.write(piece)
sid = output.commit()
with storage.reader(sid) as input:
written = b''.join(writes)
partial_read = input.read(3)
if written:
self.assertGreater(len(partial_read), 0)
self.assertLessEqual(len(partial_read), 3)
self.assertEqual(written, partial_read + input.read())
return sid
def check_storage_impl(
self,
storage: Storage, *,
no_empty_blobs=False,
skip_empty_writes=False,
# To make testing more meaningful, it's useful to make sure that
# some writes fill up any output buffers. For filesystem writes
# from Python, this default is probably enough.
mul=314159, # just about 300KB
# If the blob-store has a read-through cache, we cannot effectively
# test that the remove actually happened.
remove_is_immediate=True,
) -> List[Tuple[List[str], str]]: # Writes + their storage ID
# Make sure nothing bad happens if an exception flies before a
# commit. Since we don't have an ID, we can't really test that the
# partial write got discarded.
with self.assertRaisesRegex(RuntimeError, '^humbug$'):
with storage.writer() as output:
output.write(b'bah')
raise RuntimeError('humbug')
with self.assertRaisesRegex(AssertionError, '^Cannot commit twice$'):
with storage.writer() as output:
output.write(b'foo')
output.commit(remove_on_exception=True) # Leave no litter
output.commit()
# Check that the `remove_on_exception` kwarg triggers `remove`.
mock_remove = MagicMock()
with patch.object(storage, 'remove', mock_remove):
with self.assertRaisesRegex(RuntimeError, '^remove_on_exception$'):
with storage.writer() as output:
output.write(b'foo')
id_to_remove = output.commit(remove_on_exception=True)
# Contract: committed blobs are available to read
with storage.reader(id_to_remove) as reader:
self.assertEqual(b'foo', reader.read())
raise RuntimeError('remove_on_exception')
# Check that `remove` would have been called, and then call it.
mock_remove.assert_called_once_with(id_to_remove)
storage.remove(id_to_remove) # Exercise the real `remove`
if remove_is_immediate:
            # The removed ID should no longer be available.
with self.assertRaises(Exception):
with storage.reader(id_to_remove) as input:
# The reader may be a pipe from another Python process,
# let's consume its output to avoid BrokenPipe logspam.
input.read()
return [
(
writes,
self._check_write_and_read(
storage,
writes if i is None else [*writes[:i], b'', *writes[i:]],
),
) for writes in [
# Some large writes
[b'abcd' * mul, b'efgh' * mul],
[b'abc' * mul, b'defg' * mul],
[b'abc' * mul, b'def' * mul, b'g' * mul],
[b'abcd' * mul],
[b'abc' * mul, b'd' * mul],
# Some tiny writes without a multiplier
[b'a', b'b', b'c', b'd'],
[b'ab'],
[b'a', b'b'],
# While clowny, some blob storage systems refuse empty blobs.
*([] if no_empty_blobs else [
[b''],
[],
]),
]
# Test the given writes, optionally insert a blank at each pos
for i in [
None,
*([] if skip_empty_writes else range(len(writes) + 1)),
]
]
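
# --- Usage sketch (editor's addition, not part of the original file) ---
# A hedged illustration of how a concrete backend test could reuse this base
# class. `FilesystemStorage` and its constructor argument are hypothetical
# stand-ins for a real Storage implementation registered via the plugin
# import above, so this is left as a commented sketch rather than runnable code.
#
# class FilesystemStorageTestCase(StorageBaseTestCase):
#     def test_write_and_read(self):
#         storage = FilesystemStorage(base_dir='/tmp/storage-test')  # hypothetical
#         for writes, sid in self.check_storage_impl(storage):
#             with storage.reader(sid) as reader:
#                 self.assertEqual(b''.join(writes), reader.read())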
| 2.0625 | 2 |
sdk/python/pulumi_gcp/kms/get_kms_crypto_key_version.py | sisisin/pulumi-gcp | 0 | 6094 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetKMSCryptoKeyVersionResult',
'AwaitableGetKMSCryptoKeyVersionResult',
'get_kms_crypto_key_version',
'get_kms_crypto_key_version_output',
]
@pulumi.output_type
class GetKMSCryptoKeyVersionResult:
"""
A collection of values returned by getKMSCryptoKeyVersion.
"""
def __init__(__self__, algorithm=None, crypto_key=None, id=None, name=None, protection_level=None, public_keys=None, state=None, version=None):
if algorithm and not isinstance(algorithm, str):
raise TypeError("Expected argument 'algorithm' to be a str")
pulumi.set(__self__, "algorithm", algorithm)
if crypto_key and not isinstance(crypto_key, str):
raise TypeError("Expected argument 'crypto_key' to be a str")
pulumi.set(__self__, "crypto_key", crypto_key)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if protection_level and not isinstance(protection_level, str):
raise TypeError("Expected argument 'protection_level' to be a str")
pulumi.set(__self__, "protection_level", protection_level)
if public_keys and not isinstance(public_keys, list):
raise TypeError("Expected argument 'public_keys' to be a list")
pulumi.set(__self__, "public_keys", public_keys)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if version and not isinstance(version, int):
raise TypeError("Expected argument 'version' to be a int")
pulumi.set(__self__, "version", version)
@property
@pulumi.getter
def algorithm(self) -> str:
"""
The CryptoKeyVersionAlgorithm that this CryptoKeyVersion supports.
"""
return pulumi.get(self, "algorithm")
@property
@pulumi.getter(name="cryptoKey")
def crypto_key(self) -> str:
return pulumi.get(self, "crypto_key")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name for this CryptoKeyVersion in the format `projects/*/locations/*/keyRings/*/cryptoKeys/*/cryptoKeyVersions/*`
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="protectionLevel")
def protection_level(self) -> str:
"""
The ProtectionLevel describing how crypto operations are performed with this CryptoKeyVersion. See the [protection_level reference](https://cloud.google.com/kms/docs/reference/rest/v1/ProtectionLevel) for possible outputs.
"""
return pulumi.get(self, "protection_level")
@property
@pulumi.getter(name="publicKeys")
def public_keys(self) -> Sequence['outputs.GetKMSCryptoKeyVersionPublicKeyResult']:
"""
If the enclosing CryptoKey has purpose `ASYMMETRIC_SIGN` or `ASYMMETRIC_DECRYPT`, this block contains details about the public key associated to this CryptoKeyVersion. Structure is documented below.
"""
return pulumi.get(self, "public_keys")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the CryptoKeyVersion. See the [state reference](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions#CryptoKeyVersion.CryptoKeyVersionState) for possible outputs.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def version(self) -> Optional[int]:
return pulumi.get(self, "version")
class AwaitableGetKMSCryptoKeyVersionResult(GetKMSCryptoKeyVersionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKMSCryptoKeyVersionResult(
algorithm=self.algorithm,
crypto_key=self.crypto_key,
id=self.id,
name=self.name,
protection_level=self.protection_level,
public_keys=self.public_keys,
state=self.state,
version=self.version)
def get_kms_crypto_key_version(crypto_key: Optional[str] = None,
version: Optional[int] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKMSCryptoKeyVersionResult:
"""
Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
A CryptoKeyVersion represents an individual cryptographic key, and the associated key material.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring",
location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
key_ring=my_key_ring.id)
my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"])
```
:param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
`kms.CryptoKey` resource/datasource.
:param int version: The version number for this CryptoKeyVersion. Defaults to `1`.
"""
__args__ = dict()
__args__['cryptoKey'] = crypto_key
__args__['version'] = version
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:kms/getKMSCryptoKeyVersion:getKMSCryptoKeyVersion', __args__, opts=opts, typ=GetKMSCryptoKeyVersionResult).value
return AwaitableGetKMSCryptoKeyVersionResult(
algorithm=__ret__.algorithm,
crypto_key=__ret__.crypto_key,
id=__ret__.id,
name=__ret__.name,
protection_level=__ret__.protection_level,
public_keys=__ret__.public_keys,
state=__ret__.state,
version=__ret__.version)
@_utilities.lift_output_func(get_kms_crypto_key_version)
def get_kms_crypto_key_version_output(crypto_key: Optional[pulumi.Input[str]] = None,
version: Optional[pulumi.Input[Optional[int]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKMSCryptoKeyVersionResult]:
"""
Provides access to a Google Cloud Platform KMS CryptoKeyVersion. For more information see
[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key_version)
and
[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys.cryptoKeyVersions).
A CryptoKeyVersion represents an individual cryptographic key, and the associated key material.
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring",
location="us-central1")
my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key",
key_ring=my_key_ring.id)
my_crypto_key_version = gcp.kms.get_kms_crypto_key_version(crypto_key=data["google_kms_key"]["my_key"]["id"])
```
:param str crypto_key: The `self_link` of the Google Cloud Platform CryptoKey to which the key version belongs. This is also the `id` field of the
`kms.CryptoKey` resource/datasource.
:param int version: The version number for this CryptoKeyVersion. Defaults to `1`.
"""
...
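
# --- Usage sketch (editor's addition, not part of the original file) ---
# A hedged example of the `_output` variant above, which accepts Pulumi Inputs
# and returns a `pulumi.Output`. The key ring and crypto key names are
# illustrative placeholders, mirroring the docstring example.
#
# import pulumi
# import pulumi_gcp as gcp
#
# my_key_ring = gcp.kms.get_kms_key_ring(name="my-key-ring", location="us-central1")
# my_crypto_key = gcp.kms.get_kms_crypto_key(name="my-crypto-key", key_ring=my_key_ring.id)
# version = gcp.kms.get_kms_crypto_key_version_output(crypto_key=my_crypto_key.id)
# pulumi.export("kms_key_algorithm", version.apply(lambda v: v.algorithm))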
| 1.945313 | 2 |
lecture11/subsets.py | nd-cse-30872-fa20/cse-30872-fa20-examples | 0 | 6095 | #!/usr/bin/env python3
import itertools
# Constants
NUMBERS = range(0, 10)
# Main Execution
def main():
count = 0
for length in range(0, len(NUMBERS) + 1):
for subset in itertools.combinations(NUMBERS, length):
if sum(subset) % 3 == 0:
count += 1
print(count)
if __name__ == '__main__':
main()
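
# --- Verification note (editor's addition, not part of the original file) ---
# The loop counts subsets of {0, ..., 9} whose sum is divisible by 3. A
# roots-of-unity filter over residues mod 3 (0..9 contains four 0s, three 1s
# and three 2s) gives the same answer in closed form:
#   count = (2**10 + 16 + 16) // 3 == 352
# so the script above is expected to print 352.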
| 3.71875 | 4 |
src/toil/batchSystems/abstractBatchSystem.py | Hexotical/toil | 348 | 6096 | <reponame>Hexotical/toil<gh_stars>100-1000
# Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
import os
import shutil
from abc import ABC, abstractmethod
from argparse import ArgumentParser, _ArgumentGroup
from contextlib import contextmanager
from typing import (Any,
Callable,
ContextManager,
Dict,
Iterator,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
NamedTuple)
from toil.common import Toil, cacheDirName, Config
from toil.deferred import DeferredFunctionManager
from toil.fileStores.abstractFileStore import AbstractFileStore
from toil.job import JobDescription
from toil.resource import Resource
logger = logging.getLogger(__name__)
# Value to use as exitStatus in UpdatedBatchJobInfo.exitStatus when status is not available.
EXIT_STATUS_UNAVAILABLE_VALUE = 255
class BatchJobExitReason(enum.Enum):
FINISHED: int = 1 # Successfully finished.
FAILED: int = 2 # Job finished, but failed.
LOST: int = 3 # Preemptable failure (job's executing host went away).
KILLED: int = 4 # Job killed before finishing.
ERROR: int = 5 # Internal error.
MEMLIMIT: int = 6 # Job hit batch system imposed memory limit
class UpdatedBatchJobInfo(NamedTuple):
jobID: int
exitStatus: int
"""
The exit status (integer value) of the job. 0 implies successful.
EXIT_STATUS_UNAVAILABLE_VALUE is used when the exit status is not available (e.g. job is lost).
"""
exitReason: Optional[BatchJobExitReason]
wallTime: Union[float, int, None]
# Information required for worker cleanup on shutdown of the batch system.
class WorkerCleanupInfo(NamedTuple):
workDir: str
"""workdir path (where the cache would go)"""
workflowID: str
"""used to identify files specific to this workflow"""
cleanWorkDir: str
class AbstractBatchSystem(ABC):
"""
An abstract (as far as Python currently allows) base class to represent the interface the batch
system must provide to Toil.
"""
@classmethod
@abstractmethod
def supportsAutoDeployment(cls) -> bool:
"""
Whether this batch system supports auto-deployment of the user script itself. If it does,
the :meth:`.setUserScript` can be invoked to set the resource object representing the user
script.
Note to implementors: If your implementation returns True here, it should also override
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def supportsWorkerCleanup(cls) -> bool:
"""
Indicates whether this batch system invokes
:meth:`BatchSystemSupport.workerCleanup` after the last job for a
particular workflow invocation finishes. Note that the term *worker*
refers to an entire node, not just a worker process. A worker process
may run more than one job sequentially, and more than one concurrent
worker process may exist on a worker node, for the same workflow. The
batch system is said to *shut down* after the last worker process
terminates.
"""
raise NotImplementedError()
def setUserScript(self, userScript: Resource) -> None:
"""
Set the user script for this workflow. This method must be called before the first job is
issued to this batch system, and only if :meth:`.supportsAutoDeployment` returns True,
otherwise it will raise an exception.
:param userScript: the resource object representing the user script
or module and the modules it depends on.
"""
raise NotImplementedError()
@abstractmethod
def issueBatchJob(self, jobDesc: JobDescription, job_environment: Optional[Dict[str, str]] = None) -> int:
"""
Issues a job with the specified command to the batch system and returns a unique jobID.
        :param jobDesc: a toil.job.JobDescription
:param job_environment: a collection of job-specific environment variables
to be set on the worker.
:return: a unique jobID that can be used to reference the newly issued job
"""
raise NotImplementedError()
@abstractmethod
def killBatchJobs(self, jobIDs: List[int]) -> None:
"""
Kills the given job IDs. After returning, the killed jobs will not
appear in the results of getRunningBatchJobIDs. The killed job will not
be returned from getUpdatedBatchJob.
:param jobIDs: list of IDs of jobs to kill
"""
raise NotImplementedError()
# FIXME: Return value should be a set (then also fix the tests)
@abstractmethod
def getIssuedBatchJobIDs(self) -> List[int]:
"""
Gets all currently issued jobs
:return: A list of jobs (as jobIDs) currently issued (may be running, or may be
waiting to be run). Despite the result being a list, the ordering should not
be depended upon.
"""
raise NotImplementedError()
@abstractmethod
def getRunningBatchJobIDs(self) -> Dict[int, float]:
"""
Gets a map of jobs as jobIDs that are currently running (not just waiting)
and how long they have been running, in seconds.
:return: dictionary with currently running jobID keys and how many seconds they have
been running as the value
"""
raise NotImplementedError()
@abstractmethod
def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]:
"""
Returns information about job that has updated its status (i.e. ceased
running, either successfully or with an error). Each such job will be
returned exactly once.
Does not return info for jobs killed by killBatchJobs, although they
may cause None to be returned earlier than maxWait.
:param maxWait: the number of seconds to block, waiting for a result
:return: If a result is available, returns UpdatedBatchJobInfo.
Otherwise it returns None. wallTime is the number of seconds (a strictly
positive float) in wall-clock time the job ran for, or None if this
batch system does not support tracking wall time.
"""
raise NotImplementedError()
def getSchedulingStatusMessage(self) -> Optional[str]:
"""
Get a log message fragment for the user about anything that might be
going wrong in the batch system, if available.
If no useful message is available, return None.
This can be used to report what resource is the limiting factor when
scheduling jobs, for example. If the leader thinks the workflow is
stuck, the message can be displayed to the user to help them diagnose
why it might be stuck.
:return: User-directed message about scheduling state.
"""
# Default implementation returns None.
# Override to provide scheduling status information.
return None
@abstractmethod
def shutdown(self) -> None:
"""
Called at the completion of a toil invocation.
Should cleanly terminate all worker threads.
"""
raise NotImplementedError()
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
"""
raise NotImplementedError()
@classmethod
def add_options(cls, parser: Union[ArgumentParser, _ArgumentGroup]) -> None:
"""
If this batch system provides any command line options, add them to the given parser.
"""
pass
OptionType = TypeVar('OptionType')
@classmethod
def setOptions(cls, setOption: Callable[[str, Optional[Callable[[Any], OptionType]], Optional[Callable[[OptionType], None]], Optional[OptionType], Optional[List[str]]], None]) -> None:
"""
Process command line or configuration options relevant to this batch system.
:param setOption: A function with signature
setOption(option_name, parsing_function=None, check_function=None, default=None, env=None)
returning nothing, used to update run configuration as a side effect.
"""
# TODO: change type to a Protocol to express kwarg names, or else use a
# different interface (generator?)
pass
def getWorkerContexts(self) -> List[ContextManager[Any]]:
"""
Get a list of picklable context manager objects to wrap worker work in,
in order.
Can be used to ask the Toil worker to do things in-process (such as
configuring environment variables, hot-deploying user scripts, or
cleaning up a node) that would otherwise require a wrapping "executor"
process.
"""
return []
class BatchSystemSupport(AbstractBatchSystem):
"""
Partial implementation of AbstractBatchSystem, support methods.
"""
def __init__(self, config: Config, maxCores: float, maxMemory: int, maxDisk: int) -> None:
"""
Initializes initial state of the object
:param toil.common.Config config: object is setup by the toilSetup script and
has configuration parameters for the jobtree. You can add code
to that script to get parameters for your batch system.
:param float maxCores: the maximum number of cores the batch system can
request for any one job
:param int maxMemory: the maximum amount of memory the batch system can
request for any one job, in bytes
:param int maxDisk: the maximum amount of disk space the batch system can
request for any one job, in bytes
"""
super().__init__()
self.config = config
self.maxCores = maxCores
self.maxMemory = maxMemory
self.maxDisk = maxDisk
self.environment: Dict[str, str] = {}
self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir,
workflowID=self.config.workflowID,
cleanWorkDir=self.config.cleanWorkDir)
def checkResourceRequest(self, memory: int, cores: float, disk: int, job_name: str = '', detail: str = '') -> None:
"""
Check resource request is not greater than that available or allowed.
:param int memory: amount of memory being requested, in bytes
:param float cores: number of cores being requested
:param int disk: amount of disk space being requested, in bytes
:param str job_name: Name of the job being checked, for generating a useful error report.
:param str detail: Batch-system-specific message to include in the error.
:raise InsufficientSystemResources: raised when a resource is requested in an amount
greater than allowed
"""
batch_system = self.__class__.__name__ or 'this batch system'
for resource, requested, available in [('cores', cores, self.maxCores),
('memory', memory, self.maxMemory),
('disk', disk, self.maxDisk)]:
assert requested is not None
if requested > available:
unit = 'bytes of ' if resource in ('disk', 'memory') else ''
R = f'The job {job_name} is r' if job_name else 'R'
if resource == 'disk':
msg = (f'{R}equesting {requested} {unit}{resource} for temporary space, '
f'more than the maximum of {available} {unit}{resource} of free space on '
f'{self.config.workDir} that {batch_system} was configured with, or enforced '
f'by --max{resource.capitalize()}. Try setting/changing the toil option '
f'"--workDir" or changing the base temporary directory by setting TMPDIR.')
else:
msg = (f'{R}equesting {requested} {unit}{resource}, more than the maximum of '
f'{available} {unit}{resource} that {batch_system} was configured with, '
f'or enforced by --max{resource.capitalize()}.')
if detail:
msg += detail
raise InsufficientSystemResources(msg)
def setEnv(self, name: str, value: Optional[str] = None) -> None:
"""
Set an environment variable for the worker process before it is launched. The worker
process will typically inherit the environment of the machine it is running on but this
method makes it possible to override specific variables in that inherited environment
before the worker is launched. Note that this mechanism is different to the one used by
the worker internally to set up the environment of a job. A call to this method affects
all jobs issued after this method returns. Note to implementors: This means that you
would typically need to copy the variables before enqueuing a job.
If no value is provided it will be looked up from the current environment.
:param str name: the environment variable to be set on the worker.
:param str value: if given, the environment variable given by name will be set to this value.
if None, the variable's current value will be used as the value on the worker
:raise RuntimeError: if value is None and the name cannot be found in the environment
"""
if value is None:
try:
value = os.environ[name]
except KeyError:
raise RuntimeError(f"{name} does not exist in current environment")
self.environment[name] = value
def formatStdOutErrPath(self, toil_job_id: int, cluster_job_id: str, std: str) -> str:
"""
Format path for batch system standard output/error and other files
generated by the batch system itself.
Files will be written to the Toil work directory (which may
be on a shared file system) with names containing both the Toil and
batch system job IDs, for ease of debugging job failures.
:param: int toil_job_id : The unique id that Toil gives a job.
:param: cluster_job_id : What the cluster, for example, GridEngine, uses as its internal job id.
:param: string std : The provenance of the stream (for example: 'err' for 'stderr' or 'out' for 'stdout')
:rtype: string : Formatted filename; however if self.config.noStdOutErr is true,
returns '/dev/null' or equivalent.
"""
if self.config.noStdOutErr:
return os.devnull
fileName: str = f'toil_{self.config.workflowID}.{toil_job_id}.{cluster_job_id}.{std}.log'
workDir: str = Toil.getToilWorkDir(self.config.workDir)
return os.path.join(workDir, fileName)
@staticmethod
def workerCleanup(info: WorkerCleanupInfo) -> None:
"""
Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`.
:param WorkerCleanupInfo info: A named tuple consisting of all the relevant information
for cleaning up the worker.
"""
assert isinstance(info, WorkerCleanupInfo)
workflowDir = Toil.getLocalWorkflowDir(info.workflowID, info.workDir)
DeferredFunctionManager.cleanupWorker(workflowDir)
workflowDirContents = os.listdir(workflowDir)
AbstractFileStore.shutdownFileStore(workflowDir, info.workflowID)
if (info.cleanWorkDir == 'always'
or info.cleanWorkDir in ('onSuccess', 'onError')
and workflowDirContents in ([], [cacheDirName(info.workflowID)])):
shutil.rmtree(workflowDir, ignore_errors=True)
class NodeInfo:
"""
The coresUsed attribute is a floating point value between 0 (all cores idle) and 1 (all cores
busy), reflecting the CPU load of the node.
The memoryUsed attribute is a floating point value between 0 (no memory used) and 1 (all memory
used), reflecting the memory pressure on the node.
The coresTotal and memoryTotal attributes are the node's resources, not just the used resources
The requestedCores and requestedMemory attributes are all the resources that Toil Jobs have reserved on the
node, regardless of whether the resources are actually being used by the Jobs.
The workers attribute is an integer reflecting the number of workers currently active workers
on the node.
"""
def __init__(self, coresUsed: float, memoryUsed: float,
coresTotal: float, memoryTotal: int,
requestedCores: float, requestedMemory: int,
workers: int) -> None:
self.coresUsed = coresUsed
self.memoryUsed = memoryUsed
self.coresTotal = coresTotal
self.memoryTotal = memoryTotal
self.requestedCores = requestedCores
self.requestedMemory = requestedMemory
self.workers = workers
class AbstractScalableBatchSystem(AbstractBatchSystem):
"""
A batch system that supports a variable number of worker nodes. Used by :class:`toil.
provisioners.clusterScaler.ClusterScaler` to scale the number of worker nodes in the cluster
up or down depending on overall load.
"""
@abstractmethod
def getNodes(self, preemptable: Optional[bool] = None) -> Dict[str, NodeInfo]:
"""
Returns a dictionary mapping node identifiers of preemptable or non-preemptable nodes to
NodeInfo objects, one for each node.
:param preemptable: If True (False) only (non-)preemptable nodes will be returned.
If None, all nodes will be returned.
"""
raise NotImplementedError()
@abstractmethod
def nodeInUse(self, nodeIP: str) -> bool:
"""
        Can be used to determine if a worker node is running any tasks. If the node doesn't
        exist, this function should simply return False.
:param nodeIP: The worker nodes private IP address
:return: True if the worker node has been issued any tasks, else False
"""
raise NotImplementedError()
# TODO: May be unused!
@abstractmethod
@contextmanager
def nodeFiltering(self, filter: Optional[Callable[[NodeInfo], bool]]) -> Iterator[None]:
"""
Used to prevent races in autoscaling where
1) nodes have reported to the autoscaler as having no jobs
2) scaler decides to terminate these nodes. In parallel the batch system assigns jobs to the same nodes
3) scaler terminates nodes, resulting in job failures for all jobs on that node.
Call this method prior to node termination to ensure that nodes being considered for termination are not
assigned new jobs. Call the method again passing None as the filter to disable the filtering
after node termination is done.
        :param filter: This will be used as a filter on nodes considered when assigning new jobs.
After this context manager exits the filter should be removed
"""
raise NotImplementedError()
@abstractmethod
def ignoreNode(self, nodeAddress: str) -> None:
"""
Stop sending jobs to this node. Used in autoscaling
when the autoscaler is ready to terminate a node, but
jobs are still running. This allows the node to be terminated
after the current jobs have finished.
:param nodeAddress: IP address of node to ignore.
"""
raise NotImplementedError()
@abstractmethod
def unignoreNode(self, nodeAddress: str) -> None:
"""
Stop ignoring this address, presumably after
a node with this address has been terminated. This allows for the
possibility of a new node having the same address as a terminated one.
"""
raise NotImplementedError()
class InsufficientSystemResources(Exception):
pass
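
# --- Illustrative sketch (editor's addition, not part of the original file) ---
# A minimal, hedged outline of the interface surface a concrete batch system
# must provide. This toy class runs nothing and ignores most of the contract
# documented above; the attribute access on `jobDesc` (command, memory, cores,
# disk) is assumed to follow toil.job.JobDescription and may differ.
class _ExampleNoOpBatchSystem(BatchSystemSupport):
    """Documentation-only example; not a usable Toil batch system."""

    @classmethod
    def supportsAutoDeployment(cls) -> bool:
        return False

    @classmethod
    def supportsWorkerCleanup(cls) -> bool:
        return False

    def issueBatchJob(self, jobDesc: JobDescription, job_environment: Optional[Dict[str, str]] = None) -> int:
        # A real implementation would validate resources and enqueue jobDesc.command.
        self.checkResourceRequest(jobDesc.memory, jobDesc.cores, jobDesc.disk, job_name=str(jobDesc))
        return 0

    def killBatchJobs(self, jobIDs: List[int]) -> None:
        pass  # nothing is ever running in this sketch

    def getIssuedBatchJobIDs(self) -> List[int]:
        return []

    def getRunningBatchJobIDs(self) -> Dict[int, float]:
        return {}

    def getUpdatedBatchJob(self, maxWait: int) -> Optional[UpdatedBatchJobInfo]:
        return None

    def shutdown(self) -> None:
        pass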
| 1.859375 | 2 |
demo/other_demo.py | Heartfilia/lite_tools | 5 | 6097 | # -*- coding: utf-8 -*-
from lite_tools import get_md5, get_sha, get_sha3, get_b64e, get_b64d
# about hashlib ==> get_md5, get_sha, get_sha3 || default mode=256
s = "test_information" # 这里只能丢字符串
print(get_md5(s)) # 5414ffd88fcb58417e64ecec51bb3a6b
print(get_md5(s, upper=True)) # 5414FFD88FCB58417E64ECEC51BB3A6B
print(get_md5(s, to_bin=True)) # b'T\x14\xff\xd8\x8f\xcbXA~d\xec\xecQ\xbb:k' # 转成二进制的需求没什么用但是可以保留
print(get_sha(s)) # d09869fdf901465c8566f0e2debfa3f6a3d878a8157e199c7c4c6dd755617f33
print(get_sha(s, to_bin=True)) # b'\xd0\x98i\xfd\xf9\x01F\\\x85f\xf0\xe2\xde\xbf\xa3\xf6\xa3\xd8x\xa8\x15~\x19\x9c|Lm\xd7Ua\x7f3'
print(get_sha(s, mode=1)) # ada5dfdf0c9a76a84958310b838a70b6fd6d01f6 # default mode=256 // mode: 1 224 256 384 512
print(get_sha3(s)) # 9c539ca35c6719f546e67837ff37fe7791e53fe40715cd4da0167c78c9adc2e8
print(get_sha3(s, to_bin=True)) # b'\x9cS\x9c\xa3\\g\x19\xf5F\xe6x7\xff7\xfew\x91\xe5?\xe4\x07\x15\xcdM\xa0\x16|x\xc9\xad\xc2\xe8'
print(get_sha3(s, mode=1)) # return "" // SUPPORT: sha3_224 sha3_256 sha3_384 sha3_512// only need inputting: 224 256 384 512 # default mode=256 // mode: 224 256 384 512
print(get_sha3(s, mode=384)) # 95c09e20a139843eae877a64cd95d6a629b3c9ff383b5460557aab2612682d4228d05fe41606a79acf5ae1c4de35160c
# about base64 ==> get_b64e, get_b64d
res_b64_encode = get_b64e(s)
print(res_b64_encode) # dGVzdF9pbmZvcm1hdGlvbg==
res_b64_bin = get_b64e(s, to_bin=True)
print(res_b64_bin) # b'dGVzdF9pbmZvcm1hdGlvbg=='
res_b32_encode = get_b64e(s, mode=32) # default mode=64 // mode: 16 32 64 85
print(res_b32_encode) # ORSXG5C7NFXGM33SNVQXI2LPNY======
res_b64_decode = get_b64d(res_b64_encode)
print(res_b64_decode) # test_information
res_b32_decode = get_b64d(res_b32_encode, mode=32) # default mode=64 // mode: 16 32 64 85
print(res_b32_decode) # test_information
| 1.71875 | 2 |
prereise/gather/solardata/tests/__init__.py | terrywqf/PreREISE | 0 | 6098 | __all__ = ["mock_pv_info", "test_pv_tracking"]
| 0.976563 | 1 |
arfit/cp_utils.py | farr/arfit | 5 | 6099 | <reponame>farr/arfit<filename>arfit/cp_utils.py
import carmcmc as cm
from gatspy.periodic import LombScargleFast
import matplotlib.pyplot as plt
import numpy as np
def csample_from_files(datafile, chainfile, p, q):
data = np.loadtxt(datafile)
times, tind = np.unique(data[:,0], return_index=True)
data = data[tind, :]
chain = np.loadtxt(chainfile)
assert chain.shape[1] == p + q + 5, 'dimension mismatch'
return cm.CarmaSample(data[:,0], data[:,1], data[:,2], None, q=q, trace=chain[:,:-2], loglike=chain[:,-2], logpost=chain[:,-1])
def normalised_lombscargle(ts, ys, dys, oversampling=5, nyquist_factor=3):
model = LombScargleFast().fit(ts, ys, dys)
pers, pows = model.periodogram_auto(oversampling=oversampling, nyquist_factor=nyquist_factor)
fs = 1.0/pers
T = np.max(ts) - np.min(ts)
mu = 1/T*np.trapz(ys, ts)
s2 = 1/T*np.trapz(np.square(ys-mu), ts)
return fs, s2*pows/np.trapz(pows, fs)
def plot_psd_sample_data(sample, oversampling=5, nyquist_factor=3):
psd_low, psd_high, psd_med, fs = sample.plot_power_spectrum(doShow=False)
plt.clf()
plt.loglog(fs, psd_med, '-b', alpha=0.33)
plt.fill_between(fs, psd_low, psd_high, color='b', alpha=0.17)
fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
bw = fs[-1] - fs[0]
T = sample.time[-1] - sample.time[0]
s2 = 1/T*np.trapz(np.square(sample.ysig), sample.time)
noise_level = s2/bw
levels = noise_level*np.sqrt(sample.get_samples('measerr_scale'))
plt.axhline(np.median(levels), color='g', alpha=0.33)
plt.fill_between(fs, np.percentile(levels, 84)+0*fs, np.percentile(levels, 16)+0*fs, color='g', alpha=0.17)
plt.loglog(fs, psd, '-r', alpha=0.33)
def plot_psd_sample_draw(sample, loc='upper left', oversampling=5, nyquist_factor=3):
fs, psd = normalised_lombscargle(sample.time, sample.y, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
ys_draw = sample.predict(sample.time, bestfit='random')[0]
fs, dpsd = normalised_lombscargle(sample.time, ys_draw, sample.ysig, oversampling=oversampling, nyquist_factor=nyquist_factor)
plt.loglog(fs, psd, '-k', label='Data', alpha=0.5)
plt.loglog(fs, dpsd, '-b', label='Prediction', alpha=0.5)
plt.legend(loc=loc)
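
# --- Usage sketch (editor's addition, not part of the original file) ---
# A hedged example of driving the helpers above. The file names and the CARMA
# order (p, q) are placeholders, not values taken from this repository.
#
# sample = csample_from_files('lightcurve.txt', 'carma_chain.txt', p=3, q=2)
# plot_psd_sample_data(sample)
# plt.savefig('psd_data.png')
# plot_psd_sample_draw(sample)
# plt.savefig('psd_draw.png')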
| 1.828125 | 2 |