max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
annotations/rip_annotated_junctions.py | ChristopherWilks/snaptron | 25 | 3800 | #!/usr/bin/env python
"""
rip_annotated_junctions.py
Non-reference/species version of this script, no lift-over
Rips junctions from annotation files contained in
jan_24_2016_annotations.tar.gz, as described in annotation_definition.md.
Junctions are dumped to stdout, which we record as annotated_junctions.tsv.gz
in runs/sra (same directory as this file). annotated_junctions.tsv.gz is
required by tables.py. The format of annotated_junctions.tsv.gz is
(tab-separated fields), one per junction
1. Chromosome
2. Start position (1-based, inclusive)
3. End position (1-based, inclusive)
4. Strand (+ or -)
5. anno source (abbreviation)
Must have
Stats are written to stderr
From the runs/sra/v2 directory, we ran
pypy rip_annotated_junctions.py
--hisat2-dir /path/to/hisat2-2.0.1-beta
--annotations path/to/jan_24_2016_annotations.tar.gz
| sort -k1,1 -k2,2n -k3,3n | gzip >annotated_junctions.tsv.gz
"""
import subprocess
import tarfile
import argparse
import tempfile
import atexit
import shutil
import glob
import os
import gzip
import sys
#file2source = {"hg19/gencode.v19.annotation.gtf.gz":"gC19","hg19/refGene.txt.gz":"rG19","hg19/acembly.txt.gz":"aC19","hg19/ccdsGene.txt.gz":"cG19","hg19/vegaGene.txt.gz":"vG19","hg19/knownGene.txt.gz":"kG19","hg19/mgcGenes.txt.gz":"mG19","hg19/lincRNAsTranscripts.txt.gz":"lR19","hg19/sibGene.txt.gz":"sG19","hg38/refGene.txt.gz":"rG38","hg38/ccdsGene.txt.gz":"cG38","hg38/gencode.v24.annotation.gtf.gz":"gC38","hg38/knownGene.txt.gz":"kG38","hg38/mgcGenes.txt.gz":"mG38","hg38/lincRNAsTranscripts.txt.gz":"lR38","hg38/sibGene.txt.gz":"sG38"}
#file2source = {"mm10/mouse10_ucsc_genes.gtf.gz":"kG10","mm10/mouse10_gencodevm11_comp.gtf.gz":"gC11","mm10/mouse10_gencodevm09_comp.gtf.gz":"gC09","mm10/mouse10_refseq_refgene.gtf.gz":"rG10"}
file2source = {"mouse10_ucsc_genes.gtf.gz":"kG10","mouse10_gencodevm11_comp.gtf.gz":"gC11","mouse10_gencodevm09_comp.gtf.gz":"gC09","mouse10_refseq_refgene.gtf.gz":"rG10"}
if __name__ == '__main__':
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Add command-line arguments
parser.add_argument('--extract-script-dir', type=str, required=True,
help=('path to directory containing extract_splice_sites.py script (from HISAT2)')
)
parser.add_argument('--annotations', type=str, required=True,
help=('full path to directory that has the annotation GTF(s) in gzipped format')
)
args = parser.parse_args()
extract_destination = tempfile.mkdtemp()
atexit.register(shutil.rmtree, extract_destination)
#with tarfile.open(args.annotations, 'r:gz') as tar:
# tar.extractall(path=extract_destination)
extract_splice_sites_path = os.path.join(args.extract_script_dir,
'extract_splice_sites.py')
containing_dir = os.path.dirname(os.path.realpath(__file__))
annotated_junctions_ = set()
for junction_file in glob.glob(
os.path.join(args.annotations, '*')
):
label = os.path.basename(junction_file)
datasource_code = file2source[label]
unique_junctions = set()
#extract_splice_sites_path prints 0-based, exon coords around junctions
#hence the +2 for the start here
extract_process = subprocess.Popen(' '.join([
sys.executable,
extract_splice_sites_path,
'<(gzip -cd %s)'
% junction_file
]),
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE
)
for line in extract_process.stdout:
tokens = line.strip().split('\t')
tokens[1] = int(tokens[1]) + 2
tokens[2] = int(tokens[2])
if tokens[2] < tokens[1]:
print >>sys.stderr, (
'Invalid junction ({}, {}, {}) from file {}. '
'Skipping.'
).format(
tokens[0], tokens[1], tokens[2], junction_file
)
continue
tokens.append(datasource_code)
junction_to_add = tuple(tokens)
annotated_junctions_.add(junction_to_add)
unique_junctions.add(junction_to_add)
extract_process.stdout.close()
exit_code = extract_process.wait()
if exit_code != 0:
raise RuntimeError(
'extract_splice_sites.py had nonzero exit code {}.'.format(
exit_code
)
)
print >>sys.stderr, 'Junctions in {}: {}'.format(
label,
len(unique_junctions)
)
junc2datasource = {}
for junction in annotated_junctions_:
if junction[:4] not in junc2datasource:
junc2datasource[junction[:4]]=set()
junc2datasource[junction[:4]].add(junction[4])
seen = set()
for junction in annotated_junctions_:
if junction[:4] not in seen:
sources = ",".join(sorted(junc2datasource[junction[:4]]))
print "%s\t%s" % ('\t'.join(map(str, junction[:4])),sources)
seen.add(junction[:4])
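# --- Illustrative note (not part of the original script) ---
# A hedged sketch of how the resulting annotated_junctions.tsv.gz could be consumed,
# following the five tab-separated fields documented in the docstring above; the file
# name comes from the docstring and nothing else is assumed:
#
# import gzip
# with gzip.open('annotated_junctions.tsv.gz') as fh:
#     for line in fh:
#         chrom, start, end, strand, sources = line.rstrip('\n').split('\t')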
| 2.1875 | 2 |
dramkit/_tmp/VMD.py | Genlovy-Hoo/dramkit | 0 | 3801 | <gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
def vmd( signal, alpha, tau, K, DC, init, tol):
'''
When decomposing a signal with VMD, the signal is simply fed in and decomposed; the
decomposition does not depend on the sampling frequency, and VMD takes no sampling
frequency as an input parameter. The decomposed modes are returned in output u, which is
independent of the signal length and sampling frequency. The mode center frequencies at
each iteration are returned in omega; the physical center frequency can be recovered as
2*pi/fs*omega, though the frequencies change during the iterations.
Input and Parameters:
signal - the time domain signal (1D) to be decomposed
alpha - the balancing parameter of the data-fidelity constraint
tau - time-step of the dual ascent ( pick 0 for noise-slack )
K - the number of modes to be recovered
DC - true if the first mode is put and kept at DC (0-freq)
init - 0 = all omegas start at 0
1 = all omegas start uniformly distributed
2 = all omegas initialized randomly
tol - tolerance of convergence criterion; typically around 1e-6
Output:
u - the collection of decomposed modes
u_hat - spectra of the modes
omega - estimated mode center-frequencies
'''
# Period and sampling frequency of input signal
#The sampling frequency and time are normalized inside the algorithm: the signal is taken to span 1 s, so the sampling interval is 1/length(signal) and the sampling frequency is length(signal).
save_T = len(signal)
fs = 1 / save_T
# extend the signal by mirroring
T = save_T
f_mirror = []
temp = signal[0:T//2]
f_mirror.extend(temp[::-1]) # temp[::-1] reverses the order
f_mirror.extend(signal)
temp = signal[T//2:T]
f_mirror.extend(temp[::-1])
f = f_mirror
# Time Domain 0 to T (of mirrored signal)
T = len(f)
t = [(i + 1) / T for i in range(T)] # index starts from 1
# Spectral Domain discretization
#freqs is shifted because the Fourier transform produces symmetric positive and negative frequencies; only the positive half is analyzed, so the displayed spectrum contains no negative frequencies
freqs = np.array( [i - 0.5 - 1 / T for i in t] )
# Maximum number of iterations (if not converged yet, then it won't anyway)
N = 500
# For future generalizations: individual alpha for each mode
Alpha = alpha * np.ones(K)
# Construct and center f_hat
transformed = np.fft.fft(f) # fast Fourier transform of the (mirrored) signal
f_hat = np.fft.fftshift(transformed) # shift the zero-frequency component to the center
f_hat_plus = f_hat
f_hat_plus[0:T // 2] = 0
# f_hat_plus[0:T] = 1
# matrix keeping track of every iterant // could be discarded for mem
u_hat_plus = [np.zeros((N, len(freqs)), dtype=complex) for i in range(K)]
# Initialization of omega_k
omega_plus = np.zeros((N, K))
if init == 1:
for i in range(K):
omega_plus[0, i] = (0.5 / K) * i
elif init == 2:
omega_plus[0, :] = np.sort(np.exp(np.log(fs) + (np.log(0.5) - np.log(fs)) * np.random.rand(K)))
else:
omega_plus[0, :] = 0
# if DC mode imposed, set its omega to 0
if DC:
omega_plus[0, 0] = 0
# start with empty dual variables
lambda_hat = np.zeros( (N, len(freqs)), dtype=complex)
# other inits
eps = 2.2204e-16 # machine epsilon (no built-in eps constant in Python)
uDiff = tol + eps # update step
n = 1 # loop counter
sum_uk = 0 # accumulator
#----------- Main loop for iterative updates----------
while (uDiff > tol and n < N ): #not converged and below iterations limit
#update first mode accumulator
k = 0
sum_uk = u_hat_plus[K-1][n-1,:]+ sum_uk - u_hat_plus[0][n-1,:] # author's note: sum_uk appears to stay 0 here, shape (1, 2000)
#update spectrum of first mode through Wiener filter of residuals
u_hat_plus[k][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k]*(freqs - omega_plus[n-1,k])**2)
#update first omega if not held at 0
if not DC:
omega_plus[n,k] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k][n,T//2:T])**2)
#update of any other mode
for k in range(K-1):
#accumulator
sum_uk = u_hat_plus[k][n,:] + sum_uk - u_hat_plus[k+1][n-1,:]
#mode spectrum
u_hat_plus[k+1][n,:] = (f_hat_plus - sum_uk - lambda_hat[n-1,:]/2)/(1+Alpha[k+1]*(freqs - omega_plus[n-1,k+1])**2)
#center frequencies
omega_plus[n,k+1] = (freqs[T//2:T]*np.mat(np.abs(u_hat_plus[k+1][n, T//2:T])**2).H)/np.sum(np.abs(u_hat_plus[k+1][n,T//2:T])**2)
#Dual ascent
lambda_hat[n,:] = lambda_hat[n-1,:] + tau*(np.sum([ u_hat_plus[i][n,:] for i in range(K)],0) - f_hat_plus)
#loop counter
n = n+1
#converged yet?
uDiff = eps
for i in range(K):
uDiff = uDiff + 1/T*(u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:])*np.mat((u_hat_plus[i][n-1,:]-u_hat_plus[i][n-2,:]).conjugate()).H
uDiff = np.abs(uDiff)
# ------ Postprocessing and cleanup-------
#discard empty space if converged early
N = min(N,n)
omega = omega_plus[0:N,:]
#Signal reconstruction
u_hat = np.zeros((T, K), dtype=complex)
temp = [u_hat_plus[i][N-1,T//2:T] for i in range(K) ]
u_hat[T//2:T,:] = np.squeeze(temp).T
temp = np.squeeze(np.mat(temp).conjugate())
u_hat[1:(T//2+1),:] = temp.T[::-1]
u_hat[0,:] = (u_hat[-1,:]).conjugate()
u = np.zeros((K,len(t)))
for k in range(K):
u[k,:]=np.real(np.fft.ifft(np.fft.ifftshift(u_hat[:,k])))
#remove mirror part
u = u[:,T//4:3*T//4]
#recompute spectrum
u_hat = np.zeros((T//2, K), dtype=complex)
for k in range(K):
u_hat[:,k]= np.squeeze( np.mat( np.fft.fftshift(np.fft.fft(u[k,:])) ).H)
return u, u_hat, omega
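# --- Illustrative usage sketch (not part of the original module) ---
# Calls vmd() on a synthetic two-tone signal. The parameter values below
# (alpha=2000, tau=0, K=2, DC=0, init=1, tol=1e-6) are common illustrative
# choices, not values prescribed by this file.
if __name__ == '__main__':
    t_demo = np.linspace(0, 1, 1000, endpoint=False)
    # one slow and one fast cosine; VMD should separate them into K=2 modes
    sig = np.cos(2 * np.pi * 5 * t_demo) + 0.5 * np.cos(2 * np.pi * 50 * t_demo)
    u_demo, u_hat_demo, omega_demo = vmd(sig, alpha=2000, tau=0, K=2, DC=0, init=1, tol=1e-6)
    print(u_demo.shape)       # (2, 1000): one row per recovered mode
    print(omega_demo[-1, :])  # final normalized mode center frequencies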
| 2.453125 | 2 |
src/PyDS/Queue/Deque.py | AoWangPhilly/PyDS | 0 | 3802 | <reponame>AoWangPhilly/PyDS<gh_stars>0
class Deque:
def add_first(self, value):
...
def add_last(self, value):
...
def delete_first(self):
...
def delete_last(self):
...
def first(self):
...
def last(self):
...
def is_empty(self):
...
def __len__(self):
...
def __str__(self):
...
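# --- Illustrative sketch (not part of the original file) ---
# The methods above are stubs; a minimal list-backed version of the same
# interface could look like the class below. It is only an example of the
# intended behaviour, not the PyDS project's actual implementation.
class _ListDeque:
    def __init__(self):
        self._items = []

    def add_first(self, value):
        self._items.insert(0, value)

    def add_last(self, value):
        self._items.append(value)

    def delete_first(self):
        if not self._items:
            raise IndexError("delete_first from empty deque")
        return self._items.pop(0)

    def delete_last(self):
        if not self._items:
            raise IndexError("delete_last from empty deque")
        return self._items.pop()

    def first(self):
        if not self._items:
            raise IndexError("first from empty deque")
        return self._items[0]

    def last(self):
        if not self._items:
            raise IndexError("last from empty deque")
        return self._items[-1]

    def is_empty(self):
        return len(self._items) == 0

    def __len__(self):
        return len(self._items)

    def __str__(self):
        return str(self._items)

if __name__ == "__main__":
    d = _ListDeque()
    d.add_first(1)
    d.add_last(2)
    print(len(d), d.first(), d.last())  # 2 1 2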
| 2.671875 | 3 |
forum/main.py | asmaasalih/my_project | 1 | 3803 | <gh_stars>1-10
import models
import stores
member1 =models.Member("ahmed",33)
member2 =models.Member("mohamed",30)
post1=models.Post("Post1", "Content1")
post2= models.Post("Post2", "Content2")
post3= models.Post("Post3", "Content3")
#member store
member_store=stores.MemberStore()
member_store.add(member1)
member_store.add(member2)
print (member_store.get_all())
post_store=stores.PostStore()
post_store.add(post1)
post_store.add(post2)
post_store.add(post3)
print (post_store.get_all())
| 2.265625 | 2 |
shellfind.py | bhavyanshu/Shell-Finder | 4 | 3804 | <gh_stars>1-10
#!/usr/bin/env python
'''
Author : <NAME>
Email : <EMAIL>
Description : shellfind.py is a Python command-line utility that looks for web shells an attacker may have uploaded to a site. It tries every candidate shell path from a dictionary file and reports any matches.
'''
import socket
import sys
import httplib
from urlparse import urlparse
import time as t
import urllib2
from urllib2 import Request, urlopen, URLError
negative = '\033[91m'
positive = '\033[32m'
wait = '\033[95m'
final = '\033[93m'
total_scanned_global=0
found_scanned_global=0
def OpenLog(log_file_name):
try:
with open(log_file_name, 'r') as f:
return f.read()
except IOError:
return "File " + log_file_name + " does not exist."
def main():
socket.setdefaulttimeout(10)
print wait+"\n## ------ Welcome to Shell Finder Utility - Developed by <NAME> (http://bhavyanshu.github.io) | Apache License V2.0 | Project Source (https://github.com/bhavyanshu/Shell-Finder) ------ ##"
website_url = raw_input("\n\nEnter URL to scan ([eg, http://sitename.com or https://sitename.com/subdir ] | Do not add slash at the end of URL) : ")
parse_url=urlparse(website_url)
log_file_name = "LOG/"+parse_url.netloc+".log"
global total_scanned_global
global found_scanned_global
try:
try:
create=open(log_file_name,"w")
except:
print negative+"\nError generating log file. Please check directory access permissions."
print wait+"\nCreating a persistent connection to site "+website_url
conn = urllib2.Request(website_url)
urllib2.urlopen(website_url)
print positive+"Connected! Begining to scan for shells.."
except (urllib2.HTTPError) as Exit:
print negative+"\nEither the server is down or you are not connected to the internet."
exit()
try:
dictionary = open("dictionary","r")
except(IOError):
print negative+"Dictionary file not found_scanned_global. Please download the latest dictionary from github link"
exit()
keywords = dictionary.readlines()
for keys in keywords:
keys=keys.replace("\n","") #To replace newline with empty
New_URL = website_url+"/"+keys
print wait+">>>> "+New_URL
req=Request(New_URL)
try:
response = urlopen(req)
except URLError, e:
if hasattr(e,'reason'):
print negative+"Not found"
total_scanned_global = total_scanned_global+1
elif hasattr(e,'code'):
print negative+"Not found "
total_scanned_global = total_scanned_global+1
else:
try:
log_file=open(log_file_name,"a+") #Appending to it
except(IOError):
print negative+"Failed to create log file. Check dir permissions."
found_scanned_url=New_URL
print positive+"Possible shell found at ",found_scanned_url
log_file.writelines(found_scanned_url+"\n")
found_scanned_global=found_scanned_global+1
total_scanned_global=total_scanned_global+1
log_file.close()
print "\nTotal tries : ", total_scanned_global
print positive+"\nPossible shells: ",found_scanned_global
print final+"\nFollowing are the links to possible shells "
print OpenLog(log_file_name)
if __name__ == '__main__':
main()
| 2.8125 | 3 |
question3.py | nosisky/algo-solution | 1 | 3805 | <gh_stars>1-10
# A string S consisting of N characters is considered to be properly nested if any of the following conditions is true:
# S is empty;
# S has the form "(U)" or "[U]" or "{U}" where U is a properly nested string; S has the form "VW" where V and W are properly nested strings.
# For example, the string "{[()()]}" is properly nested but "([)()]" is not.
# Write a function:
# int solution(char *S);
# that, given a string S consisting of N characters, returns 1 if S is properly nested and 0 otherwise.
# For example, given S = "{[()()]}", the function should return 1 and given S = "([)()]", the function should return 0, as explained above.
# Assume that:
# N is an integer within the range [0..200,000];
# string S consists only of the following characters: "(", "{", "[", "]", "}" and/or ")". Complexity:
# expected worst-case time complexity is O(N);
# expected worst-case space complexity is O(N) (not counting the storage required for input arguments).
def solution(s):
sets = dict(zip('({[', ')}]'))
if(not isinstance(s, str)):
return "Invalid input"
collector = []
for bracket in s:
if(bracket in sets):
collector.append(sets[bracket])
elif bracket not in(sets.values()):
return "Invalid input"
elif not collector or bracket != collector.pop(): # unmatched or mismatched closing bracket
return False
return not collector
print(solution("()[]{}"))
| 3.9375 | 4 |
Teil_27_Game_of_Life_3d.py | chrMenzel/A-beautiful-code-in-Python | 50 | 3806 | import bpy
import random as rnd
from collections import Counter
import itertools as iter
feld_von, feld_bis = -4, 4
spielfeld_von, spielfeld_bis = feld_von-6, feld_bis+6
anz = int((feld_bis-feld_von)**3*.3)
spielfeld = {(rnd.randint(feld_von, feld_bis), rnd.randint(
feld_von, feld_bis), rnd.randint(feld_von, feld_bis)) for _ in range(anz)}
animate_frame = 8
def nachbarn(pos):
for x,y,z in iter.product(range(-1,2), repeat = 3):
if z == y == x == 0: continue
yield pos[0]+x, pos[1]+y, pos[2]+z
def nächsteGeneration(spielfeld):
nachb = Counter([p for pos in spielfeld for p in nachbarn(pos)])
return {pos for pos, anz in nachb.items() if anz == 6 or (anz in (5, 6, 7, 8) and pos in spielfeld)}
def scale_rotate(ob, scale, rot, fr):
ob.scale = (scale, scale, scale)
ob.rotation_euler.rotate_axis("Z", rot)
ob.keyframe_insert(data_path='rotation_euler', frame=fr)
ob.keyframe_insert(data_path='scale', frame=fr)
bpy.ops.mesh.primitive_cube_add(size=0.001, location=(0, 0, 0))
orig_cube = bpy.context.active_object
n = "cube"
m = orig_cube.data.copy()
cubes = {}
for x,y,z in iter.product(range(spielfeld_von,spielfeld_bis), repeat = 3):
o = bpy.data.objects.new(n, m)
o.location = (x, y, z)
cubes[x, y, z] = o
bpy.context.collection.objects.link(o)
o.select_set(False)
for i in range(200):
print(f'Durchlauf No. {i}, Anz. Zellen = {len(spielfeld)}')
spielfeld2 = nächsteGeneration(spielfeld)
dead = spielfeld - spielfeld2
new = spielfeld2 - spielfeld
spielfeld = spielfeld2
if not new and not dead:
break
for zelle in new | dead:
if zelle not in cubes:
continue
ob = cubes[zelle]
if zelle in new:
scale_rotate(ob, 0.001, -3.141/2, (i-1)*animate_frame)
scale_rotate(ob, 750, 3.141/2, i * animate_frame)
else:
scale_rotate(ob, 750, 3.141/2, (i-1) * animate_frame)
scale_rotate(ob, 0.001, -3.141/2, i * animate_frame)
if not spielfeld:
break
bpy.context.scene.frame_current = 1
| 2.25 | 2 |
power_perceiver/xr_batch_processor/reduce_num_pv_systems.py | openclimatefix/power_perceiver | 0 | 3807 | from dataclasses import dataclass
import numpy as np
import xarray as xr
from power_perceiver.load_prepared_batches.data_sources import PV
from power_perceiver.load_prepared_batches.data_sources.prepared_data_source import XarrayBatch
@dataclass
class ReduceNumPVSystems:
"""Reduce the number of PV systems per example to `requested_num_pv_systems`.
Randomly select PV systems for each example. If fewer PV systems are available
than requested, then randomly sample with duplicates allowed.
This is implemented as an xr_batch_processor so it can run after
SelectPVSystemsNearCenterOfImage.
"""
requested_num_pv_systems: int
def __post_init__(self):
self.rng = np.random.default_rng() # Seeded by seed_rngs worker_init_function
def __call__(self, xr_batch: XarrayBatch) -> XarrayBatch:
pv_batch = xr_batch[PV]
num_examples = len(pv_batch.example)
selection = np.zeros(shape=(num_examples, self.requested_num_pv_systems), dtype=np.int32)
for example_i in range(num_examples):
pv_mask_for_example = pv_batch.pv_mask.isel(example=example_i).values
all_indicies = np.nonzero(pv_mask_for_example)[0]
# Only allow a PV system to be chosen multiple times for this example if there are
# fewer available PV systems than requested.
replace = len(all_indicies) < self.requested_num_pv_systems
chosen_indicies = self.rng.choice(
all_indicies, size=self.requested_num_pv_systems, replace=replace
)
selection[example_i] = chosen_indicies
selection = xr.DataArray(selection, dims=("example", "pv_system"))
pv_batch = pv_batch.isel(pv_system=selection)
xr_batch[PV] = pv_batch
return xr_batch
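# --- Illustrative sketch (not part of the original module) ---
# Demonstrates just the selection rule used in __call__ above: sample
# `requested` PV systems per example, allowing duplicates only when fewer
# systems are available than requested. The mask and sizes are made up.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=42)
    requested = 8
    available_indices = np.nonzero(np.array([1, 1, 0, 1, 0, 1], dtype=bool))[0]
    replace = len(available_indices) < requested  # True here: 4 available < 8 requested
    chosen = rng.choice(available_indices, size=requested, replace=replace)
    print(replace, chosen)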
| 2.921875 | 3 |
HelloWorld_python/log/demo_log_3.py | wang153723482/HelloWorld_my | 0 | 3808 | #encoding=utf8
# 按天生成文件
import logging
import time
from logging.handlers import TimedRotatingFileHandler
#----------------------------------------------------------------------
if __name__ == "__main__":
logFilePath = "timed_test.log"
logger = logging.getLogger("YouLoggerName")
logger.setLevel(logging.INFO)
handler = TimedRotatingFileHandler(logFilePath,
when="d",
interval=1,
backupCount=7)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
for i in range(6):
logger.info("This is a info!")
logger.debug("This is a debug!")
# time.sleep(61)
| 2.90625 | 3 |
bot_da_os/statemachine/person/person_action.py | Atsocs/bot-da-os | 0 | 3809 | from operator import eq
class PersonAction:
def __init__(self, action):
self.action = action
def __str__(self): return self.action
def __eq__(self, other):
return eq(self.action, other.action)
# Necessary when __cmp__ or __eq__ is defined
# in order to make this class usable as a
# dictionary key:
def __hash__(self):
return hash(self.action)
# Static fields; an enumeration of instances:
PersonAction.compliment = PersonAction("person compliments")
PersonAction.informing = PersonAction("person gives information about the service order")
PersonAction.query = PersonAction("person wants to know about his/her order")
PersonAction.angry = PersonAction("person is pissed off")
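# --- Illustrative usage sketch (not part of the original file) ---
# Because __eq__ and __hash__ are both defined, the enumeration instances can
# be used as dictionary keys, e.g. to map an action to a state handler. The
# handler names below are made up for the example.
if __name__ == "__main__":
    transitions = {
        PersonAction.compliment: "thank_person",
        PersonAction.query: "report_order_status",
        PersonAction.angry: "escalate_to_human",
    }
    print(transitions[PersonAction.query])  # -> report_order_status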
| 3.546875 | 4 |
MyServer.py | bisw1jit/MyServer | 3 | 3810 | # Tool Name :- MyServer
# Author :- LordReaper
# Date :- 13/11/2018 - 9/11/2019
# Powered By :- H1ckPro Software's
import sys
import os
from time import sleep
from core.system import *
if len(sys.argv)>1:
pass
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
sys.exit()
if sys.argv[1]=="-s":
if len(sys.argv)==2:
if system=="ubuntu":
os.system("sudo python3 core/s.py "+sys.argv[1])
else:
os.system("python3 core/s.py "+sys.argv[1])
elif len(sys.argv)==3:
if sys.argv[2]=="apache":
if system=="ubuntu":
os.system("sudo python3 core/server.py -apa")
else:
os.system("python3 core/server.py -apa")
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif len(sys.argv)==6:
if sys.argv[2]=="-php":
if system=="ubuntu":
os.system("sudo python3 core/server.py -php "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
os.system("python3 core/server.py -php "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
elif sys.argv[2]=="-py":
if system=="ubuntu":
os.system("sudo python3 core/server.py -py "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
os.system("python3 core/server.py -py "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
elif sys.argv[2]=="-ng":
if system=="ubuntu":
os.system("sudo python3 core/server.py -ng "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
os.system("python3 core/server.py -ng "+sys.argv[3]+" "+sys.argv[4]+" "+sys.argv[5])
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif len(sys.argv)==5:
if system=="ubuntu":
os.system("sudo python3 core/server.py -d "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
os.system("python3 core/server.py -d "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif sys.argv[1]=="-h":
if len(sys.argv)==2:
if system=="ubuntu":
os.system("sudo python3 core/s.py "+sys.argv[1])
else:
os.system("python3 core/s.py "+sys.argv[1])
elif len(sys.argv)==5:
if system=="ubuntu":
os.system("sudo python3 core/host.py "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
os.system("python3 core/host.py "+sys.argv[2]+" "+sys.argv[3]+" "+sys.argv[4])
else:
print ("error : invalid arguments")
print ("use : myserver --help for more information")
elif sys.argv[1]=="-db":
if len(sys.argv)==3:
if sys.argv[2]=="start":
if system=="ubuntu":
os.system("sudo python3 core/mysql.py "+sys.argv[2])
else:
os.system("python3 core/mysql.py "+sys.argv[2])
elif sys.argv[2]=="stop":
if system=="ubuntu":
os.system("sudo python3 core/mysql.py "+sys.argv[2])
else:
os.system("python3 core/mysql.py "+sys.argv[2])
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
elif sys.argv[1]=="rm":
if len(sys.argv)==3:
if sys.argv[2]=="-T" or sys.argv[2]=="-t":
if system=="ubuntu":
os.system("sudo python3 core/un.py")
else:
os.system("python3 core/un.py")
else:
print ("error : invalid arguments")
print ("use : myserver --help for more information")
else:
print ("error : invalid arguments")
print ("use : myserver --help for more information")
elif sys.argv[1]=="update":
if system=="ubuntu":
os.system("sudo python3 core/upd.py")
else:
os.system("python3 core/upd.py")
elif sys.argv[1]=="start":
if system=="ubuntu":
os.system("sudo python3 .MyServer.py")
else:
os.system("python3 .MyServer.py")
elif sys.argv[1]=="--help" or sys.argv[1]=="-help" or sys.argv[1]=="help":
print ("")
print ("Usage: myserver [command]... [arguments]...")
print ("")
print (" Commands:")
print (" -s <hostname> <port> <path> to start default localhost server.")
print (" -s -ng <hostname> <port> <path> to start php localhost server.")
print (" -s -php <hostname> <port> <path> to start php localhost server.")
print (" -s -py <hostname> <port> <path> to start python localhost server.")
print (" -h <hostname> <localhost_port> <port> to access localhost server on internet.")
print (" -db [start/stop] to start/stop MySQL database server.")
print (" -s apache to start apache web server.")
print (" update update MyServer.")
print (" rm -t uninstall MyServer.")
print (" start start MyServer menu.")
print ("")
else:
print ("error : invalid arguments !!")
print ("use : myserver --help for more information")
| 2.4375 | 2 |
tests/test_gen_epub.py | ffreemt/tmx2epub | 0 | 3811 | """ test gen_epub. """
from tmx2epub.gen_epub import gen_epub
def test_gen_epub2():
""" test_gen_epub2. """
from pathlib import Path
infile = r"tests\2.tmx"
stem = Path(infile).absolute().stem
outfile = f"{Path(infile).absolute().parent / stem}.epub"
assert gen_epub(infile, debug=True) == outfile
# assert 0
| 2.15625 | 2 |
pub_sub/python/http/checkout/app.py | amulyavarote/quickstarts | 0 | 3812 | import json
import time
import random
import logging
import requests
import os
logging.basicConfig(level=logging.INFO)
base_url = os.getenv('BASE_URL', 'http://localhost') + ':' + os.getenv(
'DAPR_HTTP_PORT', '3500')
PUBSUB_NAME = 'order_pub_sub'
TOPIC = 'orders'
logging.info('Publishing to baseURL: %s, Pubsub Name: %s, Topic: %s' % (
base_url, PUBSUB_NAME, TOPIC))
for i in range(1, 10):
order = {'orderId': i}
# Publish an event/message using Dapr PubSub via HTTP Post
result = requests.post(
url='%s/v1.0/publish/%s/%s' % (base_url, PUBSUB_NAME, TOPIC),
json=order
)
logging.info('Published data: ' + json.dumps(order))
time.sleep(1)
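# --- Illustrative note (not part of the original file) ---
# The same publish request expressed as a curl command, using the default
# DAPR_HTTP_PORT of 3500 assumed above; the order id is a placeholder:
#   curl -X POST http://localhost:3500/v1.0/publish/order_pub_sub/orders \
#        -H "Content-Type: application/json" \
#        -d '{"orderId": 1}'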
| 2.59375 | 3 |
jj.py | smailedge/pro | 1 | 3813 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from linepy import *
from datetime import datetime
from time import sleep
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse
#==============================================================================#
botStart = time.time()
cl = LINE()
#cl = LINE("TOKEN KAMU")
#cl = LINE("Email","Password")
cl.log("Auth Token : " + str(cl.authToken))
channelToken = cl.getChannelResult()
cl.log("Channel Token : " + str(channelToken))
clMID = cl.profile.mid
clProfile = cl.getProfile()
lineSettings = cl.getSettings()
oepoll = OEPoll(cl)
#==============================================================================#
readOpen = codecs.open("read.json","r","utf-8")
settingsOpen = codecs.open("temp.json","r","utf-8")
read = json.load(readOpen)
settings = json.load(settingsOpen)
myProfile = {
"displayName": "",
"statusMessage": "",
"pictureStatus": ""
}
msg_dict = {}
bl = [""]
myProfile["displayName"] = clProfile.displayName
myProfile["statusMessage"] = clProfile.statusMessage
myProfile["pictureStatus"] = clProfile.pictureStatus
#==============================================================================#
def restartBot():
print ("[ INFO ] BOT RESETTED")
backupData()
python = sys.executable
os.execl(python, python, *sys.argv)
def backupData():
try:
backup = settings
f = codecs.open('temp.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
backup = read
f = codecs.open('read.json','w','utf-8')
json.dump(backup, f, sort_keys=True, indent=4, ensure_ascii=False)
return True
except Exception as error:
logError(error)
return False
def logError(text):
cl.log("[ ERROR ] " + str(text))
time_ = datetime.now()
with open("errorLog.txt","a") as error:
error.write("\n[%s] %s" % (str(time_), text))
def sendMessageWithMention(to, mid):
try:
aa = '{"S":"0","E":"3","M":'+json.dumps(mid)+'}'
text_ = '@x '
cl.sendMessage(to, text_, contentMetadata={'MENTION':'{"MENTIONEES":['+aa+']}'}, contentType=0)
except Exception as error:
logError(error)
def helpmessage():
helpMessage = """╔═════════════
╠♥ ✿✿✿ 十香の特製Bot ✿✿✿ ♥
╠SR 設定已讀點
╠LR 查看誰已讀
╠Nk @ 標註踢人
╠Nk 全部再見
╠══✪〘 其他功能略 〙✪═══
"""
return helpMessage
wait = {
"share":False,
"sender" :{},
}
admin =['ud5ff1dff426cf9e3030c7ac2a61512f0','ua10c2ad470b4b6e972954e1140ad1891',clMID]
owners = ["ua10c2ad470b4b6e972954e1140ad1891","ud5ff1dff426cf9e3030c7ac2a61512f0"]
#if clMID not in owners:
# python = sys.executable
# os.execl(python, python, *sys.argv)
#==============================================================================#
def lineBot(op):
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 5:
print ("[ 5 ] NOTIFIED ADD CONTACT")
if settings["autoAdd"] == True:
cl.sendMessage(op.param1, "感謝您加入本帳為好友w".format(str(cl.getContact(op.param1).displayName)))
if op.type == 13:
print ("[ 13 ] NOTIFIED INVITE GROUP")
group = cl.getGroup(op.param1)
if settings["autoJoin"] == True:
cl.acceptGroupInvitation(op.param1)
if op.type == 19:
if op.param2 not in owners:
if op.param2 in owners:
pass
elif wait["protect"] == True:
settings["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendMessage(op.param1,"")
else:
cl.sendMessage(op.param1,"")
if op.type == 24:
print ("[ 24 ] NOTIFIED LEAVE ROOM")
if settings["autoLeave"] == True:
cl.leaveRoom(op.param1)
if op.type == 25 or op.type == 26:
K0 = admin
msg = op.message
if wait["share"] == True:
K0 = msg._from
else:
K0 = admin
# if op.type == 25:
# to = msg.to
# receiver = str(to.displayName)
# print ("send" + receiver + str(text.lower()))
# if op.type == 26:
# to = msg._from
# sender = str(to.displayName)
# print ("receiver" + sender + str(text.lower()))
if op.type == 26 or op.type == 25:
print ("[ 25 ] SEND MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if msg.contentType == 0:
if text is None:
return
#==============================================================================#
if sender in K0:
if text.lower() == 'help':
helpMessage = helpmessage()
cl.sendMessage(to, str(helpMessage))
cl.sendContact(to,"u0a59c278b1529476ddb210cb5e827ffc")
cl.sendContact(to,"ufb30e2203f44bc7b72e28b09a88c9bbd")
#==============================================================================#
elif text.lower() == 'speed':
start = time.time()
cl.sendMessage(to, "計算中...")
elapsed_time = time.time() - start
cl.sendMessage(to,format(str(elapsed_time)))
elif text.lower() == 'restart':
cl.sendMessage(to, "重新啟動中...")
time.sleep(5)
cl.sendMessage(to, "重啟成功,請重新登入")
restartBot()
elif text.lower() == 'runtime':
timeNow = time.time()
runtime = timeNow - botStart
runtime = format_timespan(runtime)
cl.sendMessage(to, "系統已運作 {}".format(str(runtime)))
elif text.lower() == 'about':
try:
arr = []
owner = "ua10c2ad470b4b6e972954e1140ad1891"
creator = cl.getContact(owner)
contact = cl.getContact(clMID)
grouplist = cl.getGroupIdsJoined()
contactlist = cl.getAllContactIds()
blockedlist = cl.getBlockedContactIds()
ret_ = "╔══[ 關於使用者 ]"
ret_ += "\n╠ 使用者名稱 : {}".format(contact.displayName)
ret_ += "\n╠ 群組數 : {}".format(str(len(grouplist)))
ret_ += "\n╠ 好友數 : {}".format(str(len(contactlist)))
ret_ += "\n╠ 已封鎖 : {}".format(str(len(blockedlist)))
ret_ += "\n╠══[ 關於本bot ]"
ret_ += "\n╠ 版本 : 最新"
ret_ += "\n╠ 製作者 : {}".format(creator.displayName)
ret_ += "\n╚══[ 感謝您的使用 ]"
cl.sendMessage(to, str(ret_))
except Exception as e:
cl.sendMessage(msg.to, str(e))
#==============================================================================#
elif text.lower() == 'set':
try:
ret_ = "╔══[ 狀態 ]"
if settings["autoAdd"] == True: ret_ += "\n╠ Auto Add ✅"
else: ret_ += "\n╠ Auto Add ❌"
if settings["autoJoin"] == True: ret_ += "\n╠ Auto Join ✅"
else: ret_ += "\n╠ Auto Join ❌"
if settings["autoLeave"] == True: ret_ += "\n╠ Auto Leave ✅"
else: ret_ += "\n╠ Auto Leave ❌"
if settings["autoRead"] == True: ret_ += "\n╠ Auto Read ✅"
else: ret_ += "\n╠ Auto Read ❌"
if settings["reread"] ==True: ret_+="\n╠ Reread ✅"
else: ret_ += "\n╠ Reread ❌"
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif text.lower() == 'autoadd on':
settings["autoAdd"] = True
cl.sendMessage(to, "Auto Add on success")
elif text.lower() == 'autoadd off':
settings["autoAdd"] = False
cl.sendMessage(to, "Auto Add off success")
elif text.lower() == 'autojoin on':
settings["autoJoin"] = True
cl.sendMessage(to, "Auto Join on success")
elif text.lower() == 'autojoin off':
settings["autoJoin"] = False
cl.sendMessage(to, "Auto Join off success")
elif text.lower() == 'autoleave on':
settings["autoLeave"] = True
cl.sendMessage(to, "Auto Leave on success")
elif text.lower() == 'autoleave off':
settings["autoLeave"] = False
cl.sendMessage(to, "Auto Leave off success")
elif text.lower() == 'autoread on':
settings["autoRead"] = True
cl.sendMessage(to, "Auto Read on success")
elif text.lower() == 'autoread off':
settings["autoRead"] = False
cl.sendMessage(to, "Auto Read off success")
elif text.lower() == 'checksticker on':
settings["checkSticker"] = True
cl.sendMessage(to, "Berhasil mengaktifkan Check Details Sticker")
elif text.lower() == 'checksticker off':
settings["checkSticker"] = False
cl.sendMessage(to, "Berhasil menonaktifkan Check Details Sticker")
elif text.lower() == 'detectmention on':
settings["detectMention"] = True
cl.sendMessage(to, "Berhasil mengaktifkan Detect Mention")
elif text.lower() == 'detectmention off':
settings["detectMention"] = False
cl.sendMessage(to, "Berhasil menonaktifkan Detect Mention")
elif text.lower() == 'reread on':
settings["reread"] = True
cl.sendMessage(to,"reread on success")
elif text.lower() == 'reread off':
settings["reread"] = False
cl.sendMessage(to,"reread off success")
elif text.lower() == 'protect on':
settings["protect"] = True
cl.sendMessage(to, "Protect on success")
elif text.lower() == 'protect off':
settings["protect"] = False
cl.sendMessage(to, "Protect off success")
elif text.lower() == 'share on':
wait["share"] = True
cl.sendMessage(to, "已開啟分享")
elif text.lower() == 'share off':
wait["share"] = False
cl.sendMessage(to, "已關閉分享")
#==============================================================================#
elif text.lower() == 'admin ':
MENTION =eval(msg.contentMetadata['MENTION'])
inkey =MENTION['MENTIONEES'][0]['M']
admin.append(str(inkey))
cl.sendMessage(to,"已新增權限")
elif text.lower() == 'demin ':
MENTION =eval(msg.contentMetadata['MENTION'])
inkey =MENTION['MENTIONEES'][0]['M']
admin.remove(str(inkey))
cl.sendMessage(to,"已停止權限")
elif text.lower() == 'adminlist':
if admin == []:
cl.sendMessage(to,"無擁有權限者!")
else:
mc = "╔══[ Admin List ]"
for mi_d in admin:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(to,mc + "\n╚══[ Finish ]")
#==============================================================================#
elif text.lower() == 'me':
sendMessageWithMention(to, clMID)
cl.sendContact(to, clMID)
elif text.lower() == 'mymid':
cl.sendMessage(msg.to,"[MID]\n" + clMID)
elif text.lower() == 'myname':
me = cl.getContact(clMID)
cl.sendMessage(msg.to,"[Name]\n" + me.displayName)
elif text.lower() == 'mytoken':
me = cl.getContact(clMID)
cl.sendMessage(msg.to,"[StatusMessage]\n" + me.statusMessage)
elif text.lower() == 'mypicture':
me = cl.getContact(clMID)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus)
elif text.lower() == 'myvideoprofile':
me = cl.getContact(clMID)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + me.pictureStatus + "/vp")
elif text.lower() == 'mycover':
me = cl.getContact(clMID)
cover = cl.getProfileCoverURL(clMID)
cl.sendImageWithURL(msg.to, cover)
elif msg.text.lower().startswith("contact "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
mi_d = contact.mid
cl.sendContact(msg.to, mi_d)
elif msg.text.lower().startswith("mid "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
ret_ = "[ Mid User ]"
for ls in lists:
ret_ += "\n" + ls
cl.sendMessage(msg.to, str(ret_))
elif msg.text.lower().startswith("name "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
contact = cl.getContact(ls)
cl.sendMessage(msg.to, "[ 名字 ]\n" + contact.displayName)
for ls in lists:
contact = cl.getContact(ls)
cl.sendMessage(msg.to, "[ 個簽 ]\n" + contact.statusMessage)
for ls in lists:
path = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus
cl.sendImageWithURL(msg.to, str(path))
for ls in lists:
path = cl.getProfileCoverURL(ls)
pmath = "http://dl.profile.cl.naver.jp/" + cl.getContact(ls).pictureStatus
cl.sendImageWithURL(msg.to, path)
try:
key = eval(msg.contentMetadata["MENTION"])
u = key["MENTIONEES"][0]["M"]
cname = cl.getContact(u).displayName
cmid = cl.getContact(u).mid
cstatus = cl.getContact(u).statusMessage
cpic = cl.getContact(u).picturePath
cl.sendMessage(receiver, 'Nama : '+cname+'\nMID : '+cmid+'\nStatus Msg : '+cstatus+'\nPicture : http://dl.profile.line.naver.jp'+cpic)
cl.sendMessage(receiver, None, contentMetadata={'mid': cmid}, contentType=13)
if cl.getContact(u).videoProfile != None:
cl.sendVideoWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic+'/vp.small')
else:
cl.sendImageWithURL(receiver, 'http://dl.profile.line.naver.jp'+cpic)
except Exception as e:
cl.sendMessage(receiver, str(e))
if line != None:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if mention["M"] not in lists:
lists.append(mention["M"])
for ls in lists:
path = cl.getProfileCoverURL(ls)
cl.sendImageWithURL(msg.to, str(path))
elif msg.text.lower().startswith("cloneprofile "):
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
contact = mention["M"]
break
try:
cl.cloneContactProfile(contact)
cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah")
except:
cl.sendMessage(msg.to, "Gagal clone member")
elif text.lower() == 'restoreprofile':
try:
clProfile.displayName = str(myProfile["displayName"])
clProfile.statusMessage = str(myProfile["statusMessage"])
clProfile.pictureStatus = str(myProfile["pictureStatus"])
cl.updateProfileAttribute(8, clProfile.pictureStatus)
cl.updateProfile(clProfile)
cl.sendMessage(msg.to, "Berhasil restore profile tunggu beberapa saat sampai profile berubah")
except:
cl.sendMessage(msg.to, "Gagal restore profile")
#==============================================================================#
elif msg.text.lower().startswith("mimicadd "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["mimic"]["target"][target] = True
cl.sendMessage(msg.to,"已加入模仿名單!")
break
except:
cl.sendMessage(msg.to,"添加失敗 !")
break
elif msg.text.lower().startswith("mimicdel "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["模仿名單"]["target"][target]
cl.sendMessage(msg.to,"刪除成功 !")
break
except:
cl.sendMessage(msg.to,"刪除失敗 !")
break
elif text.lower() == 'mimiclist':
if settings["mimic"]["target"] == {}:
cl.sendMessage(msg.to,"未設定模仿目標")
else:
mc = "╔══[ Mimic List ]"
for mi_d in settings["mimic"]["target"]:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]")
elif "mimic" in msg.text.lower():
sep = text.split(" ")
mic = text.replace(sep[0] + " ","")
if mic == "on":
if settings["mimic"]["status"] == False:
settings["mimic"]["status"] = True
cl.sendMessage(msg.to,"Reply Message on")
elif mic == "off":
if settings["mimic"]["status"] == True:
settings["mimic"]["status"] = False
cl.sendMessage(msg.to,"Reply Message off")
#==============================================================================#
elif text.lower() == 'groupcreator':
group = cl.getGroup(to)
GS = group.creator.mid
cl.sendContact(to, GS)
elif text.lower() == 'groupid':
gid = cl.getGroup(to)
cl.sendMessage(to, "[ID Group : ]\n" + gid.id)
elif text.lower() == 'grouppicture':
group = cl.getGroup(to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(to, path)
elif text.lower() == 'groupname':
gid = cl.getGroup(to)
cl.sendMessage(to, "[群組名稱 : ]\n" + gid.name)
elif text.lower() == 'grouplink':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
ticket = cl.reissueGroupTicket(to)
cl.sendMessage(to, "[ Group Ticket ]\nhttps://cl.me/R/ti/g/{}".format(str(ticket)))
else:
cl.sendMessage(to, "Grouplink未開啟 {}openlink".format(str(settings["keyCommand"])))
elif text.lower() == 'link off':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == False:
cl.sendMessage(to, "群組網址已關")
else:
group.preventedJoinByTicket = False
cl.updateGroup(group)
cl.sendMessage(to, "關閉成功")
elif text.lower() == 'link on':
if msg.toType == 2:
group = cl.getGroup(to)
if group.preventedJoinByTicket == True:
cl.sendMessage(to, "群組網址已開")
else:
group.preventedJoinByTicket = True
cl.updateGroup(group)
cl.sendMessage(to, "開啟成功")
elif text.lower() == 'groupinfo':
group = cl.getGroup(to)
try:
gCreator = group.creator.displayName
except:
gCreator = "不明"
if group.invitee is None:
gPending = "0"
else:
gPending = str(len(group.invitee))
if group.preventedJoinByTicket == True:
gQr = "關閉"
gTicket = "無"
else:
gQr = "開啟"
gTicket = "https://cl.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(group.id)))
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
ret_ = "╔══[ Group Info ]"
ret_ += "\n╠ 群組名稱 : {}".format(str(group.name))
ret_ += "\n╠ 群組 Id : {}".format(group.id)
ret_ += "\n╠ 創建者 : {}".format(str(gCreator))
ret_ += "\n╠ 群組人數 : {}".format(str(len(group.members)))
ret_ += "\n╠ 邀請中 : {}".format(gPending)
ret_ += "\n╠ 網址狀態 : {}".format(gQr)
ret_ += "\n╠ 群組網址 : {}".format(gTicket)
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
cl.sendImageWithURL(to, path)
elif text.lower() == 'groupmemberlist':
if msg.toType == 2:
group = cl.getGroup(to)
ret_ = "╔══[ 成員名單 ]"
no = 0 + 1
for mem in group.members:
ret_ += "\n╠ {}. {}".format(str(no), str(mem.displayName))
no += 1
ret_ += "\n╚══[ 全部成員共 {} 人]".format(str(len(group.members)))
cl.sendMessage(to, str(ret_))
elif text.lower() == 'grouplist':
groups = cl.groups
ret_ = "╔══[ Group List ]"
no = 0 + 1
for gid in groups:
group = cl.getGroup(gid)
ret_ += "\n╠ {}. {} | {}".format(str(no), str(group.name), str(len(group.members)))
no += 1
ret_ += "\n╚══[ Total {} Groups ]".format(str(len(groups)))
cl.sendMessage(to, str(ret_))
elif msg.text.lower().startswith("nk "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.sendMessage(to,"Fuck you")
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendMessage(to,"Error")
elif msg.text.lower().startswith("ri "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.sendMessage(to,"來回機票一張ww")
cl.kickoutFromGroup(msg.to,[target])
cl.inviteIntoGroup(to,[target])
except:
cl.sendMessage(to,"Error")
elif text.lower() == 'nk':
if msg.toType == 2:
print ("[ 19 ] KICK ALL MEMBER")
_name = msg.text.replace("Byeall","")
gs = cl.getGroup(msg.to)
cl.sendMessage(msg.to,"Sorry guys")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendMessage(msg.to,"Not Found")
else:
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendMessage(msg.to,"")
elif ("Gn " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn ","")
cl.updateGroup(X)
else:
cl.sendMessage(msg.to,"It can't be used besides the group.")
elif text.lower() == 'cancel':
if msg.toType == 2:
group = cl.getGroup(to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendMessage(msg.to,"已取消所有邀請!")
elif ("Inv " in msg.text):
if msg.toType == 2:
midd = msg.text.replace("Inv ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(to,[midd])
#==============================================================================#
elif text.lower() == 'tagall':
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
k = len(nama)//100
for a in range(k+1):
txt = u''
s=0
b=[]
for i in group.members[a*100 : (a+1)*100]:
b.append({"S":str(s), "E" :str(s+6), "M":i.mid})
s += 7
txt += u'@Alin \n'
cl.sendMessage(to, text=txt, contentMetadata={u'MENTION': json.dumps({'MENTIONEES':b})}, contentType=0)
cl.sendMessage(to, "Total {} Mention".format(str(len(nama))))
elif text.lower() == 'sr':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read['readPoint']:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to,"偵測點已設置")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
read['readPoint'][msg.to] = msg.id
read['readMember'][msg.to] = ""
read['readTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
read['ROM'][msg.to] = {}
with open('read.json', 'w') as fp:
json.dump(read, fp, sort_keys=True, indent=4)
cl.sendMessage(msg.to, "Set reading point:\n" + readTime)
elif text.lower() == 'readcancel':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to not in read['readPoint']:
cl.sendMessage(msg.to,"偵測點已取消")
else:
try:
del read['readPoint'][msg.to]
del read['readMember'][msg.to]
del read['readTime'][msg.to]
except:
pass
cl.sendMessage(msg.to, "Delete reading point:\n" + readTime)
elif text.lower() == 'resetread':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if msg.to in read["readPoint"]:
try:
del read["readPoint"][msg.to]
del read["readMember"][msg.to]
del read["readTime"][msg.to]
except:
pass
cl.sendMessage(msg.to, "Reset reading point:\n" + readTime)
else:
cl.sendMessage(msg.to, "偵測點未設置?")
elif text.lower() == 'lr':
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
if receiver in read['readPoint']:
if read["ROM"][receiver].items() == []:
cl.sendMessage(receiver,"[ 已讀的人 ]:\nNone")
else:
chiya = []
for rom in read["ROM"][receiver].items():
chiya.append(rom[1])
cmem = cl.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = '[ 已讀的人 ]:\n'
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@c\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
text = xpesan+ zxc + "\n[ 已讀時間 ]: \n" + readTime
try:
cl.sendMessage(receiver, text, contentMetadata={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}, contentType=0)
except Exception as error:
print (error)
pass
else:
cl.sendMessage(receiver,"尚未設置偵測點")
#==============================================================================#
elif msg.text.lower().startswith("ban "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
settings["blacklist"][target] = True
cl.sendMessage(msg.to,"已加入黑單!")
break
except:
cl.sendMessage(msg.to,"添加失敗 !")
break
elif msg.text.lower().startswith("unban "):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del settings["blacklist"][target]
cl.sendMessage(msg.to,"刪除成功 !")
break
except:
cl.sendMessage(msg.to,"刪除失敗 !")
break
elif text.lower() == 'banlist':
if settings["blacklist"] == {}:
cl.sendMessage(msg.to,"無黑單成員!")
else:
mc = "╔══[ Black List ]"
for mi_d in settings["blacklist"]:
mc += "\n╠ "+cl.getContact(mi_d).displayName
cl.sendMessage(msg.to,mc + "\n╚══[ Finish ]")
elif text.lower() == 'nkban':
if msg.toType == 2:
group = cl.getGroup(to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in settings["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendMessage(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
cl.sendMessage(msg.to,"Blacklist kicked out")
elif text.lower() == 'cleanban':
try:
settings["blacklist"] = {}
cl.sendMessage(msg.to,"已清空黑單!")
except:
cl.sendMessage(msg.to,"刪除失敗 !")
elif text.lower() == 'banmidlist':
if settings["blacklist"] == {}:
cl.sendMessage(msg.to,"無黑單成員!")
else:
mc = "╔══[ Black List ]"
for mi_d in settings["blacklist"]:
mc += "\n╠ "+mi_d
cl.sendMessage(to,mc + "\n╚══[ Finish ]")
#==============================================================================#
elif "Copy " in msg.text:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendMessage(to, "Success...")
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
P = contact.pictureStatus
pic = cl.getProfile()
pic.pictureStatus = P
cl.updateProfilePicture(P)
cl.cloneContactProfile(target)
except Exception as e:
cl.sendMessage(to, "Failed!")
elif text.lower() == 'cc9487':
if sender in ['ua10c2ad470b4b6e972954e1140ad1891']:
python = sys.executable
os.execl(python, python, *sys.argv)
else:
pass
#==============================================================================#
elif text.lower() == 'calender':
tz = pytz.timezone("Asia/Makassar")
timeNow = datetime.now(tz=tz)
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
hr = timeNow.strftime("%A")
bln = timeNow.strftime("%m")
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
readTime = hasil + ", " + timeNow.strftime('%d') + " - " + bln + " - " + timeNow.strftime('%Y') + "\nJam : [ " + timeNow.strftime('%H:%M:%S') + " ]"
cl.sendMessage(msg.to, readTime)
elif "screenshotwebsite" in msg.text.lower():
sep = text.split(" ")
query = text.replace(sep[0] + " ","")
with requests.session() as web:
r = web.get("http://rahandiapi.herokuapp.com/sswebAPI?key=betakey&link={}".format(urllib.parse.quote(query)))
data = r.text
data = json.loads(data)
cl.sendImageWithURL(to, data["result"])
elif "checkdate" in msg.text.lower():
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
ret_ = "╔══[ D A T E ]"
ret_ += "\n╠ Date Of Birth : {}".format(str(data["data"]["lahir"]))
ret_ += "\n╠ Age : {}".format(str(data["data"]["usia"]))
ret_ += "\n╠ Birthday : {}".format(str(data["data"]["ultah"]))
ret_ += "\n╠ Zodiak : {}".format(str(data["data"]["zodiak"]))
ret_ += "\n╚══[ Success ]"
cl.sendMessage(to, str(ret_))
elif msg.contentType == 7:
if settings["checkSticker"] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
ret_ = "╔══[ Sticker Info ]"
ret_ += "\n╠ STICKER ID : {}".format(stk_id)
ret_ += "\n╠ STICKER PACKAGES ID : {}".format(pkg_id)
ret_ += "\n╠ STICKER VERSION : {}".format(stk_ver)
ret_ += "\n╠ STICKER URL : line://shop/detail/{}".format(pkg_id)
ret_ += "\n╚══[ Finish ]"
cl.sendMessage(to, str(ret_))
elif msg.contentType == 13:
if settings["copy"] == True:
_name = msg.contentMetadata["displayName"]
copy = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
targets = []
for s in groups.members:
if _name in s.displayName:
print ("[Target] Copy")
break
else:
targets.append(copy)
if targets == []:
cl.sendMessage(msg.to, "Not Found...")
pass
else:
for target in targets:
try:
cl.cloneContactProfile(target)
cl.sendMessage(msg.to, "Berhasil clone member tunggu beberapa saat sampai profile berubah")
settings['copy'] = False
break
except:
msg.contentMetadata = {'mid': target}
settings["copy"] = False
break
#==============================================================================#
if op.type == 26:
print ("[ 26 ] RECEIVE MESSAGE")
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if settings["autoRead"] == True:
cl.sendChatChecked(to, msg_id)
if to in read["readPoint"]:
if sender not in read["ROM"][to]:
read["ROM"][to][sender] = True
if sender in settings["mimic"]["target"] and settings["mimic"]["status"] == True and settings["mimic"]["target"][sender] == True:
text = msg.text
if text is not None:
cl.sendMessage(msg.to,text)
if msg.contentType == 0 and sender not in clMID and msg.toType == 2:
if 'MENTION' in msg.contentMetadata.keys()!= None:
names = re.findall(r'@(\w+)', text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
lists = []
for mention in mentionees:
if clMID in mention["M"]:
if settings["detectMention"] == True:
contact = cl.getContact(sender)
cl.sendMessage(to, "sundala nu")
sendMessageWithMention(to, contact.mid)
break
#==============================================================================#
if op.type == 65:
print ("[ 65 ] REREAD")
try:
at = op.param1
msg_id = op.param2
                if settings["reread"] == True:
if msg_id in msg_dict:
if msg_dict[msg_id]["from"] not in bl:
cl.sendMessage(at,"[收回訊息者]\n%s\n[訊息內容]\n%s"%(cl.getContact(msg_dict[msg_id]["from"]).displayName,msg_dict[msg_id]["text"]))
del msg_dict[msg_id]
else:
pass
except Exception as e:
print (e)
#==============================================================================#
if op.type == 55:
print ("[ 55 ] NOTIFIED READ MESSAGE")
try:
if op.param1 in read['readPoint']:
if op.param2 in read['readMember'][op.param1]:
pass
else:
read['readMember'][op.param1] += op.param2
read['ROM'][op.param1][op.param2] = op.param2
backupData()
else:
pass
except:
pass
except Exception as error:
logError(error)
#==============================================================================#
while True:
try:
ops = oepoll.singleTrace(count=50)
if ops is not None:
for op in ops:
lineBot(op)
oepoll.setRevision(op.revision)
except Exception as e:
logError(e)
| 2.171875 | 2 |
dd_app/messaging/backend.py | datadealer/dd_app | 2 | 3814 | <gh_stars>1-10
class RedisBackend(object):
def __init__(self, settings={}, *args, **kwargs):
self.settings = settings
@property
def connection(self):
# cached redis connection
if not hasattr(self, '_connection'):
self._connection = self.settings.get('redis.connector').get()
return self._connection
@property
def channel(self):
# Fanout channel
if not hasattr(self, '_channel'):
self._channel = self.connection.pubsub()
return self._channel
def subscribe(self, channels=[]):
# Fanout subscriber
for chan_id in channels:
self.channel.subscribe(chan_id)
def listen(self):
# Fanout generator
for m in self.channel.listen():
if m['type'] == 'message':
yield m
def send(self, channel_id, payload):
# Fanout emitter
return self.connection.publish(channel_id, payload)
def listen_queue(self, queue_keys):
# Message queue generator
while 1:
yield self.connection.blpop(queue_keys)
def send_queue(self, queue_key, payload):
        return self.connection.rpush(queue_key, payload)
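# Usage sketch (illustrative, not part of the original module). It assumes
# `settings['redis.connector'].get()` returns a configured `redis.Redis`
# client; the channel and queue names below are hypothetical.
def _example_usage(settings):
    backend = RedisBackend(settings=settings)
    # Fanout: subscribe first, then publish, then consume via the blocking generator.
    backend.subscribe(channels=['dd:events'])
    backend.send('dd:events', '{"type": "ping"}')
    for message in backend.listen():
        print(message['channel'], message['data'])
        break
    # Queue: push a payload, then pop it with BLPOP (yields (key, value) pairs).
    backend.send_queue('dd:jobs', '{"job": 1}')
    queue_key, payload = next(backend.listen_queue(['dd:jobs']))
    print(queue_key, payload)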
| 2.46875 | 2 |
fetch_data.py | bitfag/bt-macd-binance | 0 | 3815 | <gh_stars>0
#!/usr/bin/env python
from btmacd.binance_fetcher import BinanceFetcher
def main():
fetcher = BinanceFetcher("BTCUSDT", filename="binance_ohlc.csv", start_date="01.01.2018")
fetcher.fetch()
if __name__ == "__main__":
main()
| 1.828125 | 2 |
tensorflow_probability/python/mcmc/diagnostic.py | Frightera/probability | 1 | 3816 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities for Markov Chain Monte Carlo (MCMC) sampling.
@@effective_sample_size
@@potential_scale_reduction
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import stats
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import nest_util
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
__all__ = [
'effective_sample_size',
'potential_scale_reduction',
]
def effective_sample_size(states,
filter_threshold=0.,
filter_beyond_lag=None,
filter_beyond_positive_pairs=False,
cross_chain_dims=None,
validate_args=False,
name=None):
"""Estimate a lower bound on effective sample size for each independent chain.
Roughly speaking, "effective sample size" (ESS) is the size of an iid sample
with the same variance as `state`.
More precisely, given a stationary sequence of possibly correlated random
variables `X_1, X_2, ..., X_N`, identically distributed, ESS is the
number such that
```
Variance{ N**-1 * Sum{X_i} } = ESS**-1 * Variance{ X_1 }.
```
If the sequence is uncorrelated, `ESS = N`. If the sequence is positively
auto-correlated, `ESS` will be less than `N`. If there are negative
correlations, then `ESS` can exceed `N`.
Some math shows that, with `R_k` the auto-correlation sequence,
`R_k := Covariance{X_1, X_{1+k}} / Variance{X_1}`, we have
```
ESS(N) = N / [ 1 + 2 * ( (N - 1) / N * R_1 + ... + 1 / N * R_{N-1} ) ]
```
This function estimates the above by first estimating the auto-correlation.
Since `R_k` must be estimated using only `N - k` samples, it becomes
progressively noisier for larger `k`. For this reason, the summation over
`R_k` should be truncated at some number `filter_beyond_lag < N`. This
function provides two methods to perform this truncation.
* `filter_threshold` -- since many MCMC methods generate chains where `R_k >
0`, a reasonable criterion is to truncate at the first index where the
estimated auto-correlation becomes negative. This method does not estimate
the `ESS` of super-efficient chains (where `ESS > N`) correctly.
* `filter_beyond_positive_pairs` -- reversible MCMC chains produce
an auto-correlation sequence with the property that pairwise sums of the
elements of that sequence are positive [Geyer][1], i.e.
`R_{2k} + R_{2k + 1} > 0` for `k in {0, ..., N/2}`. Deviations are only
possible due to noise. This method truncates the auto-correlation sequence
where the pairwise sums become non-positive.
The arguments `filter_beyond_lag`, `filter_threshold` and
`filter_beyond_positive_pairs` are filters intended to remove noisy tail terms
from `R_k`. You can combine `filter_beyond_lag` with `filter_threshold` or
  `filter_beyond_positive_pairs`. E.g., combining `filter_beyond_lag` and
`filter_beyond_positive_pairs` means that terms are removed if they were to be
filtered under the `filter_beyond_lag` OR `filter_beyond_positive_pairs`
criteria.
This function can also compute cross-chain ESS following
[Vehtari et al. (2019)][2] by specifying the `cross_chain_dims` argument.
Cross-chain ESS takes into account the cross-chain variance to reduce the ESS
in cases where the chains are not mixing well. In general, this will be a
smaller number than computing the ESS for individual chains and then summing
them. In an extreme case where the chains have fallen into K non-mixing modes,
this function will return ESS ~ K. Even when chains are mixing well it is
  still preferable to compute cross-chain ESS via this method because it will
reduce the noise in the estimate of `R_k`, reducing the need for truncation.
Args:
states: `Tensor` or Python structure of `Tensor` objects. Dimension zero
should index identically distributed states.
filter_threshold: `Tensor` or Python structure of `Tensor` objects. Must
broadcast with `state`. The sequence of auto-correlations is truncated
after the first appearance of a term less than `filter_threshold`.
Setting to `None` means we use no threshold filter. Since `|R_k| <= 1`,
setting to any number less than `-1` has the same effect. Ignored if
`filter_beyond_positive_pairs` is `True`.
filter_beyond_lag: `Tensor` or Python structure of `Tensor` objects. Must
be `int`-like and scalar valued. The sequence of auto-correlations is
truncated to this length. Setting to `None` means we do not filter based
on the size of lags.
filter_beyond_positive_pairs: Python boolean. If `True`, only consider the
initial auto-correlation sequence where the pairwise sums are positive.
cross_chain_dims: An integer `Tensor` or a structure of integer `Tensors`
corresponding to each state component. If a list of `states` is provided,
then this argument should also be a list of the same length. Which
dimensions of `states` to treat as independent chains that ESS will be
summed over. If `None`, no summation is performed. Note this requires at
least 2 chains.
validate_args: Whether to add runtime checks of argument validity. If False,
and arguments are incorrect, correct behavior is not guaranteed.
name: `String` name to prepend to created ops.
Returns:
ess: `Tensor` structure parallel to `states`. The effective sample size of
each component of `states`. If `cross_chain_dims` is None, the shape will
be `states.shape[1:]`. Otherwise, the shape is `tf.reduce_mean(states,
cross_chain_dims).shape[1:]`.
Raises:
ValueError: If `states` and `filter_threshold` or `states` and
`filter_beyond_lag` are both structures of different shapes.
ValueError: If `cross_chain_dims` is not `None` and there are less than 2
chains.
#### Examples
We use ESS to estimate standard error.
```
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 1000 states from one chain.
states = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=tf.constant([0., 0.]),
trace_fn=None,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
states.shape
==> (1000, 2)
ess = effective_sample_size(states, filter_beyond_positive_pairs=True)
==> Shape (2,) Tensor
  mean, variance = tf.nn.moments(states, axes=[0])
standard_error = tf.sqrt(variance / ess)
```
#### References
[1]: <NAME>, Practical Markov chain Monte Carlo (with discussion).
Statistical Science, 7:473-511, 1992.
[2]: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC, 2019. Retrieved from
http://arxiv.org/abs/1903.08008
"""
if cross_chain_dims is None:
cross_chain_dims = nest_util.broadcast_structure(states, None)
filter_beyond_lag = nest_util.broadcast_structure(states, filter_beyond_lag)
filter_threshold = nest_util.broadcast_structure(states, filter_threshold)
filter_beyond_positive_pairs = nest_util.broadcast_structure(
states, filter_beyond_positive_pairs)
# Process items, one at a time.
def single_state(*args):
return _effective_sample_size_single_state(
*args, validate_args=validate_args)
with tf.name_scope('effective_sample_size' if name is None else name):
return nest.map_structure_up_to(
states,
single_state,
states, filter_beyond_lag, filter_threshold,
filter_beyond_positive_pairs, cross_chain_dims)
def _effective_sample_size_single_state(states, filter_beyond_lag,
filter_threshold,
filter_beyond_positive_pairs,
cross_chain_dims,
validate_args):
"""ESS computation for one single Tensor argument."""
with tf.name_scope('effective_sample_size_single_state'):
states = tf.convert_to_tensor(states, name='states')
dt = states.dtype
# filter_beyond_lag == None ==> auto_corr is the full sequence.
auto_cov = stats.auto_correlation(
states, axis=0, max_lags=filter_beyond_lag, normalize=False)
n = _axis_size(states, axis=0)
if cross_chain_dims is not None:
num_chains = _axis_size(states, cross_chain_dims)
num_chains_ = tf.get_static_value(num_chains)
assertions = []
msg = ('When `cross_chain_dims` is not `None`, there must be > 1 chain '
'in `states`.')
if num_chains_ is not None:
if num_chains_ < 2:
raise ValueError(msg)
elif validate_args:
assertions.append(
assert_util.assert_greater(num_chains, 1., message=msg))
with tf.control_dependencies(assertions):
# We're computing the R[k] from equation 10 of Vehtari et al.
# (2019):
#
# R[k] := 1 - (W - 1/C * Sum_{c=1}^C s_c**2 R[k, c]) / (var^+),
#
# where:
# C := number of chains
# N := length of chains
# x_hat[c] := 1 / N Sum_{n=1}^N x[n, c], chain mean.
# x_hat := 1 / C Sum_{c=1}^C x_hat[c], overall mean.
# W := 1/C Sum_{c=1}^C s_c**2, within-chain variance.
# B := N / (C - 1) Sum_{c=1}^C (x_hat[c] - x_hat)**2, between chain
# variance.
# s_c**2 := 1 / (N - 1) Sum_{n=1}^N (x[n, c] - x_hat[c])**2, chain
# variance
# R[k, m] := auto_corr[k, m, ...], auto-correlation indexed by chain.
# var^+ := (N - 1) / N * W + B / N
cross_chain_dims = ps.non_negative_axis(
cross_chain_dims, ps.rank(states))
# B / N
between_chain_variance_div_n = _reduce_variance(
tf.reduce_mean(states, axis=0),
biased=False, # This makes the denominator be C - 1.
axis=cross_chain_dims - 1)
# W * (N - 1) / N
biased_within_chain_variance = tf.reduce_mean(auto_cov[0],
cross_chain_dims - 1)
# var^+
approx_variance = (
biased_within_chain_variance + between_chain_variance_div_n)
# 1/C * Sum_{c=1}^C s_c**2 R[k, c]
mean_auto_cov = tf.reduce_mean(auto_cov, cross_chain_dims)
auto_corr = 1. - (biased_within_chain_variance -
mean_auto_cov) / approx_variance
else:
auto_corr = auto_cov / auto_cov[:1]
num_chains = 1
# With R[k] := auto_corr[k, ...],
# ESS = N / {1 + 2 * Sum_{k=1}^N R[k] * (N - k) / N}
# = N / {-1 + 2 * Sum_{k=0}^N R[k] * (N - k) / N} (since R[0] = 1)
# approx N / {-1 + 2 * Sum_{k=0}^M R[k] * (N - k) / N}
# where M is the filter_beyond_lag truncation point chosen above.
# Get the factor (N - k) / N, and give it shape [M, 1,...,1], having total
# ndims the same as auto_corr
k = tf.range(0., _axis_size(auto_corr, axis=0))
nk_factor = (n - k) / n
if tensorshape_util.rank(auto_corr.shape) is not None:
new_shape = [-1] + [1] * (tensorshape_util.rank(auto_corr.shape) - 1)
else:
new_shape = tf.concat(
([-1],
tf.ones([tf.rank(auto_corr) - 1], dtype=tf.int32)),
axis=0)
nk_factor = tf.reshape(nk_factor, new_shape)
weighted_auto_corr = nk_factor * auto_corr
if filter_beyond_positive_pairs:
def _sum_pairs(x):
x_len = ps.shape(x)[0]
# For odd sequences, we drop the final value.
x = x[:x_len - x_len % 2]
new_shape = ps.concat([[x_len // 2, 2], ps.shape(x)[1:]], axis=0)
return tf.reduce_sum(tf.reshape(x, new_shape), 1)
# Pairwise sums are all positive for auto-correlation spectra derived from
# reversible MCMC chains.
# E.g. imagine the pairwise sums are [0.2, 0.1, -0.1, -0.2]
# Step 1: mask = [False, False, True, True]
mask = _sum_pairs(auto_corr) < 0.
# Step 2: mask = [0, 0, 1, 1]
mask = tf.cast(mask, dt)
# Step 3: mask = [0, 0, 1, 2]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
# N.B. this reduces the length of weighted_auto_corr by a factor of 2.
# It still works fine in the formula below.
weighted_auto_corr = _sum_pairs(weighted_auto_corr) * mask
elif filter_threshold is not None:
filter_threshold = tf.convert_to_tensor(
filter_threshold, dtype=dt, name='filter_threshold')
# Get a binary mask to zero out values of auto_corr below the threshold.
# mask[i, ...] = 1 if auto_corr[j, ...] > threshold for all j <= i,
# mask[i, ...] = 0, otherwise.
# So, along dimension zero, the mask will look like [1, 1, ..., 0, 0,...]
# Building step by step,
# Assume auto_corr = [1, 0.5, 0.0, 0.3], and filter_threshold = 0.2.
# Step 1: mask = [False, False, True, False]
mask = auto_corr < filter_threshold
# Step 2: mask = [0, 0, 1, 0]
mask = tf.cast(mask, dtype=dt)
# Step 3: mask = [0, 0, 1, 1]
mask = tf.cumsum(mask, axis=0)
# Step 4: mask = [1, 1, 0, 0]
mask = tf.maximum(1. - mask, 0.)
weighted_auto_corr *= mask
return num_chains * n / (-1 + 2 * tf.reduce_sum(weighted_auto_corr, axis=0))
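# Illustrative cross-check (not part of the library API above): a plain NumPy
# version of the single-chain estimator,
#   ESS = N / (-1 + 2 * sum_k R_k * (N - k) / N),
# truncating the sum at the first negative autocorrelation (the default
# `filter_threshold=0.` behaviour). Intended only to make the formula concrete.
def _numpy_ess_sketch(chain):
  chain = np.asarray(chain, dtype=np.float64)
  n = len(chain)
  centered = chain - chain.mean()
  # Biased autocovariance estimates for lags k = 0 .. n-1.
  acov = np.array(
      [np.dot(centered[:n - k], centered[k:]) / n for k in range(n)])
  acorr = acov / acov[0]
  # Keep lags up to (but excluding) the first negative autocorrelation.
  negative = np.where(acorr < 0.)[0]
  cutoff = int(negative[0]) if negative.size else n
  k = np.arange(cutoff)
  return n / (-1. + 2. * np.sum(acorr[:cutoff] * (n - k) / n))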
def potential_scale_reduction(chains_states,
independent_chain_ndims=1,
split_chains=False,
validate_args=False,
name=None):
"""<NAME> Rubin (1992)'s potential scale reduction for chain convergence.
Given `N > 1` states from each of `C > 1` independent chains, the potential
scale reduction factor, commonly referred to as R-hat, measures convergence of
the chains (to the same target) by testing for equality of means.
Specifically, R-hat measures the degree to which variance (of the means)
between chains exceeds what one would expect if the chains were identically
distributed. See [Gelman and Rubin (1992)][1]; [Brooks and Gelman (1998)][2].
Some guidelines:
* The initial state of the chains should be drawn from a distribution
overdispersed with respect to the target.
* If all chains converge to the target, then as `N --> infinity`, R-hat --> 1.
Before that, R-hat > 1 (except in pathological cases, e.g. if the chain
paths were identical).
* The above holds for any number of chains `C > 1`. Increasing `C` does
improve effectiveness of the diagnostic.
* Sometimes, R-hat < 1.2 is used to indicate approximate convergence, but of
course this is problem-dependent. See [Brooks and Gelman (1998)][2].
* R-hat only measures non-convergence of the mean. If higher moments, or
other statistics are desired, a different diagnostic should be used. See
[Brooks and Gelman (1998)][2].
Args:
chains_states: `Tensor` or Python structure of `Tensor`s representing the
states of a Markov Chain at each result step. The `ith` state is
assumed to have shape `[Ni, Ci1, Ci2,...,CiD] + A`.
Dimension `0` indexes the `Ni > 1` result steps of the Markov Chain.
Dimensions `1` through `D` index the `Ci1 x ... x CiD` independent
chains to be tested for convergence to the same target.
The remaining dimensions, `A`, can have any shape (even empty).
independent_chain_ndims: Integer type `Tensor` with value `>= 1` giving the
number of dimensions, from `dim = 1` to `dim = D`, holding independent
chain results to be tested for convergence.
split_chains: Python `bool`. If `True`, divide samples from each chain into
first and second halves, treating these as separate chains. This makes
R-hat more robust to non-stationary chains, and is recommended in [3].
validate_args: Whether to add runtime checks of argument validity. If False,
and arguments are incorrect, correct behavior is not guaranteed.
    name: `String` name to prepend to created ops. Default:
`potential_scale_reduction`.
Returns:
`Tensor` structure parallel to `chains_states` representing the
R-hat statistic for the state(s). Same `dtype` as `state`, and
shape equal to `state.shape[1 + independent_chain_ndims:]`.
Raises:
ValueError: If `independent_chain_ndims < 1`.
#### Examples
Diagnosing convergence by monitoring 10 chains that each attempt to
sample from a 2-variate normal.
```python
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
target = tfd.MultivariateNormalDiag(scale_diag=[1., 2.])
# Get 10 (2x) overdispersed initial states.
initial_state = target.sample(10) * 2.
==> (10, 2)
# Get 1000 samples from the 10 independent chains.
chains_states = tfp.mcmc.sample_chain(
num_burnin_steps=200,
num_results=1000,
current_state=initial_state,
trace_fn=None,
kernel=tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target.log_prob,
step_size=0.05,
num_leapfrog_steps=20))
chains_states.shape
==> (1000, 10, 2)
rhat = tfp.mcmc.diagnostic.potential_scale_reduction(
chains_states, independent_chain_ndims=1)
# The second dimension needed a longer burn-in.
rhat.eval()
==> [1.05, 1.3]
```
To see why R-hat is reasonable, let `X` be a random variable drawn uniformly
from the combined states (combined over all chains). Then, in the limit
`N, C --> infinity`, with `E`, `Var` denoting expectation and variance,
```R-hat = ( E[Var[X | chain]] + Var[E[X | chain]] ) / E[Var[X | chain]].```
Using the law of total variance, the numerator is the variance of the combined
states, and the denominator is the total variance minus the variance of the
  individual chain means. If the chains are all drawing from the same
distribution, they will have the same mean, and thus the ratio should be one.
#### References
[1]: <NAME> and <NAME>. General Methods for Monitoring
Convergence of Iterative Simulations. _Journal of Computational and
Graphical Statistics_, 7(4), 1998.
[2]: <NAME> and <NAME>. Inference from Iterative Simulation
Using Multiple Sequences. _Statistical Science_, 7(4):457-472, 1992.
[3]: <NAME>, <NAME>, <NAME>, <NAME>, Paul-Christian
Burkner. Rank-normalization, folding, and localization: An improved R-hat
for assessing convergence of MCMC, 2019. Retrieved from
http://arxiv.org/abs/1903.08008
"""
# tf.get_static_value returns None iff a constant value (as a numpy
# array) is not efficiently computable. Therefore, we try constant_value then
# check for None.
icn_const_ = tf.get_static_value(
ps.convert_to_shape_tensor(independent_chain_ndims))
if icn_const_ is not None:
independent_chain_ndims = icn_const_
if icn_const_ < 1:
raise ValueError(
'Argument `independent_chain_ndims` must be `>= 1`, found: {}'.format(
independent_chain_ndims))
def single_state(s):
return _potential_scale_reduction_single_state(
s, independent_chain_ndims, split_chains, validate_args)
with tf.name_scope('potential_scale_reduction' if name is None else name):
return tf.nest.map_structure(single_state, chains_states)
def _potential_scale_reduction_single_state(state, independent_chain_ndims,
split_chains, validate_args):
"""potential_scale_reduction for one single state `Tensor`."""
# casting integers to floats for floating-point division
# check to see if the `state` is a numpy object for the numpy test suite
if dtype_util.as_numpy_dtype(state.dtype) is np.int64:
state = tf.cast(state, tf.float64)
elif dtype_util.is_integer(state.dtype):
state = tf.cast(state, tf.float32)
with tf.name_scope('potential_scale_reduction_single_state'):
# We assume exactly one leading dimension indexes e.g. correlated samples
# from each Markov chain.
state = tf.convert_to_tensor(state, name='state')
n_samples_ = tf.compat.dimension_value(state.shape[0])
if n_samples_ is not None: # If available statically.
if split_chains and n_samples_ < 4:
raise ValueError(
'Must provide at least 4 samples when splitting chains. '
'Found {}'.format(n_samples_))
if not split_chains and n_samples_ < 2:
raise ValueError(
'Must provide at least 2 samples. Found {}'.format(n_samples_))
elif validate_args:
if split_chains:
assertions = [assert_util.assert_greater(
ps.shape(state)[0], 4,
message='Must provide at least 4 samples when splitting chains.')]
with tf.control_dependencies(assertions):
state = tf.identity(state)
else:
assertions = [assert_util.assert_greater(
ps.shape(state)[0], 2,
message='Must provide at least 2 samples.')]
with tf.control_dependencies(assertions):
state = tf.identity(state)
# Define so it's not a magic number.
# Warning! `if split_chains` logic assumes this is 1!
sample_ndims = 1
if split_chains:
# Split the sample dimension in half, doubling the number of
# independent chains.
# For odd number of samples, keep all but the last sample.
state_shape = ps.shape(state)
n_samples = state_shape[0]
state = state[:n_samples - n_samples % 2]
# Suppose state = [0, 1, 2, 3, 4, 5]
# Step 1: reshape into [[0, 1, 2], [3, 4, 5]]
# E.g. reshape states of shape [a, b] into [2, a//2, b].
state = tf.reshape(
state,
ps.concat([[2, n_samples // 2], state_shape[1:]], axis=0)
)
# Step 2: Put the size `2` dimension in the right place to be treated as a
# chain, changing [[0, 1, 2], [3, 4, 5]] into [[0, 3], [1, 4], [2, 5]],
# reshaping [2, a//2, b] into [a//2, 2, b].
state = tf.transpose(
a=state,
perm=ps.concat(
[[1, 0], ps.range(2, ps.rank(state))], axis=0))
# We're treating the new dim as indexing 2 chains, so increment.
independent_chain_ndims += 1
sample_axis = ps.range(0, sample_ndims)
chain_axis = ps.range(sample_ndims,
sample_ndims + independent_chain_ndims)
sample_and_chain_axis = ps.range(
0, sample_ndims + independent_chain_ndims)
n = _axis_size(state, sample_axis)
m = _axis_size(state, chain_axis)
# In the language of Brooks and Gelman (1998),
# B / n is the between chain variance, the variance of the chain means.
# W is the within sequence variance, the mean of the chain variances.
b_div_n = _reduce_variance(
tf.reduce_mean(state, axis=sample_axis, keepdims=True),
sample_and_chain_axis,
biased=False)
w = tf.reduce_mean(
_reduce_variance(state, sample_axis, keepdims=True, biased=False),
axis=sample_and_chain_axis)
# sigma^2_+ is an estimate of the true variance, which would be unbiased if
# each chain was drawn from the target. c.f. "law of total variance."
sigma_2_plus = ((n - 1) / n) * w + b_div_n
return ((m + 1.) / m) * sigma_2_plus / w - (n - 1.) / (m * n)
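# Illustrative cross-check (not part of the library API above): R-hat for a
# single [n_samples, n_chains] NumPy array, using the same B/n, W and
# sigma^2_+ quantities as the TensorFlow code. Intended only to make the
# formula concrete.
def _numpy_rhat_sketch(samples):
  samples = np.asarray(samples, dtype=np.float64)
  n, m = samples.shape
  chain_means = samples.mean(axis=0)
  b_div_n = chain_means.var(ddof=1)          # between-chain variance / n
  w = samples.var(axis=0, ddof=1).mean()     # mean within-chain variance
  sigma_2_plus = (n - 1.) / n * w + b_div_n  # "law of total variance" estimate
  return (m + 1.) / m * sigma_2_plus / w - (n - 1.) / (m * n)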
# TODO(b/72873233) Move some variant of this to tfd.sample_stats.
def _reduce_variance(x, axis=None, biased=True, keepdims=False):
with tf.name_scope('reduce_variance'):
x = tf.convert_to_tensor(x, name='x')
mean = tf.reduce_mean(x, axis=axis, keepdims=True)
biased_var = tf.reduce_mean(
tf.math.squared_difference(x, mean), axis=axis, keepdims=keepdims)
if biased:
return biased_var
n = _axis_size(x, axis)
return (n / (n - 1.)) * biased_var
def _axis_size(x, axis=None):
"""Get number of elements of `x` in `axis`, as type `x.dtype`."""
if axis is None:
return ps.cast(ps.size(x), x.dtype)
return ps.cast(
ps.reduce_prod(
ps.gather(ps.shape(x), axis)), x.dtype)
| 1.695313 | 2 |
mpl/models/leaf.py | jiangyuang/ModelPruningLibrary | 13 | 3817 | from torch import nn as nn
from .base_model import BaseModel
from ..nn.conv2d import DenseConv2d
from ..nn.linear import DenseLinear
__all__ = ["Conv2", "conv2", "Conv4", "conv4"]
class Conv2(BaseModel):
def __init__(self):
super(Conv2, self).__init__()
self.features = nn.Sequential(DenseConv2d(1, 32, kernel_size=5, padding=2), # 32x28x28
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2), # 32x14x14
DenseConv2d(32, 64, kernel_size=5, padding=2), # 64x14x14
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=2)) # 64x7x7
self.classifier = nn.Sequential(DenseLinear(64 * 7 * 7, 2048),
nn.ReLU(inplace=True),
DenseLinear(2048, 62))
self.collect_prunable_layers()
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
class Conv4(BaseModel):
def __init__(self):
super(Conv4, self).__init__()
self.features = nn.Sequential(DenseConv2d(3, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=1),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2),
DenseConv2d(32, 32, kernel_size=3, padding=2),
nn.BatchNorm2d(32),
nn.MaxPool2d(2))
self.classifier = DenseLinear(in_features=32 * 6 * 6, out_features=2)
def forward(self, inp):
out = self.features(inp)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def conv2() -> Conv2:
return Conv2()
def conv4() -> Conv4:
return Conv4()
# TODO: define pretrain etc.
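# Shape sanity-check sketch (illustrative, not part of the original module).
# Conv2 matches 28x28 single-channel inputs (62 classes, FEMNIST-style); the
# 32*6*6 classifier of Conv4 implies an 84x84 RGB input. Both input sizes are
# assumptions inferred from the layer arithmetic, not stated in this file.
def _smoke_test():
    import torch
    assert conv2()(torch.zeros(1, 1, 28, 28)).shape == (1, 62)
    assert conv4()(torch.zeros(1, 3, 84, 84)).shape == (1, 2)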
| 2.859375 | 3 |
scripts/generate_network_interactomix.py | quimaguirre/NetworkAnalysis | 1 | 3818 | <reponame>quimaguirre/NetworkAnalysis
import argparse
import ConfigParser
import sys, os, re
import biana
try: from biana import *
except: sys.exit(10)
import methods_dictionaries as methods_dicts
def main():
options = parse_user_arguments()
generate_network(options)
def parse_user_arguments(*args, **kwds):
parser = argparse.ArgumentParser(
description = "Generate a protein-protein interaction network (implemented for Interactomix platform)",
epilog = "@oliva's lab 2019")
parser.add_argument('-iseed','--seeds_input_file',dest='seed',action = 'store',
help = 'Seeds Input file (default is input_seed)')
parser.add_argument('-radius','--radius_of_subnetwork_around_seeds',dest='radius',default=0,action = 'store',type=int,
help = '''Network is built in a radius of connections around the seed proteins.
If 0, it creates the complete interactome''')
parser.add_argument('-taxid','--TaxID',dest='taxid',action = 'store',default='9606',
help = 'Tax ID (i.e. human=9606 is default if TaxID=0 there is no restriction)')
parser.add_argument('-stype','--seed_type',dest='stype',action = 'store',default='geneid',
help = 'Type of identifier for seeds (default is geneid)')
parser.add_argument('-ttype','--translation_type',dest='ttype',action = 'store',default='accessionnumber',
help = '''Type of identifier for the output translation of codes (default is accessionnumber)
Using "proteinsequence" provides with the longest sequence of all codes''')
parser.add_argument('-trans','--translation_of_nodes_file',dest='translation_file',action = 'store',default='translation_nodes.txt',
help = 'File with the translation of codes from BIANA to the selected type for all nodes')
parser.add_argument('-strans','--translation_of_seeds_file',dest='translation_seeds_file',action = 'store',default='translation_seeds_to_BIANA_codes.txt',
help = 'File with the translation of codes from the introduced type of code to BIANA codes')
parser.add_argument('-edge','--edge_file',dest='edge',action = 'store', default='biana_edges',
help = 'Output file with edges(default is biana_edges)')
parser.add_argument('-node','--node_file',dest='node',action = 'store', default='biana_nodes',
help = 'Output file with nodes(default is biana_nodes)')
parser.add_argument('-format','--output_format',dest='format',action = 'store',default='sif',
help = '''Format file of the edge file:\tsif (default), netscore, raw, multi-fields:\n
'sif': <node1>\tscore\t<node2>\n
'netscore': <node1>\t<node2>\t<score>\n
'raw': <node1>\t<node2>\n
'multi-fields' : <node1>\t<node2>\t<sources>\t<method_ids>\t<method_names>\t<pmids>\n''')
parser.add_argument('-rAFF','--restricted_to_TAP',dest='restricted_to_TAP',action = 'store_true',
help = 'Flag to use interactions at least described by affinity methods (i.e. Tandem Affinity Purification)')
parser.add_argument('-rY2H','--restricted_to_Y2H',dest='restricted_to_Y2H',action = 'store_true',
help = 'Flag to use interactions at least described by yeast two hybrid methods (Y2H)')
parser.add_argument('-rUSER','--restricted_to_user',dest='restricted_to_user',action = 'store',default='restricted_methods',
help = 'File to use interactions described by the user selected methods')
parser.add_argument('-eAFF','--except_TAP',dest='except_TAP',action = 'store_true',
help = 'Flag to use all interactions except those described by affinity methods (i.e. Tandem Affinity Purification)')
parser.add_argument('-eY2H','--except_Y2H',dest='except_Y2H',action = 'store_true',
help = 'Flag to use all interactions except those described by yeast two hybrid methods (Y2H)')
parser.add_argument('-eUSER','--except_user',dest='except_user',action = 'store',default='restricted_methods',
help = 'File to reject interactions described by the user selected methods')
parser.add_argument('-v','--verbose',dest='verbose',action = 'store_true',
help = 'Flag to use verbose mode')
options=parser.parse_args()
"""
Example:
python generate_network_interactomix.py -iseed example/sample1.txt -radius 1 -taxid 9606 -stype uniprotentry -ttype proteinsequence -trans example/output/example.proteinsequence.trans -strans example/output/example.seeds.trans -edge example/output/example.edges -node example/output/example.nodes -format raw -rY2H
python /home/quim/PHD/Projects/BIANA/scripts/generate_network_interactomix.py -radius 0 -taxid 9606 -edge /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020.txt -node /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020_nodes.txt -trans /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020_translation.txt -ttype geneid -format multi-fields &> /home/quim/PHD/Projects/BIANA/outputs/BIANA_2020_geneID_seqtax_drugtarget/human_network_biana_2020.log
"""
return options
def generate_network(options):
"""
Generates a protein-protein interaction network extracting information from BIANA.
"""
#----------------------#
# FIXED PARAMETERS #
#----------------------#
# Parameters that I have decided to fix
restricted_to_seeds = False
minimum_number_of_methods = 1
minimum_number_of_db = 1
seed_score = 0.1
#--------------------------------------#
# GET INFORMATION FROM CONFIG FILE #
#--------------------------------------#
# Get the program path
main_path = os.path.abspath(os.path.dirname(__file__))
# Read the config file
config_file = os.path.join(main_path, 'config.ini')
config = ConfigParser.ConfigParser()
config.read(config_file)
#--------------------------------------#
# LOAD THE DICTIONARIES OF METHODS #
#--------------------------------------#
# Get the affinity dictionary
affinity_dict = methods_dicts.affinity_dict
affinity=set(affinity_dict.keys())
# Get the complementation dictionary
complementation_dict = methods_dicts.complementation_dict
complementation=set(complementation_dict.keys())
#---------------------------------------#
# GET METHODS THAT WILL BE FILTERED #
#---------------------------------------#
# Check if the user has introduced a file with methods that must be included
if not fileExist(options.restricted_to_user):
print "No restriction on methods selected by the user"
user_selection=False
else:
use_methods=[]
with open(options.restricted_to_user) as input_method_fd:
for line in input_method_fd:
fields = line.strip().split("\t")
use_methods.append(fields[0])
user_selection=True
print "Input to use only Methods:",repr(use_methods)
# Check if the user has introduced a file with methods that have to be excluded
if not fileExist(options.except_user):
print "No rejection of methods selected by the user"
user_rejection=False
else:
no_methods=[]
with open(options.except_user) as input_method_fd:
for line in input_method_fd:
fields = line.strip().split("\t")
no_methods.append(fields[0])
user_rejection=True
print "Input of rejected Methods:",repr(no_methods)
#---------------------------#
# START A BIANA SESSION #
#---------------------------#
print "Open session"
session = create_new_session( sessionID="biana_session",
dbname=config.get('BIANA', 'database'),
dbhost=config.get('BIANA', 'host'),
dbuser=config.get('BIANA', 'user'),
dbpassword=config.get('BIANA', 'password'),
unification_protocol=config.get('BIANA', 'unification_protocol') )
print "Continue"
#------------------------------#
# DEFINE A USER ENTITY SET #
#------------------------------#
# Create network network of expansion if the radius is larger than 0
if restricted_to_seeds or options.radius>0:
# Check if the seeds file exists
if not fileExist(options.seed):
print "File with seeds is missing or not found"
sys.exit(10)
else:
level=options.radius
seed_list = get_seeds_from_file(options.seed)
# If we have Taxonomy restriction, we add it
if options.taxid != "0":
print("Check Proteome %s"%(repr(options.taxid)))
proteome = session.create_new_user_entity_set( identifier_description_list =seed_list,
attribute_restriction_list=[("taxid",options.taxid)],
id_type=options.stype,new_user_entity_set_id="proteome",
negative_attribute_restriction_list=[] )
else:
print('Proteome without Taxonomy restriction')
proteome = session.create_new_user_entity_set( identifier_description_list =seed_list,
id_type=options.stype,new_user_entity_set_id="proteome",
negative_attribute_restriction_list=[] )
else:
level=0
proteome = session.create_new_user_entity_set( identifier_description_list = [("taxid",options.taxid)],
attribute_restriction_list=[], id_type="embedded",
new_user_entity_set_id="proteome",
negative_attribute_restriction_list=[] )
#----------------------------------------------------#
# SELECT THE INTERACTIONS OF THE USER ENTITY SET #
#----------------------------------------------------#
print ("Selecting interactions")
# Select interactions that have been detected at least by affinity technology
if options.restricted_to_TAP:
print ('Using interactions at least described by affinity methods (i.e. Tandem Affinity Purification)')
session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
relation_attribute_restriction_list = [("Method_id",400)],
#relation_attribute_restriction_list = [("psimi_name","affinity technology")],
include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
# Select interactions that have been detected at least by yeast two hybrid
elif options.restricted_to_Y2H:
print ('Using interactions at least described by yeast-two-hybrid methods (Y2H)')
session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
relation_attribute_restriction_list = [("Method_id",18)],
#relation_attribute_restriction_list = [("psimi_name","y2h2")],
include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
# Select all interactions
else:
session.create_network( user_entity_set_id = "proteome" , level = level, relation_type_list=["interaction"] ,
include_relations_last_level = (not restricted_to_seeds) , use_self_relations = False)
# Summary of interactions
out_network = open(options.edge,'w')
all_interactions = proteome.getRelations()
print "Num interactions:", len(all_interactions)
#--------------------------------------#
# FILTER THE SELECTED INTERACTIONS #
#--------------------------------------#
nodes=set()
# Get all the user entity ids from the user entity set 'proteome'
all_uEs = proteome.get_user_entity_ids()
# Obtain a dictionary user entity ID => type
uEId_to_type = session.dbAccess.get_user_entity_type(config.get('BIANA', 'unification_protocol'), all_uEs)
skip_interactions=0
for (uE_id1, uE_id2) in all_interactions:
#self.dbAccess.get_external_entities_dict( externalEntityIdsList = [external_entity_relation_id] )
# Get TYPE of user entity
uE1_type = uEId_to_type[uE_id1]
uE2_type = uEId_to_type[uE_id2]
# If type is not protein, we skip the interaction
if uE1_type != 'protein' or uE2_type != 'protein':
if options.verbose:
print('Skipping interaction because the type of one of the user entities is not protein!')
print('Node 1: {}\tType: {}'.format(uE_id1, uE1_type))
print('Node 2: {}\tType: {}'.format(uE_id2, uE2_type))
skip_interactions=skip_interactions+1
continue
eErIDs_list = proteome.get_external_entity_relation_ids(uE_id1, uE_id2)
method_names = set()
method_ids = set()
source_databases = set()
use_method_ids=set()
pubmed_ids = set()
unused_method_names = set()
relationObj_dict = session.dbAccess.get_external_entities_dict(
externalEntityIdsList = eErIDs_list, attribute_list = [],
relation_attribute_list = ["method_id","psimi_name","pubmed"], participant_attribute_list = [] )
num_methods=0
for current_eErID in eErIDs_list:
relationObj = relationObj_dict[current_eErID]
if options.verbose:
print "Interaction: (",uE_id1,",",uE_id2,")"
print relationObj
#if relationObj.get_attribute(attribute_identifier="psimi_name") is not None:
# print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="psimi_name") ])
#if relationObj.get_attribute(attribute_identifier="method_id") is not None:
#print "\t".join([ x.value for x in relationObj.get_attribute(attribute_identifier="method_id") ])
#print relationObj.get_attributes_dict()
#print [ x.value for x in relationObj.get_attributes_dict()["psimi_name"] ]
#print [ x.value for x in relationObj.get_attributes_dict()["method_id"] ]
if "psimi_name" in relationObj.get_attributes_dict():
method_names.update([ str(x.value) for x in relationObj.get_attributes_dict()["psimi_name"] ])
if "method_id" in relationObj.get_attributes_dict():
method_ids.update([ x.value for x in relationObj.get_attributes_dict()["method_id"]])
if "pubmed" in relationObj.get_attributes_dict():
pubmed_ids.update([ x.value for x in relationObj.get_attributes_dict()["pubmed"]])
source_databases.add(str(session.dbAccess.get_external_database(
database_id = relationObj.get_source_database()) ))
if options.except_TAP:
for m in method_ids:
if m not in affinity:
use_method_ids.add(m)
#print "Add", m
else:
unused_method_names.add(affinity_dict[m])
elif options.except_Y2H:
#print "check Y2H"
for m in method_ids:
if m not in complementation:
use_method_ids.add(m)
#print "Add", m
else:
unused_method_names.add(complementation_dict[m])
elif user_rejection:
for m in method_ids:
if m not in no_methods:
use_method_ids.add(m)
elif user_selection:
for m in method_ids:
#print "Check",repr(use_methods)
if m in set(use_methods):
use_method_ids.add(m)
if options.verbose:
print "Not among selected methods ",m
else:
use_method_ids.update(method_ids)
if len(source_databases) > 0:
info_sources=";".join([str(x) for x in source_databases])
else:
if options.verbose:
print('Skipping interaction it has no source database!')
print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2))
skip_interactions=skip_interactions+1
continue
if len(method_names) > 0:
method_names = [x for x in method_names if x not in unused_method_names] # Remove method names that were excluded
info_methods=";".join([str(x) for x in method_names])
else:
info_methods='-'
if len(use_method_ids) > 0:
info_methods_ids=";".join([str(x) for x in use_method_ids])
else:
if options.verbose:
print('Skipping interaction it has no method!')
print('Node 1: {}\tNode 2: {}'.format(uE_id1, uE_id2))
skip_interactions=skip_interactions+1
continue
if len(pubmed_ids) > 0:
info_pubmed_ids=";".join([str(x) for x in pubmed_ids])
else:
info_pubmed_ids='-'
num_databases=len(source_databases)
num_methods=len(use_method_ids)
num_pubmeds = len(pubmed_ids)
if options.verbose:
print "Methods",num_methods,info_methods,"\tSelected:",info_methods_ids
print "Databases",num_databases,info_sources
print "Pubmeds",num_pubmeds,info_pubmed_ids
# Check if the number of methods is higher than the minimum established
if num_methods >= minimum_number_of_methods:
use=True
else:
use=False
# Check if the number of database is higher than the minimum established
if use and num_databases >= minimum_number_of_db:
use=True
else:
use=False
if not use:
skip_interactions=skip_interactions+1
#print method_names, method_ids, source_databases
#----------------------#
# OUTPUT EDGE FILE #
#----------------------#
if use:
#print uE_id1, uE_id/2
nodes.add(uE_id1)
nodes.add(uE_id2)
#print "Attribute ",(uE_id1,uE_id2).get_attribute(
if options.format == 'multi-fields' :
out_network.write("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n".
format(uE_id1,uE_id2,info_sources,info_methods_ids,info_methods,info_pubmed_ids))
elif options.format == 'netscore':
out_network.write('\t{}\t{}\t{:.2f}\n'.format(uE_id1,uE_id2,1.))
elif options.format == 'raw':
out_network.write("{}\t{}\n".format(uE_id1,uE_id2))
else:
# If the format is not multi-fields, netscore or raw, the output format is sif
out_network.write("{}\t{:.2f}\t{}\n".format(uE_id1,1.,uE_id2))
print "Num neglected interactions:", skip_interactions
out_network.close()
#---------------------------------------#
# OUTPUT NODE AND TRANSLATION FILES #
#---------------------------------------#
# If we wanted the complete interactome, the translation will be done differently
if options.radius <= 0:
# Output node file
out_proteins = open(options.node,'w')
for protein in nodes:
if options.format == 'multi-fields':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,0.1))
elif options.format == 'netscore':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,0.1))
else:
out_proteins.write("{0}\t{1:.2f}\n".format(protein,0.1))
out_proteins.close()
################################# TRANSLATION ####################################
out_translation = open(options.translation_file,'w')
# TRANSLATION TO 'stype'
trans_stype=False
if options.stype != 'proteinsequence' and options.stype != options.ttype:
trans_stype = True
out_trans_stype = open(options.translation_file+'.'+options.stype+'.trans','w')
for protein in nodes:
uE = session.get_user_entity(protein)
translate=set()
translate_stype=set()
if options.ttype == "proteinsequence":
maxlen=0;
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
if maxlen < len(current_id.value.get_sequence().upper()):
maxlen=len(current_id.value.get_sequence().upper())
translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=options.ttype) if len(str(current_id.value.get_sequence().upper())) == maxlen ] )
#print "Translation",protein,translation
#print("{0}\t'{1}'\n".format(protein,translation))
else:
##### TRANSLATION TO 'ttype'
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
translate.add(current_id.value.upper())
translation="','".join(["{0}".format(x) for x in translate])
out_translation.write("{0}\t'{1}'\n".format(protein,translation))
##### TRANSLATION TO STYPE
if trans_stype:
for current_id in uE.get_attribute(attribute_identifier=options.stype):
translate_stype.add(current_id.value.upper())
translation_stype="','".join(["{0}".format(x) for x in translate_stype])
out_trans_stype.write("{0}\t'{1}'\n".format(protein,translation_stype))
out_translation.close()
if trans_stype:
out_trans_stype.close()
####################################################################################
# If we wanted a network of expansion, the translation will be done differently
elif options.radius > 0:
# Read the seeds
seeds=set()
input_seed = open(options.seed,'r')
for line in input_seed:
fields = line.strip().split("\t")
seeds.add(fields[0].lower())
input_seed.close()
# Output node file
out_proteins = open(options.node,'w')
translate={}
for protein in nodes:
score=seed_score
uE = session.get_user_entity(protein)
for current_id in uE.get_attribute(attribute_identifier=options.stype):
if current_id.value.lower() in seeds:
translate.setdefault(current_id.value.lower(),[])
translate[current_id.value.lower()].append(protein)
score=1.0
if options.format == 'multi-fields':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,score))
elif options.format == 'netscore':
out_proteins.write("{0}\t{1:.2f}\t{2:.2f}\t{3:.2f}\n".format(protein,1.,1.,score))
else:
out_proteins.write("{0}\t{1:.2f}\n".format(protein,score))
out_proteins.close()
# Get the IDS of single nodes that were not previously found in the network
single=set()
for uE_id in proteome.get_unconnected_nodes():
single.add(uE_id)
for protein in single:
uE = session.get_user_entity(protein)
for current_id in uE.get_attribute(attribute_identifier=options.stype):
if current_id.value.lower() in seeds:
translate.setdefault(current_id.value.lower(),[])
translate[current_id.value.lower()].append(protein)
# Get all IDS of SEEDS, defined as "proteome", and check missing codes to be
# added for translation
allseed=set()
for uE_id in proteome.get_user_entity_ids():
allseed.add(uE_id)
for protein in allseed:
if protein not in single and protein not in nodes:
uE = session.get_user_entity(protein)
for current_id in uE.get_attribute(attribute_identifier=options.stype):
if current_id.value.lower() in seeds:
translate.setdefault(current_id.value.lower(),[])
translate[current_id.value.lower()].append(protein)
################################# TRANSLATION ####################################
out_translation = open(options.translation_seeds_file,'w')
for s in seeds:
if s == '': continue
if s in translate:
codes=set(translate[s])
translation="','".join([str(x) for x in codes])
#out_translation.write("%s\t'%s'\n" % (s.upper(),translation))
out_translation.write("{0}\t'{1}'\n".format(s.upper(),translation))
else:
out_translation.write("{0}\t'Unknown'\n".format(s.upper()))
out_translation.close()
# Output translation file
# TRANSLATION TO 'ttype'
out_translation = open(options.translation_file,'w')
# TRANSLATION TO 'stype'
trans_stype=False
if options.stype != 'proteinsequence' and options.stype != options.ttype:
trans_stype = True
out_trans_stype = open(options.translation_file+'.'+options.stype+'.trans','w')
for protein in nodes:
uE = session.get_user_entity(protein)
translate=set()
translate_stype=set()
if options.ttype == "proteinsequence":
maxlen=0;
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
if maxlen < len(current_id.value.get_sequence().upper()):
maxlen=len(current_id.value.get_sequence().upper())
translation=",".join([str(current_id.value.get_sequence().upper()) for current_id in uE.get_attribute(attribute_identifier=options.ttype) if len(str(current_id.value.get_sequence().upper())) == maxlen ] )
#print "Translation",protein,translation
#print("{0}\t'{1}'\n".format(protein,translation))
else:
for current_id in uE.get_attribute(attribute_identifier=options.ttype):
translate.add(current_id.value.upper())
translation="','".join(["{0}".format(x) for x in translate])
out_translation.write("{0}\t'{1}'\n".format(protein,translation))
##### TRANSLATION TO STYPE
if trans_stype:
for current_id in uE.get_attribute(attribute_identifier=options.stype):
translate_stype.add(current_id.value.upper())
translation_stype="','".join(["{0}".format(x) for x in translate_stype])
out_trans_stype.write("{0}\t'{1}'\n".format(protein,translation_stype))
out_translation.close()
if trans_stype:
out_trans_stype.close()
####################################################################################
print('Generation of the network done!')
return
def fileExist(file):
"""
Checks if a file exists AND is a file
"""
return os.path.exists(file) and os.path.isfile(file)
def get_seeds_from_file(seed_file):
"""
Obtain the seeds from a file and introduce them to a Python list.
The seeds must be separated by new lines!
"""
seed_set = set()
with open(seed_file, 'r') as seed_file_fd:
for line in seed_file_fd:
fields = line.strip().split('\t')
seed_set.add(fields[0])
return list(seed_set)
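# Example of the expected seeds file (illustrative): one identifier per line,
# in the code type given by -stype (e.g. geneid); extra tab-separated columns
# are ignored. The identifiers below are placeholders.
#
#   7157
#   672
#   675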
if __name__ == "__main__":
main()
| 2.671875 | 3 |
tests/data/s3_scrape_config.py | kids-first/kf-api-study-creator | 3 | 3819 | <gh_stars>1-10
"""
This is an extract config intended for S3 object manifests produced by TBD.
To use it, you must import it in another extract config and override at least
the `source_data_url`. You may also append additional operations to the
`operations` list as well.
For example you could have the following in your extract config module:
from kf_ingest_packages.common.extract_configs.s3_object_info import *
source_data_url = 'file://../data/kf-seq-data-bcm-chung-s3-objects.tsv'
operations.append(
value_map(
in_col='Key',
out_col=CONCEPT.BIOSPECIMEN.ID,
m=lambda x: x
)
)
"""
import os
from kf_lib_data_ingest.common import constants
from kf_lib_data_ingest.common.constants import GENOMIC_FILE
from kf_lib_data_ingest.common.concept_schema import CONCEPT
from kf_lib_data_ingest.etl.extract.operations import (
keep_map,
row_map,
value_map,
constant_map,
)
def file_ext(x):
"""
Get genomic file extension
"""
matches = [
file_ext for file_ext in FILE_EXT_FORMAT_MAP if x.endswith(file_ext)
]
if matches:
file_ext = max(matches, key=len)
else:
file_ext = None
return file_ext
FILE_EXT_FORMAT_MAP = {
"fq": GENOMIC_FILE.FORMAT.FASTQ,
"fastq": GENOMIC_FILE.FORMAT.FASTQ,
"fq.gz": GENOMIC_FILE.FORMAT.FASTQ,
"fastq.gz": GENOMIC_FILE.FORMAT.FASTQ,
"bam": GENOMIC_FILE.FORMAT.BAM,
"hgv.bam": GENOMIC_FILE.FORMAT.BAM,
"cram": GENOMIC_FILE.FORMAT.CRAM,
"bam.bai": GENOMIC_FILE.FORMAT.BAI,
"bai": GENOMIC_FILE.FORMAT.BAI,
"cram.crai": GENOMIC_FILE.FORMAT.CRAI,
"crai": GENOMIC_FILE.FORMAT.CRAI,
"g.vcf.gz": GENOMIC_FILE.FORMAT.GVCF,
"g.vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
"vcf.gz": GENOMIC_FILE.FORMAT.VCF,
"vcf": GENOMIC_FILE.FORMAT.VCF,
"vcf.gz.tbi": GENOMIC_FILE.FORMAT.TBI,
"peddy.html": "html",
}
DATA_TYPES = {
GENOMIC_FILE.FORMAT.FASTQ: GENOMIC_FILE.DATA_TYPE.UNALIGNED_READS,
GENOMIC_FILE.FORMAT.BAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
GENOMIC_FILE.FORMAT.CRAM: GENOMIC_FILE.DATA_TYPE.ALIGNED_READS,
GENOMIC_FILE.FORMAT.BAI: "Aligned Reads Index",
GENOMIC_FILE.FORMAT.CRAI: "Aligned Reads Index",
GENOMIC_FILE.FORMAT.VCF: "Variant Calls",
GENOMIC_FILE.FORMAT.GVCF: "gVCF",
"g.vcf.gz.tbi": "gVCF Index",
"vcf.gz.tbi": "Variant Calls Index",
"html": "Other",
}
def filter_df_by_file_ext(df):
"""
Only keep rows where file extension is one of those in
FILE_EXT_FORMAT_MAP.keys
"""
df[CONCEPT.GENOMIC_FILE.FILE_FORMAT] = df["Key"].apply(
lambda x: file_format(x)
)
return df[df[CONCEPT.GENOMIC_FILE.FILE_FORMAT].notnull()]
source_data_url = (
'https://localhost:5002/download/study/SD_ME0WME0W/'
'file/SF_Y1JMXTTS/version/FV_4RYEMD71'
)
do_after_read = filter_df_by_file_ext
def s3_url(row):
"""
Create S3 URL for object from S3 bucket and key
"""
return f's3://{row["Bucket"]}/{row["Key"]}'
def file_format(x):
"""
Get genomic file format by looking genomic file ext up in
FILE_EXT_FORMAT_MAP dict
"""
# File format
return FILE_EXT_FORMAT_MAP.get(file_ext(x))
def data_type(x):
"""
Get genomic file data type by looking up file format in DATA_TYPES.
However, if the file's extension has `tbi` in it, then use the file
extension itself to do the data type lookup.
"""
ext = file_ext(x)
if "tbi" in ext:
data_type = DATA_TYPES.get(ext)
else:
data_type = DATA_TYPES.get(file_format(x))
return data_type
operations = [
row_map(out_col=CONCEPT.GENOMIC_FILE.ID, m=lambda row: s3_url(row)),
row_map(
out_col=CONCEPT.GENOMIC_FILE.URL_LIST, m=lambda row: [s3_url(row)]
),
value_map(
in_col="Key",
out_col=CONCEPT.GENOMIC_FILE.FILE_NAME,
m=lambda x: os.path.split(x)[-1],
),
keep_map(in_col="Size", out_col=CONCEPT.GENOMIC_FILE.SIZE),
value_map(
in_col="ETag",
out_col=CONCEPT.GENOMIC_FILE.HASH_DICT,
m=lambda x: {constants.FILE.HASH.S3_ETAG.lower(): x.replace('"', "")},
),
constant_map(
out_col=CONCEPT.GENOMIC_FILE.AVAILABILITY,
m=constants.GENOMIC_FILE.AVAILABILITY.IMMEDIATE,
),
keep_map(
in_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
out_col=CONCEPT.GENOMIC_FILE.FILE_FORMAT,
),
value_map(
in_col="Key",
out_col=CONCEPT.GENOMIC_FILE.DATA_TYPE,
m=lambda x: data_type(x),
),
]
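# Worked example (illustrative; the object key below is hypothetical):
#
#   >>> file_ext("harmonized/sample-01.g.vcf.gz")
#   'g.vcf.gz'
#   >>> file_format("harmonized/sample-01.g.vcf.gz") == GENOMIC_FILE.FORMAT.GVCF
#   True
#   >>> data_type("harmonized/sample-01.g.vcf.gz.tbi")
#   'gVCF Index'
#
# The longest matching extension wins, and ".tbi" keys are typed by their raw
# extension rather than by their format, per data_type() above.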
| 1.765625 | 2 |
hard-gists/5c973ec1b5ab2e387646/snippet.py | jjhenkel/dockerizeme | 21 | 3820 | import bpy
from bpy.app.handlers import persistent
bl_info = {
"name": "Playback Once",
"author": "<NAME>",
"version": (1, 0, 0),
"blender": (2, 67, 3),
"location": "",
"description": "Playback once.",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Animation"}
@persistent
def stopPlaybackAtEnd(scene):
if scene.frame_current >= scene.frame_end:
bpy.ops.screen.animation_cancel()
def register():
bpy.app.handlers.frame_change_pre.append(stopPlaybackAtEnd)
def unregister():
bpy.app.handlers.frame_change_pre.remove(stopPlaybackAtEnd)
if __name__ == "__main__":
register()
| 2.453125 | 2 |
Py3Challenges/saves/challenges/c6_min.py | AlbertUnruh/Py3Challenges | 2 | 3821 | <filename>Py3Challenges/saves/challenges/c6_min.py
"""
To master this you should consider using the builtin-``min``-function.
"""
from ...challenge import Challenge
from random import randint
x = []
for _ in range(randint(2, 10)):
x.append(randint(1, 100))
intro = f"You have to print the lowest value of {', '.join(str(_) for _ in x[:-1])} and {x[-1]}. (values: x)"
def validate_function(stdin: str, stdout: str, stderr: str, exc: tuple) -> bool:
try:
z = int(stdout.removesuffix("\n"))
except ValueError:
return False
else:
return min(x) == z
challenge = Challenge(
intro=intro,
validate_function=validate_function,
help=__doc__,
values={"x": x},
capture_stdout=True,
)
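# A passing solution (illustrative): the validator compares stdout with min(x),
# so inside the challenge the player simply needs to run:
#
#   print(min(x))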
| 3.8125 | 4 |
services/web/server/tests/unit/with_dbs/01/test_director_v2.py | mrnicegyu11/osparc-simcore | 0 | 3822 | # pylint:disable=unused-variable
# pylint:disable=unused-argument
# pylint:disable=redefined-outer-name
from typing import AsyncIterator
import pytest
from aioresponses import aioresponses
from faker import Faker
from hypothesis import HealthCheck, given, settings
from hypothesis import strategies as st
from models_library.clusters import ClusterID
from models_library.projects import ProjectID
from models_library.projects_pipeline import ComputationTask
from models_library.projects_state import RunningState
from models_library.users import UserID
from simcore_service_webserver import director_v2_api
from simcore_service_webserver.director_v2_models import (
ClusterCreate,
ClusterPatch,
ClusterPing,
)
@pytest.fixture()
async def mocked_director_v2(
director_v2_service_mock: aioresponses,
) -> AsyncIterator[aioresponses]:
yield director_v2_service_mock
@pytest.fixture
def user_id(faker: Faker) -> UserID:
return UserID(faker.pyint(min_value=1))
@pytest.fixture
def project_id(faker: Faker) -> ProjectID:
return ProjectID(faker.uuid4())
@pytest.fixture
def cluster_id(faker: Faker) -> ClusterID:
return ClusterID(faker.pyint(min_value=0))
async def test_create_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
task_out = await director_v2_api.create_or_update_pipeline(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, dict)
assert task_out["state"] == RunningState.NOT_STARTED
async def test_get_computation_task(
mocked_director_v2,
client,
user_id: UserID,
project_id: ProjectID,
):
task_out = await director_v2_api.get_computation_task(
client.app, user_id, project_id
)
assert task_out
assert isinstance(task_out, ComputationTask)
assert task_out.state == RunningState.NOT_STARTED
async def test_delete_pipeline(
mocked_director_v2, client, user_id: UserID, project_id: ProjectID
):
await director_v2_api.delete_pipeline(client.app, user_id, project_id)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_create=st.builds(ClusterCreate))
async def test_create_cluster(
mocked_director_v2, client, user_id: UserID, cluster_create
):
created_cluster = await director_v2_api.create_cluster(
client.app, user_id=user_id, new_cluster=cluster_create
)
assert created_cluster is not None
assert isinstance(created_cluster, dict)
assert "id" in created_cluster
async def test_list_clusters(mocked_director_v2, client, user_id: UserID):
list_of_clusters = await director_v2_api.list_clusters(client.app, user_id=user_id)
assert isinstance(list_of_clusters, list)
assert len(list_of_clusters) > 0
async def test_get_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster = await director_v2_api.get_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster, dict)
assert cluster["id"] == cluster_id
async def test_get_cluster_details(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
cluster_details = await director_v2_api.get_cluster_details(
client.app, user_id=user_id, cluster_id=cluster_id
)
assert isinstance(cluster_details, dict)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_patch=st.from_type(ClusterPatch))
async def test_update_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID, cluster_patch
):
print(f"--> updating cluster with {cluster_patch=}")
updated_cluster = await director_v2_api.update_cluster(
client.app, user_id=user_id, cluster_id=cluster_id, cluster_patch=cluster_patch
)
assert isinstance(updated_cluster, dict)
assert updated_cluster["id"] == cluster_id
async def test_delete_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.delete_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
@settings(suppress_health_check=[HealthCheck.function_scoped_fixture])
@given(cluster_ping=st.builds(ClusterPing))
async def test_ping_cluster(mocked_director_v2, client, cluster_ping: ClusterPing):
await director_v2_api.ping_cluster(client.app, cluster_ping=cluster_ping)
async def test_ping_specific_cluster(
mocked_director_v2, client, user_id: UserID, cluster_id: ClusterID
):
await director_v2_api.ping_specific_cluster(
client.app, user_id=user_id, cluster_id=cluster_id
)
| 1.828125 | 2 |
tools/py/heatmap.py | sriramreddyM/pLitter | 5 | 3823 | '''
helpers for plotting GPS-tagged points from a CSV on a folium map (markers, heatmap)
'''
import folium
from folium import plugins
from folium.plugins import HeatMap
import csv
# class plitterMap():
#     def __init__(self, file_path):
#         self.data = file_path
#         df = []
#         with open(self.data) as f:
#             reader = csv.reader(f)
#             for row in reader:
#                 df_row = []
#                 df_row.append(row[0])
#                 df_row.append(row[1])
#                 df_row.append(row[2])
#                 df.append(df_row)
#         self.df = df
#         self.tooltip = df[0][0]
#     def loadMap(self):
#         row = self.df[0]
#         self.map = folium.Map(location=[float(row[1]), float(row[2])], zoom_start=18)
#     def loadGpsLoc(self):
#         for row in self.df:
#             folium.Marker([float(row[1]), float(row[2])], popup="<i>"+row[0]+"</i>", tooltip=self.tooltip, icon=folium.Icon(icon="circle")).add_to(self.map)
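# Minimal working sketch of the idea in the commented-out class above: build a
# folium map with a HeatMap layer from a CSV of points. The (label, lat, lon)
# column layout is an assumption; adjust it to the actual CSV schema.
def plot_heatmap(csv_path, zoom_start=18):
    points = []
    with open(csv_path) as f:
        for row in csv.reader(f):
            # row[1]/row[2] are assumed to be latitude/longitude
            points.append([float(row[1]), float(row[2])])
    fmap = folium.Map(location=points[0], zoom_start=zoom_start)
    HeatMap(points).add_to(fmap)
    return fmap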
# rangsit_map | 3.046875 | 3 |
generator.py | Geoalert/emergency-mapping | 3 | 3824 | import numpy as np
def random_augmentation(img, mask):
#you can add any augmentations you need
return img, mask
def batch_generator(image, mask,
batch_size=1,
crop_size=0,
patch_size=256,
bbox= None,
augmentation=False):
'''
    image: nparray, must have 3 dimensions (channels first)
    mask: nparray, 2 dimensions, same size as image
    batch_size: int, number of images in a batch
    patch_size: int, size of the square patch returned
    crop_size: int, how many pixels should be cropped off the mask
    bbox: None or tuple of 4 ints (min_y, max_y, min_x, max_x); data is selected from within the bbox
    augmentation: turn data augmentation on/off. The augmentation function is random_augmentation() above
    yields batches of image and mask patches; the image is converted to 'channels last' as required by unet
'''
if np.ndim(mask) != 2 or np.ndim(image) != 3:
raise ValueError('image must have 3 dims and mask 2 dims')
if mask.shape != image.shape[1:]:
raise ValueError('image and mask shape is different')
im_max = float(np.max(image))
mask_max = 1.0
#select subimage
if bbox is not None:
# check bbox
        if bbox[0] < 0 or bbox[2] < 0 \
           or bbox[1] > mask.shape[0] or bbox[3] > mask.shape[1] \
or bbox[0] + patch_size > bbox[1] or bbox[2] + patch_size > bbox[3] \
or patch_size <= 0:
raise ValueError("Incorrect bbox or patch size")
img_ = image[:, bbox[0] : bbox[1], bbox[2]:bbox[3]]
mask_ = mask[bbox[0] : bbox[1], bbox[2]:bbox[3]]
else:
img_ = image
mask_ = mask
while 1:
x = []
y = []
for i in range (batch_size):
random_x = np.random.randint(0, mask_.shape[1] - patch_size)
random_y = np.random.randint(0, mask_.shape[0] - patch_size)
img_patch = img_[:,
random_y : random_y + patch_size,
random_x : random_x + patch_size] / im_max
# transform the image from channels-first (rasterio format) to channels-last (default tensorflow format)
img_patch = np.moveaxis(img_patch, 0, 2)
mask_patch = mask_[random_y : random_y + patch_size,
random_x : random_x + patch_size] / mask_max
if augmentation:
img_patch, mask_patch = random_augmentation(img_patch, mask_patch)
# mask is cropped as it may be useful for some convnets that have output size less than input
if crop_size > 0:
mask_patch = mask_patch[crop_size : -crop_size,
crop_size : -crop_size]
mask_patch = np.expand_dims(mask_patch, 2)
x.append(img_patch)
y.append(mask_patch)
yield (np.array(x), np.array(y))
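# Illustrative usage sketch; the shapes below are arbitrary, any channels-first
# image with a matching 2-D mask works.
if __name__ == "__main__":
    demo_image = np.random.rand(3, 512, 512)             # (channels, height, width)
    demo_mask = (np.random.rand(512, 512) > 0.5) * 1.0   # binary mask
    gen = batch_generator(demo_image, demo_mask, batch_size=4, patch_size=256)
    batch_x, batch_y = next(gen)
    # batch_x: (4, 256, 256, 3) channels-last patches, batch_y: (4, 256, 256, 1)
    print(batch_x.shape, batch_y.shape)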
| 3.28125 | 3 |
awx/api/metadata.py | Avinesh/awx | 1 | 3825 | # Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
from collections import OrderedDict
# Django
from django.core.exceptions import PermissionDenied
from django.db.models.fields import PositiveIntegerField, BooleanField
from django.db.models.fields.related import ForeignKey
from django.http import Http404
from django.utils.encoding import force_text, smart_text
from django.utils.translation import ugettext_lazy as _
# Django REST Framework
from rest_framework import exceptions
from rest_framework import metadata
from rest_framework import serializers
from rest_framework.relations import RelatedField, ManyRelatedField
from rest_framework.fields import JSONField as DRFJSONField
from rest_framework.request import clone_request
# AWX
from awx.main.fields import JSONField, ImplicitRoleField
from awx.main.models import InventorySource, NotificationTemplate
from awx.main.scheduler.kubernetes import PodManager
class Metadata(metadata.SimpleMetadata):
def get_field_info(self, field):
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
text_attrs = [
'read_only', 'label', 'help_text',
'min_length', 'max_length',
'min_value', 'max_value',
'category', 'category_slug',
'defined_in_file'
]
for attr in text_attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
placeholder = getattr(field, 'placeholder', serializers.empty)
if placeholder is not serializers.empty:
field_info['placeholder'] = placeholder
serializer = getattr(field, 'parent', None)
if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
# Update help text for common fields.
field_help_text = {
'id': _('Database ID for this {}.'),
'name': _('Name of this {}.'),
'description': _('Optional description of this {}.'),
'type': _('Data type for this {}.'),
'url': _('URL for this {}.'),
'related': _('Data structure with URLs of related resources.'),
'summary_fields': _('Data structure with name/description for related resources.'),
'created': _('Timestamp when this {} was created.'),
'modified': _('Timestamp when this {} was last modified.'),
}
if field.field_name in field_help_text:
opts = serializer.Meta.model._meta.concrete_model._meta
verbose_name = smart_text(opts.verbose_name)
field_info['help_text'] = field_help_text[field.field_name].format(verbose_name)
if field.field_name == 'type':
field_info['filterable'] = True
else:
for model_field in serializer.Meta.model._meta.fields:
if field.field_name == model_field.name:
if getattr(model_field, '__accepts_json__', None):
field_info['type'] = 'json'
field_info['filterable'] = True
break
else:
field_info['filterable'] = False
# Indicate if a field has a default value.
# FIXME: Still isn't showing all default values?
try:
default = field.get_default()
if field.field_name == 'TOWER_URL_BASE' and default == 'https://towerhost':
default = '{}://{}'.format(self.request.scheme, self.request.get_host())
field_info['default'] = default
except serializers.SkipField:
pass
if getattr(field, 'child', None):
field_info['child'] = self.get_field_info(field.child)
elif getattr(field, 'fields', None):
field_info['children'] = self.get_serializer_info(field)
if not isinstance(field, (RelatedField, ManyRelatedField)) and hasattr(field, 'choices'):
field_info['choices'] = [(choice_value, choice_name) for choice_value, choice_name in field.choices.items()]
# Indicate if a field is write-only.
if getattr(field, 'write_only', False):
field_info['write_only'] = True
# Special handling of inventory source_region choices that vary based on
# selected inventory source.
if field.field_name == 'source_regions':
for cp in ('azure_rm', 'ec2', 'gce'):
get_regions = getattr(InventorySource, 'get_%s_region_choices' % cp)
field_info['%s_region_choices' % cp] = get_regions()
# Special handling of group_by choices for EC2.
if field.field_name == 'group_by':
for cp in ('ec2',):
get_group_by_choices = getattr(InventorySource, 'get_%s_group_by_choices' % cp)
field_info['%s_group_by_choices' % cp] = get_group_by_choices()
# Special handling of notification configuration where the required properties
# are conditional on the type selected.
if field.field_name == 'notification_configuration':
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.init_parameters
# Special handling of notification messages where the required properties
# are conditional on the type selected.
try:
view_model = field.context['view'].model
except (AttributeError, KeyError):
view_model = None
if view_model == NotificationTemplate and field.field_name == 'messages':
for (notification_type_name, notification_tr_name, notification_type_class) in NotificationTemplate.NOTIFICATION_TYPES:
field_info[notification_type_name] = notification_type_class.default_messages
# Update type of fields returned...
model_field = None
if serializer and hasattr(serializer, 'Meta') and hasattr(serializer.Meta, 'model'):
try:
model_field = serializer.Meta.model._meta.get_field(field.field_name)
except Exception:
pass
if field.field_name == 'type':
field_info['type'] = 'choice'
elif field.field_name in ('url', 'custom_virtualenv', 'token'):
field_info['type'] = 'string'
elif field.field_name in ('related', 'summary_fields'):
field_info['type'] = 'object'
elif isinstance(field, PositiveIntegerField):
field_info['type'] = 'integer'
elif field.field_name in ('created', 'modified'):
field_info['type'] = 'datetime'
elif (
RelatedField in field.__class__.__bases__ or
isinstance(model_field, ForeignKey)
):
field_info['type'] = 'id'
elif (
isinstance(field, JSONField) or
isinstance(model_field, JSONField) or
isinstance(field, DRFJSONField) or
isinstance(getattr(field, 'model_field', None), JSONField) or
field.field_name == 'credential_passwords'
):
field_info['type'] = 'json'
elif (
isinstance(field, ManyRelatedField) and
field.field_name == 'credentials'
# launch-time credentials
):
field_info['type'] = 'list_of_ids'
elif isinstance(model_field, BooleanField):
field_info['type'] = 'boolean'
return field_info
def get_serializer_info(self, serializer, method=None):
filterer = getattr(serializer, 'filter_field_metadata', lambda fields, method: fields)
return filterer(
super(Metadata, self).get_serializer_info(serializer),
method
)
def determine_actions(self, request, view):
# Add field information for GET requests (so field names/labels are
# available even when we can't POST/PUT).
actions = {}
for method in {'GET', 'PUT', 'POST'} & set(view.allowed_methods):
view.request = clone_request(request, method)
obj = None
try:
# Test global permissions
if hasattr(view, 'check_permissions'):
view.check_permissions(view.request)
# Test object permissions
if method == 'PUT' and hasattr(view, 'get_object'):
obj = view.get_object()
except (exceptions.APIException, PermissionDenied, Http404):
continue
else:
# If user has appropriate permissions for the view, include
# appropriate metadata about the fields that should be supplied.
serializer = view.get_serializer(instance=obj)
actions[method] = self.get_serializer_info(serializer, method=method)
finally:
view.request = request
for field, meta in list(actions[method].items()):
if not isinstance(meta, dict):
continue
if field == "pod_spec_override":
meta['default'] = PodManager().pod_definition
# Add type choices if available from the serializer.
if field == 'type' and hasattr(serializer, 'get_type_choices'):
meta['choices'] = serializer.get_type_choices()
# For GET method, remove meta attributes that aren't relevant
# when reading a field and remove write-only fields.
if method == 'GET':
attrs_to_remove = ('required', 'read_only', 'default', 'min_length', 'max_length', 'placeholder')
for attr in attrs_to_remove:
meta.pop(attr, None)
meta.get('child', {}).pop(attr, None)
if meta.pop('write_only', False):
actions['GET'].pop(field)
# For PUT/POST methods, remove read-only fields.
if method in ('PUT', 'POST'):
# This value should always be False for PUT/POST, so don't
# show it (file-based read-only settings can't be updated)
meta.pop('defined_in_file', False)
if meta.pop('read_only', False):
if field == 'id' and hasattr(view, 'attach'):
continue
actions[method].pop(field)
return actions
def determine_metadata(self, request, view):
# store request on self so we can use it to generate field defaults
# (such as TOWER_URL_BASE)
self.request = request
try:
setattr(view, '_request', request)
metadata = super(Metadata, self).determine_metadata(request, view)
finally:
delattr(view, '_request')
# Add type(s) handled by this view/serializer.
if hasattr(view, 'get_serializer'):
serializer = view.get_serializer()
if hasattr(serializer, 'get_types'):
metadata['types'] = serializer.get_types()
# Add search fields if available from the view.
if getattr(view, 'search_fields', None):
metadata['search_fields'] = view.search_fields
# Add related search fields if available from the view.
if getattr(view, 'related_search_fields', None):
metadata['related_search_fields'] = view.related_search_fields
# include role names in metadata
roles = []
model = getattr(view, 'model', None)
if model:
for field in model._meta.get_fields():
if type(field) is ImplicitRoleField:
roles.append(field.name)
if len(roles) > 0:
metadata['object_roles'] = roles
from rest_framework import generics
if isinstance(view, generics.ListAPIView) and hasattr(view, 'paginator'):
metadata['max_page_size'] = view.paginator.max_page_size
return metadata
class RoleMetadata(Metadata):
def determine_metadata(self, request, view):
metadata = super(RoleMetadata, self).determine_metadata(request, view)
if 'actions' in metadata:
metadata['actions'].pop('POST')
metadata['actions']['POST'] = {
"id": {"type": "integer", "label": "ID", "help_text": "Database ID for this role."},
"disassociate": {"type": "integer", "label": "Disassociate", "help_text": "Provide to remove this role."},
}
return metadata
class SublistAttachDetatchMetadata(Metadata):
def determine_actions(self, request, view):
actions = super(SublistAttachDetatchMetadata, self).determine_actions(request, view)
method = 'POST'
if method in actions:
for field in list(actions[method].keys()):
if field == 'id':
continue
actions[method].pop(field)
return actions
| 1.8125 | 2 |
plugins/python/tasks.py | BBVA/deeptracy | 85 | 3826 | import json
from washer.worker.actions import AppendStdout, AppendStderr
from washer.worker.actions import CreateNamedLog, AppendToLog
from washer.worker.actions import SetProperty
from washer.worker.commands import washertask
def pipenv_graph2deps(rawgraph):
graph = json.loads(rawgraph)
def build_entry(data):
if 'required_version' in data:
spec = data['key'] + data['required_version']
else:
spec = data['key']
return {'installer': 'pipenv',
'spec': spec,
'source': 'pypi',
'name': data['package_name'],
'version': data['installed_version']}
def extract_dependencies(entries):
for entry in entries:
if 'package' in entry:
package = entry['package']
dependencies = entry.get('dependencies', [])
yield build_entry(package)
yield from extract_dependencies(dependencies)
else:
yield build_entry(entry)
yield from extract_dependencies(graph)
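# Each yielded entry has the shape below (package name and versions are illustrative):
#   {'installer': 'pipenv', 'spec': 'requests>=2.0', 'source': 'pypi',
#    'name': 'requests', 'version': '2.25.1'}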
@washertask
def pip_install(repopath, path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install .")
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
@washertask
def requirement_file(repopath, requirement="requirements.txt",
path=".", **kwargs):
import invoke
c = invoke.Context()
with c.cd(repopath):
with c.cd(path):
res = c.run("pipenv install -r %s" % requirement)
deps = c.run("pipenv graph --json")
yield AppendStdout(res.stdout)
yield AppendStderr(res.stderr)
yield SetProperty("dependencies", list(pipenv_graph2deps(deps.stdout)))
return True
| 2.40625 | 2 |
senity/utils/getSiteProfile.py | pkokkinos/senity | 1 | 3827 | <filename>senity/utils/getSiteProfile.py
import json
import os
# get site profile
def getSiteProfile(site_file):
with open(site_file) as json_file:
json_data = json.load(json_file)
return json_data
# get all site profiles
def getAllSiteProfiles(site_folder):
allSiteProfiles = {}
allSiteFiles = os.listdir(site_folder)
for sf in allSiteFiles:
sp = getSiteProfile(site_folder + "/" + sf)
allSiteProfiles[sp["siteName"]] = []
for device in sp["devicesAvailable"]:
for i in range(device["deviceCounter"]):
allSiteProfiles[sp["siteName"]].append(device["deviceName"])
return allSiteProfiles
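# Expected shape of a site profile JSON file (values are illustrative):
# {
#     "siteName": "site_1",
#     "devicesAvailable": [
#         {"deviceName": "fridge", "deviceCounter": 2},
#         {"deviceName": "heater", "deviceCounter": 1}
#     ]
# }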
# sites_folder = "sites"
# print(getAllSiteProfiles(sites_folder))
| 2.796875 | 3 |
ppo_new/baseline.py | QingXinHu123/Lane_change_RL | 1 | 3828 | <gh_stars>1-10
import os, sys
from env.LaneChangeEnv import LaneChangeEnv
import random
import numpy as np
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
def episode_generator(pi, env, is_gui, ttc, gap, sumoseed, randomseed):
egoid = 'lane1.' + str(random.randint(1, 6))
ob = env.reset(egoid=egoid, tlane=0, tfc=2, is_gui=is_gui, sumoseed=sumoseed, randomseed=randomseed)
traci.vehicle.setColor(egoid, (255, 69, 0))
cur_ep_ret = 0 # return in current episode
cur_ep_ret_detail = 0
cur_ep_len = 0 # len of current episode
cur_ep_obs = []
cur_ep_acs = []
while True:
ac = pi(ob=ob, env=env, ttc=ttc, gap=gap)
ob, rew, new, info = env.step(ac)
cur_ep_ret += rew
cur_ep_ret_detail += np.array(list(info['reward_dict'].values()))
cur_ep_len += 1
cur_ep_obs.append(ob)
cur_ep_acs.append(ac)
if new:
return {"ep_obs": cur_ep_obs, "ep_acs": cur_ep_acs,
"ep_ret": cur_ep_ret, 'ep_rets_detail': cur_ep_ret_detail, "ep_len": cur_ep_len,
'ep_num_danger': info['num_danger'], 'ep_is_success': info['is_success'], 'ep_num_crash': info['num_crash'],
'ep_is_collision': info["is_collision"]}
def pi_baseline(ob, env, ttc, gap):
# safety gap set to seconds to collision
if env.ego.trgt_leader:
leader_speed = env.ego.trgt_leader.speed
else:
leader_speed = env.ego.speed
if env.ego.trgt_follower:
follower_speed = env.ego.trgt_follower.speed
else:
follower_speed = env.ego.speed
leader_dis = abs(ob[3 * 4 + 0 + 1])*239.8
follower_dis = abs(ob[4 * 4 + 0 + 1])*239.8
TTC = (leader_dis - 5) / max(env.ego.speed, 0.001)
TTC2 = (follower_dis - 5) / max(follower_speed, 0.001)
# print(TTC, TTC)
if TTC > ttc and TTC2 > ttc and leader_dis > gap and follower_dis > gap:
ac_lat = 1 # change lane
else:
ac_lat = 0 # abort
ac = ac_lat * 3 + 1
return ac
def evaluate_baseline(num_eps, ttc, gap, is_gui):
sumoseed = 0
randomseed = 0
pi = pi_baseline
env = LaneChangeEnv(is_train=False)
ret_eval = 0
ret_det_eval = 0 # not a integer, will be broadcasted
danger_num = 0
crash_num = 0
level_1_danger = []
level_2_danger = []
collision_num = 0
ep_len_list = []
success_num = 0
for i in range(num_eps):
ep_eval = episode_generator(pi, env, is_gui=is_gui, ttc=ttc, gap=gap, sumoseed=sumoseed, randomseed=randomseed)
ret_eval += ep_eval['ep_ret']
ret_det_eval += ep_eval['ep_rets_detail']
danger_num += ep_eval['ep_num_danger']
crash_num += ep_eval['ep_num_crash']
level_1_danger.append(1 if ep_eval['ep_num_danger'] > 0 else 0)
level_2_danger.append((1 if ep_eval['ep_num_crash'] > 0 else 0))
collision_num += ep_eval['ep_is_collision']
success_num += int(ep_eval['ep_is_success'])
if ep_eval['ep_is_success']:
ep_len_list.append(ep_eval['ep_len'])
sumoseed += 1
randomseed += 1
ret_eval /= float(num_eps)
ret_det_eval /= float(num_eps)
danger_rate = danger_num / num_eps
crash_rate = crash_num / num_eps
level_1_danger_rate = np.mean(level_1_danger)
level_2_danger_rate = np.mean(level_2_danger)
coll_rate = collision_num / num_eps
success_rate = success_num / float(num_eps)
success_len = np.mean(ep_len_list)
print('reward_detail: ', ret_det_eval)
print('reward: ', ret_eval,
'\ndanger_rate: ', danger_rate,
'\ncrash_rate: ', crash_rate,
'\nlevel-1-danger_rate: ', level_1_danger_rate,
'\nlevel-2-danger_rate: ', level_2_danger_rate,
'\ncollision_rate: ', coll_rate,
'\nsuccess_rate: ', success_rate,
'\nsucess_len: ', success_len)
env.close()
return ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, success_rate, success_len
NUM_EPS = 100
IS_GUI = False
# f = open('../data/baseline_evaluation/testseed2.csv', 'w+')
# safety_gap = 2
constraints_list = [3.0] # [1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 20.0]
ttcs = [0.1, 0.3, 0.5, 1, 2, 3]
# ttcs = [2]
gap = 0
reward_list = []
danger_rate_list = []
crash_rate_list = []
level_1_danger_list = []
level_2_danger_list = []
coll_rate_list = []
succ_rate_list = []
succ_len_list = []
for ttc in ttcs:
ret_eval, danger_rate, crash_rate, level_1_danger_rate, level_2_danger_rate, coll_rate, success_rate, success_len = evaluate_baseline(NUM_EPS, ttc, gap, IS_GUI)
reward_list.append(ret_eval)
danger_rate_list.append(danger_rate)
crash_rate_list.append(crash_rate)
level_1_danger_list.append(level_1_danger_rate)
level_2_danger_list.append(level_2_danger_rate)
coll_rate_list.append(coll_rate)
succ_rate_list.append(success_rate)
succ_len_list.append(success_len)
print('reward: ', reward_list)
print('danger rate: ', danger_rate_list)
print('crash rate: ', crash_rate_list)
print('level-1-danger_rate: ', level_1_danger_list)
print('level-2-danger_rate: ', level_2_danger_list)
print('collison rate: ', coll_rate_list)
print('success rate: ', succ_rate_list)
print('sucess len: ', succ_len_list)
# reward: [-89.12552753359037, -69.84537459892903, -73.81562785829651, -148.23580687485645, -227.71842861064192, -229.9101089174337]
# danger rate: [2.13, 0.88, 0.77, 1.88, 3.82, 3.82]
# crash rate: [0.58, 0.33, 0.5, 1.24, 2.09, 2.09]
# level-1-danger_rate: [0.23, 0.09, 0.05, 0.14, 0.25, 0.25]
# level-2-danger_rate: [0.05, 0.03, 0.05, 0.12, 0.2, 0.2]
# collison rate: [0.0, 0.0, 0.02, 0.09, 0.14, 0.14]
# success rate: [0.99, 0.99, 0.9, 0.6, 0.08, 0.05]
# sucess len: [55.656565656565654, 62.43434343434343, 67.5, 90.1, 66.625, 73.4]
| 2.046875 | 2 |
clean_data.py | toogy/pendigits-hmm | 0 | 3829 | import numpy as np
import pickle
from collections import defaultdict
from parsing import parser
from analysis import training
def main():
parse = parser.Parser();
train_digits = parse.parse_file('data/pendigits-train');
test_digits = parse.parse_file('data/pendigits-test')
centroids = training.get_digit_kmeans_centroids(
train_digits, 256 - 3)
training.set_digit_observations(
train_digits, centroids, 256)
training.set_digit_observations(
test_digits, centroids, 256)
train_sequences = defaultdict(list)
test_sequences = []
n_test_sequences = len(test_digits)
test_expected_labels = np.ndarray(shape=(n_test_sequences,))
for digit in train_digits:
train_sequences[digit.label].append(digit.np_array_observations)
for i, digit in enumerate(test_digits):
test_sequences.append(digit.np_array_observations)
test_expected_labels[i] = digit.label
with open('train_sequences', 'wb') as f:
pickle.dump(train_sequences, f)
with open('test_sequences', 'wb') as f:
pickle.dump(test_sequences, f)
with open('test_expected_labels', 'wb') as f:
pickle.dump(test_expected_labels, f)
if __name__ == '__main__':
main()
| 2.765625 | 3 |
scripts/commit_validation/commit_validation/commit_validation.py | cypherdotXd/o3de | 8 | 3830 | #
# Copyright (c) Contributors to the Open 3D Engine Project.
# For complete copyright and license terms please see the LICENSE at the root of this distribution.
#
# SPDX-License-Identifier: Apache-2.0 OR MIT
#
#
import abc
import importlib
import os
import pkgutil
import re
import time
from typing import Dict, List, Tuple
VERBOSE = False
class Commit(abc.ABC):
"""An interface for accessing details about a commit"""
@abc.abstractmethod
def get_files(self) -> List[str]:
"""Returns a list of local files added/modified by the commit"""
pass
@abc.abstractmethod
def get_removed_files(self) -> List[str]:
"""Returns a list of local files removed by the commit"""
pass
@abc.abstractmethod
def get_file_diff(self, str) -> str:
"""
Given a file name, returns a string in unified diff format
that represents the changes made to that file for this commit.
Most validators will only pay attention to added lines (with + in front)
"""
pass
@abc.abstractmethod
def get_description(self) -> str:
"""Returns the description of the commit"""
pass
@abc.abstractmethod
def get_author(self) -> str:
"""Returns the author of the commit"""
pass
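# Minimal illustrative implementation of the Commit interface, handy for local
# experiments or unit tests (the class name and fields are just an example):
class StaticCommit(Commit):
    """An in-memory Commit built from pre-computed values."""

    def __init__(self, files, removed_files, diffs, description, author):
        self._files = files                    # list of added/modified files
        self._removed_files = removed_files    # list of removed files
        self._diffs = diffs                    # dict: file name -> unified diff
        self._description = description
        self._author = author

    def get_files(self):
        return self._files

    def get_removed_files(self):
        return self._removed_files

    def get_file_diff(self, file_name):
        return self._diffs.get(file_name, '')

    def get_description(self):
        return self._description

    def get_author(self):
        return self._author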
def validate_commit(commit: Commit, out_errors: List[str] = None, ignore_validators: List[str] = None) -> bool:
"""Validates a commit against all validators
:param commit: The commit to validate
:param out_errors: if not None, will populate with the list of errors given by the validators
:param ignore_validators: Optional list of CommitValidator classes to ignore, by class name
:return: True if there are no validation errors, and False otherwise
"""
failed_count = 0
passed_count = 0
start_time = time.time()
# Find all the validators in the validators package (recursively)
validator_classes = []
validators_dir = os.path.join(os.path.dirname(__file__), 'validators')
for _, module_name, is_package in pkgutil.iter_modules([validators_dir]):
if not is_package:
module = importlib.import_module('commit_validation.validators.' + module_name)
validator = module.get_validator()
if ignore_validators and validator.__name__ in ignore_validators:
print(f"Disabled validation for '{validator.__name__}'")
else:
validator_classes.append(validator)
error_summary = {}
# Process validators
for validator_class in validator_classes:
validator = validator_class()
validator_name = validator.__class__.__name__
error_list = []
passed = validator.run(commit, errors = error_list)
if passed:
passed_count += 1
print(f'{validator.__class__.__name__} PASSED')
else:
failed_count += 1
print(f'{validator.__class__.__name__} FAILED')
error_summary[validator_name] = error_list
end_time = time.time()
if failed_count:
print("VALIDATION FAILURE SUMMARY")
for val_name in error_summary.keys():
errors = error_summary[val_name]
if errors:
for error_message in errors:
first_line = True
for line in error_message.splitlines():
if first_line:
first_line = False
print(f'VALIDATOR_FAILED: {val_name} {line}')
else:
print(f' {line}') # extra detail lines do not need machine parsing
stats_strs = []
if failed_count > 0:
stats_strs.append(f'{failed_count} failed')
if passed_count > 0:
stats_strs.append(f'{passed_count} passed')
stats_str = ', '.join(stats_strs) + f' in {end_time - start_time:.2f}s'
print()
print(stats_str)
return failed_count == 0
def IsFileSkipped(file_name) -> bool:
if os.path.splitext(file_name)[1].lower() not in SOURCE_AND_SCRIPT_FILE_EXTENSIONS:
skipped = True
for pattern in SOURCE_AND_SCRIPT_FILE_PATTERNS:
if pattern.match(file_name):
skipped = False
break
return skipped
return False
class CommitValidator(abc.ABC):
"""A commit validator"""
@abc.abstractmethod
def run(self, commit: Commit, errors: List[str]) -> bool:
"""Validates a commit
:param commit: The commit to validate
:param errors: List of errors generated, append them to this list
:return: True if the commit is valid, and False otherwise
"""
pass
SOURCE_FILE_EXTENSIONS: Tuple[str, ...] = (
'.c', '.cc', '.cpp', '.cxx', '.h', '.hpp', '.hxx', '.inl', '.m', '.mm', '.cs', '.java'
)
"""File extensions for compiled source code"""
SCRIPT_FILE_EXTENSIONS: Tuple[str, ...] = (
'.py', '.lua', '.bat', '.cmd', '.sh', '.js'
)
"""File extensions for interpreted code"""
BUILD_FILE_EXTENSIONS: Tuple[str, ...] = (
'.cmake',
)
"""File extensions for build files"""
SOURCE_AND_SCRIPT_FILE_EXTENSIONS: Tuple[str, ...] = SOURCE_FILE_EXTENSIONS + SCRIPT_FILE_EXTENSIONS + BUILD_FILE_EXTENSIONS
"""File extensions for both compiled and interpreted code"""
BUILD_FILE_PATTERNS: Tuple[re.Pattern, ...] = (
re.compile(r'.*CMakeLists\.txt'),
re.compile(r'.*Jenkinsfile')
)
"""File patterns for build files"""
SOURCE_AND_SCRIPT_FILE_PATTERNS: Tuple[re.Pattern, ...] = BUILD_FILE_PATTERNS
EXCLUDED_VALIDATION_PATTERNS = [
'*/.git/*',
'*/3rdParty/*',
'*/__pycache__/*',
'*/External/*',
'build',
'Cache',
'*/Code/Framework/AzCore/azgnmx/azgnmx/*',
'Code/Tools/CryFXC',
'Code/Tools/HLSLCrossCompiler',
'Code/Tools/HLSLCrossCompilerMETAL',
'Docs',
'python/runtime',
'restricted/*/Tools/*RemoteControl',
'Tools/3dsmax',
'*/user/Cache/*',
'*/user/log/*',
]
| 2.640625 | 3 |
matrixprofile/algorithms/snippets.py | KSaiRahul21/matrixprofile | 0 | 3831 | <filename>matrixprofile/algorithms/snippets.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.mpdist import mpdist_vector
def snippets(ts, snippet_size, num_snippets=2, window_size=None):
"""
The snippets algorithm is used to summarize your time series by
identifying N number of representative subsequences. If you want to
identify typical patterns in your time series, then this is the algorithm
to use.
Parameters
----------
ts : array_like
The time series.
snippet_size : int
The size of snippet desired.
num_snippets : int, Default 2
The number of snippets you would like to find.
window_size : int, Default (snippet_size / 2)
The window size.
Returns
-------
list : snippets
A list of snippets as dictionary objects with the following structure.
>>> {
>>> fraction: fraction of the snippet,
>>> index: the index of the snippet,
>>> snippet: the snippet values
>>> }
"""
ts = core.to_np_array(ts).astype('d')
n = len(ts)
if not isinstance(snippet_size, int) or snippet_size < 4:
raise ValueError('snippet_size must be an integer >= 4')
if n < (2 * snippet_size):
raise ValueError('Time series is too short relative to snippet length')
if not window_size:
window_size = int(np.floor(snippet_size / 2))
if window_size >= snippet_size:
raise ValueError('window_size must be smaller than snippet_size')
# pad end of time series with zeros
num_zeros = int(snippet_size * np.ceil(n / snippet_size) - n)
ts = np.append(ts, np.zeros(num_zeros))
# compute all profiles
indices = np.arange(0, len(ts) - snippet_size, snippet_size)
distances = []
for j, i in enumerate(indices):
distance = mpdist_vector(ts, ts[i:(i + snippet_size - 1)], int(window_size))
distances.append(distance)
distances = np.array(distances)
# find N snippets
snippets = []
minis = np.inf
total_min = None
for n in range(num_snippets):
minims = np.inf
for i in range(len(indices)):
s = np.sum(np.minimum(distances[i, :], minis))
if minims > s:
minims = s
index = i
minis = np.minimum(distances[index, :], minis)
actual_index = indices[index]
snippet = ts[actual_index:actual_index + snippet_size]
snippet_distance = distances[index]
snippets.append({
'index': actual_index,
'snippet': snippet,
'distance': snippet_distance
})
if isinstance(total_min, type(None)):
total_min = snippet_distance
else:
total_min = np.minimum(total_min, snippet_distance)
# compute the fraction of each snippet
for snippet in snippets:
mask = (snippet['distance'] <= total_min)
snippet['fraction'] = mask.sum() / (len(ts) - snippet_size)
total_min = total_min - mask
del snippet['distance']
return snippets
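# Illustrative usage sketch; the series is synthetic and the parameter values
# are arbitrary:
if __name__ == "__main__":
    demo_ts = np.sin(np.linspace(0, 40 * np.pi, 4000)) + np.random.rand(4000) * 0.1
    for snip in snippets(demo_ts, snippet_size=200, num_snippets=2):
        # each result has 'index', 'snippet' and 'fraction' keys (see docstring)
        print(snip['index'], round(snip['fraction'], 3), len(snip['snippet']))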
| 2.9375 | 3 |
jina/logging/formatter.py | yk/jina | 1 | 3832 | import json
import re
from copy import copy
from logging import Formatter
from .profile import used_memory
from ..helper import colored
class ColorFormatter(Formatter):
"""Format the log into colored logs based on the log-level. """
MAPPING = {
'DEBUG': dict(color='white', on_color=None), # white
'INFO': dict(color='white', on_color=None), # cyan
'WARNING': dict(color='yellow', on_color='on_grey'), # yellow
'ERROR': dict(color='red', on_color=None), # 31 for red
'CRITICAL': dict(color='white', on_color='on_red'), # white on red bg
'SUCCESS': dict(color='green', on_color=None), # white on red bg
} #: log-level to color mapping
def format(self, record):
cr = copy(record)
seq = self.MAPPING.get(cr.levelname, self.MAPPING['INFO']) # default white
cr.msg = colored(cr.msg, **seq)
return super().format(cr)
class PlainFormatter(Formatter):
"""Remove all control chars from the log and format it as plain text
Also restrict the max-length of msg to 512
"""
def format(self, record):
cr = copy(record)
if isinstance(cr.msg, str):
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))[:512]
return super().format(cr)
class JsonFormatter(Formatter):
"""Format the log message as a JSON object so that it can be later used/parsed in browser with javascript. """
KEYS = {'created', 'filename', 'funcName', 'levelname', 'lineno', 'msg',
'module', 'name', 'pathname', 'process', 'thread', 'processName',
'threadName', 'log_id'} #: keys to extract from the log
def format(self, record):
cr = copy(record)
cr.msg = re.sub(r'\u001b\[.*?[@-~]', '', str(cr.msg))
return json.dumps(
{k: getattr(cr, k) for k in self.KEYS if hasattr(cr, k)},
sort_keys=True)
class ProfileFormatter(Formatter):
"""Format the log message as JSON object and add the current used memory into it"""
def format(self, record):
cr = copy(record)
if isinstance(cr.msg, dict):
cr.msg.update({k: getattr(cr, k) for k in ['created', 'module', 'process', 'thread']})
cr.msg['memory'] = used_memory(unit=1)
return json.dumps(cr.msg, sort_keys=True)
else:
return ''
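# Illustrative usage sketch: any formatter above plugs into the standard logging
# machinery, e.g.
#
#     handler = logging.StreamHandler()
#     handler.setFormatter(ColorFormatter('%(levelname)s: %(message)s'))
#     logging.getLogger('demo').addHandler(handler)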
| 2.40625 | 2 |
atcoder/abc191/b.py | sugitanishi/competitive-programming | 0 | 3833 | import sys
sys.setrecursionlimit(10000000)
input=lambda : sys.stdin.readline().rstrip()
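# read n and x, then the list a; print every element of a that is not equal to x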
n,x=map(int,input().split())
a=list(map(int,input().split()))
aa=list(filter(lambda b:b!=x,a))
print(*aa) | 2.546875 | 3 |
tests/integration/test_streaming_e2e.py | cfogg/python-client | 0 | 3834 | <gh_stars>0
"""Streaming integration tests."""
# pylint:disable=no-self-use,invalid-name,too-many-arguments,too-few-public-methods,line-too-long
# pylint:disable=too-many-statements,too-many-locals,too-many-lines
import threading
import time
import json
from queue import Queue
from splitio.client.factory import get_factory
from tests.helpers.mockserver import SSEMockServer, SplitMockServer
try: # try to import python3 names. fallback to python2
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
class StreamingIntegrationTests(object):
"""Test streaming operation and failover."""
def test_happiness(self):
"""Test initialization & splits/segment updates."""
auth_server_response = {
'pushEnabled': True,
'token': ('<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'on', 'user', True)]
},
1: {
'since': 1,
'till': 1,
'splits': []
}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
assert factory.client().get_treatment('maldo', 'split1') == 'on'
time.sleep(1)
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_split_change_event(2))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_split_with_segment('split2', 2, True, False,
'off', 'user', 'off', 'segment1')]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
segment_changes[('segment1', -1)] = {
'name': 'segment1',
'added': ['maldo'],
'removed': [],
'since': -1,
'till': 1
}
segment_changes[('segment1', 1)] = {'name': 'segment1', 'added': [],
'removed': [], 'since': 1, 'till': 1}
sse_server.publish(make_split_change_event(3))
time.sleep(1)
sse_server.publish(make_segment_change_event('segment1', 1))
time.sleep(1)
assert factory.client().get_treatment('pindon', 'split2') == 'off'
assert factory.client().get_treatment('maldo', 'split2') == 'on'
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpY<KEY>bWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after first notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after second notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Segment change notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/segment1?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until segment1 since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/segment1?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_occupancy_flicker(self):
"""Test that changes in occupancy switch between polling & streaming properly."""
auth_server_response = {
'pushEnabled': True,
'token': ('<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
# After dropping occupancy, the sdk should switch to polling
# and perform a syncAll that gets this change
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_occupancy('control_pri', 0))
sse_server.publish(make_occupancy('control_sec', 0))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert task.running()
        # We make another change in the BE and don't send the event.
# We restore occupancy, and it should be fetched by the
# sync all after streaming is restored.
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_occupancy('control_pri', 1))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Now we make another change and send an event so it's propagated
split_changes[3] = {
'since': 3,
'till': 4,
'splits': [make_simple_split('split1', 4, True, False, 'off', 'user', False)]
}
split_changes[4] = {'since': 4, 'till': 4, 'splits': []}
sse_server.publish(make_split_change_event(4))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
# Kill the split
split_changes[4] = {
'since': 4,
'till': 5,
'splits': [make_simple_split('split1', 5, True, True, 'frula', 'user', False)]
}
split_changes[5] = {'since': 5, 'till': 5, 'splits': []}
sse_server.publish(make_split_kill_event('split1', 'frula', 5))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'frula'
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after first notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after second notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Split kill
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=5'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_start_without_occupancy(self):
"""Test an SDK starting with occupancy on 0 and switching to streamin afterwards."""
auth_server_response = {
'pushEnabled': True,
'token': ('<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 0))
sse_server.publish(make_occupancy('control_sec', 0))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
        # After restoring occupancy, the sdk should switch back to streaming
# and perform a syncAll that gets this change
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_occupancy('control_sec', 1))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert not task.running()
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'<KEY>UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV<KEY>I'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaV<KEY>'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXC<KEY>'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'<KEY>'
'Dk2OTkxfQ.<KEY>'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push down
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push restored
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Second iteration of previous syncAll
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_streaming_status_changes(self):
"""Test changes between streaming enabled, paused and disabled."""
auth_server_response = {
'pushEnabled': True,
'token': ('<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
# Get a hook of the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
        # After the STREAMING_PAUSED control event, the sdk should switch to polling
# and perform a syncAll that gets this change
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_control_event('STREAMING_PAUSED', 1))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert task.running()
        # We make another change in the BE and don't send the event.
        # We then send STREAMING_ENABLED, and the change should be picked up
        # by the sync all performed once streaming is restored.
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_control_event('STREAMING_ENABLED', 2))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Now we make another change and send an event so it's propagated
split_changes[3] = {
'since': 3,
'till': 4,
'splits': [make_simple_split('split1', 4, True, False, 'off', 'user', False)]
}
split_changes[4] = {'since': 4, 'till': 4, 'splits': []}
sse_server.publish(make_split_change_event(4))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert not task.running()
split_changes[4] = {
'since': 4,
'till': 5,
'splits': [make_simple_split('split1', 5, True, False, 'off', 'user', True)]
}
split_changes[5] = {'since': 5, 'till': 5, 'splits': []}
sse_server.publish(make_control_event('STREAMING_DISABLED', 2))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert task.running()
assert 'PushStatusHandler' not in [t.name for t in threading.enumerate()]
# Validate the SSE request
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4cCI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0M'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll on push down
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push is up
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming disabled
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=4'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=5'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_server_closes_connection(self):
"""Test that if the server closes the connection, the whole flow is retried with BO."""
auth_server_response = {
'pushEnabled': True,
'token': ('<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'vJh17WlOlAKhcD0')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'on', 'user', True)]
},
1: {
'since': 1,
'till': 1,
'splits': []
}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 100,
'segmentsRefreshRate': 100, 'metricsRefreshRate': 100,
'impressionsRefreshRate': 100, 'eventsPushRate': 100}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
assert factory.client().get_treatment('maldo', 'split1') == 'on'
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
time.sleep(1)
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_split_change_event(2))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
sse_server.publish(SSEMockServer.GRACEFUL_REQUEST_END)
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'off'
assert task.running()
time.sleep(2) # wait for the backoff to expire so streaming gets re-attached
# re-send initial event AND occupancy
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
time.sleep(2)
assert not task.running()
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_split_change_event(3))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Validate the SSE requests
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'US45QnJtR0EiLCJ0eXAiOiJKV1QifQ.eyJ4LWFibHktY2FwYWJpbGl0eSI6IntcIk1UW'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'US45QnJtR<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'<KEY>WJ<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after first notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll on retryable error handling
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth after connection breaks
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected again
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after new notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def test_ably_errors_handling(self):
"""Test incoming ably errors and validate its handling."""
import logging
logger = logging.getLogger('splitio')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
auth_server_response = {
'pushEnabled': True,
'token': ('<KEY>
'<KEY>'
'<KEY>'
'T1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcIjpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm'
'9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJ'
'zXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzdWJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRh'
'dGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFibHktY2xpZW50SWQiOiJjbGllbnRJZCIsImV4c'
'CI6MTYwNDEwMDU5MSwiaWF0IjoxNjA0MDk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5E'
'<KEY>')
}
split_changes = {
-1: {
'since': -1,
'till': 1,
'splits': [make_simple_split('split1', 1, True, False, 'off', 'user', True)]
},
1: {'since': 1, 'till': 1, 'splits': []}
}
segment_changes = {}
split_backend_requests = Queue()
split_backend = SplitMockServer(split_changes, segment_changes, split_backend_requests,
auth_server_response)
sse_requests = Queue()
sse_server = SSEMockServer(sse_requests)
split_backend.start()
sse_server.start()
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
kwargs = {
'sdk_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'events_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'auth_api_base_url': 'http://localhost:%d/api' % split_backend.port(),
'streaming_api_base_url': 'http://localhost:%d' % sse_server.port(),
'config': {'connectTimeout': 10000, 'featuresRefreshRate': 10}
}
factory = get_factory('some_apikey', **kwargs)
factory.block_until_ready(1)
assert factory.ready
time.sleep(2)
        # Get a handle on the task so we can query its status
task = factory._sync_manager._synchronizer._split_tasks.split_task._task # pylint:disable=protected-access
assert not task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'on'
# Make a change in the BE but don't send the event.
        # We'll send an ignorable error and check that nothing happens
split_changes[1] = {
'since': 1,
'till': 2,
'splits': [make_simple_split('split1', 2, True, False, 'off', 'user', False)]
}
split_changes[2] = {'since': 2, 'till': 2, 'splits': []}
sse_server.publish(make_ably_error_event(60000, 600))
time.sleep(1)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
sse_server.publish(make_ably_error_event(40145, 401))
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
time.sleep(3)
assert task.running()
assert factory.client().get_treatment('maldo', 'split1') == 'off'
# Re-publish initial events so that the retry succeeds
sse_server.publish(make_initial_event())
sse_server.publish(make_occupancy('control_pri', 2))
sse_server.publish(make_occupancy('control_sec', 2))
time.sleep(3)
assert not task.running()
# Assert streaming is working properly
split_changes[2] = {
'since': 2,
'till': 3,
'splits': [make_simple_split('split1', 3, True, False, 'off', 'user', True)]
}
split_changes[3] = {'since': 3, 'till': 3, 'splits': []}
sse_server.publish(make_split_change_event(3))
time.sleep(2)
assert factory.client().get_treatment('maldo', 'split1') == 'on'
assert not task.running()
# Send a non-retryable ably error
sse_server.publish(make_ably_error_event(40200, 402))
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
time.sleep(3)
        # Assert the sync-task is running and the streaming status handler thread has exited
assert task.running()
assert 'PushStatusHandler' not in [t.name for t in threading.enumerate()]
# Validate the SSE requests
sse_request = sse_requests.get()
assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'HktY2xpZW50SWQiOiJjbG<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
        sse_request = sse_requests.get()
        assert sse_request.method == 'GET'
path, qs = sse_request.path.split('?', 1)
assert path == '/event-stream'
qs = parse_qs(qs)
assert qs['accessToken'][0] == (
'<KEY>'
'<KEY>'
'XlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zZWdtZW50c1wiOltcInN1YnNjc'
'mliZVwiXSxcIk1UWXlNVGN4T1RRNE13PT1fTWpBNE16Y3pORFUxTWc9PV9zcGxpdHNcI'
'jpbXCJzdWJzY3JpYmVcIl0sXCJjb250cm9sX3ByaVwiOltcInN1YnNjcmliZVwiLFwiY'
'2hhbm5lbC1tZXRhZGF0YTpwdWJsaXNoZXJzXCJdLFwiY29udHJvbF9zZWNcIjpbXCJzd'
'WJzY3JpYmVcIixcImNoYW5uZWwtbWV0YWRhdGE6cHVibGlzaGVyc1wiXX0iLCJ4LWFib'
'<KEY>'
'Dk2OTkxfQ.aP9BfR534K6J9h8gfDWg_CQgpz5EvJh17WlOlAKhcD0'
)
assert set(qs['channels'][0].split(',')) == set(['MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'[?occupancy=metrics.publishers]control_pri',
'[?occupancy=metrics.publishers]control_sec'])
assert qs['v'][0] == '1.1'
# Initial apikey validation
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/segmentChanges/__SOME_INVALID_SEGMENT__?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Initial splits fetch
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=-1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after streaming connected
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll retriable error
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=1'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Auth again
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/auth'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after push is up
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Fetch after notification
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=2'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Iteration until since == till
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# SyncAll after non recoverable ably error
req = split_backend_requests.get()
assert req.method == 'GET'
assert req.path == '/api/splitChanges?since=3'
assert req.headers['authorization'] == 'Bearer some_apikey'
# Cleanup
destroy_event = threading.Event()
factory.destroy(destroy_event)
destroy_event.wait()
sse_server.publish(sse_server.GRACEFUL_REQUEST_END)
sse_server.stop()
split_backend.stop()
def make_split_change_event(change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'data': json.dumps({
'type': 'SPLIT_UPDATE',
'changeNumber': change_number
})
})
}
def make_split_kill_event(name, default_treatment, change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_splits',
'data': json.dumps({
'type': 'SPLIT_KILL',
'splitName': name,
'defaultTreatment': default_treatment,
'changeNumber': change_number
})
})
}
def make_initial_event():
"""Make a split change event."""
return {'id':'TVUsxaabHs:0:0'}
def make_occupancy(channel, publishers):
"""Make an occupancy event."""
return {
'event': 'message',
'data': json.dumps({
'id':'aP6EuhrcUm:0:0',
'timestamp':1604325712734,
'encoding': 'json',
'channel': "[?occupancy=metrics.publishers]%s" % channel,
'data': json.dumps({'metrics': {'publishers': publishers}}),
'name':'[meta]occupancy'
})
}
def make_segment_change_event(name, change_number):
"""Make a split change event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': change_number-1,
'encoding':'json',
'channel':'MTYyMTcxOTQ4Mw==_MjA4MzczNDU1Mg==_segments',
'data': json.dumps({
'type': 'SEGMENT_UPDATE',
'segmentName': name,
'changeNumber': change_number
})
})
}
def make_control_event(control_type, timestamp):
"""Make a control event."""
return {
'event': 'message',
'data': json.dumps({
'id':'TVUsxaabHs:0:0',
'clientId':'pri:MzM0ODI1MTkxMw==',
'timestamp': timestamp,
'encoding':'json',
'channel':'[?occupancy=metrics.publishers]control_pri',
'data': json.dumps({
'type': 'CONTROL',
'controlType': control_type,
})
})
}
def make_ably_error_event(code, status):
"""Make a control event."""
return {
'event': 'error',
'data': json.dumps({
'message':'Invalid accessToken in request: sarasa',
'code': code,
'statusCode': status,
'href':"https://help.ably.io/error/%d" % code
})
}
def make_simple_split(name, cn, active, killed, default_treatment, tt, on):
"""Make a simple split."""
return {
'trafficTypeName': tt,
'name': name,
'seed': 1699838640,
'status': 'ACTIVE' if active else 'ARCHIVED',
'changeNumber': cn,
'killed': killed,
'defaultTreatment': default_treatment,
'conditions': [
{
'matcherGroup': {
'combiner': 'AND',
'matchers': [
{
'matcherType': 'ALL_KEYS',
'negate': False,
'userDefinedSegmentMatcherData': None,
'whitelistMatcherData': None
}
]
},
'partitions': [
{'treatment': 'on' if on else 'off', 'size': 100},
{'treatment': 'off' if on else 'on', 'size': 0}
]
}
]
}
def make_split_with_segment(name, cn, active, killed, default_treatment,
tt, on, segment):
"""Make a split with a segment."""
return {
'trafficTypeName': tt,
'name': name,
'seed': cn,
'status': 'ACTIVE' if active else 'ARCHIVED',
'changeNumber': cn,
'killed': killed,
'defaultTreatment': default_treatment,
'configurations': {
'on': '{\'size\':15,\'test\':20}'
},
'conditions': [
{
'matcherGroup': {
'combiner': 'AND',
'matchers': [
{
'matcherType': 'IN_SEGMENT',
'negate': False,
'userDefinedSegmentMatcherData': {'segmentName': segment},
'whitelistMatcherData': None
}
]
},
'partitions': [{
'treatment': 'on' if on else 'off',
'size': 100
}]
}
]
}
| 2.1875 | 2 |
venues/abstract_venue.py | weezel/BandEventNotifier | 0 | 3835 | import re
from abc import ABC, abstractmethod
from typing import Any, Dict, Generator
class IncorrectVenueImplementation(Exception):
pass
# class AbstractVenue(metaclass=ABC):
class AbstractVenue(ABC):
def __init__(self):
self.url = ""
self.name = ""
self.city = ""
self.country = ""
self.pricepat_monetary = re.compile("[0-9.,]+.€")
self.pricepat_plain = re.compile("[0-9.,]+")
def get_venue_name(self) -> str:
return self.name
def get_city(self) -> str:
return self.city
def get_country(self) -> str:
return self.country
def event_sqlentity(self) -> Dict[str, str]:
return {"name": self.name,
"city": self.city,
"country": self.country}
def parse_price(self, info_tag: str) -> str:
prices_with_mon = self.pricepat_monetary.findall(info_tag)
prices = []
for price in prices_with_mon:
parsed_price = self.pricepat_plain.findall(price)
if len(parsed_price) == 0:
continue
prices.append("".join(parsed_price))
if len(prices) == 0:
return "0€"
elif len(prices) == 2:
in_advance, from_door = prices[0], prices[1]
return f"{in_advance}€/{from_door}€"
return "{}€".format("".join(prices))
# FIXME Proper class type checking
def __eq__(self, other):
return hasattr(other, "url") \
and other.url == self.url
@abstractmethod
def parse_events(self, data: Any) \
-> Generator[Dict[str, Any], None, None]:
pass
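

# Illustrative only: a minimal concrete venue showing how a subclass is
# expected to fill in the metadata fields and implement parse_events().
# The venue details and the shape of `data` are assumptions made for this
# sketch; they are not taken from any real scraper in this repository.
class ExampleVenue(AbstractVenue):
    def __init__(self):
        super().__init__()
        self.url = "https://example.invalid/events"
        self.name = "Example Hall"
        self.city = "Helsinki"
        self.country = "Finland"

    def parse_events(self, data: Any) \
            -> Generator[Dict[str, Any], None, None]:
        # Assume `data` is an iterable of dicts with "date", "name" and "info".
        for event in data:
            yield {
                "venue": self.get_venue_name(),
                "date": event["date"],
                "name": event["name"],
                "price": self.parse_price(event.get("info", "")),
            }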
| 3.15625 | 3 |
mtl/util/pipeline.py | vandurme/TFMTL | 10 | 3836 | # Copyright 2018 Johns Hopkins University. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import tensorflow as tf
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import parsing_ops
class Pipeline(object):
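    """Input pipeline over a TFRecord file.

    A thin wrapper around ``tf.data``: it reads serialized ``tf.train.Example``
    records from ``tfrecord_file``, optionally shuffles and repeats them,
    batches them, parses each batch with ``feature_map``, and exposes the
    result through the ``iterator``, ``init_op`` and ``batch`` properties.
    """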
def __init__(self, tfrecord_file, feature_map, batch_size=32,
num_threads=4, prefetch_buffer_size=1,
static_max_length=None, shuffle_buffer_size=10000,
shuffle=True, num_epochs=None, one_shot=False):
self._feature_map = feature_map
self._batch_size = batch_size
self._static_max_length = static_max_length
# Initialize the dataset
dataset = tf.data.TFRecordDataset(tfrecord_file)
# Maybe randomize
if shuffle:
dataset = dataset.shuffle(shuffle_buffer_size)
# Maybe repeat
if num_epochs is None:
dataset = dataset.repeat() # repeat indefinitely
elif num_epochs > 1:
dataset = dataset.repeat(count=num_epochs)
dataset = dataset.batch(batch_size)
dataset = dataset.map(self.parse_example,
num_parallel_calls=num_threads)
# Pre-fetch a batch for faster processing
dataset = dataset.prefetch(prefetch_buffer_size)
# Get the iterator
if one_shot:
self._iterator = dataset.make_one_shot_iterator()
else:
self._iterator = dataset.make_initializable_iterator()
self._init_op = self._iterator.initializer
# Get outputs
self._outputs = self._iterator.get_next()
# Map to features
index = 0
result = {}
for key in sorted(self._feature_map.keys()):
result[key] = self._outputs[index]
index += 1
self._result = result
def pad(self, t):
s = tf.shape(t)
paddings = [[0, 0], [0, self._static_max_length - s[1]]]
x = tf.pad(t, paddings, 'CONSTANT', constant_values=0)
x = tf.reshape(x, [s[0], self._static_max_length])
        assert x.get_shape().as_list()[1] == self._static_max_length
return x
def parse_example(self, serialized):
parsed = parsing_ops.parse_example(serialized, self._feature_map)
result = []
for key in sorted(self._feature_map.keys()):
val = parsed[key]
if isinstance(val, sparse_tensor_lib.SparseTensor):
dense_tensor = tf.sparse_tensor_to_dense(val)
if self._static_max_length is not None:
dense_tensor = self.pad(dense_tensor)
result.append(dense_tensor)
else:
result.append(val)
return tuple(result)
@property
def iterator(self):
return self._iterator
@property
def init_op(self):
return self._init_op
@property
def batch(self):
return self._result
# namedtuple for bucket_info object (used in Pipeline)
# func: a mapping from examples to tf.int64 keys
# pads: a set of tf shapes that correspond to padded examples
bucket_info = namedtuple("bucket_info", "func pads")
def int64_feature(value):
""" Takes a single int (e.g. 3) and converts it to a tf Feature """
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(sequence):
""" Sequence of ints (e.g [1,2,3]) to TF feature """
return tf.train.Feature(int64_list=tf.train.Int64List(value=sequence))
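

# Minimal usage sketch (the feature names, shapes and file path below are
# assumptions for illustration; they are not defined in this module). It uses
# the TF1-style session API that the rest of this file targets:
#
#   feature_map = {
#       "tokens": tf.VarLenFeature(tf.int64),
#       "label": tf.FixedLenFeature([], tf.int64),
#   }
#   pipe = Pipeline("train.tfrecord", feature_map, batch_size=16, one_shot=True)
#   with tf.Session() as sess:
#       batch = sess.run(pipe.batch)  # dict keyed by feature name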
| 2.1875 | 2 |
src/py65/devices/mpu65c02.py | dabeaz/py65 | 5 | 3837 | from py65.devices import mpu6502
from py65.utils.devices import make_instruction_decorator
class MPU(mpu6502.MPU):
def __init__(self, *args, **kwargs):
mpu6502.MPU.__init__(self, *args, **kwargs)
self.name = '65C02'
self.waiting = False
def step(self):
if self.waiting:
self.processorCycles += 1
else:
mpu6502.MPU.step(self)
return self
# Make copies of the lists
instruct = mpu6502.MPU.instruct[:]
cycletime = mpu6502.MPU.cycletime[:]
extracycles = mpu6502.MPU.extracycles[:]
disassemble = mpu6502.MPU.disassemble[:]
instruction = make_instruction_decorator(instruct, disassemble,
cycletime, extracycles)
# addressing modes
def ZeroPageIndirectAddr(self):
return self.WordAt( 255 & (self.ByteAt(self.pc)))
def AccumulatorAddr(self):
return self.a
# operations
def opRMB(self, x, mask):
address = x()
self.memory[address] &= mask
def opSMB(self, x, mask):
address = x()
self.memory[address] |= mask
def opSTZ(self, x):
self.memory[x()] = 0x00
def opTSB(self, x):
address = x()
m = self.memory[address]
self.p &= ~self.ZERO
z = m & self.a
if z != 0:
self.p |= self.ZERO
self.memory[address] = m | self.a
def opTRB(self, x):
address = x()
m = self.memory[address]
self.p &= ~self.ZERO
z = m & self.a
if z != 0:
self.p |= self.ZERO
self.memory[address] = m & ~self.a
# instructions
@instruction(name="RMB0", mode="zpg", cycles=5)
def inst_0x07(self):
self.opRMB(self.ZeroPageAddr, 0xFE)
self.pc += 1
@instruction(name="ORA", mode="zpi", cycles=5)
def inst_0x12(self):
self.opORA(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="RMB1", mode="zpg", cycles=5)
def inst_0x17(self):
self.opRMB(self.ZeroPageAddr, 0xFD)
self.pc += 1
@instruction(name="RMB2", mode="zpg", cycles=5)
def inst_0x27(self):
self.opRMB(self.ZeroPageAddr, 0xFB)
self.pc += 1
@instruction(name="AND", mode="zpi", cycles=5)
def inst_0x32(self):
self.opAND(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="BIT", mode="zpx", cycles=4)
def inst_0x34(self):
self.opBIT(self.ZeroPageXAddr)
self.pc += 1
@instruction(name="RMB3", mode="zpg", cycles=5)
def inst_0x37(self):
self.opRMB(self.ZeroPageAddr, 0xF7)
self.pc += 1
@instruction(name="BIT", mode="abx", cycles=4)
def inst_0x3c(self):
self.opBIT(self.AbsoluteXAddr)
self.pc += 2
@instruction(name="RMB4", mode="zpg", cycles=5)
def inst_0x47(self):
self.opRMB(self.ZeroPageAddr, 0xEF)
self.pc += 1
@instruction(name="EOR", mode="zpi", cycles=5)
def inst_0x52(self):
self.opEOR(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="RMB5", mode="zpg", cycles=5)
def inst_0x57(self):
self.opRMB(self.ZeroPageAddr, 0xDF)
self.pc += 1
@instruction(name="PHY", mode="imp", cycles=3)
def inst_0x5a(self):
self.stPush(self.y)
@instruction(name="STZ", mode="imp", cycles=3)
def inst_0x64(self):
self.opSTZ(self.ZeroPageAddr)
self.pc += 1
@instruction(name="RMB6", mode="zpg", cycles=5)
def inst_0x67(self):
self.opRMB(self.ZeroPageAddr, 0xBF)
self.pc += 1
@instruction(name="ADC", mode="zpi", cycles=5)
def inst_0x72(self):
self.opADC(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="STZ", mode="zpx", cycles=4)
def inst_0x74(self):
self.opSTZ(self.ZeroPageXAddr)
self.pc += 1
@instruction(name="PHY", mode="imp", cycles=4)
def inst_0x7a(self):
self.y = self.stPop()
self.FlagsNZ(self.y)
@instruction(name="RMB7", mode="zpg", cycles=5)
def inst_0x77(self):
self.opRMB(self.ZeroPageAddr, 0x7F)
self.pc += 1
@instruction(name="SMB0", mode="zpg", cycles=5)
def inst_0x87(self):
self.opSMB(self.ZeroPageAddr, 0x01)
self.pc += 1
@instruction(name="STA", mode="zpi", cycles=5)
def inst_0x92(self):
self.opSTA(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="SMB1", mode="zpg", cycles=5)
def inst_0x97(self):
self.opSMB(self.ZeroPageAddr, 0x02)
self.pc += 1
@instruction(name="STZ", mode="abs", cycles=4)
def inst_0x9c(self):
self.opSTZ(self.AbsoluteAddr)
self.pc += 2
@instruction(name="STZ", mode="abx", cycles=5)
def inst_0x9e(self):
self.opSTZ(self.AbsoluteXAddr)
self.pc += 2
@instruction(name="SMB2", mode="zpg", cycles=5)
def inst_0xa7(self):
self.opSMB(self.ZeroPageAddr, 0x04)
self.pc += 1
@instruction(name="LDA", mode="zpi", cycles=5)
def inst_0xb2(self):
self.opLDA(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="SMB3", mode="zpg", cycles=5)
def inst_0xb7(self):
self.opSMB(self.ZeroPageAddr, 0x08)
self.pc += 1
@instruction(name="SMB4", mode="zpg", cycles=5)
def inst_0xc7(self):
self.opSMB(self.ZeroPageAddr, 0x10)
self.pc += 1
@instruction(name="SMB5", mode="zpg", cycles=5)
def inst_0xd7(self):
self.opSMB(self.ZeroPageAddr, 0x20)
self.pc += 1
@instruction(name="PHX", mode="imp", cycles=3)
def inst_0xda(self):
self.stPush(self.x)
@instruction(name="SMB6", mode="zpg", cycles=5)
def inst_0xe7(self):
self.opSMB(self.ZeroPageAddr, 0x40)
self.pc += 1
@instruction(name="SMB7", mode="zpg", cycles=5)
def inst_0xf7(self):
self.opSMB(self.ZeroPageAddr, 0x80)
self.pc += 1
@instruction(name="PLX", mode="imp", cycles=4)
def inst_0xfa(self):
self.x = self.stPop()
self.FlagsNZ(self.x)
@instruction(name="TSB", mode="zpg", cycles=5)
def inst_0x04(self):
self.opTSB(self.ZeroPageAddr)
self.pc += 1
@instruction(name="TSB", mode="abs", cycles=6)
def inst_0x0c(self):
self.opTSB(self.AbsoluteAddr)
self.pc += 2
@instruction(name="TRB", mode="zpg", cycles=5)
def inst_0x14(self):
self.opTRB(self.ZeroPageAddr)
self.pc += 1
@instruction(name="INC", mode="acc", cycles=2)
def inst_0x1a(self):
self.opINCR(None)
@instruction(name="TRB", mode="abs", cycles=6)
def inst_0x1c(self):
self.opTRB(self.AbsoluteAddr)
self.pc += 2
@instruction(name="DEC", mode="acc", cycles=2)
def inst_0x3a(self):
self.opDECR(None)
@instruction(name="BRA", mode="rel", cycles=1, extracycles=1)
def inst_0x80(self):
self.BranchRelAddr()
@instruction(name="WAI", mode='imp', cycles=3)
def inst_0xCB(self):
self.waiting = True
@instruction(name="CMP", mode='zpi', cycles=6) # Don't know cycles
def inst_0xD2(self):
self.opCPY(self.ZeroPageIndirectAddr)
self.pc += 1
@instruction(name="SBC", mode="zpi", cycles=5)
def inst_0xf2(self):
self.opSBC(self.ZeroPageIndirectAddr)
self.pc += 1
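

# Rough usage sketch (the addresses and opcodes below were chosen purely for
# illustration; they are not part of py65 itself):
#
#   mpu = MPU()
#   mpu.memory[0x0000] = 0xDA   # PHX: push X onto the stack
#   mpu.memory[0x0001] = 0xFA   # PLX: pull it back into X
#   mpu.step()
#   mpu.step()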
| 2.609375 | 3 |
tests/test__io.py | soerendip/ms-mint | 1 | 3838 | import pandas as pd
import shutil
import os
import io
from ms_mint.Mint import Mint
from pathlib import Path as P
from ms_mint.io import (
ms_file_to_df,
mzml_to_pandas_df_pyteomics,
convert_ms_file_to_feather,
convert_ms_file_to_parquet,
MZMLB_AVAILABLE,
)
from paths import (
TEST_MZML,
TEST_MZXML,
TEST_PARQUET,
TEST_MZMLB_POS,
TEST_MZML_POS,
TEST_MZML_NEG,
)
def test__ms_file_to_df__mzML():
result = ms_file_to_df(TEST_MZML)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__ms_file_to_df__mzML_timeunit_minutes():
result = ms_file_to_df(TEST_MZML, time_unit="minutes")
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__ms_file_to_df__mzXML():
result = ms_file_to_df(TEST_MZXML)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__mzml_to_pandas_df_pyteomics_pos():
result = mzml_to_pandas_df_pyteomics(TEST_MZML_POS)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
assert all(result.polarity == "+"), f'Polarity should be "+"\n{result}'
def test__mzml_to_pandas_df_pyteomics_neg():
result = mzml_to_pandas_df_pyteomics(TEST_MZML_NEG)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
assert all(result.polarity == "-"), f'Polarity should be "-"\n{result}'
def test__read_parquet():
result = ms_file_to_df(TEST_PARQUET)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__write_read_hdf(tmpdir):
df = ms_file_to_df(TEST_PARQUET)
fn = P(tmpdir) / "file.hdf"
df.to_hdf(fn, key="data")
result = ms_file_to_df(fn)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
def test__read_mzMLb(tmpdir):
if not MZMLB_AVAILABLE:
return None
result = ms_file_to_df(TEST_MZMLB_POS)
expected_cols = [
"scan_id",
"ms_level",
"polarity",
"scan_time_min",
"mz",
"intensity",
]
assert isinstance(result, pd.DataFrame), f"{type(result)} is not a dataframe"
assert expected_cols == result.columns.to_list(), result.columns
# assert all(result.polarity == '+'), f'Polarity should be "+"\n{result}'
def test__convert_ms_file_to_feather(tmpdir):
print(tmpdir)
shutil.copy(TEST_MZML, tmpdir)
fn = P(tmpdir) / P(TEST_MZML).name
fn_out = fn.with_suffix(".feather")
print(fn, fn_out)
convert_ms_file_to_feather(fn)
assert fn_out.is_file(), f"File not generated {fn_out}"
df = ms_file_to_df(fn)
df_fea = ms_file_to_df(fn_out)
assert df_fea.equals(df), "DataFrames not equal"
def test__convert_ms_file_to_parquet(tmpdir):
print(tmpdir)
shutil.copy(TEST_MZML, tmpdir)
fn = P(tmpdir) / P(TEST_MZML).name
fn_out = fn.with_suffix(".parquet")
print(fn, fn_out)
convert_ms_file_to_parquet(fn)
assert fn_out.is_file(), f"File not generated {fn_out}"
df = ms_file_to_df(fn)
df_fea = ms_file_to_df(fn_out)
assert df_fea.equals(df), "DataFrames not equal"
def test__export_to_excel(tmp_path):
filename = os.path.join(tmp_path, "output.xlsx")
mint = Mint(verbose=True)
mint.ms_files = "tests/data/test.mzXML"
mint.run()
mint.export(filename)
assert os.path.isfile(filename)
def test__export_to_excel_without_fn():
mint = Mint(verbose=True)
mint.ms_files = TEST_MZXML
mint.targets = pd.DataFrame(
{
"peak_label": ["A"],
"mz_mean": [200],
"mz_width": [10],
"intensity_threshold": [0],
"rt_min": [0],
"rt_max": [10],
"targets_filename": ["unknown"],
}
)
mint.run()
buffer = mint.export()
assert isinstance(buffer, io.BytesIO)
df = pd.read_excel(buffer, sheet_name="Results")
assert len(df) == 1, len(df)
assert df.loc[0, "peak_label"] == "A", df.loc[0, "peak_label"]
assert df.loc[0, "ms_file"] == P(TEST_MZXML).name, df.loc[0, "ms_file"]
| 2.390625 | 2 |
core/views.py | moiyad/image | 0 | 3839 | <filename>core/views.py
from django.core.files.storage import FileSystemStorage
from django.shortcuts import render, redirect
from core.forms import DocumentForm
from core.models import Document
from media import image_cv2
def home(request):
documents = Document.objects.all()
number = len(image_cv2.myList)
return render(request, 'core/home.html', {'documents': documents, 'number': number})
def simple_upload(request):
if request.method == 'POST' and request.FILES['myfile']:
myfile = request.FILES['myfile']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
return render(request, 'core/simple_upload.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'core/simple_upload.html')
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('home')
else:
form = DocumentForm()
return render(request, 'core/model_form_upload.html', {
'form': form
})
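

# Hypothetical URL wiring for the views above (route names and module layout
# are assumptions, not taken from this project's urls.py):
#
#   from django.urls import path
#   from core import views
#
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('uploads/simple/', views.simple_upload, name='simple_upload'),
#       path('uploads/form/', views.model_form_upload, name='model_form_upload'),
#   ]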
| 2.21875 | 2 |
python/verifair/benchmarks/fairsquare/M_BN_F_SVM_A_Q.py | obastani/verifair | 5 | 3840 | <reponame>obastani/verifair
from .helper import *
def sample(flag):
sex = step([(0,1,0.3307), (1,2,0.6693)])
if sex < 1:
capital_gain = gaussian(568.4105, 24248365.5428)
if capital_gain < 7298.0000:
age = gaussian(38.4208, 184.9151)
capital_loss = gaussian(86.5949, 157731.9553)
else:
age = gaussian(38.8125, 193.4918)
capital_loss = gaussian(117.8083, 252612.0300)
else:
capital_gain = gaussian(1329.3700, 69327473.1006)
if capital_gain < 5178.0000:
age = gaussian(38.6361, 187.2435)
capital_loss = gaussian(87.0152, 161032.4157)
else:
age = gaussian(38.2668, 187.2747)
capital_loss = gaussian(101.7672, 189798.1926)
sensitiveAttribute(sex < 1, flag)
qualified(age > 18)
N_age = (age - 17.0) / 62.0
N_capital_gain = (capital_gain - 0.0) / 22040.0
N_capital_loss = (capital_loss - 0.0) / 1258.0
t = 0.0006 * N_age + -5.7363 * N_capital_gain + -0.0002 * N_capital_loss + 1.0003
if sex > 1:
t = t + -0.0003
if sex < 1:
t = t - 0.5
return int(t < 0)
fairnessTarget(t < 0)
| 2.28125 | 2 |
ml4chem/atomistic/models/neuralnetwork.py | muammar/mlchem | 77 | 3841 | <filename>ml4chem/atomistic/models/neuralnetwork.py
import dask
import datetime
import logging
import time
import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
from ml4chem.metrics import compute_rmse
from ml4chem.atomistic.models.base import DeepLearningModel, DeepLearningTrainer
from ml4chem.atomistic.models.loss import AtomicMSELoss
from ml4chem.optim.handler import get_optimizer, get_lr_scheduler, get_lr
from ml4chem.utils import convert_elapsed_time, get_chunks, get_number_of_parameters
from pprint import pformat
# Setting precision and starting logger object
torch.set_printoptions(precision=10)
logger = logging.getLogger()
class NeuralNetwork(DeepLearningModel, torch.nn.Module):
"""Atom-centered Neural Network Regression with Pytorch
This model is based on Ref. 1 by Behler and Parrinello.
Parameters
----------
hiddenlayers : tuple
Structure of hidden layers in the neural network.
activation : str
        Activation function. Supported values are "tanh", "relu", or "celu".
References
----------
1. <NAME>. & <NAME>. Generalized Neural-Network Representation
of High-Dimensional Potential-Energy Surfaces. Phys. Rev. Lett. 98,
146401 (2007).
2. <NAME>. & <NAME> : A modular approach to machine
learning in atomistic simulations. Comput. Phys. Commun. 207, 310–324
(2016).
"""
NAME = "PytorchPotentials"
@classmethod
def name(cls):
"""Returns name of class"""
return cls.NAME
def __init__(self, hiddenlayers=(3, 3), activation="relu", **kwargs):
super(DeepLearningModel, self).__init__()
self.hiddenlayers = hiddenlayers
self.activation = activation
def prepare_model(self, input_dimension, data=None, purpose="training"):
"""Prepare the model
Parameters
----------
input_dimension : int
Input's dimension.
data : object
Data object created from the handler.
purpose : str
Purpose of this model: 'training', 'inference'.
"""
self.input_dimension = input_dimension
activation = {
"tanh": torch.nn.Tanh,
"relu": torch.nn.ReLU,
"celu": torch.nn.CELU,
}
hl = len(self.hiddenlayers)
if purpose == "training":
logger.info(" ")
logger.info("Model")
logger.info("=====")
now = datetime.datetime.now()
logger.info(
"Module accessed on {}.".format(now.strftime("%Y-%m-%d %H:%M:%S"))
)
logger.info("Model name: {}.".format(self.name()))
logger.info("Number of hidden-layers: {}".format(hl))
logger.info(
"Structure of Neural Net: {}".format(
"(input, " + str(self.hiddenlayers)[1:-1] + ", output)"
)
)
layers = range(len(self.hiddenlayers) + 1)
try:
unique_element_symbols = data.unique_element_symbols[purpose]
except TypeError:
unique_element_symbols = data.get_unique_element_symbols(purpose=purpose)
unique_element_symbols = unique_element_symbols[purpose]
symbol_model_pair = []
for symbol in unique_element_symbols:
linears = []
intercept_name = "intercept_" + symbol
slope_name = "slope_" + symbol
if purpose == "training":
intercept = (data.max_energy + data.min_energy) / 2.0
intercept = torch.nn.Parameter(
torch.tensor(intercept, requires_grad=True)
)
slope = (data.max_energy - data.min_energy) / 2.0
slope = torch.nn.Parameter(torch.tensor(slope, requires_grad=True))
self.register_parameter(intercept_name, intercept)
self.register_parameter(slope_name, slope)
elif purpose == "inference":
intercept = torch.nn.Parameter(torch.tensor(0.0))
slope = torch.nn.Parameter(torch.tensor(0.0))
self.register_parameter(intercept_name, intercept)
self.register_parameter(slope_name, slope)
for index in layers:
# This is the input layer
if index == 0:
out_dimension = self.hiddenlayers[0]
_linear = torch.nn.Linear(input_dimension, out_dimension)
linears.append(_linear)
linears.append(activation[self.activation]())
# This is the output layer
elif index == len(self.hiddenlayers):
inp_dimension = self.hiddenlayers[index - 1]
out_dimension = 1
_linear = torch.nn.Linear(inp_dimension, out_dimension)
linears.append(_linear)
# These are hidden-layers
else:
inp_dimension = self.hiddenlayers[index - 1]
out_dimension = self.hiddenlayers[index]
_linear = torch.nn.Linear(inp_dimension, out_dimension)
linears.append(_linear)
linears.append(activation[self.activation]())
# Stacking up the layers.
linears = torch.nn.Sequential(*linears)
symbol_model_pair.append([symbol, linears])
self.linears = torch.nn.ModuleDict(symbol_model_pair)
if purpose == "training":
total_params, train_params = get_number_of_parameters(self)
logger.info("Total number of parameters: {}.".format(total_params))
logger.info("Number of training parameters: {}.".format(train_params))
logger.info(" ")
logger.info(self.linears)
            # Iterate over all modules and just initialize those that are
# a linear layer.
logger.warning(
"Initialization of weights with Xavier Uniform by " "default."
)
for m in self.modules():
if isinstance(m, torch.nn.Linear):
# nn.init.normal_(m.weight) # , mean=0, std=0.01)
torch.nn.init.xavier_uniform_(m.weight)
def forward(self, X):
"""Forward propagation
This is forward propagation and it returns the atomic energy.
Parameters
----------
X : list
List of inputs in the feature space.
Returns
-------
outputs : tensor
A list of tensors with energies per image.
"""
outputs = []
for hash in X:
image = X[hash]
atomic_energies = []
for symbol, x in image:
# FIXME this conditional can be removed after de/serialization
# is fixed.
if isinstance(symbol, bytes):
symbol = symbol.decode("utf-8")
x = self.linears[symbol](x)
intercept_name = "intercept_" + symbol
slope_name = "slope_" + symbol
slope = getattr(self, slope_name)
intercept = getattr(self, intercept_name)
x = (slope * x) + intercept
atomic_energies.append(x)
atomic_energies = torch.cat(atomic_energies)
image_energy = torch.sum(atomic_energies)
outputs.append(image_energy)
outputs = torch.stack(outputs)
return outputs
def get_activations(self, images, model=None, numpy=True):
"""Get activations of each hidden-layer
This function allows to extract activations of each hidden-layer of
the neural network.
Parameters
----------
image : dict
Image with structure hash, features.
model : object
            An ML4Chem model object.
numpy : bool
Whether we want numpy arrays or tensors.
Returns
-------
activations : DataFrame
A DataFrame with activations for each layer.
"""
activations = []
columns = ["hash", "atom.index", "atom.symbol"]
if model is None:
model = self
model.eval()
for hash, data in images.items():
for index, (symbol, features) in enumerate(data):
counter = 0
layer_counter = 0
for l, layer in enumerate(model.linears[symbol].modules()):
if isinstance(layer, torch.nn.Linear) and counter == 0:
x = layer(features)
if numpy:
data_ = [hash, index, symbol, x.detach_().numpy()]
else:
data_ = [hash, index, symbol, x.detach_()]
layer_column_name = f"layer{layer_counter}"
if layer_column_name not in columns:
columns.append(layer_column_name)
counter += 1
layer_counter += 1
elif isinstance(layer, torch.nn.Linear) and counter > 0:
x = layer(x)
if numpy:
data_.append(x.detach_().numpy())
else:
data_.append(x.detach_())
layer_column_name = f"layer{layer_counter}"
if layer_column_name not in columns:
columns.append(layer_column_name)
counter += 1
layer_counter += 1
activations.append(data_)
del data_
# Create DataFrame from lists
df = pd.DataFrame(activations, columns=columns)
return df
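
# Minimal usage sketch (the data handler and the feature space come from
# ml4chem's Data and feature classes; the names below are placeholders for
# illustration only):
#
#   model = NeuralNetwork(hiddenlayers=(10, 10), activation="tanh")
#   model.prepare_model(input_dimension=8, data=data_handler)
#   energies = model(feature_space)  # feature_space: {hash: [(symbol, tensor), ...]}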
class train(DeepLearningTrainer):
"""Train the model
Parameters
----------
inputs : dict
Dictionary with hashed feature space.
targets : list
        The expected values that the model has to learn (i.e. y).
model : object
The NeuralNetwork class.
data : object
Data object created from the handler.
optimizer : tuple
The optimizer is a tuple with the structure:
>>> ('adam', {'lr': float, 'weight_decay'=float})
epochs : int
Number of full training cycles.
regularization : float
This is the L2 regularization. It is not the same as weight decay.
convergence : dict
Instead of using epochs, users can set a convergence criterion.
        The supported key for this model is "energy", which is compared
        against the per-image energy RMSE.
lossfxn : obj
A loss function object.
device : str
Calculation can be run in the cpu or cuda (gpu).
batch_size : int
Number of data points per batch to use for training. Default is None.
lr_scheduler : tuple
Tuple with structure: scheduler's name and a dictionary with keyword
arguments.
>>> lr_scheduler = ('ReduceLROnPlateau',
{'mode': 'min', 'patience': 10})
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
checkpoint : dict
Set checkpoints. Dictionary with following structure:
>>> checkpoint = {"label": label, "checkpoint": 100, "path": ""}
`label` refers to the name used to save the checkpoint, `checkpoint`
is a integer or -1 for saving all epochs, and the path is where the
checkpoint is stored. Default is None and no checkpoint is saved.
test : dict
A dictionary used to compute the error over a validation/test set
during training procedures.
>>> test = {"features": test_space, "targets": test_targets, "data": data_test}
The keys,values of the dictionary are:
- "data": a `Data` object.
- "targets": test set targets.
- "features": a feature space obtained using `features.calculate()`.
"""
def __init__(
self,
inputs,
targets,
model=None,
data=None,
optimizer=(None, None),
regularization=None,
epochs=100,
convergence=None,
lossfxn=None,
device="cpu",
batch_size=None,
lr_scheduler=None,
uncertainty=None,
checkpoint=None,
test=None,
):
self.initial_time = time.time()
if lossfxn is None:
lossfxn = AtomicMSELoss
logger.info("")
logger.info("Training")
logger.info("========")
logger.info(f"Convergence criteria: {convergence}")
logger.info(f"Loss function: {lossfxn.__name__}")
if uncertainty is not None:
logger.info("Options:")
logger.info(f" - Uncertainty penalization: {pformat(uncertainty)}")
logger.info("")
atoms_per_image = data.atoms_per_image
if batch_size is None:
batch_size = len(inputs.values())
if isinstance(batch_size, int):
# Data batches
chunks = list(get_chunks(inputs, batch_size, svm=False))
targets = list(get_chunks(targets, batch_size, svm=False))
atoms_per_image = list(get_chunks(atoms_per_image, batch_size, svm=False))
            if uncertainty is not None:
uncertainty = list(get_chunks(uncertainty, batch_size, svm=False))
uncertainty = [
torch.tensor(u, requires_grad=False, dtype=torch.float)
for u in uncertainty
]
logger.info("")
logging.info("Batch Information")
logging.info("-----------------")
logging.info("Number of batches: {}.".format(len(chunks)))
logging.info("Batch size: {} elements per batch.".format(batch_size))
logger.info(" ")
atoms_per_image = [
torch.tensor(n_atoms, requires_grad=False, dtype=torch.float)
for n_atoms in atoms_per_image
]
targets = [torch.tensor(t, requires_grad=False) for t in targets]
if device == "cuda":
logger.info("Moving data to CUDA...")
atoms_per_image = atoms_per_image.cuda()
targets = targets.cuda()
_inputs = OrderedDict()
for hash, f in inputs.items():
_inputs[hash] = []
for features in f:
symbol, vector = features
_inputs[hash].append((symbol, vector.cuda()))
inputs = _inputs
move_time = time.time() - self.initial_time
h, m, s = convert_elapsed_time(move_time)
            logger.info(
                "Data moved to GPU in {} hours {} minutes {:.2f} seconds.".format(
                    h, m, s
                )
            )
logger.info(" ")
# Define optimizer
self.optimizer_name, self.optimizer = get_optimizer(
optimizer, model.parameters()
)
if lr_scheduler is not None:
self.scheduler = get_lr_scheduler(self.optimizer, lr_scheduler)
self.atoms_per_image = atoms_per_image
self.convergence = convergence
self.device = device
self.epochs = epochs
self.model = model
self.lr_scheduler = lr_scheduler
self.lossfxn = lossfxn
self.checkpoint = checkpoint
self.test = test
# Data scattering
client = dask.distributed.get_client()
self.chunks = [client.scatter(chunk) for chunk in chunks]
self.targets = [client.scatter(target) for target in targets]
        if uncertainty is not None:
self.uncertainty = [client.scatter(u) for u in uncertainty]
else:
self.uncertainty = uncertainty
# Let the hunger games begin...
self.trainer()
def trainer(self):
"""Run the training class"""
logger.info(" ")
logger.info("Starting training...\n")
if self.test is None:
logger.info(
"{:6s} {:19s} {:12s} {:12s} {:8s}".format(
"Epoch", "Time Stamp", "Loss", "Error/img", "Error/atom"
)
)
logger.info(
"{:6s} {:19s} {:12s} {:8s} {:8s}".format(
"------",
"-------------------",
"------------",
"------------",
"------------",
)
)
else:
test_features = self.test.get("features", None)
test_targets = self.test.get("targets", None)
test_data = self.test.get("data", None)
logger.info(
"{:6s} {:19s} {:12s} {:12s} {:12s} {:12s} {:16s}".format(
"Epoch",
"Time Stamp",
"Loss",
"Error/img",
"Error/atom",
"Error/img (t)",
"Error/atom (t)",
)
)
logger.info(
"{:6s} {:19s} {:12s} {:8s} {:8s} {:8s} {:8s}".format(
"------",
"-------------------",
"------------",
"------------",
"------------",
"------------",
"------------",
)
)
converged = False
_loss = []
_rmse = []
epoch = 0
client = dask.distributed.get_client()
while not converged:
epoch += 1
self.optimizer.zero_grad() # clear previous gradients
loss, outputs_ = train.closure(
self.chunks,
self.targets,
self.uncertainty,
self.model,
self.lossfxn,
self.atoms_per_image,
self.device,
)
# We step the optimizer
if self.optimizer_name != "LBFGS":
self.optimizer.step()
else:
options = {"closure": self.closure, "current_loss": loss, "max_ls": 10}
self.optimizer.step(options)
# RMSE per image and per/atom
rmse = client.submit(compute_rmse, *(outputs_, self.targets))
atoms_per_image = torch.cat(self.atoms_per_image)
rmse_atom = client.submit(
compute_rmse, *(outputs_, self.targets, atoms_per_image)
)
rmse = rmse.result()
rmse_atom = rmse_atom.result()
_loss.append(loss.item())
_rmse.append(rmse)
# In the case that lr_scheduler is not None
if self.lr_scheduler is not None:
self.scheduler.step(loss)
print("Epoch {} lr {}".format(epoch, get_lr(self.optimizer)))
ts = time.time()
ts = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d " "%H:%M:%S")
if self.test is None:
logger.info(
"{:6d} {} {:8e} {:4e} {:4e}".format(
epoch, ts, loss.detach(), rmse, rmse_atom
)
)
else:
test_model = self.model.eval()
test_predictions = test_model(test_features).detach()
rmse_test = client.submit(
compute_rmse, *(test_predictions, test_targets)
)
atoms_per_image_test = torch.tensor(
test_data.atoms_per_image, requires_grad=False
)
rmse_atom_test = client.submit(
compute_rmse,
*(test_predictions, test_targets, atoms_per_image_test),
)
rmse_test = rmse_test.result()
rmse_atom_test = rmse_atom_test.result()
logger.info(
"{:6d} {} {:8e} {:4e} {:4e} {:4e} {:4e}".format(
epoch,
ts,
loss.detach(),
rmse,
rmse_atom,
rmse_test,
rmse_atom_test,
)
)
if self.checkpoint is not None:
self.checkpoint_save(epoch, self.model, **self.checkpoint)
if self.convergence is None and epoch == self.epochs:
converged = True
elif self.convergence is not None and rmse < self.convergence["energy"]:
converged = True
training_time = time.time() - self.initial_time
h, m, s = convert_elapsed_time(training_time)
logger.info(
"Training finished in {} hours {} minutes {:.2f} seconds.".format(h, m, s)
)
@classmethod
def closure(
Cls, chunks, targets, uncertainty, model, lossfxn, atoms_per_image, device
):
"""Closure
        This class method iterates over the batches, accumulates and reduces
        their gradients, sets the reduced gradients on the model parameters,
        and finally returns the loss and the outputs.
Parameters
----------
Cls : object
Class object.
chunks : tensor or list
Tensor with input data points in batch with index.
targets : tensor or list
The targets.
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
model : obj
Pytorch model to perform forward() and get gradients.
lossfxn : obj
A loss function object.
atoms_per_image : list
Atoms per image because we are doing atom-centered methods.
device : str
Are we running cuda or cpu?
"""
outputs_ = []
# Get client to send futures to the scheduler
client = dask.distributed.get_client()
running_loss = torch.tensor(0, dtype=torch.float)
accumulation = []
grads = []
# Accumulation of gradients
for index, chunk in enumerate(chunks):
accumulation.append(
client.submit(
train.train_batches,
*(
index,
chunk,
targets,
uncertainty,
model,
lossfxn,
atoms_per_image,
device,
),
)
)
dask.distributed.wait(accumulation)
accumulation = client.gather(accumulation)
for outputs, loss, grad in accumulation:
grad = np.array(grad, dtype=object)
running_loss += loss
outputs_.append(outputs)
grads.append(grad)
grads = sum(grads)
for index, param in enumerate(model.parameters()):
param.grad = torch.tensor(grads[index], dtype=torch.float)
del accumulation
del grads
return running_loss, outputs_
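    # Added commentary (not in the original source): the gradients gathered from
    # train_batches are plain numpy arrays, so summing the per-batch lists and
    # writing the result back into each param.grad performs the gradient
    # reduction manually rather than through torch.distributed.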
@classmethod
def train_batches(
Cls, index, chunk, targets, uncertainty, model, lossfxn, atoms_per_image, device
):
"""A function that allows training per batches
Parameters
----------
index : int
Index of batch.
chunk : tensor or list
Tensor with input data points in batch with index.
targets : tensor or list
The targets.
model : obj
Pytorch model to perform forward() and get gradients.
uncertainty : list
A list of uncertainties that are used to penalize during the loss
function evaluation.
lossfxn : obj
A loss function object.
atoms_per_image : list
Atoms per image because we are doing atom-centered methods.
device : str
Are we running cuda or cpu?
Returns
-------
        outputs : tensor
            Model outputs for the batch.
        loss : tensor
            The loss of the batch.
        gradients : list
            Gradients of the loss with respect to the model parameters.
"""
inputs = OrderedDict(chunk)
outputs = model(inputs)
        if uncertainty is None:
loss = lossfxn(outputs, targets[index], atoms_per_image[index])
else:
loss = lossfxn(
outputs, targets[index], atoms_per_image[index], uncertainty[index]
)
loss.backward()
gradients = []
for param in model.parameters():
try:
gradient = param.grad.detach().numpy()
except AttributeError:
                # This exception catches the case where a batch does not
                # exercise every model parameter, so param.grad is None for
                # some of them. For example, with two batches of 2 molecules
                # each, the first batch may contain only C, H, and O while N
                # appears only in the second batch; the first batch then
                # contributes 0 to the total gradient for the N parameters.
gradient = 0.0
gradients.append(gradient)
return outputs, loss, gradients
| 2.328125 | 2 |
hatsploit/core/db/db.py | EntySec/HatSploit | 139 | 3842 | #!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2022 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import os
from hatsploit.core.cli.badges import Badges
from hatsploit.lib.config import Config
from hatsploit.lib.storage import LocalStorage
class DB:
badges = Badges()
config = Config()
local_storage = LocalStorage()
def disconnect_payload_database(self, name):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.local_storage.delete_element("connected_payload_databases", name)
self.local_storage.delete_element("payloads", name)
return
self.badges.print_error("No such payload database connected!")
def disconnect_module_database(self, name):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.local_storage.delete_element("connected_module_databases", name)
self.local_storage.delete_element("modules", name)
return
self.badges.print_error("No such module database connected!")
def disconnect_plugin_database(self, name):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.local_storage.delete_element("connected_plugin_databases", name)
self.local_storage.delete_element("plugins", name)
return
self.badges.print_error("No such plugin database connected!")
def connect_payload_database(self, name, path):
if self.local_storage.get("connected_payload_databases"):
if name in self.local_storage.get("connected_payload_databases"):
self.badges.print_error("Payload database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a payload database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect payload database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "payloads":
self.badges.print_error("Not a payload database!")
return
del database['__database__']
payloads = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_payload_databases"):
self.local_storage.set("connected_payload_databases", {})
self.local_storage.update("connected_payload_databases", data)
if self.local_storage.get("payloads"):
self.local_storage.update("payloads", payloads)
else:
self.local_storage.set("payloads", payloads)
def connect_module_database(self, name, path):
if self.local_storage.get("connected_module_databases"):
if name in self.local_storage.get("connected_module_databases"):
self.badges.print_error("Module database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a module database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect module database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "modules":
self.badges.print_error("Not a module database!")
return
del database['__database__']
modules = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_module_databases"):
self.local_storage.set("connected_module_databases", {})
self.local_storage.update("connected_module_databases", data)
if self.local_storage.get("modules"):
self.local_storage.update("modules", modules)
else:
self.local_storage.set("modules", modules)
def connect_plugin_database(self, name, path):
if self.local_storage.get("connected_plugin_databases"):
if name in self.local_storage.get("connected_plugin_databases"):
self.badges.print_error("Plugin database already connected!")
return
if not os.path.exists(path) or not str.endswith(path, "json"):
self.badges.print_error("Not a database!")
return
try:
database = json.load(open(path))
except Exception:
self.badges.print_error("Failed to connect plugin database!")
return
if '__database__' not in database:
self.badges.print_error("No __database__ section found!")
return
if database['__database__']['type'] != "plugins":
self.badges.print_error("Not a plugin database!")
return
del database['__database__']
plugins = {
name: database
}
data = {
name: {
'path': path
}
}
if not self.local_storage.get("connected_plugin_databases"):
self.local_storage.set("connected_plugin_databases", {})
self.local_storage.update("connected_plugin_databases", data)
if self.local_storage.get("plugins"):
self.local_storage.update("plugins", plugins)
else:
self.local_storage.set("plugins", plugins)
| 2.03125 | 2 |
bluesky/tests/test_simulators.py | NSLS-II/bluesky | 43 | 3843 | from bluesky.plans import scan
from bluesky.simulators import (print_summary, print_summary_wrapper,
summarize_plan,
check_limits,
plot_raster_path)
import pytest
from bluesky.plans import grid_scan
def test_print_summary(hw):
det = hw.det
motor = hw.motor
print_summary(scan([det], motor, -1, 1, 10)) # old name
summarize_plan(scan([det], motor, -1, 1, 10)) # new name
list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))
def test_old_module_name(hw):
det = hw.det
motor = hw.motor
motor1 = hw.motor1
motor2 = hw.motor2
from bluesky.plan_tools import (print_summary, print_summary_wrapper,
plot_raster_path)
with pytest.warns(UserWarning):
print_summary(scan([det], motor, -1, 1, 10))
with pytest.warns(UserWarning):
list(print_summary_wrapper(scan([det], motor, -1, 1, 10)))
with pytest.warns(UserWarning):
plan = grid_scan([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)
plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)
def test_check_limits(RE, hw):
det = hw.det
motor = hw.motor
# The motor object does not currently implement limits.
# Use an assert to help us out if this changes in the future.
assert not hasattr(motor, 'limits')
# # check_limits should warn if it can't find check_value
# TODO: Is there _any_ object to test?
# with pytest.warns(UserWarning):
# check_limits(scan([det], motor, -1, 1, 3))
# monkey-patch some limits
motor.limits = (-2, 2)
# check_limits should do nothing here
check_limits(scan([det], motor, -1, 1, 3))
# check_limits should error if limits are exceeded only if object raises
# this object does not raise
check_limits(scan([det], motor, -3, 3, 3))
# check_limits should raise if limits are equal only if object raises
# this object does not raise
motor.limits = (2, 2)
check_limits(scan([det], motor, -1, 1, 3))
def test_check_limits_needs_RE():
with pytest.raises(RuntimeError) as ctx:
check_limits([])
assert str(ctx.value) == "Bluesky event loop not running"
def test_plot_raster_path(hw):
det = hw.det
motor1 = hw.motor1
motor2 = hw.motor2
plan = grid_scan([det], motor1, -5, 5, 10, motor2, -7, 7, 15, True)
plot_raster_path(plan, 'motor1', 'motor2', probe_size=.3)
| 2.109375 | 2 |
shutTheBox/main.py | robi1467/shut-the-box | 0 | 3844 | <filename>shutTheBox/main.py
import random
numbers_list = [1,2,3,4,5,6,7,8,9,10]
game_won = False
game_completed = False
#Stats
games_played = 0
games_won = 0
games_lost = 0
average_score = 0
total_score = 0
def welcome():
welcome_message = "Welcome to shut the box"
print(welcome_message)
i = 0
result = ""
while i < len(numbers_list):
if i < len(numbers_list)-1:
result += str(numbers_list[i]) + " "
else:
result += str(numbers_list[i])
i+=1
print(result)
def dice_roll(amount):
total = 0
i = 0
while i < amount:
total += random.randint(1, 6)
i+=1
return total
def choose_dice_amount():
amount = 0
while True:
try:
amount = int(input("You choose to roll one or two dice. Please enter either '1' or '2': "))
except ValueError:
print("INVALID ENTRY PLEASE TRY AGAIN")
continue
if amount == 1 or amount == 2:
return amount
else:
print("INVALID ENTRY PLEASE TRY AGAIN!")
continue
return amount
def choose_number_to_drop(target_amount):
entered = 0
goal = target_amount
entered_numbers = list()
while goal != 0:
try:
print("Available numbers: " + str(numbers_list) + " to get to " + str(target_amount))
entered = int(input("Please enter a number that is available: "))
except ValueError:
print("Invalid Entry, please try again")
continue
if entered not in numbers_list or entered in entered_numbers:
print("Invalid Entry, please try again")
continue
else:
goal -= entered
entered_numbers.append(entered)
if goal < 0:
goal = target_amount
entered_numbers = list()
i = 0
while i < len(entered_numbers):
numbers_list.remove(entered_numbers[i])
i += 1
def check_lost_game(rolled):
value = True
if rolled not in numbers_list:
i = 0
while i < len(numbers_list):
j = i+1
while j< len(numbers_list):
if numbers_list[i] + numbers_list[j] == rolled:
return False
k = j+1
while k < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] == rolled:
return False
l = k+1
while l < len(numbers_list):
if numbers_list[i] + numbers_list[j] + numbers_list[k] + numbers_list[l] == rolled:
return False
l+=1
k+=1
j+=1
i +=1
else:
value = False
return value
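# Equivalent sketch (added for illustration; the game loop below does not call
# it): the nested loops in check_lost_game test every combination of up to four
# remaining numbers, which itertools.combinations expresses more compactly.
def check_lost_game_itertools(rolled):
    import itertools
    for size in range(1, 5):
        for combo in itertools.combinations(numbers_list, size):
            if sum(combo) == rolled:
                return False
    return True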
def end_game():
game_completed = True
return game_completed
def win_game():
game_won = True
return game_won
def score_game():
score = 0
i = 0
while i < len(numbers_list):
score += numbers_list[i]
i+=1
return score
def all_less_than_7():
less_than_7 = True
i = 0
while i < len(numbers_list):
if numbers_list[i] > 6:
less_than_7 = False
i += 1
return less_than_7
def keep_playing_input():
while True:
try:
continue_playing = (input("Do you wish to keep playing? y or n: "))
except ValueError:
print("Invalid choice; please try again")
continue
        if continue_playing.lower() == "y":
return True
else:
return False
keep_playing = True
while keep_playing:
numbers_list = [1,2,3,4,5,6,7,8,9,10]
welcome()
roll_total = 0
while roll_total < 55:
dice_amount = 2
if all_less_than_7():
dice_amount = choose_dice_amount()
dice_total = dice_roll(dice_amount)
print("Your roll is: " + str(dice_total))
if check_lost_game(dice_total):
print("It is impossible to continue the game with this roll")
break
choose_number_to_drop(dice_total)
roll_total += dice_total
if roll_total == 55:
game_won = win_game()
if game_won:
print("Congrats you won!!!!")
games_played +=1
games_won +=1
else:
print("You lose, your score is " + str(score_game()))
print("Numbers remaining: " + str(numbers_list))
games_played += 1
games_lost += 1
total_score += score_game()
average_score = total_score/games_played
game_won = False
print("STATS:\n Games Played: " + str(games_played) + "\nGames Won: " + str(games_won) + "\nGames Lost: " + str(games_lost)
+ "\nAverage Score: " + str(average_score) + "\nTotal Score: " + str(total_score))
    keep_playing = keep_playing_input()
| 4.125 | 4 |
repos/system_upgrade/common/actors/systemfacts/tests/test_systemfacts_selinux.py | sm00th/leapp-repository | 21 | 3845 | import warnings
import pytest
from leapp.libraries.actor.systemfacts import get_selinux_status
from leapp.models import SELinuxFacts
no_selinux = False
try:
import selinux
except ImportError:
no_selinux = True
warnings.warn(
'Tests which uses `selinux` will be skipped'
' due to library unavailability.', ImportWarning)
reason_to_skip_msg = "Selinux is not available"
# FIXME: create valid tests...
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_enabled_enforcing(monkeypatch):
"""
Test case SELinux is enabled in enforcing mode
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 1])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': True,
'enabled': True,
'runtime_mode': 'enforcing',
'static_mode': 'enforcing'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_enabled_permissive(monkeypatch):
"""
Test case SELinux is enabled in permissive mode
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 1)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': True,
'enabled': True,
'runtime_mode': 'permissive',
'static_mode': 'permissive'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_disabled(monkeypatch):
"""
Test case SELinux is disabled
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', lambda: [0, 0])
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': False,
'enabled': False,
'runtime_mode': 'permissive',
'static_mode': 'permissive'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
class MockNoConfigFileOSError(object):
def __init__(self):
raise OSError
@pytest.mark.skipif(no_selinux, reason=reason_to_skip_msg)
def test_selinux_disabled_no_config_file(monkeypatch):
"""
Test case SELinux is disabled
"""
monkeypatch.setattr(selinux, 'is_selinux_mls_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'security_getenforce', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getenforcemode', MockNoConfigFileOSError)
monkeypatch.setattr(selinux, 'is_selinux_enabled', lambda: 0)
monkeypatch.setattr(selinux, 'selinux_getpolicytype', lambda: [0, 'targeted'])
expected_data = {'policy': 'targeted',
'mls_enabled': False,
'enabled': False,
'runtime_mode': 'permissive',
'static_mode': 'disabled'}
assert SELinuxFacts(**expected_data) == get_selinux_status()
| 2.375 | 2 |
Phase-1/Python Basic 2/Day-24.py | emetowinner/python-challenges | 3 | 3846 | <gh_stars>1-10
"""
1. Write a Python program to reverse only the vowels of a given string.
Sample Output:
w3resuorce
Python
Perl
ASU
2. Write a Python program to check whether a given integer is a palindrome or not.
Note: An integer is a palindrome when it reads the same backward as forward. Negative numbers are not palindromic.
Sample Output:
False
True
False
3. Write a Python program to remove the duplicate elements of a given array of numbers such that each element appears only once, and return the new length of the given array.
Sample Output:
5
4
4. Write a Python program to calculate the maximum profit from selling and buying values of stock. An array of numbers represent the stock prices in chronological order.
For example, given [8, 10, 7, 5, 7, 15], the function will return 10, since the buying value of the stock is 5 dollars and sell value is 15 dollars.
Sample Output:
10
7
0
5. Write a Python program to remove all instances of a given value from a given array of integers and find the length of the new array.
Sample Output:
6
0
5
0
6. Write a Python program to find the starting and ending position of a given value in a given array of integers, sorted in ascending order.
If the target is not found in the array, return [0, 0].
Input: [5, 7, 7, 8, 8, 8] target value = 8
Output: [0, 5]
Input: [1, 3, 6, 9, 13, 14] target value = 4
Output: [0, 0]
Sample Output:
[0, 5]
[0, 0]
7. The price of a given stock on each day is stored in an array.
Write a Python program to find the maximum profit in one transaction i.e., buy one and sell one share of the stock from the given price value of the said array. You cannot sell a stock before you buy one.
Input (Stock price of each day): [224, 236, 247, 258, 259, 225]
Output: 35
Explanation:
236 - 224 = 12
247 - 224 = 23
258 - 224 = 34
259 - 224 = 35
225 - 224 = 1
247 - 236 = 11
258 - 236 = 22
259 - 236 = 23
225 - 236 = -11
258 - 247 = 11
259 - 247 = 12
225 - 247 = -22
259 - 258 = 1
225 - 258 = -33
225 - 259 = -34
8. Write a Python program to print a given N by M matrix of numbers line by line in forward > backwards > forward >... order.
Input matrix:
[[1, 2, 3,4],
[5, 6, 7, 8],
[0, 6, 2, 8],
[2, 3, 0, 2]]
Output:
1
2
3
4
8
7
6
5
0
6
2
8
2
0
3
2
9. Write a Python program to compute the largest product of three integers from a given list of integers.
Sample Output:
4000
8
120
10. Write a Python program to find the first missing positive integer that does not exist in a given list.
"""
| 4.1875 | 4 |
etl/parsers/etw/Microsoft_Windows_IPxlatCfg.py | IMULMUL/etl-parser | 104 | 3847 | # -*- coding: utf-8 -*-
"""
Microsoft-Windows-IPxlatCfg
GUID : 3e5ac668-af52-4c15-b99b-a3e7a6616ebd
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1001, version=0)
class Microsoft_Windows_IPxlatCfg_1001_0(Etw):
pattern = Struct(
"ErrorString" / CString,
"ErrorCode" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1002, version=0)
class Microsoft_Windows_IPxlatCfg_1002_0(Etw):
pattern = Struct(
"ErrorString" / CString,
"ErrorCode" / Int32ul,
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1003, version=0)
class Microsoft_Windows_IPxlatCfg_1003_0(Etw):
pattern = Struct(
"InfoString" / CString
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1005, version=0)
class Microsoft_Windows_IPxlatCfg_1005_0(Etw):
pattern = Struct(
"IPv4Address" / Int32ul,
"IPv4Prefix" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1006, version=0)
class Microsoft_Windows_IPxlatCfg_1006_0(Etw):
pattern = Struct(
"InfoString" / CString,
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1007, version=0)
class Microsoft_Windows_IPxlatCfg_1007_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"PrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1008, version=0)
class Microsoft_Windows_IPxlatCfg_1008_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"IPv4Address" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1009, version=0)
class Microsoft_Windows_IPxlatCfg_1009_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1010, version=0)
class Microsoft_Windows_IPxlatCfg_1010_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1011, version=0)
class Microsoft_Windows_IPxlatCfg_1011_0(Etw):
pattern = Struct(
"InfoString" / CString,
"MTU" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1101, version=0)
class Microsoft_Windows_IPxlatCfg_1101_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"Metric" / Int32ul,
"RemotePrefixLength" / Int32ul,
"LocalPrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1102, version=0)
class Microsoft_Windows_IPxlatCfg_1102_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"Metric" / Int32ul,
"RemotePrefixLength" / Int32ul,
"LocalPrefixLength" / Int32ul
)
@declare(guid=guid("3e5ac668-af52-4c15-b99b-a3e7a6616ebd"), event_id=1103, version=0)
class Microsoft_Windows_IPxlatCfg_1103_0(Etw):
pattern = Struct(
"InterfaceLuid" / Int64ul,
"PrefixLength" / Int32ul
)
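# Illustrative sketch (not part of the generated parser): each declared event
# class carries a construct Struct in its `pattern` attribute, so a raw payload
# can be decoded directly. The 8 zero bytes below are placeholder data rather
# than a captured event.
if __name__ == "__main__":
    sample = Microsoft_Windows_IPxlatCfg_1005_0.pattern.parse(bytes(8))
    print(sample.IPv4Address, sample.IPv4Prefix)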
| 1.765625 | 2 |
microservices/users/config.py | Levakin/sanic-test-app | 0 | 3848 | <reponame>Levakin/sanic-test-app
# -*- coding: utf-8 -*-
import os
from distutils.util import strtobool
class Config:
DEBUG = bool(strtobool(os.getenv('DEBUG', "False")))
DATABASE_URI = os.getenv('DATABASE_URI', '127.0.0.1:27017')
WORKERS = int(os.getenv('WORKERS', 2))
LOGO = os.getenv('LOGO', None)
HOST = os.getenv('HOST', '127.0.0.1')
PORT = int(os.getenv('PORT', 8000))
SECRET = os.getenv('SECRET', 'secret')
LOGIN_MIN_LENGTH = int(os.getenv('LOGIN_MIN_LENGTH', 1))
LOGIN_MAX_LENGTH = int(os.getenv('LOGIN_MAX_LENGTH', 32))
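# Minimal usage sketch (assumed, not part of the original service): Config reads
# its values from the environment once at import time, so variables must be
# exported before this module is imported.
if __name__ == "__main__":
    print(Config.DEBUG, Config.DATABASE_URI, Config.HOST, Config.PORT)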
| 1.929688 | 2 |
semantic-python/test/fixtures/4-01-lambda-literals.py | Temurson/semantic | 8,844 | 3849 | <reponame>Temurson/semantic<gh_stars>1000+
# CHECK-TREE: { const <- \x -> \y -> x; y <- const #true #true; z <- const #false #false; #record { const: const, y : y, z: z, }}
const = lambda x, y: x
y = const(True, True)
z = const(False, False)
| 1.117188 | 1 |
main.py | mithi/semantic-segmentation | 33 | 3850 | <filename>main.py
import tensorflow as tf
import os.path
import warnings
from distutils.version import LooseVersion
import glob
import helper
import project_tests as tests
#--------------------------
# USER-SPECIFIED DATA
#--------------------------
# Tune these parameters
NUMBER_OF_CLASSES = 2
IMAGE_SHAPE = (160, 576)
EPOCHS = 20
BATCH_SIZE = 1
LEARNING_RATE = 0.0001
DROPOUT = 0.75
# Specify these directory paths
DATA_DIRECTORY = './data'
RUNS_DIRECTORY = './runs'
TRAINING_DATA_DIRECTORY ='./data/data_road/training'
NUMBER_OF_IMAGES = len(glob.glob('./data/data_road/training/calib/*.*'))
VGG_PATH = './data/vgg'
all_training_losses = [] # Used for plotting to visualize if our training is going well given parameters
#--------------------------
# DEPENDENCY CHECK
#--------------------------
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
#--------------------------
# PLACEHOLDER TENSORS
#--------------------------
correct_label = tf.placeholder(tf.float32, [None, IMAGE_SHAPE[0], IMAGE_SHAPE[1], NUMBER_OF_CLASSES])
learning_rate = tf.placeholder(tf.float32)
keep_prob = tf.placeholder(tf.float32)
#--------------------------
# FUNCTIONS
#--------------------------
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
sess: TensorFlow Session
vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3, layer4, layer7)
"""
# load the model and weights
model = tf.saved_model.loader.load(sess, ['vgg16'], vgg_path)
# Get Tensors to be returned from graph
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name('image_input:0')
keep_prob = graph.get_tensor_by_name('keep_prob:0')
layer3 = graph.get_tensor_by_name('layer3_out:0')
layer4 = graph.get_tensor_by_name('layer4_out:0')
layer7 = graph.get_tensor_by_name('layer7_out:0')
return image_input, keep_prob, layer3, layer4, layer7
def conv_1x1(layer, layer_name):
""" Return the output of a 1x1 convolution of a layer """
return tf.layers.conv2d(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (1, 1),
strides = (1, 1),
name = layer_name)
def upsample(layer, k, s, layer_name):
""" Return the output of transpose convolution given kernel_size k and strides s """
return tf.layers.conv2d_transpose(inputs = layer,
filters = NUMBER_OF_CLASSES,
kernel_size = (k, k),
strides = (s, s),
padding = 'same',
name = layer_name)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes = NUMBER_OF_CLASSES):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
vgg_layerX_out: TF Tensor for VGG Layer X output
num_classes: Number of classes to classify
return: The Tensor for the last layer of output
"""
# Use a shorter variable name for simplicity
layer3, layer4, layer7 = vgg_layer3_out, vgg_layer4_out, vgg_layer7_out
# Apply a 1x1 convolution to encoder layers
layer3x = conv_1x1(layer = layer3, layer_name = "layer3conv1x1")
layer4x = conv_1x1(layer = layer4, layer_name = "layer4conv1x1")
layer7x = conv_1x1(layer = layer7, layer_name = "layer7conv1x1")
# Add decoder layers to the network with skip connections and upsampling
# Note: the kernel size and strides are the same as the example in Udacity Lectures
# Semantic Segmentation Scene Understanding Lesson 10-9: FCN-8 - Decoder
decoderlayer1 = upsample(layer = layer7x, k = 4, s = 2, layer_name = "decoderlayer1")
decoderlayer2 = tf.add(decoderlayer1, layer4x, name = "decoderlayer2")
decoderlayer3 = upsample(layer = decoderlayer2, k = 4, s = 2, layer_name = "decoderlayer3")
decoderlayer4 = tf.add(decoderlayer3, layer3x, name = "decoderlayer4")
decoderlayer_output = upsample(layer = decoderlayer4, k = 16, s = 8, layer_name = "decoderlayer_output")
return decoderlayer_output
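# Added commentary (not in the original source): the three transpose
# convolutions above upsample by 2x, 2x and 8x respectively, recovering the 32x
# total downsampling of the VGG encoder so the segmentation output matches the
# input resolution.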
def optimize(nn_last_layer, correct_label, learning_rate, num_classes = NUMBER_OF_CLASSES):
"""
Build the TensorFLow loss and optimizer operations.
nn_last_layer: TF Tensor of the last layer in the neural network
correct_label: TF Placeholder for the correct label image
learning_rate: TF Placeholder for the learning rate
num_classes: Number of classes to classify
return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# Reshape 4D tensors to 2D, each row represents a pixel, each column a class
logits = tf.reshape(nn_last_layer, (-1, num_classes))
class_labels = tf.reshape(correct_label, (-1, num_classes))
# The cross_entropy_loss is the cost which we are trying to minimize to yield higher accuracy
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = class_labels)
cross_entropy_loss = tf.reduce_mean(cross_entropy)
# The model implements this operation to find the weights/parameters that would yield correct pixel labels
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
sess: TF Session
epochs: Number of epochs
batch_size: Batch size
get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
train_op: TF Operation to train the neural network
cross_entropy_loss: TF Tensor for the amount of loss
input_image: TF Placeholder for input images
correct_label: TF Placeholder for label images
keep_prob: TF Placeholder for dropout keep probability
learning_rate: TF Placeholder for learning rate
"""
    for epoch in range(epochs):
        losses, i = [], 0
        for images, labels in get_batches_fn(batch_size):
i += 1
feed = { input_image: images,
correct_label: labels,
keep_prob: DROPOUT,
learning_rate: LEARNING_RATE }
_, partial_loss = sess.run([train_op, cross_entropy_loss], feed_dict = feed)
print("---> iteration: ", i, " partial loss:", partial_loss)
losses.append(partial_loss)
training_loss = sum(losses) / len(losses)
all_training_losses.append(training_loss)
print("------------------")
print("epoch: ", epoch + 1, " of ", EPOCHS, "training loss: ", training_loss)
print("------------------")
def run_tests():
tests.test_layers(layers)
tests.test_optimize(optimize)
tests.test_for_kitti_dataset(DATA_DIRECTORY)
tests.test_train_nn(train_nn)
def run():
""" Run a train a model and save output images resulting from the test image fed on the trained model """
# Get vgg model if we can't find it where it should be
helper.maybe_download_pretrained_vgg(DATA_DIRECTORY)
# A function to get batches
get_batches_fn = helper.gen_batch_function(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)
with tf.Session() as session:
# Returns the three layers, keep probability and input layer from the vgg architecture
image_input, keep_prob, layer3, layer4, layer7 = load_vgg(session, VGG_PATH)
# The resulting network architecture from adding a decoder on top of the given vgg model
model_output = layers(layer3, layer4, layer7, NUMBER_OF_CLASSES)
# Returns the output logits, training operation and cost operation to be used
# - logits: each row represents a pixel, each column a class
# - train_op: function used to get the right parameters to the model to correctly label the pixels
# - cross_entropy_loss: function outputting the cost which we are minimizing, lower cost should yield higher accuracy
logits, train_op, cross_entropy_loss = optimize(model_output, correct_label, learning_rate, NUMBER_OF_CLASSES)
# Initialize all variables
session.run(tf.global_variables_initializer())
session.run(tf.local_variables_initializer())
# Train the neural network
train_nn(session, EPOCHS, BATCH_SIZE, get_batches_fn,
train_op, cross_entropy_loss, image_input,
correct_label, keep_prob, learning_rate)
# Run the model with the test images and save each painted output image (roads painted green)
helper.save_inference_samples(RUNS_DIRECTORY, DATA_DIRECTORY, session, IMAGE_SHAPE, logits, keep_prob, image_input)
#--------------------------
# MAIN
#--------------------------
if __name__ == "__main__":
run_tests()
run() # Run a train a model and save output images resulting from the test image fed on the trained model
print(all_training_losses)
| 2.609375 | 3 |
tests/scanner/audit/log_sink_rules_engine_test.py | BrunoReboul/forseti-security | 0 | 3851 | <gh_stars>0
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the LogSinkRulesEngine."""
import unittest
import mock
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
from google.cloud.forseti.common.gcp_type.billing_account import BillingAccount
from google.cloud.forseti.common.gcp_type.folder import Folder
from google.cloud.forseti.common.gcp_type.log_sink import LogSink
from google.cloud.forseti.common.gcp_type.organization import Organization
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
class LogSinkRulesEngineTest(ForsetiTestCase):
"""Tests for the LogSinkRulesEngine."""
def setUp(self):
"""Set up GCP resources for tests."""
self.lsre = lsre
self.lsre.LOGGER = mock.MagicMock()
# Set up resources in the following hierarchy:
# +-----> billing_acct_abcd
# |
# |
# +-----------------------> proj-1
# |
# |
# org_234 +-----> folder_56 +-----> proj-2
# |
# |
# +-----------------------> proj-3
self.org_234 = Organization(
'234',
display_name='Organization 234',
full_name='organization/234/',
data='fake_org_data_234')
self.billing_acct_abcd = BillingAccount(
'ABCD-1234',
display_name='Billing Account ABCD',
full_name='organization/234/billingAccount/ABCD-1234/',
data='fake_billing_account_data_abcd')
self.folder_56 = Folder(
'56',
display_name='Folder 56',
full_name='organization/234/folder/56/',
data='fake_folder_data456456')
self.proj_1 = Project(
'proj-1',
project_number=11223344,
display_name='My project 1',
parent=self.org_234,
full_name='organization/234/project/proj-1/',
data='fake_project_data_2341')
self.proj_2 = Project(
'proj-2',
project_number=223344,
display_name='My project 2',
parent=self.folder_56,
full_name='organization/234/folder/56/project/proj-2/',
data='fake_project_data_4562')
self.proj_3 = Project(
'proj-3',
project_number=33445566,
display_name='My project 3',
parent=self.org_234,
full_name='organization/234/project/proj-3/',
data='fake_project_data_1233')
def get_engine_with_valid_rules(self):
"""Create a rule engine build with a valid rules file."""
rules_local_path = get_datafile_path(
__file__, 'log_sink_test_valid_rules.yaml')
rules_engine = self.lsre.LogSinkRulesEngine(
rules_file_path=rules_local_path)
rules_engine.build_rule_book()
return rules_engine
def test_build_rule_book_from_local_yaml_file_works(self):
"""Tests that a RuleBook is built correctly with a yaml file."""
rules_engine = self.get_engine_with_valid_rules()
# Creates 'self' rules for 5 difference resources and 'children' rules
# for 2.
self.assertEqual(
6, len(rules_engine.rule_book.resource_rules_map['self']))
self.assertEqual(
2, len(rules_engine.rule_book.resource_rules_map['children']))
self_rule_resources = []
for resource in rules_engine.rule_book.resource_rules_map['self']:
self_rule_resources.append(resource.name)
expected_rule_resources = [
'billingAccounts/ABCD-1234', 'folders/56', 'organizations/234',
'projects/proj-1', 'projects/proj-2', 'projects/proj-3']
self.assertEqual(expected_rule_resources, sorted(self_rule_resources))
child_rule_resources = []
for resource in rules_engine.rule_book.resource_rules_map['children']:
child_rule_resources.append(resource.name)
expected_rule_resources = ['folders/56', 'organizations/234']
self.assertEqual(expected_rule_resources, sorted(child_rule_resources))
def test_build_rule_book_invalid_applies_to_fails(self):
"""Tests that a rule with invalid applies_to type cannot be created."""
rules_local_path = get_datafile_path(
__file__, 'log_sink_test_invalid_rules.yaml')
rules_engine = self.lsre.LogSinkRulesEngine(
rules_file_path=rules_local_path)
with self.assertRaises(InvalidRulesSchemaError):
rules_engine.build_rule_book()
def test_project_with_no_violations(self):
"""Tests that no violations are produced for a correct project."""
rules_engine = self.get_engine_with_valid_rules()
# proj-1 needs an Audit Log sink.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_1_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:<EMAIL>',
parent=self.proj_1,
raw_json='_SINK_1_'
),
LogSink(
sink_id='compute_logs_saver',
destination=('bigquery.googleapis.com/projects/proj_1/'
'datasets/compute_logs'),
sink_filter='resource.type="gce_instance"',
include_children=False,
writer_identity=('serviceAccount:<PASSWORD>@'
'gcp-sa-logging.iam.gserviceaccount.com'),
parent=self.proj_1,
raw_json='_SINK_2_'
)
]
actual_violations = rules_engine.find_violations(
self.proj_1, log_sinks)
self.assertEqual(set(), actual_violations)
def test_folder_with_no_violations(self):
"""Tests that no violations are produced for a correct folder."""
rules_engine = self.get_engine_with_valid_rules()
# Rules disallow any folder-level LogSinks.
actual_violations = rules_engine.find_violations(self.folder_56, [])
self.assertEqual(set(), actual_violations)
def test_billing_account_with_no_violations(self):
"""Tests that no violations are produced for a correct billing acct."""
rules_engine = self.get_engine_with_valid_rules()
log_sinks = [
LogSink(
sink_id='billing_logs',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/billing_logs'),
sink_filter='',
include_children=False,
writer_identity='serviceAccount:<EMAIL>',
parent=self.billing_acct_abcd,
raw_json='__SINK_1__'
),
]
actual_violations = rules_engine.find_violations(
self.billing_acct_abcd, log_sinks)
self.assertEqual(set(), actual_violations)
def test_org_with_no_violations(self):
"""Tests that no violations are produced for a correct organization."""
rules_engine = self.get_engine_with_valid_rules()
# Org needs an Audit Log sink, but to any destination.
log_sinks = [
LogSink(
sink_id='audit_logs_to_pubsub',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:<EMAIL>',
parent=self.org_234,
raw_json='__SINK_1__'
)
]
actual_violations = rules_engine.find_violations(
self.org_234, log_sinks)
self.assertEqual(set(), actual_violations)
def test_project_missing_required_sinks(self):
"""Tests violations are produced for project missing required sinks."""
rules_engine = self.get_engine_with_valid_rules()
# proj-2 needs an Audit Log sink, by org-level rules, and a pubsub
# sink, by folder-level rules.
log_sinks = [
LogSink(
sink_id='non_audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_2_logs'),
sink_filter='logName:"logs/non-cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:<EMAIL>',
parent=self.proj_2,
raw_json='__SINK_1__'
),
LogSink(
sink_id='compute_logs_saver',
destination=('bigquery.googleapis.com/projects/proj_2/'
'datasets/compute_logs'),
sink_filter='resource.type="gce_instance"',
include_children=False,
writer_identity=('serviceAccount:p12345-67890@'
'gcp-sa-logging.iam.gserviceaccount.com'),
parent=self.proj_2,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.proj_2, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='proj-2',
resource_type='project',
resource_id='proj-2',
full_name='organization/234/folder/56/project/proj-2/',
rule_name='Require Audit Log sinks in all projects.',
rule_index=0,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('^bigquery\\.googleapis\\.com\\/projects\\/'
'my\\-audit\\-logs\\/datasets\\/.+$'),
sink_filter=('^logName\\:\\"logs\\/'
'cloudaudit\\.googleapis\\.com\\"$'),
sink_include_children='*',
resource_data=''
),
lsre.Rule.RuleViolation(
resource_name='proj-2',
resource_type='project',
resource_id='proj-2',
full_name='organization/234/folder/56/project/proj-2/',
rule_name='Require a PubSub sink in folder-56 projects.',
rule_index=3,
violation_type='LOG_SINK_VIOLATION',
sink_destination='^pubsub\\.googleapis\\.com\\/.+$',
sink_filter='^$',
sink_include_children='*',
resource_data=''
)
])
self.assertEqual(expected_violations, actual_violations)
def test_project_whitelist_violation(self):
"""Tests violations are produced for non-whitelisted sinks."""
rules_engine = self.get_engine_with_valid_rules()
# proj-3 can only have BigQuery sinks.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/proj_1_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:<EMAIL>',
parent=self.proj_3,
raw_json='__SINK_1__'
),
LogSink(
sink_id='audit_logs_to_pubsub',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'proj-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:<EMAIL>',
parent=self.proj_3,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.proj_3, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='projects/proj-3/sinks/audit_logs_to_pubsub',
resource_type='sink',
resource_id='audit_logs_to_pubsub',
full_name='organization/234/project/proj-3/audit_logs_to_pubsub/',
rule_name='Only allow BigQuery sinks in Proj-1 and Proj-3.',
rule_index=4,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('pubsub.googleapis.com/projects/proj-3/'
'topics/proj-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
sink_include_children=True,
resource_data='__SINK_2__'
)
])
self.assertEqual(expected_violations, actual_violations)
def test_folder_blacklist_violation(self):
"""Tests violations are produced for blacklisted sinks."""
rules_engine = self.get_engine_with_valid_rules()
# Rules disallow any folder-level LogSinks.
log_sinks = [
LogSink(
sink_id='audit_logs_to_bq',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/folder_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:<EMAIL>',
parent=self.folder_56,
raw_json='__SINK_1__'
)
]
actual_violations = rules_engine.find_violations(
self.folder_56, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='folders/56/sinks/audit_logs_to_bq',
resource_type='sink',
resource_id='audit_logs_to_bq',
full_name='organization/234/folder/56/audit_logs_to_bq/',
rule_name='Disallow folder sinks.',
rule_index=2,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('bigquery.googleapis.com/projects/'
'my-audit-logs/datasets/folder_logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
sink_include_children=False,
resource_data='__SINK_1__')
])
self.assertEqual(expected_violations, actual_violations)
def test_billing_account_with_whitelist_violations(self):
"""Tests violations are produced for billing account sinks."""
rules_engine = self.get_engine_with_valid_rules()
log_sinks = [
LogSink(
sink_id='billing_logs',
destination=('bigquery.googleapis.com/projects/my-audit-logs/'
'datasets/wrong_dataset'),
sink_filter='',
include_children=False,
writer_identity='serviceAccount:<EMAIL>',
parent=self.billing_acct_abcd,
raw_json='__SINK_1__'
),
]
actual_violations = rules_engine.find_violations(
self.billing_acct_abcd, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_type='sink',
resource_id='billing_logs',
resource_name='billingAccounts/ABCD-1234/sinks/billing_logs',
full_name='organization/234/billingAccount/ABCD-1234/billing_logs/',
rule_name=('Only allow Billing Account sinks to audit logs '
'project.'),
rule_index=6,
violation_type='LOG_SINK_VIOLATION',
sink_destination=('bigquery.googleapis.com/projects/'
'my-audit-logs/datasets/wrong_dataset'),
sink_filter='',
sink_include_children=False,
resource_data='__SINK_1__')
])
self.assertEqual(expected_violations, actual_violations)
def test_org_missing_required_sinks(self):
"""Tests violations are produced for an org missing required sinks."""
rules_engine = self.get_engine_with_valid_rules()
# Org needs an Audit Log sink, including children.
log_sinks = [
LogSink(
sink_id='sink_not_including_children',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-audit-logs'),
sink_filter='logName:"logs/cloudaudit.googleapis.com"',
include_children=False,
writer_identity='serviceAccount:<EMAIL>',
parent=self.org_234,
raw_json='__SINK_1__'
),
LogSink(
sink_id='sink_with_wrong_filter',
destination=('pubsub.googleapis.com/projects/proj-3/topics/'
'org-more-logs'),
sink_filter='logName:"logs/otherapi.googleapis.com"',
include_children=True,
writer_identity='serviceAccount:<EMAIL>',
parent=self.org_234,
raw_json='__SINK_2__'
)
]
actual_violations = rules_engine.find_violations(
self.org_234, log_sinks)
expected_violations = set([
lsre.Rule.RuleViolation(
resource_name='234',
resource_type='organization',
resource_id='234',
full_name='organization/234/',
rule_name='Require an Org Level audit log sink.',
rule_index=1,
violation_type='LOG_SINK_VIOLATION',
sink_destination='^.*$',
sink_filter=('^logName\\:\\"logs\\/'
'cloudaudit\\.googleapis\\.com\\"$'),
sink_include_children=True,
resource_data=''
)
])
self.assertEqual(expected_violations, actual_violations)
def test_add_invalid_rules(self):
"""Tests that adding invalid rules raises exceptions."""
rule_book = self.lsre.LogSinkRuleBook(global_configs=None)
valid_resource = {
'type': 'organization',
'applies_to': 'children',
'resource_ids': ['1234']
}
valid_sink_spec = {
'destination': 'bigquery.*',
'filter': '',
'include_children': '*'
}
rule_book.add_rule(
{
'name': 'Valid rule',
'resource': [valid_resource],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, 0)
bad_rules = [
{},
{
                'name': 'Missing Resource',
'mode': 'whitelist',
'sink': valid_sink_spec,
}, {
                'name': 'Missing sink',
'resource': [valid_resource],
'mode': 'whitelist',
}, {
'name': 'Bad mode',
'resource': [valid_resource],
'sink': valid_sink_spec,
'mode': 'other',
}, {
'name': 'Bad resource type',
'resource': [{
'type': 'bucket',
'applies_to': 'self',
'resource_ids': ['bucket-1']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Bad applies to type',
'resource': [{
'type': 'folder',
'applies_to': 'self_and_children',
'resource_ids': ['56']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Bad applies to type',
'resource': [{
'type': 'billing_account',
'applies_to': 'children',
'resource_ids': ['ABCD-1234']
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Empty resource_ids',
'resource': [{
'type': 'project',
'applies_to': 'self',
'resource_ids': []
}],
'sink': valid_sink_spec,
'mode': 'whitelist'
}, {
'name': 'Missing filter',
'resource': [valid_resource],
'sink': {
'destination': 'bigquery.*',
'include_children': '*'
},
'mode': 'whitelist'
}, {
'name': 'Bad include_children',
'resource': [valid_resource],
'sink': {
'destination': 'bigquery.*',
'filter': '*',
'include_children': 'Yes'
},
'mode': 'whitelist'
}
]
for rule in bad_rules:
with self.assertRaises(InvalidRulesSchemaError):
rule_book.add_rule(rule, 1)
if __name__ == '__main__':
unittest.main()
| 1.820313 | 2 |
backend/api/ulca-ums-service/user-management/utilities/orgUtils.py | agupta54/ulca | 3 | 3852 | import uuid
from config import USR_ORG_MONGO_COLLECTION, USR_MONGO_COLLECTION
import db
from models.response import post_error
import logging
log = logging.getLogger('file')
class OrgUtils:
def __init__(self):
pass
#orgId generation
@staticmethod
def generate_org_id():
"""UUID generation for org registeration"""
return(uuid.uuid4().hex)
@staticmethod
def validate_org(org_code):
"""Validating Org
        The org must be registered and active on the Anuvaad system.
"""
try:
#connecting to mongo instance/collection
collections = db.get_db()[USR_ORG_MONGO_COLLECTION]
#searching for active org record
result = collections.find({"code": org_code}, {"_id": 0, "active": 1})
if result.count() == 0:
return post_error("Invalid Organization", "No such registered organization with the given Org Id", None)
for value in result:
if value["active"] == False:
return post_error("Invalid Organization", "Organization is currently inactive", None)
except Exception as e:
log.exception(f"Db connection exception : {e}")
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None)
@staticmethod
def validate_org_upsert(i,org):
"""Org validation on upsert
        Deactivation of an org is allowed only once all users in the corresponding org are inactive.
"""
if "code" not in org or not org["code"]:
return post_error("Data Missing", "code not found", None)
if "active" not in org:
return post_error("Data Missing", "active not found", None)
code = str(org["code"]).upper()
active = org["active"]
if not isinstance(active,bool):
return post_error("Invalid format", "active should be bool", None), 400
if active == False:
try:
#connecting to mongo instance/collection
collections = db.get_db()[USR_MONGO_COLLECTION]
#searching for active users in the org
result = collections.find({"orgID": code,"is_active":True})
if result.count()!=0:
log.info("Deactivation request for org failed, {} active users with the orgID".format(str(result.count())))
return post_error("Deactivation Failed","There exist active users in {} hence this action cannot be performed".format(code),None)
except Exception as e:
log.exception(f"Db connection exception : {e}")
return post_error("Database connection exception", "An error occurred while connecting to the database:{}".format(str(e)), None) | 2.515625 | 3 |
setup.py | AntonBiryukovUofC/diffvg | 0 | 3853 | <reponame>AntonBiryukovUofC/diffvg
# Adapted from https://github.com/pybind/cmake_example/blob/master/setup.py
import os
import re
import sys
import platform
import subprocess
import importlib
from sysconfig import get_paths
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.install import install
from distutils.sysconfig import get_config_var
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir, build_with_cuda):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
self.build_with_cuda = build_with_cuda
class Build(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
super().run()
def build_extension(self, ext):
if isinstance(ext, CMakeExtension):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
info = get_paths()
include_path = info['include']
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_INCLUDE_PATH=' + include_path,
]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir),
'-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2 ** 32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j8']
if ext.build_with_cuda:
cmake_args += ['-DDIFFVG_CUDA=1']
else:
cmake_args += ['-DDIFFVG_CUDA=0']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
env_build = env
env["CXX"] = "/usr/bin/g++-5"
env["CC"] = "/usr/bin/gcc-5"
env_build["CXX"] = "/usr/bin/g++-5"
env_build["CC"] = "/usr/bin/gcc-5"
env["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH']
env_build["PATH"] = "/usr/local/cuda-10.1/bin" + ":" + os.environ['PATH']
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp, env=env_build)
else:
super().build_extension(ext)
torch_spec = importlib.util.find_spec("torch")
tf_spec = importlib.util.find_spec("tensorflow")
packages = []
build_with_cuda = False
if torch_spec is not None:
packages.append('pydiffvg')
import torch
if torch.cuda.is_available():
build_with_cuda = True
if tf_spec is not None and sys.platform != 'win32':
packages.append('pydiffvg_tensorflow')
if not build_with_cuda:
import tensorflow as tf
if tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None):
build_with_cuda = True
if len(packages) == 0:
print('Error: PyTorch or Tensorflow must be installed. For Windows platform only PyTorch is supported.')
exit()
# Override build_with_cuda with environment variable
if 'DIFFVG_CUDA' in os.environ:
build_with_cuda = os.environ['DIFFVG_CUDA'] == '1'
setup(name='diffvg',
version='0.0.1',
install_requires=["svgpathtools"],
description='Differentiable Vector Graphics',
ext_modules=[CMakeExtension('diffvg', '', build_with_cuda)],
cmdclass=dict(build_ext=Build, install=install),
packages=packages,
zip_safe=False)
| 1.851563 | 2 |
robotpy_ext/common_drivers/navx/registerio.py | twinters007/robotpy-wpilib-utilities | 2 | 3854 | # validated: 2017-02-19 DS c5e3a8a9b642 roborio/java/navx_frc/src/com/kauailabs/navx/frc/RegisterIO.java
#----------------------------------------------------------------------------
# Copyright (c) <NAME> 2015. All Rights Reserved.
#
# Created in support of Team 2465 (Kauaibots). Go Purple Wave!
#
# Open Source Software - may be modified and shared by FRC teams. Any
# modifications to this code must be accompanied by the \License.txt file
# in the root directory of the project
#----------------------------------------------------------------------------
from ._impl import AHRSProtocol, IMUProtocol, IMURegisters
from wpilib.timer import Timer
import logging
logger = logging.getLogger('navx')
__all__ = ['RegisterIO']
IO_TIMEOUT_SECONDS = 1.0
DELAY_OVERHEAD_SECONDS = 0.004
class _BoardId:
type = 0
hw_rev = 0
fw_ver_major = 0
fw_ver_minor = 0
fw_revision = 0
unique_id = [0]*12
class _BoardState:
op_status = 0
sensor_status = 0
cal_status = 0
selftest_status = 0
capability_flags = 0
update_rate_hz = 0
accel_fsr_g = 0
gyro_fsr_dps = 0
class RegisterIO:
def __init__(self, io_provider, update_rate_hz, notify_sink, board_capabilities):
"""
:param board_capabilities: must have the following callable attributes:
_isOmniMountSupported, _isBoardYawResetSupported,
_isDisplacementSupported
:param notify_sink: must have the following callable attributes:
_setYawPitchRoll, _setAHRSData, _setAHRSPosData,
_setRawData, _setBoardID, _setBoardState, _yawResetComplete
"""
self.io_provider = io_provider
self.update_rate_hz = update_rate_hz
self.board_capabilities = board_capabilities
self.notify_sink = notify_sink
self.raw_data_update = IMUProtocol.GyroUpdate()
self.ahrspos_update = AHRSProtocol.AHRSPosUpdate()
self.board_state = _BoardState()
self.board_id = _BoardId()
self.last_update_time = 0
self.byte_count = 0
self.update_count = 0
self.last_sensor_timestamp = 0
self._stop = False
def stop(self):
self._stop = True
def shutdown(self):
self.io_provider.shutdown()
def run(self):
logger.info("NavX io thread starting")
try:
self.io_provider.init()
# initial device configuration
self.setUpdateRateHz(self.update_rate_hz)
if not self.getConfiguration():
logger.warning("-- Did not get configuration data")
else:
logger.info("-- Board is %s (rev %s)",
IMURegisters.model_type(self.board_id.type),
self.board_id.hw_rev)
logger.info("-- Firmware %s.%s", self.board_id.fw_ver_major,
self.board_id.fw_ver_minor)
log_error = True
# Calculate delay to match configured update rate
# Note: some additional time is removed from the
# 1/update_rate value to ensure samples are not
# dropped, esp. at higher update rates.
update_rate = 1.0/(self.update_rate_hz & 0xFF)
if update_rate > DELAY_OVERHEAD_SECONDS:
update_rate -= DELAY_OVERHEAD_SECONDS
logger.info("-- Update rate: %shz (%.4fs)",
self.update_rate_hz, update_rate)
# IO Loop
while not self._stop:
if self.board_state.update_rate_hz != self.update_rate_hz:
self.setUpdateRateHz(self.update_rate_hz)
try:
self.getCurrentData()
except IOError:
if log_error:
logger.exception("Error getting data")
log_error = False
else:
log_error = True
Timer.delay(update_rate)
except Exception:
logger.exception("Unhandled exception in NavX thread")
finally:
logger.info("NavX i/o thread exiting")
def getConfiguration(self):
success = False
retry_count = 0
while retry_count < 5 and not success:
try:
config = self.io_provider.read(IMURegisters.NAVX_REG_WHOAMI,
IMURegisters.NAVX_REG_SENSOR_STATUS_H+1)
except IOError as e:
logger.warning("Error reading configuration data, retrying (%s)", e)
success = False
Timer.delay(0.5)
else:
board_id = self.board_id
board_id.hw_rev = config[IMURegisters.NAVX_REG_HW_REV]
board_id.fw_ver_major = config[IMURegisters.NAVX_REG_FW_VER_MAJOR]
board_id.fw_ver_minor = config[IMURegisters.NAVX_REG_FW_VER_MINOR]
board_id.type = config[IMURegisters.NAVX_REG_WHOAMI]
self.notify_sink._setBoardID(board_id)
board_state = self.board_state
board_state.cal_status = config[IMURegisters.NAVX_REG_CAL_STATUS]
board_state.op_status = config[IMURegisters.NAVX_REG_OP_STATUS]
board_state.selftest_status = config[IMURegisters.NAVX_REG_SELFTEST_STATUS]
board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_SENSOR_STATUS_L)
board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L)
board_state.accel_fsr_g = config[IMURegisters.NAVX_REG_ACCEL_FSR_G]
board_state.update_rate_hz = config[IMURegisters.NAVX_REG_UPDATE_RATE_HZ]
board_state.capability_flags = AHRSProtocol.decodeBinaryUint16(config,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L)
self.notify_sink._setBoardState(board_state)
success = True
retry_count += 1
return success
def getCurrentData(self):
first_address = IMURegisters.NAVX_REG_UPDATE_RATE_HZ
displacement_registers = self.board_capabilities._isDisplacementSupported()
# If firmware supports displacement data, acquire it - otherwise implement
# similar (but potentially less accurate) calculations on this processor.
if displacement_registers:
read_count = IMURegisters.NAVX_REG_LAST + 1 - first_address
else:
read_count = IMURegisters.NAVX_REG_QUAT_OFFSET_Z_H + 1 - first_address
curr_data = self.io_provider.read(first_address, read_count)
sensor_timestamp = AHRSProtocol.decodeBinaryUint32(curr_data, IMURegisters.NAVX_REG_TIMESTAMP_L_L-first_address)
if sensor_timestamp == self.last_sensor_timestamp:
return
self.last_sensor_timestamp = sensor_timestamp
ahrspos_update = self.ahrspos_update
ahrspos_update.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS - first_address]
ahrspos_update.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS - first_address]
        ahrspos_update.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS - first_address]
ahrspos_update.sensor_status = curr_data[IMURegisters.NAVX_REG_SENSOR_STATUS_L - first_address]
ahrspos_update.yaw = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_YAW_L-first_address)
ahrspos_update.pitch = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_PITCH_L-first_address)
ahrspos_update.roll = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_ROLL_L-first_address)
ahrspos_update.compass_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_HEADING_L-first_address)
ahrspos_update.mpu_temp_c = AHRSProtocol.decodeProtocolSignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_MPU_TEMP_C_L - first_address)
ahrspos_update.world_linear_accel_x = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_X_L-first_address)
ahrspos_update.world_linear_accel_y = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Y_L-first_address)
ahrspos_update.world_linear_accel_z = AHRSProtocol.decodeProtocolSignedThousandthsFloat(curr_data, IMURegisters.NAVX_REG_LINEAR_ACC_Z_L-first_address)
ahrspos_update.altitude = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_ALTITUDE_D_L - first_address)
ahrspos_update.baro_pressure = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_PRESSURE_DL - first_address)
ahrspos_update.fused_heading = AHRSProtocol.decodeProtocolUnsignedHundredthsFloat(curr_data, IMURegisters.NAVX_REG_FUSED_HEADING_L-first_address)
ahrspos_update.quaternionW = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_W_L-first_address)/ 32768.0
ahrspos_update.quaternionX = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_X_L-first_address)/ 32768.0
ahrspos_update.quaternionY = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Y_L-first_address)/ 32768.0
ahrspos_update.quaternionZ = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_QUAT_Z_L-first_address)/ 32768.0
if displacement_registers:
ahrspos_update.vel_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_X_I_L-first_address)
ahrspos_update.vel_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Y_I_L-first_address)
ahrspos_update.vel_z = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_VEL_Z_I_L-first_address)
ahrspos_update.disp_x = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_X_I_L-first_address)
ahrspos_update.disp_y = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_Y_I_L-first_address)
ahrspos_update.disp_z = AHRSProtocol.decodeProtocol1616Float(curr_data, IMURegisters.NAVX_REG_DISP_Z_I_L-first_address)
self.notify_sink._setAHRSPosData(ahrspos_update, sensor_timestamp)
else:
self.notify_sink._setAHRSData(ahrspos_update, sensor_timestamp)
board_state = self.board_state
board_state.cal_status = curr_data[IMURegisters.NAVX_REG_CAL_STATUS-first_address]
board_state.op_status = curr_data[IMURegisters.NAVX_REG_OP_STATUS-first_address]
board_state.selftest_status = curr_data[IMURegisters.NAVX_REG_SELFTEST_STATUS-first_address]
board_state.sensor_status = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_SENSOR_STATUS_L-first_address)
board_state.update_rate_hz = curr_data[IMURegisters.NAVX_REG_UPDATE_RATE_HZ-first_address]
        board_state.gyro_fsr_dps = AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_GYRO_FSR_DPS_L-first_address)
        board_state.accel_fsr_g = curr_data[IMURegisters.NAVX_REG_ACCEL_FSR_G-first_address]
board_state.capability_flags= AHRSProtocol.decodeBinaryUint16(curr_data,IMURegisters.NAVX_REG_CAPABILITY_FLAGS_L-first_address)
self.notify_sink._setBoardState(board_state)
raw_data_update = self.raw_data_update
raw_data_update.raw_gyro_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_X_L-first_address)
raw_data_update.raw_gyro_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Y_L-first_address)
raw_data_update.raw_gyro_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_GYRO_Z_L-first_address)
raw_data_update.raw_accel_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_X_L-first_address)
raw_data_update.raw_accel_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Y_L-first_address)
raw_data_update.raw_accel_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_ACC_Z_L-first_address)
raw_data_update.cal_mag_x = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_X_L-first_address)
raw_data_update.cal_mag_y = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Y_L-first_address)
raw_data_update.cal_mag_z = AHRSProtocol.decodeBinaryInt16(curr_data, IMURegisters.NAVX_REG_MAG_Z_L-first_address)
        raw_data_update.mpu_temp_c = ahrspos_update.mpu_temp_c
self.notify_sink._setRawData(raw_data_update, sensor_timestamp)
self.last_update_time = Timer.getFPGATimestamp()
self.byte_count += len(curr_data)
self.update_count += 1
def isConnected(self):
time_since_last_update = Timer.getFPGATimestamp() - self.last_update_time
return time_since_last_update <= IO_TIMEOUT_SECONDS
def getByteCount(self):
return self.byte_count
def getUpdateCount(self):
return self.update_count
def setUpdateRateHz(self, update_rate_hz):
self.io_provider.write(IMURegisters.NAVX_REG_UPDATE_RATE_HZ, update_rate_hz)
def zeroYaw(self):
self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL,
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_YAW )
self.notify_sink._yawResetComplete()
def zeroDisplacement(self):
self.io_provider.write( IMURegisters.NAVX_REG_INTEGRATION_CTL,
(AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_X |
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Y |
AHRSProtocol.NAVX_INTEGRATION_CTL_RESET_DISP_Z ) )
| 1.695313 | 2 |
RigolWFM/channel.py | wvdv2002/RigolWFM | 0 | 3855 | #pylint: disable=invalid-name
#pylint: disable=too-many-instance-attributes
#pylint: disable=too-many-return-statements
#pylint: disable=too-many-statements
"""
Class structure and methods for an oscilloscope channel.
The idea is to collect all the relevant information from all the Rigol
scope waveforms into a single structure that can be handled in a uniform
and consistent manner.
Specifically this lets one just use
channel.times : numpy array of signal times
channel.volts : numpy array of signal voltages
or the stringification method to describe a channel
print(channel)
"""
from enum import Enum
import numpy as np
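# Hedged usage sketch (assumes a parsed Wfm object `w` produced elsewhere in
# RigolWFM; the scope string must match one handled in Channel.__init__ below):
#
#     ch = Channel(w, channel_number=1, scope='wfm1000z')
#     print(ch)                        # human-readable summary
#     plot(ch.times, ch.volts)         # times/volts are numpy arrays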
class UnitEnum(Enum):
"""Enumerated units for scopes without them."""
w = 0
a = 1
v = 2
u = 3
def best_scale(number):
"""Scale and units for a number with proper prefix."""
absnr = abs(number)
if absnr == 0:
return 1, ' '
if absnr < 0.99999999e-9:
return 1e12, 'p'
if absnr < 0.99999999e-6:
return 1e9, 'n'
if absnr < 0.99999999e-3:
return 1e6, 'µ'
if absnr < 0.99999999:
return 1e3, 'm'
if absnr < 0.99999999e3:
return 1, ' '
if absnr < 0.99999999e6:
return 1e-3, 'k'
    if absnr < 0.99999999e9:
return 1e-6, 'M'
return 1e-9, 'G'
def engineering_string(number, n_digits):
"""Format number with proper prefix."""
scale, prefix = best_scale(number)
fformat = "%%.%df %%s" % n_digits
s = fformat % (number * scale, prefix)
return s
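# Illustrative: engineering_string(0.00345, 2) -> '3.45 m',
# engineering_string(2.2e7, 1) -> '22.0 M'.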
def _channel_bytes(channel_number, w):
"""
Return right series of bytes for a channel for 1000Z scopes.
Waveform points are interleaved stored in memory when two or more
channels are saved. This unweaves them.
Args:
channel_number: the number of enabled channels before this one
w: original waveform object
Returns
byte array for specified channel
"""
offset = 0
if w.header.stride == 2: # byte pattern CHx CHy
# use odd bytes when this is the second enabled channel
if any([w.header.ch[i].enabled for i in range(channel_number-1)]):
offset = 1
elif w.header.stride == 4: # byte pattern CH4 CH3 CH2 CH1
offset = 4 - channel_number
data = np.frombuffer(w.data.raw, dtype=np.uint8)
raw_bytes = data[offset::w.header.stride]
return raw_bytes
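# Worked example (illustrative): with two channels enabled and stride == 2 the
# buffer is laid out as [ch1, ch2, ch1, ch2, ...]; channel 2 (one enabled
# channel before it) gets offset 1 and is recovered with data[1::2], while
# channel 1 uses data[0::2].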
class Channel():
"""Base class for a single channel."""
def __init__(self, w, channel_number, scope, selected='1234'):
"""
Initialize a Channel Object.
Args:
w: Wfm object
channel_number: 1, 2, 3, or 4
scope: string describing scope
selected: string with channels chosen by user
Returns:
Channel object
"""
self.channel_number = channel_number
self.name = "CH %d" % channel_number
self.waveform = w
self.seconds_per_point = w.header.seconds_per_point
self.firmware = 'unknown'
self.unit = UnitEnum.v
self.points = 0
self.raw = None
self.volts = None
self.times = None
self.coupling = 'unknown'
self.roll_stop = 0
self.time_offset = 0
self.time_scale = 1
self.enabled = False
self.enabled_and_selected = False
self.volt_scale = 1
self.volt_offset = 0
self.y_scale = 1
self.y_offset = 0
self.volt_per_division = 1
self.probe_value = 1
self.inverted = False
# determine if this channel is one of those chosen by user
chosen = selected.find(str(channel_number)) != -1
if channel_number <= len(w.header.ch):
channel = w.header.ch[channel_number-1]
self.enabled = channel.enabled
self.enabled_and_selected = channel.enabled and chosen
self.volt_scale = channel.volt_scale
self.volt_offset = channel.volt_offset
self.y_scale = channel.volt_scale
self.y_offset = channel.volt_offset
self.volt_per_division = channel.volt_per_division
self.probe_value = channel.probe_value
self.unit = channel.unit
self.inverted = channel.inverted
if scope == 'wfm1000c':
self.ds1000c(w, channel_number)
elif scope == 'wfm1000d':
self.ds1000d(w, channel_number)
elif scope == 'wfm1000e':
self.ds1000e(w, channel_number)
elif scope == 'wfm1000z':
self.ds1000z(w, channel_number)
elif scope == 'wfm2000':
self.ds2000(w, channel_number)
elif scope == 'wfm4000':
self.ds4000(w, channel_number)
elif scope == 'wfm6000':
self.ds6000(w, channel_number)
def __str__(self):
"""Describe this channel."""
s = " Channel %d:\n" % self.channel_number
s += " Coupling = %8s\n" % self.coupling.rjust(7, ' ')
s += " Scale = %10sV/div\n" % engineering_string(self.volt_per_division, 2)
s += " Offset = %10sV\n" % engineering_string(self.volt_offset, 2)
s += " Probe = %7gX\n" % self.probe_value
s += " Inverted = %8s\n\n" % self.inverted
s += " Time Base = %10ss/div\n" % engineering_string(self.time_scale, 3)
s += " Offset = %10ss\n" % engineering_string(self.time_offset, 3)
s += " Delta = %10ss/point\n" % engineering_string(self.seconds_per_point, 3)
s += " Points = %8d\n\n" % self.points
if self.enabled_and_selected:
s += " Count = [%9d,%9d,%9d ... %9d,%9d]\n" % (
1, 2, 3, self.points-1, self.points)
s += " Raw = [%9d,%9d,%9d ... %9d,%9d]\n" % (
self.raw[0], self.raw[1], self.raw[2], self.raw[-2], self.raw[-1])
t = [engineering_string(self.times[i], 3) +
"s" for i in [0, 1, 2, -2, -1]]
s += " Times = [%9s,%9s,%9s ... %9s,%9s]\n" % (
t[0], t[1], t[2], t[-2], t[-1])
v = [engineering_string(self.volts[i], 2) +
"V" for i in [0, 1, 2, -2, -1]]
s += " Volts = [%9s,%9s,%9s ... %9s,%9s]\n" % (
v[0], v[1], v[2], v[-2], v[-1])
return s
def calc_times_and_volts(self):
"""Calculate the times and voltages for this channel."""
if self.enabled_and_selected:
self.volts = self.y_scale * (127.0 - self.raw) - self.y_offset
h = self.points * self.seconds_per_point / 2
self.times = np.linspace(-h, h, self.points) + self.time_offset
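    # Illustrative: with the 8-bit encoding above a raw value of 127 maps to
    # -y_offset volts, and each count below 127 adds y_scale volts
    # (e.g. raw=125, y_scale=0.04, y_offset=0.0 -> 0.08 V).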
def ds1000c(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000d(self, w, channel_number):
"""Interpret waveform data for 1000CD series scopes."""
self.time_scale = 1.0e-12 * w.header.time_scale
self.time_offset = 1.0e-12 * w.header.time_offset
if channel_number == 1:
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
if channel_number == 2:
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000e(self, w, channel_number):
"""Interpret waveform data for 1000D and 1000E series scopes."""
self.roll_stop = w.header.roll_stop
if channel_number == 1:
self.time_offset = w.header.ch1_time_offset
self.time_scale = w.header.ch1_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch1)
self.raw = np.frombuffer(w.data.ch1, dtype=np.uint8)
elif channel_number == 2:
self.time_offset = w.header.ch2_time_offset
self.time_scale = w.header.ch2_time_scale
if self.enabled_and_selected:
self.points = len(w.data.ch2)
self.raw = np.frombuffer(w.data.ch2, dtype=np.uint8)
self.calc_times_and_volts()
def ds1000z(self, w, channel_number):
"""Interpret waveform for the Rigol DS1000Z series."""
self.time_scale = w.header.time_scale
self.time_offset = w.header.time_offset
self.points = w.header.points
self.stride = w.header.stride
self.firmware = w.preheader.firmware_version
self.probe = w.header.ch[channel_number-1].probe_value
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = w.header.ch[channel_number-1].y_scale
self.y_offset = w.header.ch[channel_number-1].y_offset
if self.enabled_and_selected:
self.raw = _channel_bytes(channel_number, w)
self.points = len(self.raw)
self.calc_times_and_volts()
def ds2000(self, w, channel_number):
"""Interpret waveform for the Rigol DS2000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.storage_depth
self.firmware = w.header.firmware_version
self.unit = UnitEnum(w.header.ch[channel_number-1].unit_actual)
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds4000(self, w, channel_number):
"""Interpret waveform for the Rigol DS4000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.y_scale = -self.volt_scale
self.y_offset = self.volt_offset
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.frombuffer(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.frombuffer(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.frombuffer(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.frombuffer(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
def ds6000(self, w, channel_number):
"""Interpret waveform for the Rigol DS6000 series."""
self.time_offset = w.header.time_offset
self.time_scale = w.header.time_scale
self.points = w.header.points
self.firmware = w.header.firmware_version
self.coupling = w.header.ch[channel_number-1].coupling.name.upper()
self.unit = w.header.ch[channel_number-1].unit
if self.enabled_and_selected:
if channel_number == 1:
self.raw = np.array(w.header.raw_1, dtype=np.uint8)
if channel_number == 2:
self.raw = np.array(w.header.raw_2, dtype=np.uint8)
if channel_number == 3:
self.raw = np.array(w.header.raw_3, dtype=np.uint8)
if channel_number == 4:
self.raw = np.array(w.header.raw_4, dtype=np.uint8)
self.calc_times_and_volts()
| 2.953125 | 3 |
configs/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu.py | esf-bt2020/mmdetection | 0 | 3856 | _base_ = '../faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py'
model = dict(
backbone=dict(
num_stages=4,
#frozen_stages=4
),
roi_head=dict(
bbox_head=dict(
num_classes=3
)
)
)
dataset_type = 'COCODataset'
classes = ('luchs', 'rotfuchs', 'wolf')
data = dict(
train=dict(
img_prefix='raubtierv2a/train/',
classes=classes,
ann_file='raubtierv2a/train/_annotations.coco.json'),
val=dict(
img_prefix='raubtierv2a/valid/',
classes=classes,
ann_file='raubtierv2a/valid/_annotations.coco.json'),
test=dict(
img_prefix='raubtierv2a/test/',
classes=classes,
ann_file='raubtierv2a/test/_annotations.coco.json'))
#optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) #original (8x2=16)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) #(4x2=8) 4 GPUs
#optimizer = dict(type='SGD', lr=0.0025, momentum=0.9, weight_decay=0.0001) #(1x2=2)
total_epochs=24
evaluation = dict(classwise=True, interval=1, metric='bbox')
work_dir = '/media/storage1/projects/WilLiCam/checkpoint_workdir/raubtierv2a/faster_rcnn_x101_64x4d_fpn_1x_raubtierv2a_nofreeze_4gpu'
#http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth
load_from = 'checkpoints/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth'
| 1.757813 | 2 |
driver/python/setup.py | wbaweto/QConf | 2,056 | 3857 | <filename>driver/python/setup.py
from distutils.core import setup, Extension
setup(name = 'qconf_py', version = '1.2.2', ext_modules = [Extension('qconf_py', ['lib/python_qconf.cc'],
include_dirs=['/usr/local/include/qconf'],
extra_objects=['/usr/local/qconf/lib/libqconf.a']
)])
| 1.3125 | 1 |
abc153/d.py | Lockdef/kyopro-code | 0 | 3858 | h = int(input())
# Find the smallest k = i - 1 with 2 ** k > h: that is the number of levels
# in the monster-splitting tree.
i = 1
a = 1
while h >= a:
    a = 2 ** i
    i += 1
# Total attacks needed: 2**0 + 2**1 + ... + 2**(i - 2) = 2**(i - 1) - 1.
c = 1
for j in range(1, i - 1):
    c += 2 ** j
print(c)
| 3.4375 | 3 |
demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py | Natureshadow/OpenGoPro | 210 | 3859 | <filename>demos/python/sdk_wireless_camera_control/open_gopro/demos/log_battery.py<gh_stars>100-1000
# log_battery.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:45 PM
"""Example to continuously read the battery (with no Wifi connection)"""
import csv
import time
import logging
import argparse
import threading
from pathlib import Path
from datetime import datetime
from dataclasses import dataclass
from typing import Optional, Tuple, Literal, List
from rich.console import Console
from open_gopro import GoPro
from open_gopro.constants import StatusId
from open_gopro.util import setup_logging, set_logging_level
logger = logging.getLogger(__name__)
console = Console() # rich console printer
BarsType = Literal[0, 1, 2, 3]
@dataclass
class Sample:
"""Simple class to store battery samples"""
index: int
percentage: int
bars: BarsType
def __post_init__(self) -> None:
self.time = datetime.now()
def __str__(self) -> str: # pylint: disable=missing-return-doc
return f"Index {self.index} @ time {self.time.strftime('%H:%M:%S')} --> bars: {self.bars}, percentage: {self.percentage}"
SAMPLE_INDEX = 0
SAMPLES: List[Sample] = []
def dump_results_as_csv(location: Path) -> None:
"""Write all of the samples to a csv file
Args:
location (Path): File to write to
"""
console.print(f"Dumping results as CSV to {location}")
with open(location, mode="w") as f:
w = csv.writer(f, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
w.writerow(["index", "time", "percentage", "bars"])
initial_time = SAMPLES[0].time
for s in SAMPLES:
w.writerow([s.index, (s.time - initial_time).seconds, s.percentage, s.bars])
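# Illustrative CSV produced by dump_results_as_csv (values are made up); `time`
# is expressed in seconds since the first sample:
#   index,time,percentage,bars
#   0,0,86,3
#   1,142,85,3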
def process_battery_notifications(gopro: GoPro, initial_bars: BarsType, initial_percentage: int) -> None:
"""Separate thread to continuously check for and store battery notifications.
If the CLI parameter was set to poll, this isn't used.
Args:
gopro (GoPro): instance to get updates from
initial_bars (BarsType): Initial bars level when notifications were enabled
initial_percentage (int): Initial percentage when notifications were enabled
"""
last_percentage = initial_percentage
last_bars = initial_bars
while True:
# Block until we receive an update
notification = gopro.get_update()
# Update data points if they have changed
last_percentage = (
notification.data[StatusId.INT_BATT_PER]
if StatusId.INT_BATT_PER in notification.data
else last_percentage
)
last_bars = (
notification.data[StatusId.BATT_LEVEL] if StatusId.BATT_LEVEL in notification.data else last_bars
)
# Append and print sample
global SAMPLE_INDEX
SAMPLES.append(Sample(index=SAMPLE_INDEX, percentage=last_percentage, bars=last_bars))
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
def main() -> int:
"""Main program functionality
Returns:
int: program return code
"""
identifier, log_location, poll = parse_arguments()
global logger
logger = setup_logging(logger, log_location)
global SAMPLE_INDEX
gopro: Optional[GoPro] = None
return_code = 0
try:
with GoPro(identifier, enable_wifi=False) as gopro:
set_logging_level(logger, logging.ERROR)
            # Set up notifications if we are not polling
if poll is None:
console.print("Configuring battery notifications...")
# Enable notifications of the relevant battery statuses. Also store initial values.
bars = gopro.ble_status.batt_level.register_value_update().flatten
percentage = gopro.ble_status.int_batt_per.register_value_update().flatten
# Start a thread to handle asynchronous battery level notifications
threading.Thread(
target=process_battery_notifications, args=(gopro, bars, percentage), daemon=True
).start()
with console.status("[bold green]Receiving battery notifications until it dies..."):
# Sleep forever, allowing notification handler thread to deal with battery level notifications
while True:
time.sleep(1)
# Otherwise, poll
else:
with console.status("[bold green]Polling the battery until it dies..."):
while True:
SAMPLES.append(
Sample(
index=SAMPLE_INDEX,
percentage=gopro.ble_status.int_batt_per.get_value().flatten,
bars=gopro.ble_status.batt_level.get_value().flatten,
)
)
console.print(str(SAMPLES[-1]))
SAMPLE_INDEX += 1
time.sleep(poll)
except Exception as e: # pylint: disable=broad-except
logger.error(repr(e))
return_code = 1
except KeyboardInterrupt:
logger.warning("Received keyboard interrupt. Shutting down...")
finally:
if len(SAMPLES) > 0:
csv_location = Path(log_location.parent) / "battery_results.csv"
dump_results_as_csv(csv_location)
if gopro is not None:
gopro.close()
console.print("Exiting...")
return return_code # pylint: disable=lost-exception
def parse_arguments() -> Tuple[str, Path, Optional[int]]:
"""Parse command line arguments
Returns:
        Tuple[str, Path, Optional[int]]: (identifier, path to save log, poll interval in seconds or None)
"""
parser = argparse.ArgumentParser(
description="Connect to the GoPro via BLE only and continuously read the battery (either by polling or notifications)."
)
parser.add_argument(
"-i",
"--identifier",
type=str,
help="Last 4 digits of GoPro serial number, which is the last 4 digits of the default camera SSID. \
If not used, first discovered GoPro will be connected to",
default=None,
)
parser.add_argument(
"-l",
"--log",
type=Path,
help="Location to store detailed log",
default="log_battery.log",
)
parser.add_argument(
"-p",
"--poll",
type=int,
help="Set to poll the battery at a given interval. If not set, battery level will be notified instead. Defaults to notifications.",
default=None,
)
args = parser.parse_args()
return args.identifier, args.log, args.poll
if __name__ == "__main__":
main()
| 2.703125 | 3 |
tumbleweed/models.py | mcroydon/django-tumbleweed | 1 | 3860 | <reponame>mcroydon/django-tumbleweed<filename>tumbleweed/models.py
# These are not the droids you are looking for. | 1.164063 | 1 |
xos/hpc_observer/steps/sync_originserver.py | wathsalav/xos | 0 | 3861 | import os
import sys
import base64
from django.db.models import F, Q
from xos.config import Config
from observer.syncstep import SyncStep
from core.models import Service
from hpc.models import ServiceProvider, ContentProvider, CDNPrefix, OriginServer
from util.logger import Logger, logging
# hpclibrary will be in steps/..
parentdir = os.path.join(os.path.dirname(__file__),"..")
sys.path.insert(0,parentdir)
from hpclib import HpcLibrary
logger = Logger(level=logging.INFO)
class SyncOriginServer(SyncStep, HpcLibrary):
provides=[OriginServer]
requested_interval=0
def __init__(self, **args):
SyncStep.__init__(self, **args)
HpcLibrary.__init__(self)
def fetch_pending(self, deleted):
#self.consistency_check()
return SyncStep.fetch_pending(self, deleted)
def consistency_check(self):
# set to true if something changed
result=False
# sanity check to make sure our PS objects have CMI objects behind them
all_ors_ids = [x["origin_server_id"] for x in self.client.onev.ListAll("OriginServer")]
for ors in OriginServer.objects.all():
if (ors.origin_server_id is not None) and (ors.origin_server_id not in all_ors_ids):
# we have an origin server ID, but it doesn't exist in the CMI
# something went wrong
# start over
logger.info("origin server %s was not found on CMI" % ors.origin_server_id)
ors.origin_server_id=None
ors.save()
result = True
return result
def sync_record(self, ors):
logger.info("sync'ing origin server %s" % str(ors))
if (not ors.contentProvider) or (not ors.contentProvider.content_provider_id):
return
cpid = ors.contentProvider.content_provider_id
        # validation requires the URL to start with http://
url = ors.url
if not url.startswith("http://"):
url = "http://" + url
ors_dict = {"authenticated_content": ors.authenticated, "zone_redirects": ors.redirects, "content_provider_id": cpid, "url": url, "service_type": "HyperCache", "caching_type": "Optimistic", "description": ors.description}
#print os_dict
if not ors.origin_server_id:
id = self.client.onev.Create("OriginServer", ors_dict)
ors.origin_server_id = id
else:
self.client.onev.Update("OriginServer", ors.origin_server_id, ors_dict)
# ... something breaks (analytics) if the URL starts with http://, so we
# change it in cob after we added it via onev.
url = url[7:]
self.client.cob.UpdateContent(ors.origin_server_id, {"url": url})
ors.silent = True
ors.save()
def delete_record(self, m):
if m.origin_server_id is not None:
self.client.onev.Delete("OriginServer", m.origin_server_id)
| 1.929688 | 2 |
main.py | aroxby/pixel-processor | 0 | 3862 | #!/usr/bin/env python3
from PIL import Image
def transform(r, g, b):
    # Remap each pixel: (r, g, b) -> (r // 2, b, g // 2),
    # i.e. swap green/blue and dim red and the new blue channel.
    tmp = b
    b = g // 2
    g = tmp
    r = r // 2
    return r, g, b
def main():
im = Image.open('blue-flames.jpg')
input_pixels = im.getdata()
    output_pixels = tuple(transform(*pixel) for pixel in input_pixels)
im.putdata(output_pixels)
im.save('green-flames.png')
if __name__ == '__main__':
main()
| 3.4375 | 3 |
scipy/weave/examples/swig2_example.py | lesserwhirls/scipy-cwt | 8 | 3863 | <reponame>lesserwhirls/scipy-cwt<filename>scipy/weave/examples/swig2_example.py
"""Simple example to show how to use weave.inline on SWIG2 wrapped
objects. SWIG2 refers to SWIG versions >= 1.3.
To run this example you must build the trivial SWIG2 extension called
swig2_ext. To do this you need to do something like this::
$ swig -c++ -python -I. -o swig2_ext_wrap.cxx swig2_ext.i
$ g++ -Wall -O2 -I/usr/include/python2.3 -fPIC -I. -c \
-o swig2_ext_wrap.os swig2_ext_wrap.cxx
$ g++ -shared -o _swig2_ext.so swig2_ext_wrap.os \
-L/usr/lib/python2.3/config
The files swig2_ext.i and swig2_ext.h are included in the same
directory that contains this file.
Note that weave's SWIG2 support works fine whether SWIG_COBJECT_TYPES
are used or not.
Author: <NAME>
Copyright (c) 2004, <NAME>
License: BSD Style.
"""
# Import our SWIG2 wrapped library
import swig2_ext
import scipy.weave as weave
from scipy.weave import swig2_spec, converters
# SWIG2 support is not enabled by default. We do this by adding the
# swig2 converter to the default list of converters.
converters.default.insert(0, swig2_spec.swig2_converter())
def test():
"""Instantiate the SWIG wrapped object and then call its method
from C++ using weave.inline
"""
a = swig2_ext.A()
b = swig2_ext.foo() # This will be an APtr instance.
b.thisown = 1 # Prevent memory leaks.
code = """a->f();
b->f();
"""
weave.inline(code, ['a', 'b'], include_dirs=['.'],
headers=['"swig2_ext.h"'], verbose=1)
if __name__ == "__main__":
test()
| 2.34375 | 2 |
src/simplify.py | denghz/Probabilistic-Programming | 0 | 3864 | <filename>src/simplify.py
from wolframclient.language.expression import WLSymbol
from nnDiff import *
def parseGlobalSymbol(s):
if isinstance(s, numbers.Number):
return s
if isinstance(s, WLSymbol):
if s.name == 'E':
return 'E'
else:
return s.name[7:]
def parse(exp):
symbol = parseGlobalSymbol(exp)
    if symbol is not None:
return [symbol]
else:
f = str(exp.head)
args = list(map(parse, exp.args))
res = []
if (f == "Power"):
res1 = []
p = args[1][0]
e = args[0]
if e == ['E']:
return ['Exp'] + args[1]
if p < 0:
res = ["Inv"]
p = -p
if p >= 2:
p = p - 2
res1 = ["Times"] + e + e
while p > 0:
p = p - 1
res1 = ["Times"] + res1 + e
return res + res1
else:
return res + e
else:
if len(args) == 1:
return [f] + args[0]
elif len(args) >= 2:
res = [f] + args[0] + args[1]
args = args[2:]
for arg in args:
res = [f] + res + arg
return res
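# Worked examples of the Power handling above (prefix notation, illustrative):
#   x**3  -> ['Times', 'Times', 'x', 'x', 'x']
#   x**-2 -> ['Inv', 'Times', 'x', 'x']
#   E**x  -> ['Exp', 'x']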
def simplify(exp):
with WolframLanguageSession() as session:
session.evaluate("Inv[zzz_] := 1/zzz")
f = wlexpr(str(Func(exp)))
getfreeVars = wlexpr("Reduce`FreeVariables")
freeVariables = session.evaluate(getfreeVars(f))
ass = wl.Element(wl.Alternatives(freeVariables), wl.Reals)
wmres = session.evaluate(wl.FullSimplify(f,ass))
print(wmres)
res = parse(wmres)
return res
if __name__ == "__main__":
exp = sys.argv[1:]
if exp == []:
exp = ["Sin", "x"]
res = map(str,simplify(exp))
print(' '.join(res), file=sys.stderr) | 3.015625 | 3 |
setup.py | EdWard680/python-firetv | 0 | 3865 | from setuptools import setup
setup(
name='firetv',
version='1.0.7',
description='Communicate with an Amazon Fire TV device via ADB over a network.',
url='https://github.com/happyleavesaoc/python-firetv/',
license='MIT',
author='happyleaves',
author_email='<EMAIL>',
packages=['firetv'],
install_requires=['pycryptodome', 'rsa', 'adb-homeassistant', 'pure-python-adb-homeassistant'],
extras_require={
'firetv-server': ['Flask>=0.10.1', 'PyYAML>=3.12']
},
entry_points={
'console_scripts': [
'firetv-server = firetv.__main__:main'
]
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
]
)
| 1.523438 | 2 |
neo/io/exampleio.py | Mario-Kart-Felix/python-neo | 199 | 3866 | <gh_stars>100-1000
"""
neo.io has been split into a 2-level API:
  * neo.io: this API gives neo objects
  * neo.rawio: this API gives the raw data as they are in the files.
Developers are encouraged to use neo.rawio.
Once that is done, the corresponding neo.io class is generated automatically
with the following kind of code.
Author: sgarcia
"""
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.examplerawio import ExampleRawIO
class ExampleIO(ExampleRawIO, BaseFromRaw):
name = 'example IO'
description = "Fake IO"
    # This is an important choice when there are several channels.
    # 'split-all' : one AnalogSignal per channel
    # 'group-by-same-units' : one 2D AnalogSignal for each group of channels with the same units
_prefered_signal_group_mode = 'group-by-same-units'
def __init__(self, filename=''):
ExampleRawIO.__init__(self, filename=filename)
BaseFromRaw.__init__(self, filename)
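# Hedged usage sketch (ExampleRawIO fabricates its data, so the filename is not
# actually read; read_block() is inherited from BaseFromRaw):
#
#     io = ExampleIO('any_name.fake')
#     bl = io.read_block(lazy=False)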
| 2.546875 | 3 |
scrapyproject/migrations/0003_auto_20170209_1025.py | sap9433/Distributed-Multi-User-Scrapy-System-with-a-Web-UI | 108 | 3867 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapyproject', '0002_auto_20170208_1738'),
]
operations = [
migrations.AlterField(
model_name='project',
name='link_generator',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='project',
name='scraper_function',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='project',
name='settings',
field=models.TextField(blank=True),
),
]
| 1.460938 | 1 |
src/cart/forms.py | cbsBiram/xarala__ssr | 0 | 3868 | from django import forms
from django.utils.translation import gettext_lazy as _
COURSE_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)]
class CartAddCourseForm(forms.Form):
quantity = forms.TypedChoiceField(
choices=COURSE_QUANTITY_CHOICES, coerce=int, label=_("Quantité")
)
override = forms.BooleanField(
required=False, initial=False, widget=forms.HiddenInput
)
| 2.375 | 2 |
patches/datasets/__init__.py | sflippl/patches | 0 | 3869 | <reponame>sflippl/patches<gh_stars>0
"""Datasets of latent predictability tasks.
"""
from .pilgrimm import *
| 0.972656 | 1 |
appengine/components/tests/datastore_utils_properties_test.py | pombreda/swarming | 0 | 3870 | <filename>appengine/components/tests/datastore_utils_properties_test.py
#!/usr/bin/env python
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
import sys
import unittest
import test_env
test_env.setup_test_env()
from google.appengine.ext import ndb
from components.datastore_utils import properties
from support import test_case
class BP(ndb.Model):
prop = properties.BytesComputedProperty(lambda _: '\x00')
class DJP(ndb.Model):
prop = properties.DeterministicJsonProperty(json_type=dict)
class PropertiesTest(test_case.TestCase):
def test_DeterministicJsonProperty(self):
self.assertEqual({'a': 1}, DJP(prop={'a': 1}).prop)
DJP(prop={'a': 1}).put()
self.assertEqual({'a': 1}, DJP.query().get().prop)
with self.assertRaises(TypeError):
DJP(prop=[])
def test_BytesComputedProperty(self):
self.assertEqual('\x00', BP().prop)
BP().put()
self.assertEqual('\x00', BP.query().get().prop)
if __name__ == '__main__':
if '-v' in sys.argv:
unittest.TestCase.maxDiff = None
unittest.main()
| 2.1875 | 2 |
neutron/tests/unit/services/qos/test_qos_plugin.py | dangervon/neutron | 0 | 3871 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from keystoneauth1 import exceptions as ks_exc
import netaddr
from neutron_lib.api.definitions import qos
from neutron_lib.callbacks import events
from neutron_lib import constants as lib_constants
from neutron_lib import context
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import placement as pl_exc
from neutron_lib.exceptions import qos as qos_exc
from neutron_lib.objects import utils as obj_utils
from neutron_lib.plugins import constants as plugins_constants
from neutron_lib.plugins import directory
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.utils import net as net_utils
import os_resource_classes as orc
from oslo_config import cfg
from oslo_utils import uuidutils
import webob.exc
from neutron.exceptions import qos as neutron_qos_exc
from neutron.extensions import qos_pps_minimum_rule_alias
from neutron.extensions import qos_rules_alias
from neutron import manager
from neutron.objects import network as network_object
from neutron.objects import ports as ports_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import rule as rule_object
from neutron.services.qos import qos_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.services.qos import base
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
SERVICE_PLUGIN_KLASS = 'neutron.services.qos.qos_plugin.QoSPlugin'
class TestQosPlugin(base.BaseQosTestCase):
def setUp(self):
super(TestQosPlugin, self).setUp()
self.setup_coreplugin(load_plugins=False)
mock.patch('neutron.objects.db.api.create_object').start()
mock.patch('neutron.objects.db.api.update_object').start()
mock.patch('neutron.objects.db.api.delete_object').start()
mock.patch('neutron.objects.db.api.get_object').start()
_mock_qos_load_attr = mock.patch(
'neutron.objects.qos.policy.QosPolicy.obj_load_attr')
self.mock_qos_load_attr = _mock_qos_load_attr.start()
# We don't use real models as per mocks above. We also need to mock-out
# methods that work with real data types
mock.patch(
'neutron.objects.base.NeutronDbObject.modify_fields_from_db'
).start()
mock.patch.object(policy_object.QosPolicy, 'unset_default').start()
mock.patch.object(policy_object.QosPolicy, 'set_default').start()
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
manager.init()
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.qos_plugin.driver_manager = mock.Mock()
self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi.push').start()
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.admin_ctxt = context.get_admin_context()
self.policy_data = {
'policy': {'id': uuidutils.generate_uuid(),
'project_id': uuidutils.generate_uuid(),
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}}
self.rule_data = {
'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'id': uuidutils.generate_uuid(),
'dscp_mark': 16},
'minimum_bandwidth_rule': {
'id': uuidutils.generate_uuid(),
'min_kbps': 10},
'packet_rate_limit_rule': {
'id': uuidutils.generate_uuid(),
'max_kpps': 20,
'max_burst_kpps': 130},
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'any'},
}
self.policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
self.rule = rule_object.QosBandwidthLimitRule(
self.ctxt, **self.rule_data['bandwidth_limit_rule'])
self.dscp_rule = rule_object.QosDscpMarkingRule(
self.ctxt, **self.rule_data['dscp_marking_rule'])
self.min_bw_rule = rule_object.QosMinimumBandwidthRule(
self.ctxt, **self.rule_data['minimum_bandwidth_rule'])
self.pps_rule = rule_object.QosPacketRateLimitRule(
self.ctxt, **self.rule_data['packet_rate_limit_rule'])
self.min_pps_rule = rule_object.QosMinimumPacketRateRule(
self.ctxt, **self.rule_data['minimum_packet_rate_rule'])
def _validate_driver_params(self, method_name, ctxt):
call_args = self.qos_plugin.driver_manager.call.call_args[0]
self.assertTrue(self.qos_plugin.driver_manager.call.called)
self.assertEqual(call_args[0], method_name)
self.assertEqual(call_args[1], ctxt)
self.assertIsInstance(call_args[2], policy_object.QosPolicy)
def _create_and_extend_port(self, min_bw_rules, min_pps_rules=None,
physical_network='public',
has_qos_policy=True, has_net_qos_policy=False,
request_groups_uuids=None):
network_id = uuidutils.generate_uuid()
self.port_data = {
'port': {'id': uuidutils.generate_uuid(),
'network_id': network_id}
}
if has_qos_policy:
self.port_data['port']['qos_policy_id'] = self.policy.id
elif has_net_qos_policy:
self.port_data['port']['qos_network_policy_id'] = self.policy.id
self.port = ports_object.Port(
self.ctxt, **self.port_data['port'])
port_res = {"binding:vnic_type": "normal"}
segment_mock = mock.MagicMock(network_id=network_id,
physical_network=physical_network)
min_pps_rules = min_pps_rules if min_pps_rules else []
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
return_value=[segment_mock]), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
'get_objects',
return_value=min_bw_rules), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
'get_objects',
return_value=min_pps_rules), \
mock.patch(
'uuid.uuid5',
return_value='fake_uuid',
side_effect=request_groups_uuids):
return qos_plugin.QoSPlugin._extend_port_resource_request(
port_res, self.port)
def _create_and_extend_ports(self, min_bw_rules, min_pps_rules=None,
physical_network='public',
request_groups_uuids=None):
network_id = uuidutils.generate_uuid()
ports_res = [
{
"resource_request": {
"port_id": uuidutils.generate_uuid(),
"qos_id": self.policy.id,
"network_id": network_id,
"vnic_type": "normal",
}
},
{
"resource_request": {
"port_id": uuidutils.generate_uuid(),
"qos_id": self.policy.id,
"network_id": network_id,
"vnic_type": "normal",
}
},
]
segment_mock = mock.MagicMock(network_id=network_id,
physical_network=physical_network)
min_pps_rules = min_pps_rules if min_pps_rules else []
with mock.patch('neutron.objects.network.NetworkSegment.get_objects',
return_value=[segment_mock]), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumBandwidthRule.'
'get_objects',
return_value=min_bw_rules), \
mock.patch(
'neutron.objects.qos.rule.QosMinimumPacketRateRule.'
'get_objects',
return_value=min_pps_rules), \
mock.patch(
'uuid.uuid5',
return_value='fake_uuid',
side_effect=request_groups_uuids):
return qos_plugin.QoSPlugin._extend_port_resource_request_bulk(
ports_res, None)
def test__extend_port_resource_request_min_bw_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule])
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_min_pps_rule(self):
port = self._create_and_extend_port([], [self.min_pps_rule])
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_min_bw_and_pps_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
request_groups_uuids = ['fake_uuid0', 'fake_uuid1']
min_bw_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kbps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_pps_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kpps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
self.ctxt, **min_bw_rule_ingress_data)
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
self.ctxt, **min_pps_rule_ingress_data)
port = self._create_and_extend_port(
[self.min_bw_rule, min_bw_rule_ingress],
[self.min_pps_rule, min_pps_rule_ingress],
request_groups_uuids=request_groups_uuids)
self.assertEqual(
2,
len(port['resource_request']['request_groups'])
)
self.assertIn(
{
'id': 'fake_uuid0',
'required':
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
},
port['resource_request']['request_groups']
)
self.assertIn(
{
'id': 'fake_uuid1',
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
},
},
port['resource_request']['request_groups']
)
self.assertEqual(
['fake_uuid0', 'fake_uuid1'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_non_min_bw_or_min_pps_rule(self):
port = self._create_and_extend_port([], [])
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_min_bw_non_provider_net(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule],
physical_network=None)
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_mix_rules_non_provider_net(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
port = self._create_and_extend_port([self.min_bw_rule],
[self.min_pps_rule],
physical_network=None)
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_bw_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
ports = self._create_and_extend_ports([self.min_bw_rule])
for port in ports:
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_pps_rule(self):
ports = self._create_and_extend_ports([], [self.min_pps_rule])
for port in ports:
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_PACKET_RATE_KILOPACKET_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_bulk_min_bw_and_pps_rule(self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_pps_rule.direction = lib_constants.EGRESS_DIRECTION
request_groups_uuids = ['fake_uuid0', 'fake_uuid1'] * 2
min_bw_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kbps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_pps_rule_ingress_data = {
'id': uuidutils.generate_uuid(),
'min_kpps': 20,
'direction': lib_constants.INGRESS_DIRECTION}
min_bw_rule_ingress = rule_object.QosMinimumBandwidthRule(
self.ctxt, **min_bw_rule_ingress_data)
min_pps_rule_ingress = rule_object.QosMinimumPacketRateRule(
self.ctxt, **min_pps_rule_ingress_data)
ports = self._create_and_extend_ports(
[self.min_bw_rule, min_bw_rule_ingress],
[self.min_pps_rule, min_pps_rule_ingress],
request_groups_uuids=request_groups_uuids)
for port in ports:
self.assertEqual(
2,
len(port['resource_request']['request_groups'])
)
self.assertIn(
{
'id': 'fake_uuid0',
'required':
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_BW_EGR_KILOBIT_PER_SEC: 10,
orc.NET_BW_IGR_KILOBIT_PER_SEC: 20},
},
port['resource_request']['request_groups']
)
self.assertIn(
{
'id': 'fake_uuid1',
'required': ['CUSTOM_VNIC_TYPE_NORMAL'],
'resources': {
orc.NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC: 10,
orc.NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC: 20,
},
},
port['resource_request']['request_groups']
)
self.assertEqual(
['fake_uuid0', 'fake_uuid1'],
port['resource_request']['same_subtree'],
)
def test__extend_port_resource_request_no_qos_policy(self):
port = self._create_and_extend_port([], physical_network='public',
has_qos_policy=False)
self.assertIsNone(port.get('resource_request'))
def test__extend_port_resource_request_min_bw_inherited_policy(
self):
self.min_bw_rule.direction = lib_constants.EGRESS_DIRECTION
self.min_bw_rule.qos_policy_id = self.policy.id
port = self._create_and_extend_port([self.min_bw_rule],
has_net_qos_policy=True)
self.assertEqual(
1,
len(port['resource_request']['request_groups'])
)
self.assertEqual(
'fake_uuid',
port['resource_request']['request_groups'][0]['id']
)
self.assertEqual(
['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'],
port['resource_request']['request_groups'][0]['required']
)
self.assertEqual(
{orc.NET_BW_EGR_KILOBIT_PER_SEC: 10},
port['resource_request']['request_groups'][0]['resources'],
)
self.assertEqual(
['fake_uuid'],
port['resource_request']['same_subtree'],
)
def test_get_ports_with_policy(self):
network_ports = [
mock.MagicMock(qos_policy_id=None),
mock.MagicMock(qos_policy_id=uuidutils.generate_uuid()),
mock.MagicMock(qos_policy_id=None)
]
ports = [
mock.MagicMock(qos_policy_id=self.policy.id),
]
expected_network_ports = [
port for port in network_ports if port.qos_policy_id is None]
expected_ports = ports + expected_network_ports
with mock.patch(
'neutron.objects.ports.Port.get_objects',
side_effect=[network_ports, ports]
), mock.patch.object(
self.policy, "get_bound_networks"
), mock.patch.object(
self.policy, "get_bound_ports"
):
policy_ports = self.qos_plugin._get_ports_with_policy(
self.ctxt, self.policy)
self.assertEqual(
len(expected_ports), len(policy_ports))
for port in expected_ports:
self.assertIn(port, policy_ports)
def _test_validate_update_port_callback(self, policy_id=None,
original_policy_id=None):
port_id = uuidutils.generate_uuid()
kwargs = {
"port": {
"id": port_id,
qos_consts.QOS_POLICY_ID: policy_id
},
"original_port": {
"id": port_id,
qos_consts.QOS_POLICY_ID: original_policy_id
}
}
port_mock = mock.MagicMock(id=port_id, qos_policy_id=policy_id)
policy_mock = mock.MagicMock(id=policy_id)
admin_ctxt = mock.Mock()
with mock.patch(
'neutron.objects.ports.Port.get_object',
return_value=port_mock
) as get_port, mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy_mock
) as get_policy, mock.patch.object(
self.qos_plugin, "validate_policy_for_port"
) as validate_policy_for_port, mock.patch.object(
self.ctxt, "elevated", return_value=admin_ctxt
):
self.qos_plugin._validate_update_port_callback(
"PORT", "precommit_update", "test_plugin",
payload=events.DBEventPayload(
self.ctxt, desired_state=kwargs['port'],
states=(kwargs['original_port'],)))
if policy_id is None or policy_id == original_policy_id:
get_port.assert_not_called()
get_policy.assert_not_called()
validate_policy_for_port.assert_not_called()
else:
get_port.assert_called_once_with(self.ctxt, id=port_id)
get_policy.assert_called_once_with(admin_ctxt, id=policy_id)
validate_policy_for_port.assert_called_once_with(
self.ctxt, policy_mock, port_mock)
def test_validate_update_port_callback_policy_changed(self):
self._test_validate_update_port_callback(
policy_id=uuidutils.generate_uuid())
def test_validate_update_port_callback_policy_not_changed(self):
policy_id = uuidutils.generate_uuid()
self._test_validate_update_port_callback(
policy_id=policy_id, original_policy_id=policy_id)
def test_validate_update_port_callback_policy_removed(self):
self._test_validate_update_port_callback(
policy_id=None, original_policy_id=uuidutils.generate_uuid())
def _test_validate_update_network_callback(self, policy_id=None,
original_policy_id=None):
network_id = uuidutils.generate_uuid()
kwargs = {
"context": self.ctxt,
"network": {
"id": network_id,
qos_consts.QOS_POLICY_ID: policy_id
},
"original_network": {
"id": network_id,
qos_consts.QOS_POLICY_ID: original_policy_id
}
}
port_mock_with_own_policy = mock.MagicMock(
id=uuidutils.generate_uuid(),
qos_policy_id=uuidutils.generate_uuid())
port_mock_without_own_policy = mock.MagicMock(
id=uuidutils.generate_uuid(), qos_policy_id=None)
ports = [port_mock_with_own_policy, port_mock_without_own_policy]
policy_mock = mock.MagicMock(id=policy_id)
admin_ctxt = mock.Mock()
with mock.patch(
'neutron.objects.ports.Port.get_objects',
return_value=ports
) as get_ports, mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy_mock
) as get_policy, mock.patch.object(
self.qos_plugin, "validate_policy_for_network"
) as validate_policy_for_network, mock.patch.object(
self.qos_plugin, "validate_policy_for_ports"
) as validate_policy_for_ports, mock.patch.object(
self.ctxt, "elevated", return_value=admin_ctxt
):
self.qos_plugin._validate_update_network_callback(
"NETWORK", "precommit_update", "test_plugin",
payload=events.DBEventPayload(
self.ctxt, desired_state=kwargs['network'],
states=(kwargs['original_network'],)))
if policy_id is None or policy_id == original_policy_id:
get_policy.assert_not_called()
validate_policy_for_network.assert_not_called()
get_ports.assert_not_called()
validate_policy_for_ports.assert_not_called()
else:
get_policy.assert_called_once_with(admin_ctxt, id=policy_id)
get_ports.assert_called_once_with(self.ctxt,
network_id=network_id)
validate_policy_for_ports.assert_called_once_with(
self.ctxt, policy_mock, [port_mock_without_own_policy])
def test_validate_update_network_callback_policy_changed(self):
self._test_validate_update_network_callback(
policy_id=uuidutils.generate_uuid())
def test_validate_update_network_callback_policy_not_changed(self):
policy_id = uuidutils.generate_uuid()
self._test_validate_update_network_callback(
policy_id=policy_id, original_policy_id=policy_id)
def test_validate_update_network_callback_policy_removed(self):
self._test_validate_update_network_callback(
policy_id=None, original_policy_id=uuidutils.generate_uuid())
def test_validate_policy_for_port_rule_not_valid(self):
port = {'id': uuidutils.generate_uuid()}
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_port",
return_value=False
):
self.policy.rules = [self.rule]
self.assertRaises(
qos_exc.QosRuleNotSupported,
self.qos_plugin.validate_policy_for_port,
self.ctxt, self.policy, port)
def test_validate_policy_for_port_all_rules_valid(self):
port = {'id': uuidutils.generate_uuid()}
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_port",
return_value=True
):
self.policy.rules = [self.rule]
try:
self.qos_plugin.validate_policy_for_port(
self.ctxt, self.policy, port)
except qos_exc.QosRuleNotSupported:
self.fail("QosRuleNotSupported exception unexpectedly raised")
def test_validate_policy_for_network(self):
        network_id = uuidutils.generate_uuid()
with mock.patch.object(
self.qos_plugin.driver_manager, "validate_rule_for_network",
return_value=True
):
self.policy.rules = [self.rule]
try:
self.qos_plugin.validate_policy_for_network(
                    self.ctxt, self.policy, network_id=network_id)
except qos_exc.QosRuleNotSupportedByNetwork:
self.fail("QosRuleNotSupportedByNetwork "
"exception unexpectedly raised")
def test_create_min_bw_rule_on_bound_port(self):
policy = self._get_policy()
policy.rules = [self.min_bw_rule]
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='compute:fake-zone')
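        # The policy is attached to a compute-owned ('bound') port, so
        # creating a minimum-bandwidth rule is expected to be rejected
        # with NotImplementedError.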
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
self.assertRaises(
NotImplementedError,
self.qos_plugin.create_policy_minimum_bandwidth_rule,
self.ctxt, policy.id, self.rule_data)
def test_create_min_bw_rule_on_unbound_port(self):
policy = self._get_policy()
policy.rules = [self.min_bw_rule]
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
try:
self.qos_plugin.create_policy_minimum_bandwidth_rule(
self.ctxt, policy.id, self.rule_data)
except NotImplementedError:
                self.fail("NotImplementedError unexpectedly raised")
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
@mock.patch('neutron.objects.qos.policy.QosPolicy')
def test_add_policy(self, mock_qos_policy, mock_create_rbac_policy):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy, 'QosPolicy')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
self.qos_plugin.create_policy(self.ctxt, self.policy_data)
policy_mock_call = mock.call.QosPolicy().create()
create_precommit_mock_call = mock.call.driver.call(
'create_policy_precommit', self.ctxt, mock.ANY)
create_mock_call = mock.call.driver.call(
'create_policy', self.ctxt, mock.ANY)
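        # The QosPolicy DB object must be created before the driver
        # 'create_policy_precommit' call, which in turn must precede the
        # postcommit 'create_policy' call.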
self.assertTrue(
mock_manager.mock_calls.index(policy_mock_call) <
mock_manager.mock_calls.index(create_precommit_mock_call) <
mock_manager.mock_calls.index(create_mock_call))
def test_add_policy_with_extra_tenant_keyword(self, *mocks):
policy_id = uuidutils.generate_uuid()
project_id = uuidutils.generate_uuid()
tenant_policy = {
'policy': {'id': policy_id,
'project_id': project_id,
'tenant_id': project_id,
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}}
policy_details = {'id': policy_id,
'project_id': project_id,
'name': 'test-policy',
'description': 'Test policy description',
'shared': True,
'is_default': False}
with mock.patch('neutron.objects.qos.policy.QosPolicy') as QosMocked:
self.qos_plugin.create_policy(self.ctxt, tenant_policy)
QosMocked.assert_called_once_with(self.ctxt, **policy_details)
@mock.patch.object(policy_object.QosPolicy, "get_object")
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
@mock.patch.object(policy_object.QosPolicy, 'update')
def test_update_policy(self, mock_qos_policy_update,
mock_create_rbac_policy, mock_qos_policy_get):
mock_qos_policy_get.return_value = self.policy
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy_update, 'update')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
fields = obj_utils.get_updatable_fields(
policy_object.QosPolicy, self.policy_data['policy'])
self.qos_plugin.update_policy(
self.ctxt, self.policy.id, {'policy': fields})
self._validate_driver_params('update_policy', self.ctxt)
policy_update_mock_call = mock.call.update()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_update_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
@mock.patch('neutron.objects.db.api.get_object', return_value=None)
@mock.patch.object(policy_object.QosPolicy, 'delete')
def test_delete_policy(self, mock_qos_policy_delete, mock_api_get_policy):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_policy_delete, 'delete')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
self.qos_plugin.delete_policy(self.ctxt, self.policy.id)
self._validate_driver_params('delete_policy', self.ctxt)
policy_delete_mock_call = mock.call.delete()
delete_precommit_mock_call = mock.call.driver.call(
'delete_policy_precommit', self.ctxt, mock.ANY)
delete_mock_call = mock.call.driver.call(
'delete_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(policy_delete_mock_call) <
mock_manager.mock_calls.index(delete_precommit_mock_call) <
mock_manager.mock_calls.index(delete_mock_call))
@mock.patch.object(policy_object.QosPolicy, "get_object")
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'create')
def test_create_policy_rule(self, mock_qos_rule_create,
mock_qos_policy_get):
_policy = copy.copy(self.policy)
setattr(_policy, "rules", [])
mock_qos_policy_get.return_value = _policy
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_create, 'create')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
with mock.patch('neutron.objects.qos.qos_policy_validator'
'.check_bandwidth_rule_conflict',
return_value=None), \
mock.patch(
'neutron.objects.qos.qos_policy_validator'
'.check_min_pps_rule_conflict', return_value=None):
self.qos_plugin.create_policy_bandwidth_limit_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
rule_create_mock_call = mock.call.create()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
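        # The rule must be persisted first; drivers are then notified via
        # the policy 'update' events, precommit before postcommit.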
self.assertTrue(
mock_manager.mock_calls.index(rule_create_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_create_policy_rule_check_rule_min_less_than_max(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.qos_plugin.create_policy_minimum_bandwidth_rule(
self.ctxt, _policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
self.mock_qos_load_attr.assert_called_once_with('rules')
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_max_more_than_min(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.qos_plugin.create_policy_bandwidth_limit_rule(
self.ctxt, _policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
self.mock_qos_load_attr.assert_called_once_with('rules')
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_bwlimit_less_than_minbw(self):
_policy = self._get_policy()
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
_policy = self._get_policy()
self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000000
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_bandwidth_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
new_rule_data = {
'bandwidth_limit_rule': {
'max_kbps': 5000,
'direction': self.rule.direction
}
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'update')
def test_update_policy_rule(self, mock_qos_rule_update):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_update, 'update')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.rule.get_rules',
return_value=[self.rule]), mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
rule_update_mock_call = mock.call.update()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_update_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_update_policy_rule_check_rule_min_less_than_max(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
rules = [self.rule, self.min_bw_rule]
setattr(_policy, "rules", rules)
self.mock_qos_load_attr.reset_mock()
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_bandwidth_rule(
self.ctxt, self.min_bw_rule.id,
self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_rule_check_rule_bwlimit_less_than_minbw(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
self.rule_data['minimum_bandwidth_rule']['min_kbps'] = 1000
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_minimum_bandwidth_rule,
self.ctxt, self.min_bw_rule.id,
self.policy.id, self.rule_data)
def test_update_policy_rule_check_rule_minbw_gr_than_bwlimit(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_bw_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_bandwidth_rule(
self.ctxt, self.min_bw_rule.id, self.policy.id,
self.rule_data)
self.mock_qos_load_attr.assert_called_once_with('rules')
self._validate_driver_params('update_policy', self.ctxt)
self.rule_data['bandwidth_limit_rule']['max_kbps'] = 1
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id,
self.policy.id, self.rule_data)
def _get_policy(self):
return policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
def test_update_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id,
self.rule_data)
@mock.patch.object(rule_object.QosBandwidthLimitRule, 'delete')
def test_delete_policy_rule(self, mock_qos_rule_delete):
mock_manager = mock.Mock()
mock_manager.attach_mock(mock_qos_rule_delete, 'delete')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
mock_manager.reset_mock()
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.rule])
self.qos_plugin.delete_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, _policy.id)
self._validate_driver_params('update_policy', self.ctxt)
rule_delete_mock_call = mock.call.delete()
update_precommit_mock_call = mock.call.driver.call(
'update_policy_precommit', self.ctxt, mock.ANY)
update_mock_call = mock.call.driver.call(
'update_policy', self.ctxt, mock.ANY)
self.assertTrue(
mock_manager.mock_calls.index(rule_delete_mock_call) <
mock_manager.mock_calls.index(update_precommit_mock_call) <
mock_manager.mock_calls.index(update_mock_call))
def test_delete_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, _policy.id)
def test_get_policy_bandwidth_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_bandwidth_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy,
self.ctxt, self.policy.id)
def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.create_policy_dscp_marking_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.update_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.delete_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_get_policy_dscp_marking_rules(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_dscp_marking_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, qos_policy_id=self.policy.id,
_pager=mock.ANY, filter='filter_id')
def test_get_policy_dscp_marking_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rule,
self.ctxt, self.dscp_rule.id, self.policy.id)
def test_get_policy_dscp_marking_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rules,
self.ctxt, self.policy.id)
def test_get_policy_minimum_bandwidth_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_minimum_bandwidth_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_minimum_bandwidth_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rules,
self.ctxt, self.policy.id)
def test_create_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
def test_delete_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_verify_bad_method_call(self):
self.assertRaises(AttributeError, getattr, self.qos_plugin,
'create_policy_bandwidth_limit_rules')
def test_get_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kbps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_BANDWIDTH_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def test_get_rule_types(self):
filters = {'type': 'type_id'}
with mock.patch.object(qos_plugin.QoSPlugin, 'supported_rule_types',
return_value=qos_consts.VALID_RULE_TYPES):
types = self.qos_plugin.get_rule_types(self.ctxt, filters=filters)
self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES),
sorted(type_['type'] for type_ in types))
@mock.patch('neutron.objects.ports.Port')
@mock.patch('neutron.objects.qos.policy.QosPolicy')
def test_rule_notification_and_driver_ordering(self, qos_policy_mock,
port_mock):
rule_cls_mock = mock.Mock()
rule_cls_mock.rule_type = 'fake'
rule_actions = {'create': [self.ctxt, rule_cls_mock,
self.policy.id, {'fake_rule': {}}],
'update': [self.ctxt, rule_cls_mock,
self.rule.id,
self.policy.id, {'fake_rule': {}}],
'delete': [self.ctxt, rule_cls_mock,
self.rule.id, self.policy.id]}
mock_manager = mock.Mock()
mock_manager.attach_mock(qos_policy_mock, 'QosPolicy')
mock_manager.attach_mock(port_mock, 'Port')
mock_manager.attach_mock(rule_cls_mock, 'RuleCls')
mock_manager.attach_mock(self.qos_plugin.driver_manager, 'driver')
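        # For each CRUD action the rule DB operation (on the rule object
        # itself or on the rule fetched from the policy) must be recorded
        # before the driver 'update_policy' notification.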
for action, arguments in rule_actions.items():
mock_manager.reset_mock()
method = getattr(self.qos_plugin, "%s_policy_rule" % action)
method(*arguments)
# some actions get rule from policy
get_rule_mock_call = getattr(
mock.call.QosPolicy.get_policy_obj().get_rule_by_id(),
action)()
# some actions construct rule from class reference
rule_mock_call = getattr(mock.call.RuleCls(), action)()
driver_mock_call = mock.call.driver.call('update_policy',
self.ctxt, mock.ANY)
if rule_mock_call in mock_manager.mock_calls:
action_index = mock_manager.mock_calls.index(rule_mock_call)
else:
action_index = mock_manager.mock_calls.index(
get_rule_mock_call)
self.assertLess(
action_index, mock_manager.mock_calls.index(driver_mock_call))
def test_create_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.create_policy_packet_rate_limit_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_create_policy_pps_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.pps_rule])
new_rule_data = {
'packet_rate_limit_rule': {
'max_kpps': 400,
'direction': self.pps_rule.direction
}
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_packet_rate_limit_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_update_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.update_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_pps_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_packet_rate_limit_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.pps_rule])
self.qos_plugin.delete_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_pps_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, _policy.id)
def test_get_policy_packet_rate_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_packet_rate_limit_rule(
self.ctxt, self.pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.pps_rule.id)
def test_get_policy_packet_rate_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosPacketRateLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_packet_rate_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_packet_rate_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_policy_packet_rate_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_packet_rate_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_packet_rate_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id, self.rule_data)
def test_delete_policy_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_packet_rate_limit_rule,
self.ctxt, self.pps_rule.id, self.policy.id)
def test_get_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'max_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
self.assertEqual(
qos_consts.RULE_TYPE_PACKET_RATE_LIMIT,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_PACKET_RATE_LIMIT)
def test_create_min_pps_rule_on_bound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='compute:fake-zone')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
self.assertRaises(
NotImplementedError,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, self.rule_data)
def test_create_min_pps_rule_on_unbound_port(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
segment = network_object.NetworkSegment(
physical_network='fake physnet')
net = network_object.Network(
self.ctxt,
segments=[segment])
port = ports_object.Port(
self.ctxt,
id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid(),
device_owner='')
with mock.patch(
'neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy), \
mock.patch(
'neutron.objects.network.Network.get_object',
return_value=net), \
mock.patch.object(
self.qos_plugin,
'_get_ports_with_policy',
return_value=[port]):
try:
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, _policy.id, self.rule_data)
except NotImplementedError:
                self.fail("NotImplementedError unexpectedly raised")
def test_create_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
setattr(_policy, "rules", [self.min_pps_rule])
rules = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
for new_rule_data in rules:
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
for rule_data in rules:
min_pps_rule = rule_object.QosMinimumPacketRateRule(
self.ctxt, **rule_data['minimum_packet_rate_rule'])
setattr(_policy, "rules", [min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
def test_create_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.create_policy_minimum_packet_rate_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_create_policy_min_pps_rule_duplicates(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
new_rule_data = {
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 1234,
'direction': self.min_pps_rule.direction,
},
}
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(
qos_exc.QoSRulesConflict,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, _policy.id, new_rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt, id=_policy.id)
def test_create_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_minimum_packet_rate_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.update_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
self._validate_driver_params('update_policy', self.ctxt)
def test_update_policy_rule_check_rule_min_pps_direction_conflict(self):
_policy = self._get_policy()
rules_data = [
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'ingress'
}
},
{
'minimum_packet_rate_rule': {
'id': uuidutils.generate_uuid(),
'min_kpps': 10,
'direction': 'egress'
}
},
]
self.rule_data['minimum_packet_rate_rule']['direction'] = 'any'
for rule_data in rules_data:
rules = [
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[0]['minimum_packet_rate_rule']),
rule_object.QosMinimumPacketRateRule(
self.ctxt, **rules_data[1]['minimum_packet_rate_rule']),
]
setattr(_policy, 'rules', rules)
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy) as mock_qos_get_obj:
self.assertRaises(qos_exc.QoSRuleParameterConflict,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, rule_data['minimum_packet_rate_rule']['id'],
self.policy.id, self.rule_data)
mock_qos_get_obj.assert_called_once_with(self.ctxt,
id=_policy.id)
def test_update_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_update_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_min_pps_rule(self):
_policy = self._get_policy()
setattr(_policy, "rules", [self.min_pps_rule])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.qos_plugin.delete_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
self._validate_driver_params('update_policy', self.ctxt)
def test_delete_policy_min_pps_rule_bad_policy(self):
_policy = self._get_policy()
setattr(_policy, "rules", [])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
self.assertRaises(
qos_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, _policy.id)
def test_delete_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rule(
self.ctxt, self.min_pps_rule.id, self.policy.id)
get_object_mock.assert_called_once_with(
self.ctxt, id=self.min_pps_rule.id)
def test_get_policy_min_pps_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_min_pps_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumPacketRateRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_packet_rate_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_min_pps_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rule,
self.ctxt, self.min_pps_rule.id, self.policy.id)
def test_get_policy_min_pps_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
qos_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_packet_rate_rules,
self.ctxt, self.policy.id)
def test_get_min_pps_rule_type(self):
admin_ctxt = context.get_admin_context()
drivers_details = [{
'name': 'fake-driver',
'supported_parameters': [{
'parameter_name': 'min_kpps',
'parameter_type': lib_constants.VALUES_TYPE_RANGE,
'parameter_range': {'start': 0, 'end': 100}
}]
}]
with mock.patch.object(
qos_plugin.QoSPlugin, "supported_rule_type_details",
return_value=drivers_details
):
rule_type_details = self.qos_plugin.get_rule_type(
admin_ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)
self.assertEqual(
qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE,
rule_type_details['type'])
self.assertEqual(
drivers_details, rule_type_details['drivers'])
def test_get_min_pps_rule_type_as_user(self):
self.assertRaises(
lib_exc.NotAuthorized,
self.qos_plugin.get_rule_type,
self.ctxt, qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE)


class QoSRuleAliasTestExtensionManager(object):
def get_resources(self):
return qos_rules_alias.Qos_rules_alias.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []


class QoSRuleAliasMinimumPacketRateTestExtensionManager(object):
def get_resources(self):
return qos_pps_minimum_rule_alias.Qos_pps_minimum_rule_alias.\
get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []


class TestQoSRuleAlias(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasTestExtensionManager()
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'bandwidth_limit': rule_object.QosBandwidthLimitRule,
'dscp_marking': rule_object.QosDscpMarkingRule,
'minimum_bandwidth': rule_object.QosMinimumBandwidthRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'bandwidth_limit_rule': {'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'dscp_mark': 16},
'minimum_bandwidth_rule': {'min_kbps': 10}
}
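
    # The helpers below exercise the rule alias API resources
    # ('<qos alias>/alias-<rule-type>-rules') through the extension API.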
def _update_rule(self, rule_type, rule_id, **kwargs):
data = {'alias_%s_rule' % rule_type: kwargs}
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_update_request(resource, data, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _show_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
return self.deserialize(self.fmt, res)
def _delete_rule(self, rule_type, rule_id):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_delete_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
if res.status_int >= webob.exc.HTTPClientError.code:
raise webob.exc.HTTPClientError(code=res.status_int)
@mock.patch.object(qos_plugin.QoSPlugin, "update_policy_rule")
def test_update_rule(self, update_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._update_rule(rule_type, rule_id, **data)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id, {rule_data_name: data}))
update_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "get_policy_rule")
def test_show_rule(self, get_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=rule):
self._show_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
get_policy_rule_mock.assert_has_calls(calls, any_order=True)
@mock.patch.object(qos_plugin.QoSPlugin, "delete_policy_rule")
def test_delete_rule(self, delete_policy_rule_mock):
calls = []
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
rule_data_name = '%s_rule' % rule_type
data = self.rule_data[rule_data_name]
rule = rule_object_class(self.ctxt, id=rule_id,
qos_policy_id=self.qos_policy_id,
**data)
with mock.patch(
'neutron.objects.qos.rule.QosRule.get_object',
return_value=rule
), mock.patch.object(self.qos_plugin, 'get_policy_rule',
return_value=rule.to_dict()):
self._delete_rule(rule_type, rule_id)
calls.append(mock.call(mock.ANY, rule_object_class, rule_id,
self.qos_policy_id))
delete_policy_rule_mock.assert_has_calls(calls, any_order=True)
def test_show_non_existing_rule(self):
for rule_type, rule_object_class in self.rule_objects.items():
rule_id = uuidutils.generate_uuid()
with mock.patch('neutron.objects.qos.rule.QosRule.get_object',
return_value=None):
resource = '%s/alias-%s-rules' % (qos.ALIAS,
rule_type.replace('_', '-'))
request = self.new_show_request(resource, rule_id, self.fmt)
res = request.get_response(self.ext_api)
self.assertEqual(webob.exc.HTTPNotFound.code, res.status_int)


class TestQoSRuleAliasMinimumPacketRate(TestQoSRuleAlias):
def setUp(self):
# Remove MissingAuthPlugin exception from logs
self.patch_notifier = mock.patch(
'neutron.notifiers.batch_notifier.BatchNotifier._notify')
self.patch_notifier.start()
plugin = 'ml2'
service_plugins = {'qos_plugin_name': SERVICE_PLUGIN_KLASS}
ext_mgr = QoSRuleAliasMinimumPacketRateTestExtensionManager()
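        # Note: super() is anchored at TestQoSRuleAlias, so its setUp is
        # skipped and the plugin is set up with the minimum-packet-rate
        # extension manager defined above instead.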
super(TestQoSRuleAlias, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.rule_objects = {
'minimum_packet_rate': rule_object.QosMinimumPacketRateRule
}
self.qos_policy_id = uuidutils.generate_uuid()
self.rule_data = {
'minimum_packet_rate_rule': {'min_kpps': 10, 'direction': 'any'}
}


class TestQosPluginDB(base.BaseQosTestCase):
PORT_ID = 'f02f160e-1612-11ec-b2b8-bf60ab98186c'
QOS_MIN_BW_RULE_ID = '8bf8eb46-160e-11ec-8024-9f96be32099d'
    # MIN_BW_REQUEST_GROUP_UUID is the UUIDv5 of QOS_MIN_BW_RULE_ID in the
    # PORT_ID namespace, i.e. the output of:
    # uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
    #     8bf8eb46-160e-11ec-8024-9f96be32099d
MIN_BW_REQUEST_GROUP_UUID = 'c8bc1b27-59a1-5135-aa33-aeecad6093f4'
MIN_BW_RP = 'd7bea120-1626-11ec-9148-c32debfcf0f6'
QOS_MIN_PPS_RULE_ID = '6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb'
    # MIN_PPS_REQUEST_GROUP_UUID is the UUIDv5 of QOS_MIN_PPS_RULE_ID in the
    # PORT_ID namespace, i.e. the output of:
    # uuid -v5 f02f160e-1612-11ec-b2b8-bf60ab98186c
    #     6ac5db7e-1626-11ec-8c7f-0b70dbb8a8eb
MIN_PPS_REQUEST_GROUP_UUID = '995008f4-f120-547a-b051-428b89076067'
MIN_PPS_RP = 'e16161f4-1626-11ec-a5a2-1fc9396e27cc'
def setUp(self):
super(TestQosPluginDB, self).setUp()
self.setup_coreplugin(load_plugins=False)
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
manager.init()
self.qos_plugin = directory.get_plugin(plugins_constants.QOS)
self.qos_plugin.driver_manager = mock.Mock()
self.rpc_push = mock.patch('neutron.api.rpc.handlers.resources_rpc'
'.ResourcesPushRpcApi.push').start()
self.context = context.get_admin_context()
self.project_id = uuidutils.generate_uuid()
def _make_qos_policy(self):
qos_policy = policy_object.QosPolicy(
self.context, project_id=self.project_id, shared=False,
is_default=False)
qos_policy.create()
return qos_policy
def _make_qos_minbw_rule(self, policy_id, direction='ingress',
min_kbps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumBandwidthRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kbps=min_kbps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_qos_minpps_rule(self, policy_id, direction='ingress',
min_kpps=1000, rule_id=None):
rule_id = rule_id if rule_id else uuidutils.generate_uuid()
qos_rule = rule_object.QosMinimumPacketRateRule(
self.context, project_id=self.project_id,
qos_policy_id=policy_id, direction=direction, min_kpps=min_kpps,
id=rule_id)
qos_rule.create()
return qos_rule
def _make_port(self, network_id, qos_policy_id=None, port_id=None,
qos_network_policy_id=None, device_owner=None):
port_id = port_id if port_id else uuidutils.generate_uuid()
base_mac = ['aa', 'bb', 'cc', 'dd', 'ee', 'ff']
mac = netaddr.EUI(next(net_utils.random_mac_generator(base_mac)))
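        # Fall back to a dummy, presumably arbitrary, non-compute
        # device_owner ('3') when the caller does not specify one.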
device_owner = device_owner if device_owner else '3'
port = ports_object.Port(
self.context, network_id=network_id, device_owner=device_owner,
project_id=self.project_id, admin_state_up=True, status='DOWN',
device_id='2', qos_policy_id=qos_policy_id,
qos_network_policy_id=qos_network_policy_id, mac_address=mac,
id=port_id)
port.create()
return port
def _make_network(self, qos_policy_id=None):
network = network_object.Network(self.context,
qos_policy_id=qos_policy_id)
network.create()
return network
def _test_validate_create_network_callback(self, network_qos=False):
net_qos_obj = self._make_qos_policy()
net_qos_id = net_qos_obj.id if network_qos else None
network = self._make_network(qos_policy_id=net_qos_id)
kwargs = {"context": self.context,
"network": network}
with mock.patch.object(self.qos_plugin,
'validate_policy_for_network') \
as mock_validate_policy:
self.qos_plugin._validate_create_network_callback(
"NETWORK", "precommit_create", "test_plugin",
payload=events.DBEventPayload(
self.context, resource_id=kwargs['network']['id'],))
qos_policy = None
if network_qos:
qos_policy = net_qos_obj
if qos_policy:
mock_validate_policy.assert_called_once_with(
self.context, qos_policy, network.id)
else:
mock_validate_policy.assert_not_called()
def test_validate_create_network_callback(self):
self._test_validate_create_network_callback(network_qos=True)
def test_validate_create_network_callback_no_qos(self):
self._test_validate_create_network_callback(network_qos=False)
def _test_validate_create_port_callback(self, port_qos=False,
network_qos=False):
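        """Check which policy the port precommit_create callback validates.

        A policy set directly on the port takes precedence over the
        network's policy; with neither attached, no validation happens.
        """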
net_qos_obj = self._make_qos_policy()
port_qos_obj = self._make_qos_policy()
net_qos_id = net_qos_obj.id if network_qos else None
port_qos_id = port_qos_obj.id if port_qos else None
network = self._make_network(qos_policy_id=net_qos_id)
port = self._make_port(network.id, qos_policy_id=port_qos_id)
kwargs = {"context": self.context,
"port": {"id": port.id}}
with mock.patch.object(self.qos_plugin, 'validate_policy_for_port') \
as mock_validate_policy:
self.qos_plugin._validate_create_port_callback(
"PORT", "precommit_create", "test_plugin",
payload=events.DBEventPayload(
self.context,
resource_id=kwargs['port']['id'],))
qos_policy = None
if port_qos:
qos_policy = port_qos_obj
elif network_qos:
qos_policy = net_qos_obj
if qos_policy:
mock_validate_policy.assert_called_once_with(
self.context, qos_policy, port)
else:
mock_validate_policy.assert_not_called()
def test_validate_create_port_callback_policy_on_port(self):
self._test_validate_create_port_callback(port_qos=True)
def test_validate_create_port_callback_policy_on_port_and_network(self):
self._test_validate_create_port_callback(port_qos=True,
network_qos=True)
def test_validate_create_port_callback_policy_on_network(self):
self._test_validate_create_port_callback(network_qos=True)
def test_validate_create_port_callback_no_policy(self):
self._test_validate_create_port_callback()
def _prepare_for_port_placement_allocation_change(self, qos1, qos2,
qos_network_policy=None):
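        """Build original/updated port dicts for a before_update payload.

        qos1 is the port's original policy, qos2 the desired one, and
        qos_network_policy, when given, is attached to the port's network.
        """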
qos1_id = qos1.id if qos1 else None
qos2_id = qos2.id if qos2 else None
qos_network_policy_id = (
qos_network_policy.id if qos_network_policy else None)
network = self._make_network(qos_policy_id=qos_network_policy_id)
port = self._make_port(
network.id, qos_policy_id=qos1_id, port_id=TestQosPluginDB.PORT_ID)
return {"context": self.context,
"original_port": {
"id": port.id,
"device_owner": "compute:uu:id",
"qos_policy_id": qos1_id,
"qos_network_policy_id": qos_network_policy_id},
"port": {"id": port.id, "qos_policy_id": qos2_id}}
def test_check_port_for_placement_allocation_change_no_qos_change(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=qos1_obj)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_not_called()
def test_check_port_for_placement_allocation_change(self):
qos1_obj = self._make_qos_policy()
qos2_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=qos2_obj)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos1_obj, qos2_obj, kwargs['original_port'], port)
def test_check_port_for_placement_allocation_change_no_new_policy(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=None)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos1_obj, None, kwargs['original_port'], port)
def test_check_port_for_placement_allocation_change_no_qos_update(self):
qos1_obj = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=qos1_obj, qos2=None)
kwargs['port'].pop('qos_policy_id')
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_not_called()
def test_check_port_for_placement_allocation_change_qos_network_policy(
self):
qos_network = self._make_qos_policy()
desired_qos = self._make_qos_policy()
kwargs = self._prepare_for_port_placement_allocation_change(
qos1=None, qos2=desired_qos, qos_network_policy=qos_network)
context = kwargs['context']
original_port = kwargs['original_port']
port = kwargs['port']
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_port_for_placement_allocation_change(
'PORT', 'before_update', 'test_plugin',
payload=events.DBEventPayload(
context, states=(original_port, port)))
mock_alloc_change.assert_called_once_with(
qos_network, desired_qos, kwargs['original_port'], port)
def test_check_network_for_placement_allocation_change_no_qos_change(self):
qos1 = self._make_qos_policy()
original_network = self._make_network(qos1.id)
network = original_network
ml2plugin_mock = mock.MagicMock()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
mock_alloc_change.assert_not_called()
ml2plugin_mock._make_port_dict.assert_not_called()
def test_check_network_for_placement_allocation_change_no_ports_to_update(
self):
original_qos = self._make_qos_policy()
qos = self._make_qos_policy()
port_qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network(qos.id)
# Port which is not compute bound
self._make_port(network_id=network.id, qos_policy_id=None,
device_owner='uu:id')
# Port with overwritten QoS policy
self._make_port(network_id=network.id, qos_policy_id=port_qos.id,
device_owner='compute:uu:id')
ml2plugin_mock = mock.MagicMock()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
mock_alloc_change.assert_not_called()
ml2plugin_mock._make_port_dict.assert_not_called()
def test_check_network_for_placement_allocation_change_remove_qos(self):
original_qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network()
ml2plugin_mock = mock.MagicMock()
def fake_make_port_dict(port):
return {
'id': port.id,
'device_owner': port.device_owner,
'qos_policy_id': port.qos_policy_id,
'qos_network_policy_id': port.qos_network_policy_id,
}
ml2plugin_mock._make_port_dict.side_effect = fake_make_port_dict
port1 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port1_binding = ports_object.PortBinding(
self.context, port_id=port1.id, host='fake_host1',
vnic_type='fake_vnic_type', vif_type='fake_vif_type',
profile={'allocation': 'fake_allocation'})
port1_binding.create()
port1.bindings = [port1_binding]
port1.update()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
def fake_change_placement_allocation(orig_policy, policy,
orig_port, port):
port['binding:profile'] = {}
mock_alloc_change.side_effect = fake_change_placement_allocation
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
self.assertEqual(ml2plugin_mock._make_port_dict.call_count, 1)
mock_alloc_change_calls = [
mock.call(
original_qos,
None,
{'id': port1.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': None},
mock.ANY),
]
mock_alloc_change.assert_has_calls(mock_alloc_change_calls,
any_order=True)
port1.update()
self.assertDictEqual(port1.bindings[0].profile, {})
def test_check_network_for_placement_allocation_change(self):
original_qos = self._make_qos_policy()
qos = self._make_qos_policy()
original_network = self._make_network(original_qos.id)
network = self._make_network(qos.id)
ml2plugin_mock = mock.MagicMock()
def fake_make_port_dict(port):
return {
'id': port.id,
'device_owner': port.device_owner,
'qos_policy_id': port.qos_policy_id,
'qos_network_policy_id': port.qos_network_policy_id,
}
ml2plugin_mock._make_port_dict.side_effect = fake_make_port_dict
port1 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port1_binding = ports_object.PortBinding(
self.context, port_id=port1.id, host='fake_host1',
vnic_type='fake_vnic_type', vif_type='fake_vif_type', profile={})
port1_binding.create()
port1.bindings = [port1_binding]
port1.update()
port2 = self._make_port(
network_id=network.id, qos_policy_id=None,
device_owner='compute:uu:id')
port2_binding = ports_object.PortBinding(
self.context, port_id=port2.id, host='fake_host2',
vnic_type='fake_vnic_type', vif_type='fake_vif_type', profile={})
port2_binding.create()
port2.bindings = [port2_binding]
port2.update()
with mock.patch.object(
self.qos_plugin,
'_change_placement_allocation') as mock_alloc_change:
def fake_change_placement_allocation(orig_policy, policy,
orig_port, port):
port['binding:profile'] = {'allocation': 'fake_allocation'}
mock_alloc_change.side_effect = fake_change_placement_allocation
self.qos_plugin._check_network_for_placement_allocation_change(
'network', 'after_update', ml2plugin_mock,
payload=events.DBEventPayload(
self.context, states=(original_network, network)))
self.assertEqual(ml2plugin_mock._make_port_dict.call_count, 2)
mock_alloc_change_calls = [
mock.call(
original_qos,
qos,
{'id': port1.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': qos.id},
mock.ANY),
mock.call(
original_qos,
qos,
{'id': port2.id,
'device_owner': 'compute:uu:id',
'qos_policy_id': None,
'qos_network_policy_id': qos.id},
mock.ANY)]
mock_alloc_change.assert_has_calls(mock_alloc_change_calls,
any_order=True)
port1.update()
port2.update()
self.assertDictEqual(
port1.bindings[0].profile, {'allocation': 'fake_allocation'})
self.assertDictEqual(
port2.bindings[0].profile, {'allocation': 'fake_allocation'})
def _prepare_port_for_placement_allocation(self, original_qos,
desired_qos=None,
qos_network_policy=None,
original_min_kbps=None,
desired_min_kbps=None,
original_min_kpps=None,
desired_min_kpps=None,
is_sriov=False):
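        # Extends the dict built above: minimum-bandwidth and/or packet-rate
        # rules are attached to the original (or network-level) policy and, if
        # given, to the desired policy, while the original port gets a binding
        # profile with one "allocation" entry per request group and, optionally,
        # SR-IOV PCI details.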
kwargs = self._prepare_for_port_placement_allocation_change(
original_qos, desired_qos, qos_network_policy=qos_network_policy)
orig_port = kwargs['original_port']
qos = original_qos or qos_network_policy
qos.rules = []
allocation = {}
if original_min_kbps:
qos.rules += [self._make_qos_minbw_rule(
qos.id, min_kbps=original_min_kbps,
rule_id=TestQosPluginDB.QOS_MIN_BW_RULE_ID)]
allocation.update(
{TestQosPluginDB.MIN_BW_REQUEST_GROUP_UUID:
TestQosPluginDB.MIN_BW_RP})
if original_min_kpps:
qos.rules += [self._make_qos_minpps_rule(
qos.id, min_kpps=original_min_kpps,
rule_id=TestQosPluginDB.QOS_MIN_PPS_RULE_ID)]
allocation.update(
{TestQosPluginDB.MIN_PPS_REQUEST_GROUP_UUID:
TestQosPluginDB.MIN_PPS_RP})
if desired_qos:
desired_qos.rules = []
if desired_min_kbps:
desired_qos.rules += [self._make_qos_minbw_rule(
desired_qos.id, min_kbps=desired_min_kbps)]
if desired_min_kpps:
desired_qos.rules += [self._make_qos_minpps_rule(
desired_qos.id, min_kpps=desired_min_kpps)]
binding_prof = {}
if is_sriov:
binding_prof = {
'pci_slot': '0000:42:41.0',
'pci_vendor_info': '8086:107ed',
'physical_network': 'sriov_phy'
}
binding_prof.update({'allocation': allocation})
orig_port.update(
{'binding:profile': binding_prof,
'device_id': 'uu:id'}
)
return orig_port, kwargs['port']
def _assert_pci_info(self, port):
self.assertIn('pci_slot', port['binding:profile'])
self.assertIn('pci_vendor_info', port['binding:profile'])
self.assertIn('physical_network', port['binding:profile'])
def test_change_placement_allocation_increase(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_increase_min_pps(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kpps=1000, desired_min_kpps=2000,
is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_increase_min_pps_and_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
original_min_kpps=500, desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500},
self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
def test_change_placement_allocation_change_direction_min_pps_and_min_bw(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000,
original_min_kpps=500, desired_min_kpps=1000)
for rule in qos2.rules:
rule.direction = 'egress'
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -500,
'NET_PACKET_RATE_EGR_KILOPACKET_PER_SEC': 1000},
self.MIN_BW_RP: {
'NET_BW_IGR_KILOBIT_PER_SEC': -1000,
'NET_BW_EGR_KILOBIT_PER_SEC': 2000}})
def test_change_placement_allocation_change_dir_min_pps_ingress_to_any(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kpps=1000, desired_min_kpps=1000)
for rule in qos2.rules:
rule.direction = 'any'
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.assertRaises(NotImplementedError,
self.qos_plugin._change_placement_allocation, qos1, qos2,
orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_min_bw_dataplane_enforcement(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_min_bw_dataplane_enforcement_with_pps(
self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=1000, original_min_kpps=500,
desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': 500}})
def test_change_placement_allocation_decrease(self):
original_qos = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
original_qos, desired_qos, original_min_kbps=2000,
desired_min_kbps=1000, is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
original_qos, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_decrease_min_pps(self):
original_qos = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
original_qos, desired_qos, original_min_kpps=2000,
desired_min_kpps=1000, is_sriov=True)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
original_qos, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -1000}})
self._assert_pci_info(port)
def test_change_placement_allocation_no_original_qos(self):
qos1 = None
qos2 = self._make_qos_policy()
rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
qos2.rules = [rule2_obj]
orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_no_original_allocation(self):
qos1 = self._make_qos_policy()
rule1_obj = self._make_qos_minbw_rule(qos1.id, min_kbps=500)
qos1.rules = [rule1_obj]
qos2 = self._make_qos_policy()
rule2_obj = self._make_qos_minbw_rule(qos2.id, min_kbps=1000)
qos2.rules = [rule2_obj]
orig_port = {'id': 'u:u', 'device_id': 'i:d', 'binding:profile': {}}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_new_policy_empty(self):
qos1 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(qos1,
original_min_kbps=1000, original_min_kpps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, None, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={
self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000},
self.MIN_PPS_RP: {
'NET_PACKET_RATE_IGR_KILOPACKET_PER_SEC': -2000}})
def test_change_placement_allocation_no_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
bw_limit_rule1 = rule_object.QosDscpMarkingRule(dscp_mark=16)
bw_limit_rule2 = rule_object.QosDscpMarkingRule(dscp_mark=18)
qos1.rules = [bw_limit_rule1]
qos2.rules = [bw_limit_rule2]
orig_port = {
'binding:profile': {'allocation': {
self.MIN_BW_REQUEST_GROUP_UUID: self.MIN_BW_RP}},
'device_id': 'uu:id',
'id': '9416c220-160a-11ec-ba3d-474633eb825c',
}
port = {}
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, None, orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_old_rule_not_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
bw_limit_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, desired_min_kbps=2000)
qos1.rules = [bw_limit_rule]
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(qos1, qos2, orig_port,
port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_new_rule_not_min_bw(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
bw_limit_rule = rule_object.QosDscpMarkingRule(dscp_mark=16)
qos2.rules = [bw_limit_rule]
orig_port, port = self._prepare_port_for_placement_allocation(qos1,
original_min_kbps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': -1000}})
def test_change_placement_allocation_equal_minkbps_and_minkpps(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=1000,
original_min_kpps=1000, desired_min_kpps=1000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos1, qos2, orig_port, port)
mock_update_qos_alloc.assert_not_called()
def test_change_placement_allocation_update_conflict(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
mock_update_qos_alloc.side_effect = ks_exc.Conflict(
response={'errors': [{'code': 'placement.concurrent_update'}]}
)
self.assertRaises(
neutron_qos_exc.QosPlacementAllocationUpdateConflict,
self.qos_plugin._change_placement_allocation,
qos1, qos2, orig_port, port)
def test_change_placement_allocation_update_generation_conflict(self):
qos1 = self._make_qos_policy()
qos2 = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
qos1, qos2, original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
mock_update_qos_alloc.side_effect = (
pl_exc.PlacementAllocationGenerationConflict(
consumer=self.MIN_BW_RP))
self.assertRaises(
pl_exc.PlacementAllocationGenerationConflict,
self.qos_plugin._change_placement_allocation,
qos1, qos2, orig_port, port)
def test_change_placement_allocation_qos_network_policy(self):
qos_network = self._make_qos_policy()
desired_qos = self._make_qos_policy()
orig_port, port = self._prepare_port_for_placement_allocation(
None, desired_qos, qos_network_policy=qos_network,
original_min_kbps=1000, desired_min_kbps=2000)
with mock.patch.object(self.qos_plugin._placement_client,
'update_qos_allocation') as mock_update_qos_alloc:
self.qos_plugin._change_placement_allocation(
qos_network, desired_qos, orig_port, port)
mock_update_qos_alloc.assert_called_once_with(
consumer_uuid='uu:id',
alloc_diff={self.MIN_BW_RP: {'NET_BW_IGR_KILOBIT_PER_SEC': 1000}})
| 1.140625 | 1 |
covfefe/covfefe.py | fixator10/Trusty-cogs | 148 | 3872 | import re
import discord
from redbot.core import commands
class Covfefe(commands.Cog):
"""
Convert almost any word into covfefe
"""
def __init__(self, bot):
self.bot = bot
async def covfefe(self, x, k="aeiouy])"):
"""
https://codegolf.stackexchange.com/a/123697
"""
try:
b, c, v = re.findall(f"(.*?[{k}([^{k}.*?([{k}", x)[0]
return b + c + (("bcdfgkpstvz" + c)["pgtvkgbzdfs".find(c)] + v) * 2
except IndexError:
return None
async def red_delete_data_for_user(self, **kwargs):
"""
Nothing to delete
"""
return
@commands.command()
async def covefy(self, ctx, msg):
"""Convert almost any word into covfefe"""
newword = await self.covfefe(msg)
if newword is not None:
await ctx.send(newword)
else:
await ctx.send("I cannot covfefeify that word")
| 2.890625 | 3 |
src/api/bkuser_core/audit/views.py | trueware/bk-user | 0 | 3873 | <reponame>trueware/bk-user
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from bkuser_core.common.viewset import AdvancedListAPIView, AdvancedModelViewSet
from . import serializers as local_serializers
from .models import GeneralLog, LogIn, ResetPassword
class GeneralLogViewSet(AdvancedModelViewSet, AdvancedListAPIView):
queryset = GeneralLog.objects.all()
serializer_class = local_serializers.GeneralLogSerializer
lookup_field = "id"
class LoginLogViewSet(AdvancedModelViewSet, AdvancedListAPIView):
queryset = LogIn.objects.all()
serializer_class = local_serializers.LoginLogSerializer
lookup_field = "id"
class ResetPasswordLogViewSet(AdvancedModelViewSet, AdvancedListAPIView):
queryset = ResetPassword.objects.all()
serializer_class = local_serializers.ResetPasswordLogSerializer
lookup_field = "id"
| 1.507813 | 2 |
exp_gqa/test.py | ronghanghu/gqa_single_hop_baseline | 19 | 3874 | import os
import numpy as np
import tensorflow as tf
from models_gqa.model import Model
from models_gqa.config import build_cfg_from_argparse
from util.gqa_train.data_reader import DataReader
import json
# Load config
cfg = build_cfg_from_argparse()
# Start session
os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg.GPU_ID)
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=cfg.GPU_MEM_GROWTH)))
# Data files
imdb_file = cfg.IMDB_FILE % cfg.TEST.SPLIT_VQA
scene_graph_file = cfg.SCENE_GRAPH_FILE % \
cfg.TEST.SPLIT_VQA.replace('_balanced', '').replace('_all', '')
data_reader = DataReader(
imdb_file, shuffle=False, one_pass=True, batch_size=cfg.TEST.BATCH_SIZE,
T_encoder=cfg.T_ENCODER,
vocab_question_file=cfg.VOCAB_QUESTION_FILE,
vocab_answer_file=cfg.VOCAB_ANSWER_FILE,
feature_type=cfg.FEAT_TYPE,
spatial_feature_dir=cfg.SPATIAL_FEATURE_DIR,
objects_feature_dir=cfg.OBJECTS_FEATURE_DIR,
objects_max_num=cfg.W_FEAT,
scene_graph_file=scene_graph_file,
vocab_name_file=cfg.VOCAB_NAME_FILE,
vocab_attr_file=cfg.VOCAB_ATTR_FILE,
spatial_pos_enc_dim=cfg.SPATIAL_POS_ENC_DIM,
bbox_tile_num=cfg.BBOX_TILE_NUM)
num_vocab = data_reader.batch_loader.vocab_dict.num_vocab
num_choices = data_reader.batch_loader.answer_dict.num_vocab
# Inputs and model
input_seq_batch = tf.placeholder(tf.int32, [None, None])
seq_length_batch = tf.placeholder(tf.int32, [None])
image_feat_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT, cfg.D_FEAT])
image_valid_batch = tf.placeholder(
tf.float32, [None, cfg.H_FEAT, cfg.W_FEAT])
model = Model(
input_seq_batch, seq_length_batch, image_feat_batch, image_valid_batch,
num_vocab=num_vocab, num_choices=num_choices, is_training=False)
# Load snapshot
if cfg.TEST.USE_EMA:
ema = tf.train.ExponentialMovingAverage(decay=0.9) # decay doesn't matter
var_names = {
(ema.average_name(v) if v in model.params else v.op.name): v
for v in tf.global_variables()}
else:
var_names = {v.op.name: v for v in tf.global_variables()}
snapshot_file = cfg.TEST.SNAPSHOT_FILE % (cfg.EXP_NAME, cfg.TEST.ITER)
print('loading model snapshot from %s' % snapshot_file)
snapshot_saver = tf.train.Saver(var_names)
snapshot_saver.restore(sess, snapshot_file)
print('Done')
# Write results
result_dir = cfg.TEST.RESULT_DIR % (cfg.EXP_NAME, cfg.TEST.ITER)
os.makedirs(result_dir, exist_ok=True)
# Run test
answer_correct, num_questions = 0, 0
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions = []
answer_word_list = data_reader.batch_loader.answer_dict.word_list
pred_file = os.path.join(
result_dir, 'gqa_eval_preds_%s_%s_%08d.json' % (
cfg.TEST.SPLIT_VQA, cfg.EXP_NAME, cfg.TEST.ITER))
for n_batch, batch in enumerate(data_reader.batches()):
if 'answer_label_batch' not in batch:
batch['answer_label_batch'] = -np.ones(
len(batch['qid_list']), np.int32)
if num_questions == 0:
print('imdb has no answer labels. Using dummy labels.\n\n'
'**The final accuracy will be zero (no labels provided)**\n')
vqa_scores_value = sess.run(model.vqa_scores, feed_dict={
input_seq_batch: batch['input_seq_batch'],
seq_length_batch: batch['seq_length_batch'],
image_feat_batch: batch['image_feat_batch'],
image_valid_batch: batch['image_valid_batch']})
# compute accuracy
vqa_labels = batch['answer_label_batch']
vqa_predictions = np.argmax(vqa_scores_value, axis=1)
answer_correct += np.sum(vqa_predictions == vqa_labels)
num_questions += len(vqa_labels)
accuracy = answer_correct / num_questions
if n_batch % 20 == 0:
print('exp: %s, iter = %d, accumulated accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
output_predictions.extend([
{"questionId": qId, "prediction": answer_word_list[p]}
for qId, p in zip(batch['qid_list'], vqa_predictions)])
with open(os.path.join(
result_dir, 'vqa_results_%s.txt' % cfg.TEST.SPLIT_VQA), 'w') as f:
print('\nexp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions))
print('exp: %s, iter = %d, final accuracy on %s = %f (%d / %d)' %
(cfg.EXP_NAME, cfg.TEST.ITER, cfg.TEST.SPLIT_VQA,
accuracy, answer_correct, num_questions), file=f)
if cfg.TEST.OUTPUT_VQA_EVAL_PRED:
with open(pred_file, 'w') as f:
json.dump(output_predictions, f, indent=2)
print('prediction file written to %s' % pred_file)
| 1.953125 | 2 |
extract_gear/armor_visitor.py | kamerons/dde-extract-gear | 0 | 3875 | <filename>extract_gear/armor_visitor.py
class ArmorVisitor:
def __init__(self, num_pages, first_page_col_start, first_page_row_start,
last_page_row_start, last_page_col_end, last_page_row_end, num_col_page=5, num_row_page=3):
self.num_pages = num_pages
self.first_page_col_start = first_page_col_start
self.first_page_row_start = first_page_row_start
self.last_page_row_start = last_page_row_start
self.last_page_col_end = last_page_col_end
self.last_page_row_end = last_page_row_end
self.num_col_page = num_col_page
self.num_row_page = num_row_page
def iterate(self, callback):
for page_num in range(1, self.num_pages + 1):
page = self.create_page(page_num)
i = 0
for coord in page:
callback(coord, page_num, i)
i += 1
def create_page(self, page_num):
if page_num == 1:
last_col = self.num_col_page if self.num_pages > 1 else self.last_page_col_end
last_row = self.num_row_page if self.num_pages > 1 else self.last_page_row_end
page = Page(self.first_page_col_start, self.first_page_row_start, last_col, last_row, self.num_col_page)
elif page_num == self.num_pages:
page = Page(1, self.last_page_row_start,
self.last_page_col_end, self.last_page_row_end, self.num_col_page)
else:
page = Page(1, 1, self.num_col_page, self.num_row_page, self.num_col_page)
return page
class Page:
def __init__(self, start_col, start_row, last_col, last_row, num_col_page=5):
self.start_col = start_col
self.start_row = start_row
self.last_col = last_col
self.last_row = last_row
self.num_col_page = num_col_page
def __iter__(self):
self.cur_row = self.start_row
self.cur_col = self.start_col
return self
def __next__(self):
position = (self.cur_row, self.cur_col)
if self.cur_row > self.last_row or (self.cur_col > self.last_col and self.cur_row == self.last_row):
raise StopIteration
elif self.cur_col == self.num_col_page:
self.cur_col = 1
self.cur_row += 1
else:
self.cur_col += 1
return position
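# Minimal usage sketch (the grid values are illustrative assumptions): walk a
# two-page armor inventory and print every slot coordinate that
# ArmorVisitor.iterate() hands to the callback.
if __name__ == "__main__":
    def print_slot(coord, page_num, index):
        # coord is the (row, col) tuple yielded by Page.__next__
        print("page %d, item %d at %s" % (page_num, index, coord))
    visitor = ArmorVisitor(
        num_pages=2, first_page_col_start=2, first_page_row_start=1,
        last_page_row_start=1, last_page_col_end=3, last_page_row_end=2)
    visitor.iterate(print_slot)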
| 2.84375 | 3 |
gamla/url_utils_test.py | hyroai/gamla | 17 | 3876 | <filename>gamla/url_utils_test.py
from gamla import url_utils
def test_add_to_query_string1():
assert (
url_utils.add_to_query_string(
{"a": 123},
"https://www.domain.com/path?param1=param1#anchor",
)
== "https://www.domain.com/path?param1=param1&a=123#anchor"
)
def test_add_to_query_string2():
assert (
url_utils.add_to_query_string(
{"param1": 123},
"https://www.domain.com/path?param1=param1#anchor",
)
== "https://www.domain.com/path?param1=123#anchor"
)
| 2.859375 | 3 |
examples/temp_feie_shetland.py | nilsmkMET/roppy | 0 | 3877 | import numpy as np
from netCDF4 import Dataset
# Import development version of roppy
import sys
sys.path = ['..'] + sys.path
import roppy
# --- EDIT -----------------
# ROMS file
romsfile = 'data/ocean_avg_example.nc'
# Section definition
lon0, lat0 = -0.67, 60.75 # Shetland
lon1, lat1 = 4.72, 60.75 # Feie
# --- EDIT ------------------
# Make a grid object
f = Dataset(romsfile)
grd = roppy.SGrid(f)
# Get grid coordinates of end points
x0, y0 = grd.ll2xy(lon0, lat0)
x1, y1 = grd.ll2xy(lon1, lat1)
# Find nearest rho-points
i0, j0, i1, j1 = [int(round(v)) for v in (x0, y0, x1, y1)]
# Make a Section object
sec = roppy.linear_section(i0, i1, j0, j1, grd)
# Read in a 3D temperature field
temp = f.variables['temp'][0,:,:,:]
# Interpolate to the section
temp_sec = sec.sample3D(temp)
# Compute mean temperature along section
# using trapezoidal integration
print "mean tempeature = ", np.sum(sec.Area * temp_sec) / np.sum(sec.Area)
# TODO: Make a mean method in the Section class
# Usage: sec.mean(temp_sec)
# or even directly from 3D: sec.mean(temp)
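# A sketch of such a helper (not part of roppy; it only wraps the computation
# above and assumes sec.Area and sec.sample3D behave as used in this script):
def section_mean(sec, field3d):
    """Area-weighted mean of a 3D field over the section"""
    field_sec = sec.sample3D(field3d)
    return np.sum(sec.Area * field_sec) / np.sum(sec.Area)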
| 2.671875 | 3 |
examples/test_scalar_field.py | gemini3d/pv-gemini | 0 | 3878 | #!/usr/bin/env python3
"""
example of 3D scalar field
If you get this error, ParaView doesn't know your data file format:
TypeError: TestFileReadability argument %Id: %V
"""
from pathlib import Path
import argparse
import paraview.simple as pvs
p = argparse.ArgumentParser()
p.add_argument("fn", help="data file to load with paraview OpenDataFile()")
P = p.parse_args()
fn = Path(P.fn).expanduser()
if not fn.is_file():
raise FileNotFoundError(fn)
pvs.OpenDataFile(str(fn))
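# Typically launched with ParaView's Python interpreter (the path below is
# illustrative), e.g.: pvpython test_scalar_field.py ~/data/scalar_field.vti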
| 2.6875 | 3 |
src/cms/forms/languages/language_form.py | S10MC2015/cms-django | 0 | 3879 | <filename>src/cms/forms/languages/language_form.py
from django import forms
from ...models import Language
class LanguageForm(forms.ModelForm):
"""
Form for creating and modifying language objects
"""
class Meta:
model = Language
fields = [
"code",
"english_name",
"native_name",
"text_direction",
]
| 2.109375 | 2 |
search/forms.py | gregneagle/sal | 2 | 3880 | from django import forms
from .models import *
from server.models import *
class ChoiceFieldNoValidation(forms.ChoiceField):
def validate(self, value):
pass
class SaveSearchForm(forms.ModelForm):
class Meta:
model = SavedSearch
fields = ('name',)
class SearchRowForm(forms.ModelForm):
skip_fields = [
'id',
'machine_group',
'report',
'activity',
'errors',
'warnings',
'install_log',
'puppet_errors',
'install_log_hash'
]
search_fields = []
for f in Machine._meta.fields:
if f.name not in skip_fields:
add = (f.name,f.name,)
search_fields.append(add)
search_field = ChoiceFieldNoValidation(choices=sorted(search_fields))
and_or = ChoiceFieldNoValidation(choices=AND_OR_CHOICES)
def __init__(self, *args, **kwargs):
self.search_group = kwargs.pop('search_group', None)
super(SearchRowForm, self).__init__(*args, **kwargs)
try:
search_group_count = self.search_group.searchrow_set.count()
except:
search_group_count = 0
if search_group_count == 0 and self.search_group:
self.fields['and_or'] = ChoiceFieldNoValidation(
initial='AND',
widget=forms.HiddenInput()
)
class Meta:
model = SearchRow
fields = ('search_models', 'search_field', 'and_or', 'operator','search_term',)
| 2.3125 | 2 |
newsparser.py | antoreep-jana/BBC-News-Analyzer | 1 | 3881 | <reponame>antoreep-jana/BBC-News-Analyzer
from bs4 import BeautifulSoup as bs
import requests
class BBC:
def __init__(self, url:str):
article = requests.get(url)
self.soup = bs(article.content, "html.parser")
#print(dir(self.soup))
#print(self.soup.h1.text)
self.body = self.get_body()
self.link = url
self.title = self.get_title()
self.author = self.get_author()
self.images = self.get_images()
self.date = self.get_date()
#author = self.soup.find
#date = self.soup
#for img in imgs:
# print(img['src'])
paras = self.soup.find_all('div', {"class" : "ssrcss-17j9f6r-RichTextContainer e5tfeyi1"})
#for para in paras:
# print(para.text)
def get_body(self) -> list:
#body = self.soup.find(property="articleBody")
paras = self.soup.find_all('div', {"class" : "ssrcss-17j9f6r-RichTextContainer e5tfeyi1"})
#for para in paras:
# print(para.text)
return [p.text for p in paras]
#return [p.text for p in body.find_all("p")]
def get_title(self) -> str:
#return self.soup.find(class_="story-body__h1").text
return self.soup.h1.text
def get_author(self) -> str:
author = self.soup.find('p', {'class' : 'ssrcss-1rv0moy-Contributor e5xb54n2'})
return author.text.replace("BBC News", "")
def get_images(self) -> list:
imgs = self.soup.find_all('figure', {'class' : 'ssrcss-wpgbih-StyledFigure e34k3c23'})
imgs_lst = []
for img in imgs:
try:
if "blank_white_space" not in img.img['src']:
imgs_lst.append(img.img['src'])#['div']['span']['span']['img'])
except:
pass
return imgs_lst
def get_date(self) -> str:
date = self.soup.find_all('time')[0]
return date['datetime']
parsed = BBC("https://www.bbc.co.uk/news/world-europe-49345912")
#print(parsed.title)
#print(parsed.link)
#print(parsed.author)
#print(parsed.date)
#print(parsed.title)
#print(parsed.body)
#print(parsed.images)
#print(parsed.body)
| 3.03125 | 3 |
SIR_model-Copy.Caroline.1.py | Caroline-Odevall/final-project-team-18 | 0 | 3882 | # In[42]:
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# In[43]:
# describe the model
def deriv(y, t, N, beta, gamma, delta):
S, E, I, R = y
    dSdt = -beta * S * I / N  # S(t) - susceptible (those who can still catch the infection)
    dEdt = beta * S * I / N - delta * E  # E(t) - exposed (infected but not yet infectious)
    dIdt = delta * E - gamma * I  # I(t) - infected (those with an ongoing infection)
    dRdt = gamma * I  # R(t) - recovered/removed
return dSdt, dEdt, dIdt, dRdt
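# Written out, the system above is the standard SEIR model:
#   dS/dt = -beta*S*I/N
#   dE/dt =  beta*S*I/N - delta*E
#   dI/dt =  delta*E - gamma*I
#   dR/dt =  gamma*I
# so S + E + I + R stays constant at N.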
# In[44]:
# describe the parameters
N = 2283  # Total population, N = S(t) + E(t) + I(t) + R(t)
D = 4.0  # infections last four days
gamma = 1.0 / D  # Removal rate (the rate at which the infected recover)
delta = 1.0 / 5.0  # incubation period of five days
R_0 = 2.5  # Basic reproduction number
beta = R_0 * gamma  # R_0 = beta/gamma: infections caused per infectious person per unit time (depends on the virus and on how we behave)
S0, E0, I0, R0 = N-1, 1, 0, 0  # initial conditions: one exposed, rest susceptible
# Rt = R_0 * S(t)/N_tot * (1 - b), where b = the effect of policy and behavioural changes
# In[45]:
t = np.linspace(0, 99, 100) # Grid of time points (in days)
y0 = S0, E0, I0, R0 # Initial conditions vector
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N, beta, gamma, delta))
S, E, I, R = ret.T
# In[46]:
def plotsir(t, S, E, I, R):
f, ax = plt.subplots(1,1,figsize=(10,4))
ax.plot(t, S, 'b', alpha=0.7, linewidth=2, label='Susceptible')
ax.plot(t, E, 'y', alpha=0.7, linewidth=2, label='Exposed')
ax.plot(t, I, 'r', alpha=0.7, linewidth=2, label='Infected')
ax.plot(t, R, 'g', alpha=0.7, linewidth=2, label='Recovered')
ax.set_xlabel('Time (days)')
ax.yaxis.set_tick_params(length=0)
ax.xaxis.set_tick_params(length=0)
ax.grid(b=True, which='major', c='w', lw=2, ls='-')
legend = ax.legend()
legend.get_frame().set_alpha(0.5)
for spine in ('top', 'right', 'bottom', 'left'):
ax.spines[spine].set_visible(False)
plt.savefig('Plot.png')
plt.show();
# plot the graph
# In[47]:
plotsir(t, S, E, I, R)
# In[ ]:
| 2.84375 | 3 |
Condicionales anidados.py | gcardosov/PythonAprendeOrg | 1 | 3883 | pregunta = input('Do you work from home? ')
if pregunta == True:
    print 'You are lucky'
if pregunta == False:
    print 'You work outside the home'
tiempo = input('How many minutes is your commute to work: ')
if tiempo == 0:
    print 'You work from home'
elif tiempo <= 20:
    print 'That is a short commute'
elif tiempo >= 21 and tiempo <= 45:
    print 'That is a reasonable commute'
else:
    print 'Look for other routes'
| 3.921875 | 4 |
app/middleware/cache_headers.py | Niclnx/service-stac | 9 | 3884 | <gh_stars>1-10
import logging
import re
from urllib.parse import urlparse
from django.conf import settings
from django.utils.cache import add_never_cache_headers
from django.utils.cache import patch_cache_control
from django.utils.cache import patch_response_headers
logger = logging.getLogger(__name__)
STAC_BASE = settings.STAC_BASE
STAC_BASE_V = settings.STAC_BASE_V
class CacheHeadersMiddleware:
'''Middleware that adds appropriate cache headers to GET and HEAD methods.
NOTE: /checker, /get-token, /metrics and /{healthcheck} endpoints are marked as never cache.
'''
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
# match /xxx or /api/stac/xxx
# f.ex. /metrics, /checker, /api/stac/{healthcheck}, /api/stac/get-token
if re.match(fr'^(/{STAC_BASE})?/\w+$', request.path):
add_never_cache_headers(response)
elif (
request.method in ('GET', 'HEAD') and
not request.path.startswith(urlparse(settings.STATIC_URL).path)
):
logger.debug(
"Patching cache headers for request %s %s",
request.method,
request.path,
extra={"request": request}
)
patch_response_headers(response, settings.CACHE_MIDDLEWARE_SECONDS)
patch_cache_control(response, public=True)
return response
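# Minimal usage sketch (the dotted path is an assumption about this project's
# layout): list the class in Django's MIDDLEWARE setting so every response
# passes through it, e.g.
#   MIDDLEWARE = [
#       ...,
#       'app.middleware.cache_headers.CacheHeadersMiddleware',
#   ]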
| 2.1875 | 2 |
mmdet/models/detectors/knowledge_distilling/kd_single_stage.py | anorthman/mmdetection | 5 | 3885 | <filename>mmdet/models/detectors/knowledge_distilling/kd_single_stage.py
# author huangchuanhong
import torch
from mmcv.runner import load_checkpoint
from ..base import BaseDetector
from ..single_stage import SingleStageDetector
from ...registry import DETECTORS
from ...builder import build_detector
@DETECTORS.register_module
class KDSingleStageDetector(SingleStageDetector):
def __init__(self,
backbone,
teacher,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KDSingleStageDetector, self).__init__(backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
self.teacher_detector = build_detector(teacher.model, train_cfg=None, test_cfg=test_cfg)
load_checkpoint(self.teacher_detector, teacher.checkpoint)
self.teacher_detector.eval()
self.beta = train_cfg.teacher.beta
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
beta=1000.):
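        # Student features and head outputs are collected according to the
        # attention-transfer flags in train_cfg.teacher, the frozen teacher is
        # run under no_grad on the same batch, and both sets of features plus
        # both head outputs are passed to bbox_head.loss().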
feats = ()
backbone_feats = self.backbone(img)
if self.train_cfg.teacher.backbone_at:
for i in self.train_cfg.teacher.backbone_at_idxes:
feats += (backbone_feats[i],)
if self.with_neck:
neck_feats = self.neck(backbone_feats)
if self.train_cfg.teacher.neck_at:
feats += neck_feats
outs = self.bbox_head(neck_feats)
else:
outs = self.bbox_head(backbone_feats)
with torch.no_grad():
t_feats = ()
t_backbone_feats = self.teacher_detector.backbone(img)
if self.train_cfg.teacher.backbone_at:
for i in self.train_cfg.teacher.backbone_at_idxes:
t_feats += (t_backbone_feats[i],)
if self.with_neck:
t_neck_feats = self.teacher_detector.neck(t_backbone_feats)
if self.train_cfg.teacher.neck_at:
t_feats += t_neck_feats
t_outs = self.teacher_detector.bbox_head(t_neck_feats)
else:
t_outs = self.teacher_detector.bbox_head(t_backbone_feats)
loss_inputs = (feats,) + outs + (t_feats,) + t_outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
return losses
| 1.75 | 2 |
metaworld/envs/mujoco/sawyer_xyz/v2/sawyer_dial_turn_v2.py | yiwc/robotics-world | 681 | 3886 | <gh_stars>100-1000
import numpy as np
from gym.spaces import Box
from metaworld.envs import reward_utils
from metaworld.envs.asset_path_utils import full_v2_path_for
from metaworld.envs.mujoco.sawyer_xyz.sawyer_xyz_env import SawyerXYZEnv, _assert_task_is_set
class SawyerDialTurnEnvV2(SawyerXYZEnv):
TARGET_RADIUS = 0.07
def __init__(self):
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.1, 0.7, 0.0)
obj_high = (0.1, 0.8, 0.0)
goal_low = (-0.1, 0.73, 0.0299)
goal_high = (0.1, 0.83, 0.0301)
super().__init__(
self.model_name,
hand_low=hand_low,
hand_high=hand_high,
)
self.init_config = {
'obj_init_pos': np.array([0, 0.7, 0.0]),
'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32),
}
self.goal = np.array([0., 0.73, 0.08])
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
self._random_reset_space = Box(
np.array(obj_low),
np.array(obj_high),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
@property
def model_name(self):
return full_v2_path_for('sawyer_xyz/sawyer_dial.xml')
@_assert_task_is_set
def evaluate_state(self, obs, action):
(reward,
tcp_to_obj,
_,
target_to_obj,
object_grasped,
in_place) = self.compute_reward(action, obs)
info = {
'success': float(target_to_obj <= self.TARGET_RADIUS),
'near_object': float(tcp_to_obj <= 0.01),
'grasp_success': 1.,
'grasp_reward': object_grasped,
'in_place_reward': in_place,
'obj_to_target': target_to_obj,
'unscaled_reward': reward,
}
return reward, info
def _get_pos_objects(self):
dial_center = self.get_body_com('dial').copy()
dial_angle_rad = self.data.get_joint_qpos('knob_Joint_1')
offset = np.array([
np.sin(dial_angle_rad),
-np.cos(dial_angle_rad),
0
])
dial_radius = 0.05
offset *= dial_radius
return dial_center + offset
def _get_quat_objects(self):
return self.sim.data.get_body_xquat('dial')
def reset_model(self):
self._reset_hand()
self._target_pos = self.goal.copy()
self.obj_init_pos = self.init_config['obj_init_pos']
self.prev_obs = self._get_curr_obs_combined_no_goal()
if self.random_init:
goal_pos = self._get_state_rand_vec()
self.obj_init_pos = goal_pos[:3]
final_pos = goal_pos.copy() + np.array([0, 0.03, 0.03])
self._target_pos = final_pos
self.sim.model.body_pos[self.model.body_name2id('dial')] = self.obj_init_pos
self.dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
return self._get_obs()
def compute_reward(self, action, obs):
obj = self._get_pos_objects()
dial_push_position = self._get_pos_objects() + np.array([0.05, 0.02, 0.09])
tcp = self.tcp_center
target = self._target_pos.copy()
target_to_obj = (obj - target)
target_to_obj = np.linalg.norm(target_to_obj)
target_to_obj_init = (self.dial_push_position - target)
target_to_obj_init = np.linalg.norm(target_to_obj_init)
in_place = reward_utils.tolerance(
target_to_obj,
bounds=(0, self.TARGET_RADIUS),
margin=abs(target_to_obj_init - self.TARGET_RADIUS),
sigmoid='long_tail',
)
dial_reach_radius = 0.005
tcp_to_obj = np.linalg.norm(dial_push_position - tcp)
tcp_to_obj_init = np.linalg.norm(self.dial_push_position - self.init_tcp)
reach = reward_utils.tolerance(
tcp_to_obj,
bounds=(0, dial_reach_radius),
margin=abs(tcp_to_obj_init-dial_reach_radius),
sigmoid='gaussian',
)
gripper_closed = min(max(0, action[-1]), 1)
reach = reward_utils.hamacher_product(reach, gripper_closed)
tcp_opened = 0
object_grasped = reach
reward = 10 * reward_utils.hamacher_product(reach, in_place)
return (reward,
tcp_to_obj,
tcp_opened,
target_to_obj,
object_grasped,
in_place)
| 1.96875 | 2 |
yezdi/parser/parser.py | ragsagar/yezdi | 1 | 3887 | from yezdi.lexer.token import TokenType
from yezdi.parser.ast import Program, Statement, Participant, Title, LineStatement
class Parser:
def __init__(self, lexer):
self.lexer = lexer
self.current_token = None
self.peek_token = None
self.next_token()
self.next_token()
self.participants = {}
def next_token(self):
self.current_token, self.peek_token = self.peek_token, self.lexer.next_token()
def parse_program(self):
program = Program()
while self.current_token.type != TokenType.EOF:
statement = self.parse_statement()
if statement:
program.statements.append(statement)
self.next_token()
return program
def parse_statement(self):
if self.current_token.type == TokenType.IDENTIFIER:
return self.parse_line_statement()
elif self.current_token.type == TokenType.TITLE:
return self.parse_title()
return None
def parse_line_statement(self):
participant_literal = self.current_token.literal
if not self.peek_token.type in [TokenType.SOLID_LINE, TokenType.DASHED_LINE]:
return None
self.next_token()
participant = Participant(participant_literal)
line = LineStatement(self.current_token.type)
line.set_source(participant)
if not self.expect_peek(TokenType.IDENTIFIER):
return None
target = Participant(self.current_token.literal)
line.set_target(target)
if not self.expect_peek(TokenType.COLON):
return None
if self.expect_peek(TokenType.IDENTIFIER):
line.set_info(self.current_token.literal)
if self.peek_token.type not in [TokenType.NEWLINE, TokenType.EOF]:
return None
statement = Statement(line)
return statement
def get_participant(self, value):
if value in self.participants:
return self.participants[value]
else:
participant = Participant(value)
self.participants[value] = participant
return participant
def expect_peek(self, token_type):
if self.peek_token.type == token_type:
self.next_token()
return True
else:
return False
def parse_title(self):
if not self.expect_peek(TokenType.IDENTIFIER):
return None
title = Title(self.current_token.literal)
return Statement(title)
class ParserError(Exception):
pass
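# Minimal usage sketch (assumes yezdi.lexer provides a Lexer that emits the
# TokenType tokens consumed above; the constructor shown is an assumption):
#   from yezdi.lexer import Lexer
#   program = Parser(Lexer("title Checkout\nclient->server: request")).parse_program()
#   print(len(program.statements))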
| 2.515625 | 3 |
scripts/check_categories.py | oberron/entolusis | 0 | 3888 | # list categories in category folder
from os import walk
from os.path import abspath,join, pardir
categories_folder = abspath(join(__file__,pardir,pardir,"category"))
post_folder = abspath(join(__file__,pardir,pardir,"_posts"))
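# Each post is expected to declare a Jekyll-style front-matter line such as
# "categories: [python, linux]"; any category not backed by a file in
# category/ is reported below.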
site_categories = []
for root,directories,files in walk(categories_folder):
for f in files:
site_categories.append(f.split(".md")[0])
site_categories = set(site_categories)
for root,directories,files in walk(post_folder):
for f in files:
with open(join(root,f),'r',encoding="utf-8") as fi:
lines = fi.readlines()
for l in lines:
if l.find("categories")==0:
categories = l.split(":")[1]
for c in [" ","[","]","\n"]:
categories = categories.replace(c,"")
categories=categories.split(",")
if len(set(categories)-site_categories)>0:
print(f,set(categories)-site_categories)
break
print("done") | 2.96875 | 3 |
docsrc/makedoc.py | syoyo/soloud | 1 | 3889 | <reponame>syoyo/soloud<filename>docsrc/makedoc.py
#!/usr/bin/env python3
""" builds documentation files from multimarkdown (mmd) source
to various formats, including the web site and pdf.
"""
import subprocess
import glob
import os
import sys
import time
import shutil
src = [
"intro.mmd",
"downloads.mmd",
"quickstart.mmd",
"faq.mmd",
"dirstruct.mmd",
"premake.mmd",
"legal.mmd",
"concepts.mmd",
"concepts3d.mmd",
"voicemanagement.mmd",
"examples.mmd",
"foreign_interface.mmd",
"c_api.mmd",
"python_api.mmd",
"ruby_api.mmd",
"rpgmaker_api.mmd",
"bmx_api.mmd",
"gamemaker_api.mmd",
"cs_api.mmd",
"d_api.mmd",
"codegen.mmd",
"basics.mmd",
"attributes.mmd",
"faders.mmd",
"voicegroups.mmd",
"coremisc.mmd",
"core3d.mmd",
"audiosource.mmd",
"newsoundsources.mmd",
"wav.mmd",
"wavstream.mmd",
"speech.mmd",
"sfxr.mmd",
"modplug.mmd",
"monotone.mmd",
"tedsid.mmd",
"vizsn.mmd",
"vic.mmd",
"filters.mmd",
"biquadfilter.mmd",
"echofilter.mmd",
"lofifilter.mmd",
"flangerfilter.mmd",
"dcremovalfilter.mmd",
"fftfilter.mmd",
"bassboostfilter.mmd",
"waveshaperfilter.mmd",
"mixbus.mmd",
"queue.mmd",
"collider.mmd",
"attenuator.mmd",
"file.mmd",
"backends.mmd"
]
website_only = [
"downloads.mmd"
]
unknown = 0
for file in glob.glob("*.mmd"):
if file not in src:
unknown = 1
print(file + " not included in docs!")
if unknown:
print("Add the new files to makedoc.py, soloud.tex and htmlpre.txt.")
sys.exit()
datestring = time.strftime("%Y%m%d")
if not os.path.exists(datestring + "/web"):
os.makedirs(datestring + "/web")
if not os.path.exists("temp/"):
os.makedirs("temp/")
print("- -- --- -- - Generating single-file HTML docs")
callp = ["pandoc", "-s", "-t", "html5", "-f", "markdown-smart", "--metadata", 'title="SoLoud ' + datestring + '"', "-H", "singlehtml_head.txt", "-B", "singlehtml_body.txt", "--toc", "--self-contained", "--default-image-extension=png", "-o", datestring + "/soloud_" + datestring + ".html"]
for x in src:
if x not in website_only:
callp.append(x)
subprocess.call(callp)
print("- -- --- -- - Generating web site")
for x in src:
subprocess.call(["pandoc", "--template=html.pandoc", "-f", "markdown-smart", "--metadata", 'title="SoLoud ' + datestring + ' ' + x[:len(x)-4] + '"', "-B", "htmlpre.txt", "-A", "htmlpost.txt", "--default-image-extension=png", x, "-o", datestring + "/web/" + x[:len(x)-3]+"html.bak"])
with open(datestring + "/web/" + x[:len(x)-3]+"html", "w") as file_out:
with open(datestring + "/web/" + x[:len(x)-3]+"html.bak", "r") as file_in:
for line in file_in:
file_out.write(line.replace('code>', 'code>\n').replace('::','::<wbr>').replace('\xc2','').replace('\xa0',''))
if x == "intro.mmd":
if os.path.isfile(datestring + "/web/index.html"):
os.remove(datestring + "/web/index.html")
os.rename(datestring + "/web/intro.html", datestring + "/web/index.html")
print("- -- --- -- - Generating epub")
callp = ["pandoc", "-N", "--toc", "--epub-cover-image=images/cover.png", "-t", "epub3", "--default-image-extension=png", "-f", "markdown-smart", "--css=epub.css", "--epub-metadata=metadata.xml", "-o", datestring + "/soloud_" + datestring + ".epub", "title.txt"]
for x in src:
if x not in website_only:
callp.append(x)
subprocess.call(callp)
print("- -- --- -- - Converting epub -> mobi (kindlegen_output.txt)")
with open('kindlegen_output.txt', 'w') as outfile:
subprocess.call(["kindlegen", datestring + "/soloud_" + datestring + ".epub", "-c2"], stdout=outfile)
print("- -- --- -- - Generating LaTex")
for x in src:
if x not in website_only:
subprocess.call(["pandoc", "-t", "latex", "--listings", "--default-image-extension=pdf", "--top-level-division=chapter", x, "-o", "temp/" + x[:len(x)-3]+"tex.orig"])
with open("temp/" + x[:len(x)-3]+"tex", "w") as file_out:
with open("temp/" + x[:len(x)-3]+"tex.orig", "r") as file_in:
for line in file_in:
file_out.write(line.replace('\\begin{longtable}[]{@{}ll@{}}', '\\begin{tabulary}{\\textwidth}{lJ}').replace('\\begin{longtable}[]{@{}lll@{}}', '\\begin{tabulary}{\\textwidth}{lJJ}').replace('\\begin{longtable}[]{@{}llll@{}}', '\\begin{tabulary}{\\textwidth}{lJJJ}').replace('\\endhead','').replace('\\end{longtable}','\\end{tabulary}'))
print("- -- --- -- - Generating pdf (xelatex_output.txt)")
with open('xelatex_output.txt', 'w') as outfile:
subprocess.call(["xelatex", "SoLoud.tex"], stdout=outfile)
print("- -- --- -- - Generating pdf pass 2..")
subprocess.call(["xelatex", "SoLoud.tex"], stdout=outfile)
shutil.move("SoLoud.pdf", datestring + "/soloud_" + datestring + ".pdf")
print("- -- --- -- - Cleanup..")
tempsuffix = ["aux", "toc", "out", "log", "lg", "4ct", "4tc", "idv", "tmp", "xdv", "xref", "bak"]
for suffix in tempsuffix:
for file in glob.glob("*."+suffix):
os.remove(file)
for file in glob.glob(datestring + "/web/*."+suffix):
os.remove(file)
for file in glob.glob("temp/*"):
os.remove(file)
os.rmdir("temp")
print("- -- --- -- - Done - " + datestring)
| 1.976563 | 2 |
arch/api/base/utils/party.py | yzjba/FATE | 32 | 3890 | <filename>arch/api/base/utils/party.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Party(object):
"""
Uniquely identify
"""
def __init__(self, role, party_id):
self.role = role
self.party_id = party_id
def __hash__(self):
return (self.role, self.party_id).__hash__()
def __str__(self):
return f"Party(role={self.role}, party_id={self.party_id})"
def __repr__(self):
return self.__str__()
def __lt__(self, other):
return (self.role, self.party_id) < (other.role, other.party_id)
def __eq__(self, other):
return self.party_id == other.party_id and self.role == other.role
def to_pb(self):
from arch.api.proto import federation_pb2
return federation_pb2.Party(partyId=f"{self.party_id}", name=self.role)
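# Usage sketch: parties hash, compare and order by (role, party_id), so they
# can key dicts and sort deterministically, e.g.
#   guest, host = Party("guest", 9999), Party("host", 10000)
#   sorted([host, guest])                    # guest sorts before host
#   {guest: "local"}[Party("guest", 9999)]   # -> "local"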
| 2.21875 | 2 |
src/sentry/options/defaults.py | faulkner/sentry | 0 | 3891 | """
sentry.options.defaults
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from sentry.logging import LoggingFormat
from sentry.options import (
FLAG_IMMUTABLE, FLAG_NOSTORE, FLAG_PRIORITIZE_DISK, FLAG_REQUIRED, FLAG_ALLOW_EMPTY,
register,
)
from sentry.utils.types import Dict, String, Sequence
# Cache
# register('cache.backend', flags=FLAG_NOSTORE)
# register('cache.options', type=Dict, flags=FLAG_NOSTORE)
# System
register('system.admin-email', flags=FLAG_REQUIRED)
register('system.support-email', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('system.security-email', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('system.databases', type=Dict, flags=FLAG_NOSTORE)
# register('system.debug', default=False, flags=FLAG_NOSTORE)
register('system.rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('system.secret-key', flags=FLAG_NOSTORE)
# Absolute URL to the sentry root directory. Should not include a trailing slash.
register('system.url-prefix', ttl=60, grace=3600, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('system.root-api-key', flags=FLAG_PRIORITIZE_DISK)
register('system.logging-format', default=LoggingFormat.HUMAN, flags=FLAG_NOSTORE)
# Redis
register(
'redis.clusters',
type=Dict,
default={
'default': {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
},
},
},
flags=FLAG_NOSTORE | FLAG_IMMUTABLE
)
register('redis.options', type=Dict, flags=FLAG_NOSTORE)
# symbolizer specifics
register('dsym.cache-path', type=String, default='/tmp/sentry-dsym-cache')
# Mail
register('mail.backend', default='smtp', flags=FLAG_NOSTORE)
register('mail.host', default='localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.port', default=25, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.username', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('mail.password', flags=FLAG_REQUIRED | FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('mail.use-tls', default=False, flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.subject-prefix', default='[Sentry] ', flags=FLAG_PRIORITIZE_DISK)
register('mail.from', default='root@localhost', flags=FLAG_REQUIRED | FLAG_PRIORITIZE_DISK)
register('mail.list-namespace', type=String, default='localhost', flags=FLAG_NOSTORE)
register('mail.enable-replies', default=False, flags=FLAG_PRIORITIZE_DISK)
register('mail.reply-hostname', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('mail.mailgun-api-key', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
# SMS
register('sms.twilio-account', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('sms.twilio-token', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('sms.twilio-number', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
# U2F
register('u2f.app-id', default='', flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('u2f.facets', default=(), type=Sequence,
flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('auth.ip-rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('auth.user-rate-limit', default=0, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
register('api.rate-limit.org-create', default=5, flags=FLAG_ALLOW_EMPTY | FLAG_PRIORITIZE_DISK)
# Filestore
register('filestore.backend', default='filesystem', flags=FLAG_NOSTORE)
register('filestore.options', default={'location': '/tmp/sentry-files'}, flags=FLAG_NOSTORE)
| 1.59375 | 2 |
tools/unidatadownload.py | henryiii/backrefs | 0 | 3892 | """Download `Unicodedata` files."""
from __future__ import unicode_literals
import os
import zipfile
import codecs
from urllib.request import urlopen
__version__ = '2.2.0'
HOME = os.path.dirname(os.path.abspath(__file__))
def zip_unicode(output, version):
"""Zip the Unicode files."""
zipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version), 'w', zipfile.ZIP_DEFLATED)
target = os.path.join(output, 'unicodedata', version)
print('Zipping %s.zip...' % version)
for root, dirs, files in os.walk(target):
for file in files:
if file.endswith('.txt'):
zipper.write(os.path.join(root, file), arcname=file)
def unzip_unicode(output, version):
"""Unzip the Unicode files."""
unzipper = zipfile.ZipFile(os.path.join(output, 'unicodedata', '%s.zip' % version))
target = os.path.join(output, 'unicodedata', version)
print('Unzipping %s.zip...' % version)
os.makedirs(target)
for f in unzipper.namelist():
# Do I need backslash on windows? Or is it forward as well?
unzipper.extract(f, target)
def download_unicodedata(version, output=HOME, no_zip=False):
"""Download Unicode data scripts and blocks."""
ver = tuple([int(x) for x in version.split('.')])
files = [
'UnicodeData.txt',
'Scripts.txt',
'Blocks.txt',
'PropList.txt',
'DerivedCoreProperties.txt',
'DerivedNormalizationProps.txt',
'CompositionExclusions.txt',
'PropertyValueAliases.txt',
'PropertyAliases.txt',
'EastAsianWidth.txt',
'LineBreak.txt',
'HangulSyllableType.txt',
'DerivedAge.txt',
'auxiliary/WordBreakProperty.txt',
'auxiliary/SentenceBreakProperty.txt',
'auxiliary/GraphemeBreakProperty.txt',
'extracted/DerivedDecompositionType.txt',
'extracted/DerivedNumericType.txt',
'extracted/DerivedNumericValues.txt',
'extracted/DerivedJoiningType.txt',
'extracted/DerivedJoiningGroup.txt',
'extracted/DerivedCombiningClass.txt',
'emoji/emoji-data.txt'
]
files.append('ScriptExtensions.txt')
files.append('IndicPositionalCategory.txt')
files.append('IndicSyllabicCategory.txt')
files.append('BidiBrackets.txt')
if ver >= (11, 0, 0):
files.append('VerticalOrientation.txt')
http_url = 'http://www.unicode.org/Public/%s/ucd/' % version
ftp_url = 'ftp://ftp.unicode.org/Public/%s/ucd/' % version
destination = os.path.join(output, 'unicodedata', version)
if not os.path.exists(destination):
os.makedirs(destination)
zip_data = not no_zip
for f in files:
file_location = os.path.join(destination, os.path.basename(f))
retrieved = False
if not os.path.exists(file_location):
for url in (ftp_url, http_url):
furl = url + f
try:
print('Downloading: %s --> %s' % (furl, file_location))
response = urlopen(furl, timeout=30)
data = response.read()
except Exception:
print('Failed: %s' % url)
continue
with codecs.open(file_location, 'w', encoding='utf-8') as uf:
uf.write(data.decode('utf-8'))
retrieved = True
break
if not retrieved:
print('Failed to acquire all needed Unicode files!')
break
else:
retrieved = True
print('Skipping: found %s' % file_location)
if not retrieved:
zip_data = False
break
if zip_data and not os.path.exists(os.path.join(output, 'unicodedata', '%s.zip' % version)):
zip_unicode(output, version)
def get_unicodedata(version, output=HOME, no_zip=False):
"""Ensure we have Unicode data to generate Unicode tables."""
target = os.path.join(output, 'unicodedata', version)
zip_target = os.path.join(output, 'unicodedata', '%s.zip' % version)
if not os.path.exists(target) and os.path.exists(zip_target):
unzip_unicode(output, version)
# Download missing files if any. Zip if required.
download_unicodedata(version, output, no_zip)
if __name__ == '__main__':
import argparse
import unicodedata
parser = argparse.ArgumentParser(prog='unidatadownload', description='Generate a unicode property table.')
parser.add_argument('--version', action='version', version="%(prog)s " + __version__)
parser.add_argument('--output', default=HOME, help='Output file.')
parser.add_argument('--unicode-version', default=None, help='Force a specific Unicode version.')
args = parser.parse_args()
if args.unicode_version is None:
version = unicodedata.unidata_version
else:
version = args.unicode_version
get_unicodedata(version, output=args.output)
| 3.375 | 3 |
generator/cache/cache.py | biarmic/OpenCache | 5 | 3893 | # See LICENSE for licensing information.
#
# Copyright (c) 2021 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import debug
import datetime
from policy import associativity
from globals import OPTS, print_time
class cache:
"""
This is not a design module, but contains a cache design instance.
"""
def __init__(self, cache_config, name):
cache_config.set_local_config(self)
self.name = name
# Import the design module of the cache
if OPTS.associativity == associativity.DIRECT:
from direct_cache import direct_cache as cache
elif OPTS.associativity == associativity.N_WAY:
from n_way_cache import n_way_cache as cache
elif OPTS.associativity == associativity.FULLY:
# TODO: from full_cache import full_cache as cache
debug.error("Fully associative cache is not supported at the moment.", -1)
else:
debug.error("Invalid associativity.", -1)
self.c = cache(cache_config, name)
def config_write(self, paths):
""" Save the config files. """
self.c.config_write(paths)
def verilog_write(self, path):
""" Save the Verilog file. """
self.c.verilog_write(path)
def save(self):
""" Save all the output files. """
debug.print_raw("Saving output files...")
# Write the config files
start_time = datetime.datetime.now()
cpaths = {
"data": OPTS.output_path + OPTS.data_array_name + "_config.py",
"tag": OPTS.output_path + OPTS.tag_array_name + "_config.py",
"use": OPTS.output_path + OPTS.use_array_name + "_config.py"
}
if not OPTS.replacement_policy.has_sram_array(): del cpaths["use"]
for k, cpath in cpaths.items():
debug.print_raw("Config: Writing to {}".format(cpath))
self.config_write(cpaths)
print_time("Config", datetime.datetime.now(), start_time)
# Write the Verilog file
start_time = datetime.datetime.now()
vpath = OPTS.output_path + self.c.name + ".v"
debug.print_raw("Verilog: Writing to {}".format(vpath))
self.verilog_write(vpath)
print_time("Verilog", datetime.datetime.now(), start_time) | 2.265625 | 2 |
utils/data_loader.py | elieser1101/loglizer | 0 | 3894 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
import pandas as pd
import os
import numpy as np
def hdfs_data_loader(para):
""" load the log sequence matrix and labels from the file path.
Args:
--------
para: the parameters dictionary
Returns:
--------
raw_data: log sequences matrix
label_data: labels matrix
"""
file_path = para['path'] + para['log_seq_file_name']
label_path = para['path'] + para['label_file_name']
# load log sequence matrix
pre_df = pd.read_csv(file_path, nrows=1, header=None, delimiter=r'\s+')
columns = pre_df.columns.tolist()
# remove the last column of block name
use_cols = columns[:-1]
data_df = pd.read_csv(file_path, delimiter=r'\s+', header=None, usecols =use_cols, dtype =int)
raw_data = data_df.as_matrix()
# load lables
label_df = pd.read_csv(label_path, delimiter=r'\s+', header=None, usecols = [0], dtype =int) # usecols must be a list
label_data = label_df.as_matrix()
print("The raw data shape is {} and label shape is {}".format(raw_data.shape, label_data.shape))
assert raw_data.shape[0] == label_data.shape[0]
print('The number of anomaly instances is %d' % sum(label_data))
return raw_data, label_data
def bgl_data_loader(para):
""" load the logs and the log_event_mapping from the file path.
Args:
--------
para: the parameters dictionary
Returns:
--------
raw_data: list of (label, time)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
"""
file_path = para['path'] + para['log_file_name']
event_mapping_path = para['path'] + para['log_event_mapping']
# load data
data_df = pd.read_csv(file_path, delimiter=r'\s+', header=None, names = ['label','time'], usecols = para['select_column']) #, parse_dates = [1], date_parser=dateparse)
# convert to date time format
data_df['time'] = pd.to_datetime(data_df['time'], format="%Y-%m-%d-%H.%M.%S.%f")
# calculate the time interval since the start time
data_df['seconds_since'] = (data_df['time']-data_df['time'][0]).dt.total_seconds().astype(int)
# get the label for each log
data_df['label'] = (data_df['label'] != '-').astype(int)
raw_data = data_df[['label','seconds_since']].as_matrix()
# load the event mapping list
event_mapping = pd.read_csv(event_mapping_path, delimiter=r'\s+', header=None, usecols = [0], dtype =int)
event_mapping_data = event_mapping.as_matrix()
print("The raw data shape is {} and label shape is {}".format(raw_data.shape, event_mapping_data.shape))
assert raw_data.shape[0] == event_mapping_data.shape[0]
print('The number of anomaly logs is %d, but it requires further processing' % sum(raw_data[:, 0]))
return raw_data, event_mapping_data
def bgl_preprocess_data(para, raw_data, event_mapping_data):
""" split logs into sliding windows, built an event count matrix and get the corresponding label
Args:
--------
para: the parameters dictionary
raw_data: list of (label, time)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
Returns:
--------
event_count_matrix: event count matrix, where each row is an instance (log sequence vector)
labels: a list of labels, 1 represents anomaly
"""
    # create the directory for saving the sliding windows (start_index, end_index), so they can be loaded directly on future runs
if not os.path.exists(para['save_path']):
os.mkdir(para['save_path'])
log_size = raw_data.shape[0]
sliding_file_path = para['save_path']+'sliding_'+str(para['window_size'])+'h_'+str(para['step_size'])+'h.csv'
#=================divide into sliding windows=============#
    start_end_index_list = [] # list of tuples; each tuple holds two numbers representing the start and end of a sliding time window
label_data, time_data = raw_data[:,0], raw_data[:, 1]
if not os.path.exists(sliding_file_path):
# split into sliding window
start_time = time_data[0]
start_index = 0
end_index = 0
# get the first start, end index, end time
for cur_time in time_data:
if cur_time < start_time + para['window_size']*3600:
end_index += 1
end_time = cur_time
else:
start_end_pair=tuple((start_index,end_index))
start_end_index_list.append(start_end_pair)
break
# move the start and end index until next sliding window
while end_index < log_size:
start_time = start_time + para['step_size']*3600
end_time = end_time + para['step_size']*3600
for i in range(start_index,end_index):
if time_data[i] < start_time:
i+=1
else:
break
for j in range(end_index, log_size):
if time_data[j] < end_time:
j+=1
else:
break
start_index = i
end_index = j
start_end_pair = tuple((start_index, end_index))
start_end_index_list.append(start_end_pair)
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset\n'%inst_number)
np.savetxt(sliding_file_path,start_end_index_list,delimiter=',',fmt='%d')
else:
print('Loading start_end_index_list from file')
start_end_index_list = pd.read_csv(sliding_file_path, header=None).as_matrix()
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset' % inst_number)
# get all the log indexes in each time window by ranging from start_index to end_index
expanded_indexes_list=[]
for t in range(inst_number):
index_list = []
expanded_indexes_list.append(index_list)
for i in range(inst_number):
start_index = start_end_index_list[i][0]
end_index = start_end_index_list[i][1]
for l in range(start_index, end_index):
expanded_indexes_list[i].append(l)
event_mapping_data = [row[0] for row in event_mapping_data]
event_num = len(list(set(event_mapping_data)))
print('There are %d log events'%event_num)
#=================get labels and event count of each sliding window =============#
labels = []
event_count_matrix = np.zeros((inst_number,event_num))
for j in range(inst_number):
label = 0 #0 represent success, 1 represent failure
for k in expanded_indexes_list[j]:
event_index = event_mapping_data[k]
event_count_matrix[j, event_index] += 1
if label_data[k]:
label = 1
continue
labels.append(label)
assert inst_number == len(labels)
print("Among all instances, %d are anomalies"%sum(labels))
assert event_count_matrix.shape[0] == len(labels)
return event_count_matrix, labels
def deepia_data_loader(para):
""" load the logs and the log_event_mapping from the file path.
Args:
--------
para: the parameters dictionary
Returns:
--------
    raw_data: array of timestamps (seconds since the first log line)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
"""
file_path = para['path'] + para['log_file_name']
event_mapping_path = para['path'] + para['log_event_mapping']
# load data
data_df = pd.read_csv(file_path, delimiter=r'\s+', header=None, names=['month', 'day', 'hour'],
usecols=para['select_column']) # , parse_dates = [1], date_parser=dateparse)
# convert to date time format
data_df = data_df[['month', 'day', 'hour']].apply(lambda x: list(map(str, x)))
data_df['time'] = data_df[['month', 'day', 'hour']].apply(lambda x: '-'.join(x), axis=1) #
data_df['time'] = pd.to_datetime(data_df['time'], format="%b-%d-%H:%M:%S")
# calculate the time interval since the start time
data_df['seconds_since'] = (data_df['time'] - data_df['time'][0]).dt.total_seconds().astype(int)
# get the label for each log
# data_df['label'] = (data_df['label'] != '-').astype(int)
raw_data = data_df[['seconds_since']].as_matrix()
# load the event mapping list
event_mapping = pd.read_csv(event_mapping_path, delimiter=r'\s+', header=None, usecols = [0], dtype =int)
event_mapping_data = event_mapping.as_matrix()
print("The raw data shape is {} and label shape is {}".format(raw_data.shape, event_mapping_data.shape))
assert raw_data.shape[0] == event_mapping_data.shape[0]
#print('The number of anomaly logs is %d, but it requires further processing' % sum(raw_data[:, 0]))
return raw_data, event_mapping_data
def deepia_preprocess_data(para, raw_data, event_mapping_data):
""" split logs into sliding windows, built an event count matrix and get the corresponding label
Args:
--------
para: the parameters dictionary
raw_data: list of (label, time)
event_mapping_data: a list of event index, where each row index indicates a corresponding log
Returns:
--------
event_count_matrix: event count matrix, where each row is an instance (log sequence vector)
labels: a list of labels, 1 represents anomaly
"""
    # create the directory for saving the sliding windows (start_index, end_index), so they can be loaded directly on future runs
if not os.path.exists(para['save_path']):
os.mkdir(para['save_path'])
log_size = raw_data.shape[0]
sliding_file_path = para['save_path']+'sliding_'+str(para['window_size'])+'h_'+str(para['step_size'])+'h.csv'
#=================divide into sliding windows=============#
    start_end_index_list = [] # list of tuples; each tuple holds two numbers representing the start and end of a sliding time window
time_data = raw_data[:,0]
if not os.path.exists(sliding_file_path):
# split into sliding window
start_time = time_data[0]
start_index = 0
end_index = 0
# get the first start, end index, end time
for cur_time in time_data:
if cur_time < start_time + para['window_size']*3600:
end_index += 1
end_time = cur_time
else:
start_end_pair=tuple((start_index,end_index))
start_end_index_list.append(start_end_pair)
break
# move the start and end index until next sliding window
while end_index < log_size:
start_time = start_time + para['step_size']*3600
end_time = end_time + para['step_size']*3600
for i in range(start_index,end_index):
if time_data[i] < start_time:
i+=1
else:
break
for j in range(end_index, log_size):
if time_data[j] < end_time:
j+=1
else:
break
start_index = i
end_index = j
start_end_pair = tuple((start_index, end_index))
start_end_index_list.append(start_end_pair)
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset\n'%inst_number)
np.savetxt(sliding_file_path,start_end_index_list,delimiter=',',fmt='%d')
else:
print('Loading start_end_index_list from file')
start_end_index_list = pd.read_csv(sliding_file_path, header=None).as_matrix()
inst_number = len(start_end_index_list)
print('there are %d instances (sliding windows) in this dataset' % inst_number)
# get all the log indexes in each time window by ranging from start_index to end_index
expanded_indexes_list=[]
for t in range(inst_number):
index_list = []
expanded_indexes_list.append(index_list)
for i in range(inst_number):
start_index = start_end_index_list[i][0]
end_index = start_end_index_list[i][1]
for l in range(start_index, end_index):
expanded_indexes_list[i].append(l)
event_mapping_data = [row[0] for row in event_mapping_data]
event_num = len(list(set(event_mapping_data)))
print('There are %d log events'%event_num)
#=================get labels and event count of each sliding window =============#
event_count_matrix = np.zeros((inst_number,event_num))
for j in range(inst_number):
for k in expanded_indexes_list[j]:
event_index = event_mapping_data[k]
event_count_matrix[j, event_index] += 1
#print("Among all instances, %d are anomalies"%sum(labels))
return event_count_matrix
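# --- Usage sketch (added for illustration; not part of the original loglizer module) ---
# The parameter keys mirror the ones consumed by bgl_data_loader/bgl_preprocess_data above,
# but every path, column index and window size below is a hypothetical placeholder.
if __name__ == '__main__':
    demo_para = {
        'path': './data/BGL/',                      # hypothetical data directory
        'log_file_name': 'BGL.log',                 # hypothetical raw log file
        'log_event_mapping': 'logTemplateMap.csv',  # hypothetical log-to-event-index mapping file
        'select_column': [0, 4],                    # hypothetical label/time column indexes
        'save_path': './results/',                  # directory for cached sliding windows
        'window_size': 3,                           # window length in hours
        'step_size': 1                              # window step in hours
    }
    demo_raw_data, demo_event_mapping = bgl_data_loader(demo_para)
    demo_matrix, demo_labels = bgl_preprocess_data(demo_para, demo_raw_data, demo_event_mapping)
    print('event count matrix shape: %s' % str(demo_matrix.shape))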
| 2.921875 | 3 |
aspx2url/aspx2url.py | marcocucinato/aspx2url | 0 | 3895 | from __future__ import print_function
import re, sys, glob, getopt, os
def usage():
print('aspx2url v1.0')
print('Usage:')
print(sys.argv[0]+' -d -h filename(s)')
print('-d : Delete original file')
print('-h : This help')
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hd")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
deleteOriginal = False
for option,value in opts:
if option == '-h':
usage()
sys.exit()
elif option == '-d':
deleteOriginal = True
for origFilename in args:
with open(origFilename, "r") as f:
html_doc = f.read()
            prog = re.compile(r'<mso:URL.*?>(.*?),.*?</mso:URL>', re.M)
            result = prog.search(html_doc)
            url = result.group(1)
            filename = re.search(r'(.*?)\.aspx', origFilename).group(1)
fullFilename = filename+'.url'
with open(fullFilename, 'w') as out:
out.write('[InternetShortcut]\n')
out.write('URL='+url)
out.write('\n')
if deleteOriginal:
os.remove(origFilename)
if __name__ == '__main__':
main()
| 2.859375 | 3 |
pytype/tests/py2/test_stdlib.py | souravbadami/pytype | 1 | 3896 | """Tests of selected stdlib functions."""
from pytype.tests import test_base
class StdlibTests(test_base.TargetPython27FeatureTest):
"""Tests for files in typeshed/stdlib."""
def testPosix(self):
ty = self.Infer("""
import posix
x = posix.urandom(10)
""")
self.assertTypesMatchPytd(ty, """
posix = ... # type: module
x = ... # type: str
""")
def testXRange(self):
self.Check("""
import random
random.sample(xrange(10), 5)
""")
def testStringTypes(self):
ty = self.Infer("""
import types
if isinstance("", types.StringTypes):
x = 42
if isinstance(False, types.StringTypes):
y = 42
if isinstance(u"", types.StringTypes):
z = 42
""", deep=False)
self.assertTypesMatchPytd(ty, """
types = ... # type: module
x = ... # type: int
z = ... # type: int
""")
def testDefaultDict(self):
self.Check("""
import collections
import itertools
ids = collections.defaultdict(itertools.count(17).next)
""")
def testSysVersionInfoLt(self):
ty = self.Infer("""
import sys
if sys.version_info[0] < 3:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: int
""")
def testSysVersionInfoLe(self):
ty = self.Infer("""
import sys
if sys.version_info[0] <= 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: int
""")
def testSysVersionInfoEq(self):
ty = self.Infer("""
import sys
if sys.version_info[0] == 2:
v = 42
elif sys.version_info[0] == 3:
v = "hello world"
else:
v = None
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: int
""")
def testSysVersionInfoGe(self):
ty = self.Infer("""
import sys
if sys.version_info[0] >= 3:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: str
""")
def testSysVersionInfoGt(self):
ty = self.Infer("""
import sys
if sys.version_info[0] > 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys = ... # type: module
v = ... # type: str
""")
def testSysVersionInfoNamedAttribute(self):
ty = self.Infer("""
import sys
if sys.version_info.major == 2:
v = 42
else:
v = "hello world"
""")
self.assertTypesMatchPytd(ty, """
sys: module
v: int
""")
test_base.main(globals(), __name__ == "__main__")
| 2.421875 | 2 |
smmips/__init__.py | oicr-gsi/pysmmips | 0 | 3897 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 20 16:04:52 2020
@author: rjovelin
"""
| 0.90625 | 1 |
tarentsocialwall/MongoDBClient.py | tarent/socialwall-backend | 0 | 3898 | import random
from datetime import datetime
from passlib.handlers.sha2_crypt import sha256_crypt
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure
from tarentsocialwall.SocialPost import SocialPost
from tarentsocialwall.User import User
from tarentsocialwall.Util import Util
class MongoDBClient:
__instance = None
@staticmethod
def getInstance():
""" Static access method. """
        if MongoDBClient.__instance is None:
            raise Exception("MongoDBClient has not been initialized yet")
        return MongoDBClient.__instance
client = None
db = None
random_social_post_list = None
reset_counter = None
def __init__(self, uri):
# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string
self.client = MongoClient(uri)
self.db = self.client.socialPosts
try:
# The ismaster command is cheap and does not require auth.
self.client.admin.command('ismaster')
except ConnectionFailure:
print("Server not available")
        if MongoDBClient.__instance is not None:
raise Exception("This class is a singleton!")
else:
MongoDBClient.__instance = self
self.update_all_socialposts()
# write social_post into mongo
def write_social_post(self, social_post: SocialPost):
existing_dict = None
try:
existing_dict = self.db.socialPosts.find_one({'externalId': social_post.externalId})
except Exception as ex:
print(ex)
existing_dict = None
if existing_dict is None:
self.db.socialPosts.insert_one(social_post.__dict__)
else:
update_identifier = {'externalId': social_post.externalId, 'source': social_post.source}
self.db.socialPosts.replace_one(update_identifier, social_post.__dict__)
return 0
# read random social_post from list
def get_random_social_post(self) -> SocialPost:
if len(self.random_social_post_list) == 0:
return None
else:
if self.reset_counter >= len(self.random_social_post_list):
                # once we have gone through all posts, reset the counter and shuffle the list
                # so we don't repeat the same cycle of posts every time
self.reset_counter = 0
random.shuffle(self.random_social_post_list)
post = self.random_social_post_list[self.reset_counter]
self.reset_counter = self.reset_counter + 1
print(post)
if post is None:
return None
social_post = SocialPost()
social_post.set_dictionary(post)
return social_post
# read custom social_post from mongo
def get_custom_social_post(self):
doc = list(self.db.socialPosts.aggregate([{'$match': {'source': 'custom post'}}]))
print(list(doc))
if doc is None:
return None
social_post_list = []
for post in doc:
custom_post_item = SocialPost()
custom_post_item.set_dictionary(post)
social_post_list.append(custom_post_item)
return social_post_list
def delete_post(self, external_id):
removed = self.db.socialPosts.delete_one({'externalId': external_id})
print(removed)
def write_access_token(self, access_token, source):
existing_dict = self.db.storeAccessToken.find_one({'source': access_token})
if existing_dict is None:
identifier = {'access_token': access_token, 'source': source}
self.db.storeAccessToken.insert_one(identifier)
else:
update_identifier = {'access_token': access_token, 'source': source}
self.db.storeAccessToken.replace_one(update_identifier, access_token)
return 0
def read_access_token(self, source):
existing_dict = self.db.storeAccessToken.find_one({'source': source})
return existing_dict
def get_google_calendar_posts(self):
timestamp_var = datetime.utcnow().timestamp()
doc = list(self.db.socialPosts.aggregate([
{'$match': {'validFrom': {'$lte': timestamp_var},
'validTo': {'$gte': timestamp_var},
'source': 'Google calendar'}},
{'$sort': {'start': 1}}
]))
if doc is None:
return None
social_post_list = []
for post in doc:
custom_post_item = SocialPost()
custom_post_item.set_dictionary(post)
social_post_list.append(custom_post_item)
return social_post_list
def get_users(self):
users_db = list(self.db.socialwall_users.find())
if users_db is None:
return None
users = []
for item in users_db:
            if item['username'] != 'admin':
user = User()
user.set_dictionary(item)
users.append(user)
return users
def read_user(self, username):
return self.db.socialwall_users.find_one({'username': username})
def write_user(self, user: User):
username_dict = self.db.socialwall_users.find_one({'username': user.username})
if username_dict is None:
self.db.socialwall_users.insert_one(user.__dict__)
else:
update_identifier = {'username': user.username}
self.db.socialwall_users.replace_one(update_identifier, user.__dict__)
return 0
def delete_user(self, user: User):
self.db.socialwall_users.delete_one({'username': user['username']})
def init_admin(self):
random_string = Util.randomString()
user = User()
user.username = 'admin'
        user.password = sha256_crypt.hash(random_string)  # store only a hash of the generated password
print("Admin password is '%s'" % random_string)
user.firstname = 'admin'
user.lastname = 'admin'
self.write_user(user)
#Get all valid social posts from db and shuffle them in random order
def update_all_socialposts(self):
timestamp = datetime.utcnow().timestamp()
self.random_social_post_list = list(self.db.socialPosts.aggregate(
[{'$match': {'validFrom': {'$lte': timestamp}, 'validTo': {'$gte': timestamp}}}]))
random.shuffle(self.random_social_post_list)
self.reset_counter = 0
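# --- Usage sketch (added for illustration; not part of the original module) ---
# The connection URI is a placeholder, and the SocialPost attributes are inferred
# from the queries above (externalId/source/validFrom/validTo).
if __name__ == '__main__':
    client = MongoDBClient('mongodb://localhost:27017/')  # hypothetical MongoDB URI
    demo_post = SocialPost()
    demo_post.externalId = 'demo-1'
    demo_post.source = 'custom post'
    demo_post.validFrom = datetime.utcnow().timestamp()
    demo_post.validTo = demo_post.validFrom + 7 * 24 * 3600  # valid for one week
    client.write_social_post(demo_post)
    client.update_all_socialposts()
    print(client.get_random_social_post())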
| 2.5 | 2 |
src/mount_efs/__init__.py | Sodki/efs-utils | 0 | 3899 | #!/usr/bin/env python
#
# Copyright 2017-2018 Amazon.com, Inc. and its affiliates. All Rights Reserved.
#
# Licensed under the MIT License. See the LICENSE accompanying this file
# for the specific language governing permissions and limitations under
# the License.
#
#
# Copy this script to /sbin/mount.efs and make sure it is executable.
#
# You will be able to mount an EFS file system by its short name, by adding it
# to /etc/fstab. The syntax of an fstab entry is:
#
# [Device] [Mount Point] [File System Type] [Options] [Dump] [Pass]
#
# Add an entry like this:
#
# fs-deadbeef /mount_point efs _netdev 0 0
#
# Using the 'efs' type will cause '/sbin/mount.efs' to be called by 'mount -a'
# for this file system. The '_netdev' option tells the init system that the
# 'efs' type is a networked file system type. This has been tested with systemd
# (Amazon Linux 2, CentOS 7, RHEL 7, Debian 9, and Ubuntu 16.04), and upstart
# (Amazon Linux 2017.09).
#
# Once there is an entry in fstab, the file system can be mounted with:
#
# sudo mount /mount_point
#
# The script will add recommended mount options, if not provided in fstab.
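#
# Illustrative example only (the file-system and access-point IDs below are made up):
# TLS, IAM authorization and an access point can be requested by adding the
# corresponding mount options to the fstab entry, e.g.
#
#   fs-deadbeef /mount_point efs _netdev,tls,iam,accesspoint=fsap-0123456789abcdef0 0 0
#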
import base64
import errno
import hashlib
import hmac
import json
import logging
import os
import pwd
import random
import re
import socket
import subprocess
import sys
import threading
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from logging.handlers import RotatingFileHandler
try:
import ConfigParser
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import ConfigParser, NoOptionError, NoSectionError
try:
from urllib.parse import quote_plus
except ImportError:
from urllib import quote_plus
try:
from urllib2 import URLError, HTTPError, build_opener, urlopen, Request, HTTPHandler
from urllib import urlencode
except ImportError:
from urllib.request import urlopen, Request
from urllib.error import URLError, HTTPError
from urllib.parse import urlencode
try:
import botocore.session
from botocore.exceptions import ClientError, NoCredentialsError, EndpointConnectionError
BOTOCORE_PRESENT = True
except ImportError:
BOTOCORE_PRESENT = False
VERSION = '1.28.2'
SERVICE = 'elasticfilesystem'
CONFIG_FILE = '/etc/amazon/efs/efs-utils.conf'
CONFIG_SECTION = 'mount'
CLIENT_INFO_SECTION = 'client-info'
CLIENT_SOURCE_STR_LEN_LIMIT = 100
CLOUDWATCH_LOG_SECTION = 'cloudwatch-log'
DEFAULT_CLOUDWATCH_LOG_GROUP = '/aws/efs/utils'
DEFAULT_RETENTION_DAYS = 14
# Cloudwatchlog agent dict includes cloudwatchlog botocore client, cloudwatchlog group name, cloudwatchlog stream name
CLOUDWATCHLOG_AGENT = None
LOG_DIR = '/var/log/amazon/efs'
LOG_FILE = 'mount.log'
STATE_FILE_DIR = '/var/run/efs'
PRIVATE_KEY_FILE = '/etc/amazon/efs/privateKey.pem'
DATE_ONLY_FORMAT = '%Y%m%d'
SIGV4_DATETIME_FORMAT = '%Y%m%dT%H%M%SZ'
CERT_DATETIME_FORMAT = '%y%m%d%H%M%SZ'
AWS_CREDENTIALS_FILE = os.path.expanduser(os.path.join('~' + pwd.getpwuid(os.getuid()).pw_name, '.aws', 'credentials'))
AWS_CONFIG_FILE = os.path.expanduser(os.path.join('~' + pwd.getpwuid(os.getuid()).pw_name, '.aws', 'config'))
CA_CONFIG_BODY = """dir = %s
RANDFILE = $dir/database/.rand
[ ca ]
default_ca = local_ca
[ local_ca ]
database = $dir/database/index.txt
serial = $dir/database/serial
private_key = %s
cert = $dir/certificate.pem
new_certs_dir = $dir/certs
default_md = sha256
preserve = no
policy = efsPolicy
x509_extensions = v3_ca
[ efsPolicy ]
CN = supplied
[ req ]
prompt = no
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
CN = %s
%s
%s
%s
"""
# SigV4 Auth
ALGORITHM = 'AWS4-HMAC-SHA256'
AWS4_REQUEST = 'aws4_request'
HTTP_REQUEST_METHOD = 'GET'
CANONICAL_URI = '/'
CANONICAL_HEADERS_DICT = {
'host': '%s'
}
CANONICAL_HEADERS = '\n'.join(['%s:%s' % (k, v) for k, v in sorted(CANONICAL_HEADERS_DICT.items())])
SIGNED_HEADERS = ';'.join(CANONICAL_HEADERS_DICT.keys())
REQUEST_PAYLOAD = ''
FS_ID_RE = re.compile('^(?P<fs_id>fs-[0-9a-f]+)$')
EFS_FQDN_RE = re.compile(r'^(?P<fs_id>fs-[0-9a-f]+)\.efs\.(?P<region>[a-z0-9-]+)\.(?P<dns_name_suffix>[a-z0-9.]+)$')
AP_ID_RE = re.compile('^fsap-[0-9a-f]{17}$')
CREDENTIALS_KEYS = ['AccessKeyId', 'SecretAccessKey', 'Token']
ECS_URI_ENV = 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI'
ECS_TASK_METADATA_API = 'http://169.254.170.2'
WEB_IDENTITY_ROLE_ARN_ENV = 'AWS_ROLE_ARN'
WEB_IDENTITY_TOKEN_FILE_ENV = 'AWS_WEB_IDENTITY_TOKEN_FILE'
STS_ENDPOINT_URL = 'https://sts.amazonaws.com/'
INSTANCE_METADATA_TOKEN_URL = 'http://169.254.169.254/latest/api/token'
INSTANCE_METADATA_SERVICE_URL = 'http://169.254.169.254/latest/dynamic/instance-identity/document/'
INSTANCE_IAM_URL = 'http://169.254.169.254/latest/meta-data/iam/security-credentials/'
SECURITY_CREDS_ECS_URI_HELP_URL = 'https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html'
SECURITY_CREDS_WEBIDENTITY_HELP_URL = 'https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html'
SECURITY_CREDS_IAM_ROLE_HELP_URL = 'https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html'
DEFAULT_STUNNEL_VERIFY_LEVEL = 2
DEFAULT_STUNNEL_CAFILE = '/etc/amazon/efs/efs-utils.crt'
NOT_BEFORE_MINS = 15
NOT_AFTER_HOURS = 3
EFS_ONLY_OPTIONS = [
'accesspoint',
'awscredsuri',
'awsprofile',
'cafile',
'iam',
'netns',
'noocsp',
'ocsp',
'tls',
'tlsport',
'verify'
]
UNSUPPORTED_OPTIONS = [
'capath'
]
STUNNEL_GLOBAL_CONFIG = {
'fips': 'no',
'foreground': 'yes',
'socket': [
'l:SO_REUSEADDR=yes',
'a:SO_BINDTODEVICE=lo',
],
}
STUNNEL_EFS_CONFIG = {
'client': 'yes',
'accept': '127.0.0.1:%s',
'connect': '%s:2049',
'sslVersion': 'TLSv1.2',
'renegotiation': 'no',
'TIMEOUTbusy': '20',
'TIMEOUTclose': '0',
'TIMEOUTidle': '70',
'delay': 'yes',
}
WATCHDOG_SERVICE = 'amazon-efs-mount-watchdog'
SYSTEM_RELEASE_PATH = '/etc/system-release'
OS_RELEASE_PATH = '/etc/os-release'
RHEL8_RELEASE_NAME = 'Red Hat Enterprise Linux release 8'
CENTOS8_RELEASE_NAME = 'CentOS Linux release 8'
FEDORA_RELEASE_NAME = 'Fedora release'
SUSE_RELEASE_NAME = 'openSUSE Leap'
SKIP_NO_LIBWRAP_RELEASES = [RHEL8_RELEASE_NAME, CENTOS8_RELEASE_NAME, FEDORA_RELEASE_NAME, SUSE_RELEASE_NAME]
def fatal_error(user_message, log_message=None, exit_code=1):
if log_message is None:
log_message = user_message
sys.stderr.write('%s\n' % user_message)
logging.error(log_message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, 'Mount failed, %s' % log_message)
sys.exit(exit_code)
def get_target_region(config):
def _fatal_error(message):
fatal_error('Error retrieving region. Please set the "region" parameter in the efs-utils configuration file.', message)
metadata_exception = 'Unknown error'
try:
return config.get(CONFIG_SECTION, 'region')
except NoOptionError:
pass
try:
return get_region_from_instance_metadata()
except Exception as e:
metadata_exception = e
logging.warning('Region not found in config file and metadata service call failed, falling back '
'to legacy "dns_name_format" check')
try:
region = get_region_from_legacy_dns_format(config)
sys.stdout.write('Warning: region obtained from "dns_name_format" field. Please set the "region" '
'parameter in the efs-utils configuration file.')
return region
except Exception:
logging.warning('Legacy check for region in "dns_name_format" failed')
_fatal_error(metadata_exception)
def get_region_from_instance_metadata():
instance_identity = get_instance_identity_info_from_instance_metadata('region')
if not instance_identity:
raise Exception("Cannot retrieve region from instance_metadata")
return instance_identity
def get_instance_identity_info_from_instance_metadata(property):
ec2_metadata_unsuccessful_resp = 'Unsuccessful retrieval of EC2 metadata at %s.' % INSTANCE_METADATA_SERVICE_URL
ec2_metadata_url_error_msg = 'Unable to reach %s to retrieve EC2 instance metadata.' % INSTANCE_METADATA_SERVICE_URL
instance_identity = url_request_helper(INSTANCE_METADATA_SERVICE_URL, ec2_metadata_unsuccessful_resp,
ec2_metadata_url_error_msg, retry_with_new_header_token=True)
if instance_identity:
try:
return instance_identity[property]
except KeyError as e:
logging.warning('%s not present in %s: %s' % (property, instance_identity, e))
except TypeError as e:
logging.warning('response %s is not a json object: %s' % (instance_identity, e))
return None
def get_region_from_legacy_dns_format(config):
"""
For backwards compatibility check dns_name_format to obtain the target region. This functionality
should only be used if region is not present in the config file and metadata calls fail.
"""
dns_name_format = config.get(CONFIG_SECTION, 'dns_name_format')
if '{region}' not in dns_name_format:
split_dns_name_format = dns_name_format.split('.')
if '{dns_name_suffix}' in dns_name_format:
return split_dns_name_format[-2]
elif 'amazonaws.com' in dns_name_format:
return split_dns_name_format[-3]
raise Exception('Region not found in dns_name_format')
def get_aws_ec2_metadata_token():
try:
opener = build_opener(HTTPHandler)
request = Request(INSTANCE_METADATA_TOKEN_URL)
request.add_header('X-aws-ec2-metadata-token-ttl-seconds', 21600)
request.get_method = lambda: 'PUT'
res = opener.open(request)
return res.read()
except NameError:
headers = {'X-aws-ec2-metadata-token-ttl-seconds': 21600}
req = Request(INSTANCE_METADATA_TOKEN_URL, headers=headers, method='PUT')
res = urlopen(req)
return res.read()
def get_aws_security_credentials(use_iam, awsprofile=None, aws_creds_uri=None):
"""
Lookup AWS security credentials (access key ID and secret access key). Adapted credentials provider chain from:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html and
https://docs.aws.amazon.com/sdk-for-java/v1/developer-guide/credentials.html
"""
if not use_iam:
return None, None
# attempt to lookup AWS security credentials through the credentials URI the ECS agent generated
if aws_creds_uri:
return get_aws_security_credentials_from_ecs(aws_creds_uri, True)
# attempt to lookup AWS security credentials in AWS credentials file (~/.aws/credentials)
# and configs file (~/.aws/config) with given awsprofile
if awsprofile:
return get_aws_security_credentials_from_awsprofile(awsprofile, True)
# attempt to lookup AWS security credentials through AWS_CONTAINER_CREDENTIALS_RELATIVE_URI environment variable
if ECS_URI_ENV in os.environ:
credentials, credentials_source = get_aws_security_credentials_from_ecs(os.environ[ECS_URI_ENV], False)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to lookup AWS security credentials through AssumeRoleWithWebIdentity
# (e.g. for IAM Role for Service Accounts (IRSA) approach on EKS)
if WEB_IDENTITY_ROLE_ARN_ENV in os.environ and WEB_IDENTITY_TOKEN_FILE_ENV in os.environ:
credentials, credentials_source = get_aws_security_credentials_from_webidentity(
os.environ[WEB_IDENTITY_ROLE_ARN_ENV],
os.environ[WEB_IDENTITY_TOKEN_FILE_ENV],
False
)
if credentials and credentials_source:
return credentials, credentials_source
# attempt to lookup AWS security credentials with IAM role name attached to instance
# through IAM role name security credentials lookup uri
iam_role_name = get_iam_role_name()
if iam_role_name:
credentials, credentials_source = get_aws_security_credentials_from_instance_metadata(iam_role_name)
if credentials and credentials_source:
return credentials, credentials_source
error_msg = 'AWS Access Key ID and Secret Access Key are not found in AWS credentials file (%s), config file (%s), ' \
'from ECS credentials relative uri, or from the instance security credentials service' % \
(AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE)
fatal_error(error_msg, error_msg)
def get_aws_security_credentials_from_awsprofile(awsprofile, is_fatal=False):
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
if os.path.exists(file_path):
credentials = credentials_file_helper(file_path, awsprofile)
if credentials['AccessKeyId']:
return credentials, os.path.basename(file_path) + ':' + awsprofile
# Fail if credentials cannot be fetched from the given awsprofile
if is_fatal:
log_message = 'AWS security credentials not found in %s or %s under named profile [%s]' % \
(AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE, awsprofile)
fatal_error(log_message)
else:
return None, None
def get_aws_security_credentials_from_ecs(aws_creds_uri, is_fatal=False):
ecs_uri = ECS_TASK_METADATA_API + aws_creds_uri
ecs_unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % ecs_uri
ecs_url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' \
% (ecs_uri, SECURITY_CREDS_ECS_URI_HELP_URL)
ecs_security_dict = url_request_helper(ecs_uri, ecs_unsuccessful_resp, ecs_url_error_msg)
if ecs_security_dict and all(k in ecs_security_dict for k in CREDENTIALS_KEYS):
return ecs_security_dict, 'ecs:' + aws_creds_uri
# Fail if credentials cannot be fetched from the given aws_creds_uri
if is_fatal:
fatal_error(ecs_unsuccessful_resp, ecs_unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_webidentity(role_arn, token_file, is_fatal=False):
try:
with open(token_file, 'r') as f:
token = f.read()
except Exception as e:
if is_fatal:
unsuccessful_resp = 'Error reading token file %s: %s' % (token_file, e)
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
webidentity_url = STS_ENDPOINT_URL + '?' + urlencode({
'Version': '2011-06-15',
'Action': 'AssumeRoleWithWebIdentity',
'RoleArn': role_arn,
'RoleSessionName': 'efs-mount-helper',
'WebIdentityToken': token
})
unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % STS_ENDPOINT_URL
url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' % \
(STS_ENDPOINT_URL, SECURITY_CREDS_WEBIDENTITY_HELP_URL)
resp = url_request_helper(webidentity_url, unsuccessful_resp, url_error_msg, headers={'Accept': 'application/json'})
if resp:
creds = resp \
.get('AssumeRoleWithWebIdentityResponse', {}) \
.get('AssumeRoleWithWebIdentityResult', {}) \
.get('Credentials', {})
if all(k in creds for k in ['AccessKeyId', 'SecretAccessKey', 'SessionToken']):
return {
'AccessKeyId': creds['AccessKeyId'],
'SecretAccessKey': creds['SecretAccessKey'],
'Token': creds['SessionToken']
}, 'webidentity:' + ','.join([role_arn, token_file])
# Fail if credentials cannot be fetched from the given aws_creds_uri
if is_fatal:
fatal_error(unsuccessful_resp, unsuccessful_resp)
else:
return None, None
def get_aws_security_credentials_from_instance_metadata(iam_role_name):
security_creds_lookup_url = INSTANCE_IAM_URL + iam_role_name
unsuccessful_resp = 'Unsuccessful retrieval of AWS security credentials at %s.' % security_creds_lookup_url
url_error_msg = 'Unable to reach %s to retrieve AWS security credentials. See %s for more info.' % \
(security_creds_lookup_url, SECURITY_CREDS_IAM_ROLE_HELP_URL)
iam_security_dict = url_request_helper(security_creds_lookup_url, unsuccessful_resp,
url_error_msg, retry_with_new_header_token=True)
if iam_security_dict and all(k in iam_security_dict for k in CREDENTIALS_KEYS):
return iam_security_dict, 'metadata:'
else:
return None, None
def get_iam_role_name():
iam_role_unsuccessful_resp = 'Unsuccessful retrieval of IAM role name at %s.' % INSTANCE_IAM_URL
iam_role_url_error_msg = 'Unable to reach %s to retrieve IAM role name. See %s for more info.' % \
(INSTANCE_IAM_URL, SECURITY_CREDS_IAM_ROLE_HELP_URL)
iam_role_name = url_request_helper(INSTANCE_IAM_URL, iam_role_unsuccessful_resp,
iam_role_url_error_msg, retry_with_new_header_token=True)
return iam_role_name
def credentials_file_helper(file_path, awsprofile):
aws_credentials_configs = read_config(file_path)
credentials = {'AccessKeyId': None, 'SecretAccessKey': None, 'Token': None}
try:
access_key = aws_credentials_configs.get(awsprofile, 'aws_access_key_id')
secret_key = aws_credentials_configs.get(awsprofile, 'aws_secret_access_key')
session_token = aws_credentials_configs.get(awsprofile, 'aws_session_token')
credentials['AccessKeyId'] = access_key
credentials['SecretAccessKey'] = secret_key
credentials['Token'] = session_token
except NoOptionError as e:
if 'aws_access_key_id' in str(e) or 'aws_secret_access_key' in str(e):
logging.debug('aws_access_key_id or aws_secret_access_key not found in %s under named profile [%s]', file_path,
awsprofile)
if 'aws_session_token' in str(e):
logging.debug('aws_session_token not found in %s', file_path)
credentials['AccessKeyId'] = aws_credentials_configs.get(awsprofile, 'aws_access_key_id')
credentials['SecretAccessKey'] = aws_credentials_configs.get(awsprofile, 'aws_secret_access_key')
except NoSectionError:
logging.debug('No [%s] section found in config file %s', awsprofile, file_path)
return credentials
def get_aws_profile(options, use_iam):
awsprofile = options.get('awsprofile')
if not awsprofile and use_iam:
for file_path in [AWS_CREDENTIALS_FILE, AWS_CONFIG_FILE]:
aws_credentials_configs = read_config(file_path)
# check if aws access key id is found under [default] section in current file and return 'default' if so
try:
access_key = aws_credentials_configs.get('default', 'aws_access_key_id')
if access_key is not None:
return 'default'
except (NoSectionError, NoOptionError):
continue
return awsprofile
def url_request_helper(url, unsuccessful_resp, url_error_msg, headers={}, retry_with_new_header_token=False):
try:
req = Request(url)
for k, v in headers.items():
req.add_header(k, v)
request_resp = urlopen(req, timeout=1)
return get_resp_obj(request_resp, url, unsuccessful_resp)
except HTTPError as e:
        # For instances enabled with IMDSv2, an Unauthorized 401 error will be thrown;
        # to retrieve metadata, the request header must include the metadata token
if e.code == 401 and retry_with_new_header_token:
token = get_aws_ec2_metadata_token()
req.add_header('X-aws-ec2-metadata-token', token)
request_resp = urlopen(req, timeout=1)
return get_resp_obj(request_resp, url, unsuccessful_resp)
err_msg = 'Unable to reach the url at %s: status=%d, reason is %s' % (url, e.code, e.reason)
except URLError as e:
err_msg = 'Unable to reach the url at %s, reason is %s' % (url, e.reason)
if err_msg:
logging.debug('%s %s', url_error_msg, err_msg)
return None
def get_resp_obj(request_resp, url, unsuccessful_resp):
if request_resp.getcode() != 200:
logging.debug(unsuccessful_resp + ' %s: ResponseCode=%d', url, request_resp.getcode())
return None
resp_body = request_resp.read()
resp_body_type = type(resp_body)
try:
if resp_body_type is str:
resp_dict = json.loads(resp_body)
else:
resp_dict = json.loads(resp_body.decode(request_resp.headers.get_content_charset() or 'us-ascii'))
return resp_dict
except ValueError as e:
logging.info('ValueError parsing "%s" into json: %s. Returning response body.' % (str(resp_body), e))
return resp_body if resp_body_type is str else resp_body.decode('utf-8')
def parse_options(options):
opts = {}
for o in options.split(','):
if '=' in o:
k, v = o.split('=')
opts[k] = v
else:
opts[o] = None
return opts
def get_tls_port_range(config):
lower_bound = config.getint(CONFIG_SECTION, 'port_range_lower_bound')
upper_bound = config.getint(CONFIG_SECTION, 'port_range_upper_bound')
if lower_bound >= upper_bound:
fatal_error('Configuration option "port_range_upper_bound" defined as %d '
'must be strictly greater than "port_range_lower_bound" defined as %d.'
% (upper_bound, lower_bound))
return lower_bound, upper_bound
def choose_tls_port(config, options):
if 'tlsport' in options:
ports_to_try = [int(options['tlsport'])]
else:
lower_bound, upper_bound = get_tls_port_range(config)
tls_ports = list(range(lower_bound, upper_bound))
# Choose a random midpoint, and then try ports in-order from there
mid = random.randrange(len(tls_ports))
ports_to_try = tls_ports[mid:] + tls_ports[:mid]
assert len(tls_ports) == len(ports_to_try)
sock = socket.socket()
for tls_port in ports_to_try:
try:
sock.bind(('localhost', tls_port))
sock.close()
return tls_port
except socket.error:
continue
sock.close()
if 'tlsport' in options:
fatal_error('Specified port [%s] is unavailable. Try selecting a different port.' % options['tlsport'])
else:
fatal_error('Failed to locate an available port in the range [%d, %d], try specifying a different port range in %s'
% (lower_bound, upper_bound, CONFIG_FILE))
def is_ocsp_enabled(config, options):
if 'ocsp' in options:
return True
elif 'noocsp' in options:
return False
else:
return config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_validity')
def get_mount_specific_filename(fs_id, mountpoint, tls_port):
return '%s.%s.%d' % (fs_id, os.path.abspath(mountpoint).replace(os.sep, '.').lstrip('.'), tls_port)
def serialize_stunnel_config(config, header=None):
lines = []
if header:
lines.append('[%s]' % header)
for k, v in config.items():
if type(v) is list:
for item in v:
lines.append('%s = %s' % (k, item))
else:
lines.append('%s = %s' % (k, v))
return lines
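# Illustrative note (added): for a config dict such as {'client': 'yes', 'socket': ['a', 'b']}
# and header 'efs', serialize_stunnel_config returns the lines
# ['[efs]', 'client = yes', 'socket = a', 'socket = b'].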
def add_stunnel_ca_options(efs_config, config, options):
if 'cafile' in options:
stunnel_cafile = options['cafile']
else:
try:
stunnel_cafile = config.get(CONFIG_SECTION, 'stunnel_cafile')
except NoOptionError:
logging.debug('No CA file configured, using default CA file %s', DEFAULT_STUNNEL_CAFILE)
stunnel_cafile = DEFAULT_STUNNEL_CAFILE
if not os.path.exists(stunnel_cafile):
fatal_error('Failed to find certificate authority file for verification',
'Failed to find CAfile "%s"' % stunnel_cafile)
efs_config['CAfile'] = stunnel_cafile
def is_stunnel_option_supported(stunnel_output, stunnel_option_name):
supported = False
for line in stunnel_output:
if line.startswith(stunnel_option_name):
supported = True
break
if not supported:
logging.warning('stunnel does not support "%s"', stunnel_option_name)
return supported
def get_version_specific_stunnel_options():
stunnel_command = [_stunnel_bin(), '-help']
proc = subprocess.Popen(stunnel_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
proc.wait()
_, err = proc.communicate()
stunnel_output = err.splitlines()
check_host_supported = is_stunnel_option_supported(stunnel_output, b'checkHost')
ocsp_aia_supported = is_stunnel_option_supported(stunnel_output, b'OCSPaia')
return check_host_supported, ocsp_aia_supported
def _stunnel_bin():
return find_command_path('stunnel',
'Please install it following the instructions at '
'https://docs.aws.amazon.com/efs/latest/ug/using-amazon-efs-utils.html#upgrading-stunnel')
def find_command_path(command, install_method):
try:
env_path = '/sbin:/usr/sbin:/usr/local/sbin:/root/bin:/usr/local/bin:/usr/bin:/bin'
os.putenv('PATH', env_path)
path = subprocess.check_output(['which', command])
except subprocess.CalledProcessError as e:
fatal_error('Failed to locate %s in %s - %s' % (command, env_path, install_method), e)
return path.strip().decode()
def get_system_release_version():
try:
with open(SYSTEM_RELEASE_PATH) as f:
return f.read().strip()
except IOError:
logging.debug('Unable to read %s', SYSTEM_RELEASE_PATH)
try:
with open(OS_RELEASE_PATH) as f:
for line in f:
if 'PRETTY_NAME' in line:
return line.split('=')[1].strip()
except IOError:
logging.debug('Unable to read %s', OS_RELEASE_PATH)
return 'unknown'
def write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level, ocsp_enabled,
options, log_dir=LOG_DIR, cert_details=None):
"""
Serializes stunnel configuration to a file. Unfortunately this does not conform to Python's config file format, so we have to
hand-serialize it.
"""
mount_filename = get_mount_specific_filename(fs_id, mountpoint, tls_port)
global_config = dict(STUNNEL_GLOBAL_CONFIG)
if config.getboolean(CONFIG_SECTION, 'stunnel_debug_enabled'):
global_config['debug'] = 'debug'
if config.has_option(CONFIG_SECTION, 'stunnel_logs_file'):
global_config['output'] = config.get(CONFIG_SECTION, 'stunnel_logs_file').replace('{fs_id}', fs_id)
else:
global_config['output'] = os.path.join(log_dir, '%s.stunnel.log' % mount_filename)
efs_config = dict(STUNNEL_EFS_CONFIG)
efs_config['accept'] = efs_config['accept'] % tls_port
efs_config['connect'] = efs_config['connect'] % dns_name
efs_config['verify'] = verify_level
if verify_level > 0:
add_stunnel_ca_options(efs_config, config, options)
if cert_details:
efs_config['cert'] = cert_details['certificate']
efs_config['key'] = cert_details['privateKey']
check_host_supported, ocsp_aia_supported = get_version_specific_stunnel_options()
tls_controls_message = 'WARNING: Your client lacks sufficient controls to properly enforce TLS. Please upgrade stunnel, ' \
'or disable "%%s" in %s.\nSee %s for more detail.' % (CONFIG_FILE,
'https://docs.aws.amazon.com/console/efs/troubleshooting-tls')
if config.getboolean(CONFIG_SECTION, 'stunnel_check_cert_hostname'):
if check_host_supported:
efs_config['checkHost'] = dns_name
else:
fatal_error(tls_controls_message % 'stunnel_check_cert_hostname')
# Only use the config setting if the override is not set
if ocsp_enabled:
if ocsp_aia_supported:
efs_config['OCSPaia'] = 'yes'
else:
fatal_error(tls_controls_message % 'stunnel_check_cert_validity')
system_release_version = get_system_release_version()
if not any(release in system_release_version for release in SKIP_NO_LIBWRAP_RELEASES):
efs_config['libwrap'] = 'no'
stunnel_config = '\n'.join(serialize_stunnel_config(global_config) + serialize_stunnel_config(efs_config, 'efs'))
logging.debug('Writing stunnel configuration:\n%s', stunnel_config)
stunnel_config_file = os.path.join(state_file_dir, 'stunnel-config.%s' % mount_filename)
with open(stunnel_config_file, 'w') as f:
f.write(stunnel_config)
return stunnel_config_file
def write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_pid, command, files, state_file_dir, cert_details=None):
"""
Return the name of the temporary file containing TLS tunnel state, prefixed with a '~'. This file needs to be renamed to a
non-temporary version following a successful mount.
"""
state_file = '~' + get_mount_specific_filename(fs_id, mountpoint, tls_port)
state = {
'pid': tunnel_pid,
'cmd': command,
'files': files,
}
if cert_details:
state.update(cert_details)
with open(os.path.join(state_file_dir, state_file), 'w') as f:
json.dump(state, f)
return state_file
def test_tunnel_process(tunnel_proc, fs_id):
tunnel_proc.poll()
if tunnel_proc.returncode is not None:
out, err = tunnel_proc.communicate()
fatal_error('Failed to initialize TLS tunnel for %s' % fs_id,
'Failed to start TLS tunnel (errno=%d). stdout="%s" stderr="%s"'
% (tunnel_proc.returncode, out.strip(), err.strip()))
def poll_tunnel_process(tunnel_proc, fs_id, mount_completed):
"""
poll the tunnel process health every .5s during the mount attempt to fail fast if the tunnel dies - since this is not called
from the main thread, if the tunnel fails, exit uncleanly with os._exit
"""
while not mount_completed.is_set():
try:
test_tunnel_process(tunnel_proc, fs_id)
except SystemExit as e:
os._exit(e.code)
mount_completed.wait(.5)
def get_init_system(comm_file='/proc/1/comm'):
init_system = 'unknown'
try:
with open(comm_file) as f:
init_system = f.read().strip()
except IOError:
logging.warning('Unable to read %s', comm_file)
logging.debug('Identified init system: %s', init_system)
return init_system
def check_network_target(fs_id):
with open(os.devnull, 'w') as devnull:
rc = subprocess.call(['systemctl', 'status', 'network.target'], stdout=devnull, stderr=devnull, close_fds=True)
if rc != 0:
fatal_error('Failed to mount %s because the network was not yet available, add "_netdev" to your mount options' % fs_id,
exit_code=0)
def check_network_status(fs_id, init_system):
if init_system != 'systemd':
logging.debug('Not testing network on non-systemd init systems')
return
check_network_target(fs_id)
def start_watchdog(init_system):
if init_system == 'init':
proc = subprocess.Popen(
['/sbin/status', WATCHDOG_SERVICE], stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
status, _ = proc.communicate()
if 'stop' in status:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['/sbin/start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull, close_fds=True)
elif 'start' in status:
logging.debug('%s is already running', WATCHDOG_SERVICE)
elif init_system == 'systemd':
rc = subprocess.call(['systemctl', 'is-active', '--quiet', WATCHDOG_SERVICE], close_fds=True)
if rc != 0:
with open(os.devnull, 'w') as devnull:
subprocess.Popen(['systemctl', 'start', WATCHDOG_SERVICE], stdout=devnull, stderr=devnull, close_fds=True)
else:
logging.debug('%s is already running', WATCHDOG_SERVICE)
else:
error_message = 'Could not start %s, unrecognized init system "%s"' % (WATCHDOG_SERVICE, init_system)
sys.stderr.write('%s\n' % error_message)
logging.warning(error_message)
def create_required_directory(config, directory):
mode = 0o750
try:
mode_str = config.get(CONFIG_SECTION, 'state_file_dir_mode')
try:
mode = int(mode_str, 8)
except ValueError:
logging.warning('Bad state_file_dir_mode "%s" in config file "%s"', mode_str, CONFIG_FILE)
except NoOptionError:
pass
try:
os.makedirs(directory, mode)
except OSError as e:
if errno.EEXIST != e.errno or not os.path.isdir(directory):
raise
@contextmanager
def bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options, state_file_dir=STATE_FILE_DIR):
tls_port = choose_tls_port(config, options)
# override the tlsport option so that we can later override the port the NFS client uses to connect to stunnel.
# if the user has specified tlsport=X at the command line this will just re-set tlsport to X.
options['tlsport'] = tls_port
use_iam = 'iam' in options
ap_id = options.get('accesspoint')
cert_details = {}
security_credentials = None
client_info = get_client_info(config)
if use_iam:
aws_creds_uri = options.get('awscredsuri')
if aws_creds_uri:
kwargs = {'aws_creds_uri': aws_creds_uri}
else:
kwargs = {'awsprofile': get_aws_profile(options, use_iam)}
security_credentials, credentials_source = get_aws_security_credentials(use_iam, **kwargs)
if credentials_source:
cert_details['awsCredentialsMethod'] = credentials_source
if ap_id:
cert_details['accessPoint'] = ap_id
# additional symbol appended to avoid naming collisions
cert_details['mountStateDir'] = get_mount_specific_filename(fs_id, mountpoint, tls_port) + '+'
# common name for certificate signing request is max 64 characters
cert_details['commonName'] = socket.gethostname()[0:64]
cert_details['region'] = get_target_region(config)
cert_details['certificateCreationTime'] = create_certificate(config, cert_details['mountStateDir'],
cert_details['commonName'], cert_details['region'], fs_id,
security_credentials, ap_id, client_info,
base_path=state_file_dir)
cert_details['certificate'] = os.path.join(state_file_dir, cert_details['mountStateDir'], 'certificate.pem')
cert_details['privateKey'] = get_private_key_path()
cert_details['fsId'] = fs_id
start_watchdog(init_system)
if not os.path.exists(state_file_dir):
create_required_directory(config, state_file_dir)
verify_level = int(options.get('verify', DEFAULT_STUNNEL_VERIFY_LEVEL))
ocsp_enabled = is_ocsp_enabled(config, options)
stunnel_config_file = write_stunnel_config_file(config, state_file_dir, fs_id, mountpoint, tls_port, dns_name, verify_level,
ocsp_enabled, options, cert_details=cert_details)
tunnel_args = [_stunnel_bin(), stunnel_config_file]
if 'netns' in options:
tunnel_args = ['nsenter', '--net=' + options['netns']] + tunnel_args
# launch the tunnel in a process group so if it has any child processes, they can be killed easily by the mount watchdog
logging.info('Starting TLS tunnel: "%s"', ' '.join(tunnel_args))
tunnel_proc = subprocess.Popen(
tunnel_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid, close_fds=True)
logging.info('Started TLS tunnel, pid: %d', tunnel_proc.pid)
temp_tls_state_file = write_tls_tunnel_state_file(fs_id, mountpoint, tls_port, tunnel_proc.pid, tunnel_args,
[stunnel_config_file], state_file_dir, cert_details=cert_details)
try:
yield tunnel_proc
finally:
os.rename(os.path.join(state_file_dir, temp_tls_state_file), os.path.join(state_file_dir, temp_tls_state_file[1:]))
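# Build the -o option string for mount.nfs4: fill in the recommended defaults (NFSv4.1, 1 MiB
# rsize/wsize, hard, timeo=600, retrans=2, noresvport), point the NFS client at the local stunnel
# listener when TLS is enabled, and drop options that are only meaningful to this helper (EFS_ONLY_OPTIONS).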
def get_nfs_mount_options(options):
# If you change these options, update the man page as well at man/mount.efs.8
if 'nfsvers' not in options and 'vers' not in options:
options['nfsvers'] = '4.1'
if 'rsize' not in options:
options['rsize'] = '1048576'
if 'wsize' not in options:
options['wsize'] = '1048576'
if 'soft' not in options and 'hard' not in options:
options['hard'] = None
if 'timeo' not in options:
options['timeo'] = '600'
if 'retrans' not in options:
options['retrans'] = '2'
if 'noresvport' not in options:
options['noresvport'] = None
if 'tls' in options:
options['port'] = options['tlsport']
def to_nfs_option(k, v):
if v is None:
return k
return '%s=%s' % (str(k), str(v))
nfs_options = [to_nfs_option(k, v) for k, v in options.items() if k not in EFS_ONLY_OPTIONS]
return ','.join(nfs_options)
def mount_nfs(dns_name, path, mountpoint, options):
if 'tls' in options:
mount_path = '127.0.0.1:%s' % path
else:
mount_path = '%s:%s' % (dns_name, path)
command = ['/sbin/mount.nfs4', mount_path, mountpoint, '-o', get_nfs_mount_options(options)]
if 'netns' in options:
command = ['nsenter', '--net=' + options['netns']] + command
logging.info('Executing: "%s"', ' '.join(command))
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
out, err = proc.communicate()
if proc.returncode == 0:
message = 'Successfully mounted %s at %s' % (dns_name, mountpoint)
logging.info(message)
publish_cloudwatch_log(CLOUDWATCHLOG_AGENT, message)
else:
message = 'Failed to mount %s at %s: returncode=%d, stderr="%s"' % (dns_name, mountpoint, proc.returncode, err.strip())
fatal_error(err.strip(), message, proc.returncode)
def usage(out, exit_code=1):
out.write('Usage: mount.efs [--version] [-h|--help] <fsname> <mountpoint> [-o <options>]\n')
sys.exit(exit_code)
def parse_arguments_early_exit(args=None):
"""Parse arguments, checking for early exit conditions only"""
if args is None:
args = sys.argv
if '-h' in args[1:] or '--help' in args[1:]:
usage(out=sys.stdout, exit_code=0)
if '--version' in args[1:]:
sys.stdout.write('%s Version: %s\n' % (args[0], VERSION))
sys.exit(0)
def parse_arguments(config, args=None):
"""Parse arguments, return (fsid, path, mountpoint, options)"""
if args is None:
args = sys.argv
fsname = None
mountpoint = None
options = {}
if len(args) > 1:
fsname = args[1]
if len(args) > 2:
mountpoint = args[2]
if len(args) > 4 and '-o' in args[:-1]:
options_index = args.index('-o') + 1
options = parse_options(args[options_index])
if not fsname or not mountpoint:
usage(out=sys.stderr)
fs_id, path = match_device(config, fsname)
return fs_id, path, mountpoint, options
def get_client_info(config):
client_info = {}
# source key/value pair in config file
if config.has_option(CLIENT_INFO_SECTION, 'source'):
client_source = config.get(CLIENT_INFO_SECTION, 'source')
if 0 < len(client_source) <= CLIENT_SOURCE_STR_LEN_LIMIT:
client_info['source'] = client_source
return client_info
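# Create the self-signed client certificate presented over the TLS tunnel: prepare the openssl ca
# directory layout, reuse the shared private key, embed IAM SigV4 data and the access point ID in the
# request config when present, then self-sign a CSR with a bounded validity window.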
def create_certificate(config, mount_name, common_name, region, fs_id, security_credentials, ap_id, client_info,
base_path=STATE_FILE_DIR):
current_time = get_utc_now()
tls_paths = tls_paths_dictionary(mount_name, base_path)
certificate_config = os.path.join(tls_paths['mount_dir'], 'config.conf')
certificate_signing_request = os.path.join(tls_paths['mount_dir'], 'request.csr')
certificate = os.path.join(tls_paths['mount_dir'], 'certificate.pem')
ca_dirs_check(config, tls_paths['database_dir'], tls_paths['certs_dir'])
ca_supporting_files_check(tls_paths['index'], tls_paths['index_attr'], tls_paths['serial'], tls_paths['rand'])
private_key = check_and_create_private_key(base_path)
if security_credentials:
public_key = os.path.join(tls_paths['mount_dir'], 'publicKey.pem')
create_public_key(private_key, public_key)
create_ca_conf(certificate_config, common_name, tls_paths['mount_dir'], private_key, current_time, region, fs_id,
security_credentials, ap_id, client_info)
create_certificate_signing_request(certificate_config, private_key, certificate_signing_request)
not_before = get_certificate_timestamp(current_time, minutes=-NOT_BEFORE_MINS)
not_after = get_certificate_timestamp(current_time, hours=NOT_AFTER_HOURS)
cmd = 'openssl ca -startdate %s -enddate %s -selfsign -batch -notext -config %s -in %s -out %s' % \
(not_before, not_after, certificate_config, certificate_signing_request, certificate)
subprocess_call(cmd, 'Failed to create self-signed client-side certificate')
return current_time.strftime(CERT_DATETIME_FORMAT)
def get_private_key_path():
"""Wrapped for mocking purposes in unit tests"""
return PRIVATE_KEY_FILE
def check_and_create_private_key(base_path=STATE_FILE_DIR):
# Creating RSA private keys is slow, so we will create one private key and allow mounts to share it.
# This means, however, that we have to include a locking mechanism to ensure that the private key is
# atomically created, as mounts occurring in parallel may try to create the key simultaneously.
key = get_private_key_path()
@contextmanager
def open_lock_file():
lock_file = os.path.join(base_path, 'efs-utils-lock')
f = os.open(lock_file, os.O_CREAT | os.O_DSYNC | os.O_EXCL | os.O_RDWR)
try:
lock_file_contents = 'PID: %s' % os.getpid()
os.write(f, lock_file_contents.encode('utf-8'))
yield f
finally:
os.close(f)
os.remove(lock_file)
def do_with_lock(function):
while True:
try:
with open_lock_file():
return function()
except OSError as e:
if e.errno == errno.EEXIST:
logging.info('Failed to take out private key creation lock, sleeping 50 ms')
time.sleep(0.05)
else:
raise
def generate_key():
if os.path.isfile(key):
return
cmd = 'openssl genpkey -algorithm RSA -out %s -pkeyopt rsa_keygen_bits:3072' % key
subprocess_call(cmd, 'Failed to create private key')
read_only_mode = 0o400
os.chmod(key, read_only_mode)
do_with_lock(generate_key)
return key
def create_certificate_signing_request(config_path, private_key, csr_path):
cmd = 'openssl req -new -config %s -key %s -out %s' % (config_path, private_key, csr_path)
subprocess_call(cmd, 'Failed to create certificate signing request (csr)')
def create_ca_conf(config_path, common_name, directory, private_key, date,
region, fs_id, security_credentials, ap_id, client_info):
"""Populate ca/req configuration file with fresh configurations at every mount since SigV4 signature can change"""
public_key_path = os.path.join(directory, 'publicKey.pem')
ca_extension_body = ca_extension_builder(ap_id, security_credentials, fs_id, client_info)
efs_client_auth_body = efs_client_auth_builder(public_key_path, security_credentials['AccessKeyId'],
security_credentials['SecretAccessKey'], date, region, fs_id,
security_credentials['Token']) if security_credentials else ''
efs_client_info_body = efs_client_info_builder(client_info) if client_info else ''
full_config_body = CA_CONFIG_BODY % (directory, private_key, common_name, ca_extension_body,
efs_client_auth_body, efs_client_info_body)
with open(config_path, 'w') as f:
f.write(full_config_body)
return full_config_body
def ca_extension_builder(ap_id, security_credentials, fs_id, client_info):
ca_extension_str = '[ v3_ca ]\nsubjectKeyIdentifier = hash'
if ap_id:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.1 = ASN1:UTF8String:' + ap_id
if security_credentials:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.2 = ASN1:SEQUENCE:efs_client_auth'
ca_extension_str += '\n1.3.6.1.4.1.4843.7.3 = ASN1:UTF8String:' + fs_id
if client_info:
ca_extension_str += '\n1.3.6.1.4.1.4843.7.4 = ASN1:SEQUENCE:efs_client_info'
return ca_extension_str
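# Build the [ efs_client_auth ] section embedded in the certificate request when IAM is used: the
# public key hash is signed with SigV4 so the service can authenticate the caller's AWS credentials.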
def efs_client_auth_builder(public_key_path, access_key_id, secret_access_key, date, region, fs_id, session_token=None):
public_key_hash = get_public_key_sha1(public_key_path)
canonical_request = create_canonical_request(public_key_hash, date, access_key_id, region, fs_id, session_token)
string_to_sign = create_string_to_sign(canonical_request, date, region)
signature = calculate_signature(string_to_sign, date, secret_access_key, region)
efs_client_auth_str = '[ efs_client_auth ]'
efs_client_auth_str += '\naccessKeyId = UTF8String:' + access_key_id
efs_client_auth_str += '\nsignature = OCTETSTRING:' + signature
efs_client_auth_str += '\nsigv4DateTime = UTCTIME:' + date.strftime(CERT_DATETIME_FORMAT)
if session_token:
efs_client_auth_str += '\nsessionToken = EXPLICIT:0,UTF8String:' + session_token
return efs_client_auth_str
def efs_client_info_builder(client_info):
efs_client_info_str = '[ efs_client_info ]'
for key, value in client_info.items():
efs_client_info_str += '\n%s = UTF8String:%s' % (key, value)
return efs_client_info_str
def create_public_key(private_key, public_key):
cmd = 'openssl rsa -in %s -outform PEM -pubout -out %s' % (private_key, public_key)
subprocess_call(cmd, 'Failed to create public key')
def subprocess_call(cmd, error_message):
"""Helper method to run shell openssl command and to handle response error messages"""
retry_times = 3
for retry in range(retry_times):
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
(output, err) = process.communicate()
rc = process.poll()
if rc != 0:
logging.error('Command %s failed, rc=%s, stdout="%s", stderr="%s"' % (cmd, rc, output, err), exc_info=True)
try:
process.kill()
except OSError:
# Silently fail if the subprocess has exited already
pass
else:
return output, err
error_message = '%s, error is: %s' % (error_message, err)
fatal_error(error_message, error_message)
def ca_dirs_check(config, database_dir, certs_dir):
"""Check if mount's database and certs directories exist and if not, create directories (also create all intermediate
directories if they don't exist)."""
if not os.path.exists(database_dir):
create_required_directory(config, database_dir)
if not os.path.exists(certs_dir):
create_required_directory(config, certs_dir)
def ca_supporting_files_check(index_path, index_attr_path, serial_path, rand_path):
"""Recreate all supporting openssl ca and req files if they're not present in their respective directories"""
if not os.path.isfile(index_path):
open(index_path, 'w').close()
if not os.path.isfile(index_attr_path):
with open(index_attr_path, 'w+') as f:
f.write('unique_subject = no')
if not os.path.isfile(serial_path):
with open(serial_path, 'w+') as f:
f.write('00')
if not os.path.isfile(rand_path):
open(rand_path, 'w').close()
def get_certificate_timestamp(current_time, **kwargs):
updated_time = current_time + timedelta(**kwargs)
return updated_time.strftime(CERT_DATETIME_FORMAT)
def get_utc_now():
"""
Wrapped for patching purposes in unit tests
"""
return datetime.utcnow()
def assert_root():
if os.geteuid() != 0:
sys.stderr.write('only root can run mount.efs\n')
sys.exit(1)
def read_config(config_file=CONFIG_FILE):
try:
p = ConfigParser.SafeConfigParser()
except AttributeError:
p = ConfigParser()
p.read(config_file)
return p
def bootstrap_logging(config, log_dir=LOG_DIR):
raw_level = config.get(CONFIG_SECTION, 'logging_level')
levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL
}
level = levels.get(raw_level.lower())
level_error = False
if not level:
# delay logging error about malformed log level until after logging is configured
level_error = True
level = logging.INFO
max_bytes = config.getint(CONFIG_SECTION, 'logging_max_bytes')
file_count = config.getint(CONFIG_SECTION, 'logging_file_count')
handler = RotatingFileHandler(os.path.join(log_dir, LOG_FILE), maxBytes=max_bytes, backupCount=file_count)
handler.setFormatter(logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(message)s'))
logger = logging.getLogger()
logger.setLevel(level)
logger.addHandler(handler)
if level_error:
logging.error('Malformed logging level "%s", setting logging level to %s', raw_level, level)
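# Resolve the file system's mount-target DNS name from the configured dns_name_format, which must
# contain {fs_id} and may contain {region} and {dns_name_suffix}; fail fast if the name does not resolve.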
def get_dns_name(config, fs_id):
def _validate_replacement_field_count(format_str, expected_ct):
if format_str.count('{') != expected_ct or format_str.count('}') != expected_ct:
raise ValueError('DNS name format has an incorrect number of replacement fields')
dns_name_format = config.get(CONFIG_SECTION, 'dns_name_format')
if '{fs_id}' not in dns_name_format:
raise ValueError('DNS name format must include {fs_id}')
format_args = {'fs_id': fs_id}
expected_replacement_field_ct = 1
if '{region}' in dns_name_format:
expected_replacement_field_ct += 1
format_args['region'] = get_target_region(config)
if '{dns_name_suffix}' in dns_name_format:
expected_replacement_field_ct += 1
config_section = CONFIG_SECTION
region = format_args.get('region')
if region:
region_specific_config_section = '%s.%s' % (CONFIG_SECTION, region)
if config.has_section(region_specific_config_section):
config_section = region_specific_config_section
format_args['dns_name_suffix'] = config.get(config_section, 'dns_name_suffix')
logging.debug("Using dns_name_suffix %s in config section [%s]", format_args.get('dns_name_suffix'), config_section)
_validate_replacement_field_count(dns_name_format, expected_replacement_field_ct)
dns_name = dns_name_format.format(**format_args)
try:
socket.gethostbyname(dns_name)
except socket.gaierror:
fatal_error('Failed to resolve "%s" - check that your file system ID is correct.\nSee %s for more detail.'
% (dns_name, 'https://docs.aws.amazon.com/console/efs/mount-dns-name'),
'Failed to resolve "%s"' % dns_name)
return dns_name
def tls_paths_dictionary(mount_name, base_path=STATE_FILE_DIR):
tls_dict = {
'mount_dir': os.path.join(base_path, mount_name),
# every mount will have its own ca mode assets due to lack of multi-threading support in openssl
'database_dir': os.path.join(base_path, mount_name, 'database'),
'certs_dir': os.path.join(base_path, mount_name, 'certs'),
'index': os.path.join(base_path, mount_name, 'database/index.txt'),
'index_attr': os.path.join(base_path, mount_name, 'database/index.txt.attr'),
'serial': os.path.join(base_path, mount_name, 'database/serial'),
'rand': os.path.join(base_path, mount_name, 'database/.rand')
}
return tls_dict
def get_public_key_sha1(public_key):
# truncating public key to remove the header and footer '-----(BEGIN|END) PUBLIC KEY-----'
with open(public_key, 'r') as f:
lines = f.readlines()
lines = lines[1:-1]
key = ''.join(lines)
key = bytearray(base64.b64decode(key))
# Parse the public key to pull out the actual key material by looking for the key BIT STRING
# Example:
# 0:d=0 hl=4 l= 418 cons: SEQUENCE
# 4:d=1 hl=2 l= 13 cons: SEQUENCE
# 6:d=2 hl=2 l= 9 prim: OBJECT :rsaEncryption
# 17:d=2 hl=2 l= 0 prim: NULL
# 19:d=1 hl=4 l= 399 prim: BIT STRING
cmd = 'openssl asn1parse -inform PEM -in %s' % public_key
output, err = subprocess_call(cmd, 'Unable to ASN1 parse public key file, %s, correctly' % public_key)
key_line = ''
for line in output.splitlines():
if 'BIT STRING' in line.decode('utf-8'):
key_line = line.decode('utf-8')
if not key_line:
err_msg = 'Public key file, %s, is incorrectly formatted' % public_key
fatal_error(err_msg, err_msg)
key_line = key_line.replace(' ', '')
# DER encoding TLV (Tag, Length, Value)
# - the first octet (byte) is the tag (type)
# - the next octets are the length - "definite form"
# - the first octet always has the high order bit (8) set to 1
# - the remaining 127 bits are used to encode the number of octets that follow
# - the following octets encode, as big-endian, the length (which may be 0) as a number of octets
# - the remaining octets are the "value" aka content
#
# For a BIT STRING, the first octet of the value is used to signify the number of unused bits that exist in the last
# content byte. Note that this is explicitly excluded from the SubjectKeyIdentifier hash, per
# https://tools.ietf.org/html/rfc5280#section-4.2.1.2
#
# Example:
# 0382018f00...<subjectPublicKey>
# - 03 - BIT STRING tag
# - 82 - 2 length octets to follow (ignore high order bit)
# - 018f - length of 399
# - 00 - no unused bits in the last content byte
offset = int(key_line.split(':')[0])
key = key[offset:]
num_length_octets = key[1] & 0b01111111
# Exclude the tag (1), length (1 + num_length_octets), and number of unused bits (1)
offset = 1 + 1 + num_length_octets + 1
key = key[offset:]
sha1 = hashlib.sha1()
sha1.update(key)
return sha1.hexdigest()
def create_canonical_request(public_key_hash, date, access_key, region, fs_id, session_token=None):
"""
Create a Canonical Request - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
"""
formatted_datetime = date.strftime(SIGV4_DATETIME_FORMAT)
credential = quote_plus(access_key + '/' + get_credential_scope(date, region))
request = HTTP_REQUEST_METHOD + '\n'
request += CANONICAL_URI + '\n'
request += create_canonical_query_string(public_key_hash, credential, formatted_datetime, session_token) + '\n'
request += CANONICAL_HEADERS % fs_id + '\n'
request += SIGNED_HEADERS + '\n'
sha256 = hashlib.sha256()
sha256.update(REQUEST_PAYLOAD.encode())
request += sha256.hexdigest()
return request
def create_canonical_query_string(public_key_hash, credential, formatted_datetime, session_token=None):
canonical_query_params = {
'Action': 'Connect',
# Public key hash is included in canonical request to tie the signature to a specific key pair to avoid replay attacks
'PublicKeyHash': quote_plus(public_key_hash),
'X-Amz-Algorithm': ALGORITHM,
'X-Amz-Credential': credential,
'X-Amz-Date': quote_plus(formatted_datetime),
'X-Amz-Expires': 86400,
'X-Amz-SignedHeaders': SIGNED_HEADERS,
}
if session_token:
canonical_query_params['X-Amz-Security-Token'] = quote_plus(session_token)
    # Cannot use urllib's urlencode here: the values are already percent-encoded and it would encode the '%' characters again
return '&'.join(['%s=%s' % (k, v) for k, v in sorted(canonical_query_params.items())])
def create_string_to_sign(canonical_request, date, region):
"""
Create a String to Sign - https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
"""
string_to_sign = ALGORITHM + '\n'
string_to_sign += date.strftime(SIGV4_DATETIME_FORMAT) + '\n'
string_to_sign += get_credential_scope(date, region) + '\n'
sha256 = hashlib.sha256()
sha256.update(canonical_request.encode())
string_to_sign += sha256.hexdigest()
return string_to_sign
def calculate_signature(string_to_sign, date, secret_access_key, region):
"""
Calculate the Signature - https://docs.aws.amazon.com/general/latest/gr/sigv4-calculate-signature.html
"""
def _sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256)
key_date = _sign(('AWS4' + secret_access_key).encode('utf-8'), date.strftime(DATE_ONLY_FORMAT)).digest()
add_region = _sign(key_date, region).digest()
add_service = _sign(add_region, SERVICE).digest()
signing_key = _sign(add_service, 'aws4_request').digest()
return _sign(signing_key, string_to_sign).hexdigest()
def get_credential_scope(date, region):
return '/'.join([date.strftime(DATE_ONLY_FORMAT), region, SERVICE, AWS4_REQUEST])
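# Accept either a bare file system ID (fs-xxxxxxxx[:path]) or a DNS name/CNAME; a DNS name must
# ultimately resolve to the EFS mount target's own DNS name for the region.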
def match_device(config, device):
"""Return the EFS id and the remote path to mount"""
try:
remote, path = device.split(':', 1)
except ValueError:
remote = device
path = '/'
if FS_ID_RE.match(remote):
return remote, path
try:
primary, secondaries, _ = socket.gethostbyname_ex(remote)
hostnames = list(filter(lambda e: e is not None, [primary] + secondaries))
except socket.gaierror:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error(
'Failed to resolve "%s" - check that the specified DNS name is a CNAME record resolving to a valid EFS DNS '
'name' % remote,
'Failed to resolve "%s"' % remote
)
if not hostnames:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error(
'The specified domain name "%s" did not resolve to an EFS mount target' % remote
)
for hostname in hostnames:
efs_fqdn_match = EFS_FQDN_RE.match(hostname)
if efs_fqdn_match:
fs_id = efs_fqdn_match.group('fs_id')
expected_dns_name = get_dns_name(config, fs_id)
# check that the DNS name of the mount target matches exactly the DNS name the CNAME resolves to
if hostname == expected_dns_name:
return fs_id, path
else:
create_default_cloudwatchlog_agent_if_not_exist(config)
fatal_error('The specified CNAME "%s" did not resolve to a valid DNS name for an EFS mount target. '
'Please refer to the EFS documentation for mounting with DNS names for examples: %s'
% (remote, 'https://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html'))
def is_nfs_mount(mountpoint):
cmd = ['stat', '-f', '-L', '-c', '%T', mountpoint]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
output, _ = p.communicate()
return output and 'nfs' in str(output)
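# Perform a TLS mount: skip if the mountpoint already holds an NFS mount, otherwise run mount_nfs
# inside the bootstrap_tls context while a daemon thread runs poll_tunnel_process on the stunnel
# process until the mount completes.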
def mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options):
if os.path.ismount(mountpoint) and is_nfs_mount(mountpoint):
sys.stdout.write("%s is already mounted, please run 'mount' command to verify\n" % mountpoint)
logging.warning("%s is already mounted, mount aborted" % mountpoint)
return
with bootstrap_tls(config, init_system, dns_name, fs_id, mountpoint, options) as tunnel_proc:
mount_completed = threading.Event()
t = threading.Thread(target=poll_tunnel_process, args=(tunnel_proc, fs_id, mount_completed))
t.daemon = True
t.start()
mount_nfs(dns_name, path, mountpoint, options)
mount_completed.set()
t.join()
def check_unsupported_options(options):
for unsupported_option in UNSUPPORTED_OPTIONS:
if unsupported_option in options:
warn_message = 'The "%s" option is not supported and has been ignored, as amazon-efs-utils relies on a built-in ' \
'trust store.' % unsupported_option
sys.stderr.write('WARN: %s\n' % warn_message)
logging.warning(warn_message)
del options[unsupported_option]
def check_options_validity(options):
if 'tls' in options:
if 'port' in options:
fatal_error('The "port" and "tls" options are mutually exclusive')
if 'tlsport' in options:
try:
int(options['tlsport'])
except ValueError:
fatal_error('tlsport option [%s] is not an integer' % options['tlsport'])
if 'ocsp' in options and 'noocsp' in options:
fatal_error('The "ocsp" and "noocsp" options are mutually exclusive')
if 'accesspoint' in options:
if 'tls' not in options:
fatal_error('The "tls" option is required when mounting via "accesspoint"')
if not AP_ID_RE.match(options['accesspoint']):
fatal_error('Access Point ID %s is malformed' % options['accesspoint'])
if 'iam' in options and 'tls' not in options:
fatal_error('The "tls" option is required when mounting via "iam"')
if 'awsprofile' in options and 'iam' not in options:
fatal_error('The "iam" option is required when mounting with named profile option, "awsprofile"')
if 'awscredsuri' in options and 'iam' not in options:
fatal_error('The "iam" option is required when mounting with "awscredsuri"')
if 'awscredsuri' in options and 'awsprofile' in options:
fatal_error('The "awscredsuri" and "awsprofile" options are mutually exclusive')
def bootstrap_cloudwatch_logging(config, fs_id=None):
if not check_if_cloudwatch_log_enabled(config):
return None
cloudwatchlog_client = get_botocore_client(config, 'logs')
if not cloudwatchlog_client:
return None
cloudwatchlog_config = get_cloudwatchlog_config(config, fs_id)
log_group_name = cloudwatchlog_config.get('log_group_name')
log_stream_name = cloudwatchlog_config.get('log_stream_name')
retention_days = cloudwatchlog_config.get('retention_days')
group_creation_completed = create_cloudwatch_log_group(cloudwatchlog_client, log_group_name)
if not group_creation_completed:
return None
put_retention_policy_completed = put_cloudwatch_log_retention_policy(cloudwatchlog_client, log_group_name, retention_days)
if not put_retention_policy_completed:
return None
stream_creation_completed = create_cloudwatch_log_stream(cloudwatchlog_client, log_group_name, log_stream_name)
if not stream_creation_completed:
return None
return {
'client': cloudwatchlog_client,
'log_group_name': log_group_name,
'log_stream_name': log_stream_name
}
def create_default_cloudwatchlog_agent_if_not_exist(config):
if not check_if_cloudwatch_log_enabled(config):
return None
global CLOUDWATCHLOG_AGENT
if not CLOUDWATCHLOG_AGENT:
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config)
def get_botocore_client(config, service):
if not BOTOCORE_PRESENT:
logging.error('Failed to import botocore, please install botocore first.')
return None
session = botocore.session.get_session()
region = get_target_region(config)
iam_role_name = get_iam_role_name()
if iam_role_name:
credentials, _ = get_aws_security_credentials_from_instance_metadata(iam_role_name)
if credentials:
return session.create_client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['Token'], region_name=region)
return session.create_client(service, region_name=region)
def get_cloudwatchlog_config(config, fs_id=None):
log_group_name = DEFAULT_CLOUDWATCH_LOG_GROUP
if config.has_option(CLOUDWATCH_LOG_SECTION, 'log_group_name'):
log_group_name = config.get(CLOUDWATCH_LOG_SECTION, 'log_group_name')
retention_days = DEFAULT_RETENTION_DAYS
if config.has_option(CLOUDWATCH_LOG_SECTION, 'retention_in_days'):
retention_days = config.get(CLOUDWATCH_LOG_SECTION, 'retention_in_days')
log_stream_name = get_cloudwatch_log_stream_name(fs_id)
return {
'log_group_name': log_group_name,
'retention_days': int(retention_days),
'log_stream_name': log_stream_name
}
def get_cloudwatch_log_stream_name(fs_id=None):
instance_id = get_instance_identity_info_from_instance_metadata('instanceId')
if instance_id and fs_id:
log_stream_name = '%s - %s - mount.log' % (fs_id, instance_id)
elif instance_id:
log_stream_name = '%s - mount.log' % (instance_id)
elif fs_id:
log_stream_name = '%s - mount.log' % (fs_id)
else:
log_stream_name = 'default - mount.log'
return log_stream_name
def check_if_cloudwatch_log_enabled(config):
if config.has_option(CLOUDWATCH_LOG_SECTION, 'enabled'):
return config.getboolean(CLOUDWATCH_LOG_SECTION, 'enabled')
return False
def cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name):
cloudwatchlog_client.create_log_group(
logGroupName=log_group_name
)
logging.info('Created cloudwatch log group %s' % log_group_name)
def create_cloudwatch_log_group(cloudwatchlog_client, log_group_name):
try:
cloudwatch_create_log_group_helper(cloudwatchlog_client, log_group_name)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceAlreadyExistsException':
            logging.debug('Log group %s already exists, %s' % (log_group_name, e.response))
return True
elif exception == 'LimitExceededException':
logging.error('Reached the maximum number of log groups that can be created, %s' % e.response)
return False
elif exception == 'OperationAbortedException':
logging.debug('Multiple requests to update the same log group %s were in conflict, %s' % (log_group_name, e.response))
return False
elif exception == 'InvalidParameterException':
logging.error('Log group name %s is specified incorrectly, %s' % (log_group_name, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_put_retention_policy_helper(cloudwatchlog_client, log_group_name, retention_days):
cloudwatchlog_client.put_retention_policy(
logGroupName=log_group_name,
retentionInDays=retention_days
)
logging.debug('Set cloudwatch log group retention days to %s' % retention_days)
def put_cloudwatch_log_retention_policy(cloudwatchlog_client, log_group_name, retention_days):
try:
cloudwatch_put_retention_policy_helper(cloudwatchlog_client, log_group_name, retention_days)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceNotFoundException':
logging.error('Log group %s does not exist, %s' % (log_group_name, e.response))
return False
elif exception == 'OperationAbortedException':
logging.debug('Multiple requests to update the same log group %s were in conflict, %s' % (log_group_name, e.response))
return False
elif exception == 'InvalidParameterException':
logging.error('Either parameter log group name %s or retention in days %s is specified incorrectly, %s'
% (log_group_name, retention_days, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_create_log_stream_helper(cloudwatchlog_client, log_group_name, log_stream_name):
cloudwatchlog_client.create_log_stream(
logGroupName=log_group_name,
logStreamName=log_stream_name
)
logging.info('Created cloudwatch log stream %s in log group %s' % (log_stream_name, log_group_name))
def create_cloudwatch_log_stream(cloudwatchlog_client, log_group_name, log_stream_name):
try:
cloudwatch_create_log_stream_helper(cloudwatchlog_client, log_group_name, log_stream_name)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'ResourceAlreadyExistsException':
            logging.debug('Log stream %s already exists in log group %s, %s' % (log_stream_name, log_group_name, e.response))
return True
elif exception == 'InvalidParameterException':
logging.error('Either parameter log group name %s or log stream name %s is specified incorrectly, %s'
% (log_group_name, log_stream_name, e.response))
return False
elif exception == 'ResourceNotFoundException':
logging.error('Log group %s does not exist, %s' % (log_group_name, e.response))
return False
else:
handle_general_botocore_exceptions(e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token=None):
kwargs = {
'logGroupName': cloudwatchlog_agent.get('log_group_name'),
'logStreamName': cloudwatchlog_agent.get('log_stream_name'),
'logEvents': [
{
'timestamp': int(round(time.time() * 1000)),
'message': message
}
]
}
if token:
kwargs['sequenceToken'] = token
cloudwatchlog_agent.get('client').put_log_events(**kwargs)
def publish_cloudwatch_log(cloudwatchlog_agent, message):
if not cloudwatchlog_agent or not cloudwatchlog_agent.get('client'):
return False
token = get_log_stream_next_token(cloudwatchlog_agent)
try:
cloudwatch_put_log_events_helper(cloudwatchlog_agent, message, token)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'InvalidSequenceTokenException':
logging.debug('The sequence token is not valid, %s' % e.response)
return False
elif exception == 'InvalidParameterException':
            logging.debug('One of the parameters to put_log_events is not valid, %s' % e.response)
return False
elif exception == 'DataAlreadyAcceptedException':
logging.debug('The event %s was already logged, %s' % (message, e.response))
return False
elif exception == 'UnrecognizedClientException':
            logging.debug('The most likely cause is an invalid AWS access key ID or secret access key, %s' % e.response)
return False
elif exception == 'ResourceNotFoundException':
logging.error('Either log group %s or log stream %s does not exist, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
return False
else:
logging.debug('Unexpected error: %s' % e)
return False
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return False
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return False
except Exception as e:
logging.warning('Unknown error, %s.' % e)
return False
return True
def cloudwatch_describe_log_streams_helper(cloudwatchlog_agent):
return cloudwatchlog_agent.get('client').describe_log_streams(
logGroupName=cloudwatchlog_agent.get('log_group_name'),
logStreamNamePrefix=cloudwatchlog_agent.get('log_stream_name')
)
def get_log_stream_next_token(cloudwatchlog_agent):
try:
response = cloudwatch_describe_log_streams_helper(cloudwatchlog_agent)
except ClientError as e:
exception = e.response['Error']['Code']
if exception == 'InvalidParameterException':
logging.debug('Either parameter log group name %s or log stream name %s is specified incorrectly, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
elif exception == 'ResourceNotFoundException':
logging.debug('Either log group %s or log stream %s does not exist, %s'
% (cloudwatchlog_agent.get('log_group_name'), cloudwatchlog_agent.get('log_stream_name'), e.response))
else:
handle_general_botocore_exceptions(e)
return None
except NoCredentialsError as e:
logging.warning('Credentials are not properly configured, %s' % e)
return None
except EndpointConnectionError as e:
logging.warning('Could not connect to the endpoint, %s' % e)
return None
except Exception as e:
logging.warning('Unknown error, %s' % e)
return None
try:
log_stream = response['logStreams'][0]
return log_stream.get('uploadSequenceToken')
except (IndexError, TypeError, KeyError):
pass
return None
def handle_general_botocore_exceptions(error):
exception = error.response['Error']['Code']
if exception == 'ServiceUnavailableException':
logging.debug('The service cannot complete the request, %s' % error.response)
elif exception == 'AccessDeniedException':
logging.debug('User is not authorized to perform the action, %s' % error.response)
else:
logging.debug('Unexpected error: %s' % error)
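# Entry point: parse arguments, require root, read the config, set up file and CloudWatch logging,
# validate the options, resolve the mount-target DNS name, and then mount either through the TLS
# tunnel or directly over NFS.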
def main():
parse_arguments_early_exit()
assert_root()
config = read_config()
bootstrap_logging(config)
fs_id, path, mountpoint, options = parse_arguments(config)
logging.info('version=%s options=%s', VERSION, options)
global CLOUDWATCHLOG_AGENT
CLOUDWATCHLOG_AGENT = bootstrap_cloudwatch_logging(config, fs_id)
check_unsupported_options(options)
check_options_validity(options)
init_system = get_init_system()
check_network_status(fs_id, init_system)
dns_name = get_dns_name(config, fs_id)
if 'tls' in options:
mount_tls(config, init_system, dns_name, path, fs_id, mountpoint, options)
else:
mount_nfs(dns_name, path, mountpoint, options)
if '__main__' == __name__:
main()
| 2.015625 | 2 |