class MC(type):
    classes = []
    count = 0

    # __prepare__ seeds the namespace used for the class body
    def __prepare__(name, bases):
        return {'prepared': True}

    def __new__(cls, name, bases, namespace):
        MC.classes.append(name)
        return type.__new__(cls, name, bases, namespace)

    # Called whenever an instance of a class using this metaclass is created
    def __call__(cls):
        MC.count += 1
        return type.__call__(cls, MC.count)


class C(object, metaclass=MC):
    def __new__(cls, count):
        self = object.__new__(cls)
        self.count = count
        return self


class D(metaclass=MC):
    pass


assert MC == type(C)
assert C == type(C())
assert MC.classes == ['C', 'D']
assert C().count == 2
assert C.prepared
assert D.prepared
import csv
import math
import os

from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms


class CancerDataset(Dataset):
    '''Dataset class to feed the network with images and labels

    args:
        df (pandas df): dataframe containing two columns: image ids and target labels
        image_dir (string): directory with the images
        transform (callable, optional): optional transform to be applied on a sample.
            Should be None for testing.
    '''

    def __init__(self, df, image_dir, transform=None):
        self.labels = df.label
        self.im_names = df.id
        self.image_dir = image_dir

        # Transform to apply to each image - note that all images will be
        # converted to tensors
        if transform is not None:
            self.transform = transform
        else:
            self.transform = transforms.Compose([transforms.ToTensor()])

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # Extract image from file
        im_name = self.im_names[idx]
        im_path = os.path.join(self.image_dir, im_name + '.tif')
        im = Image.open(im_path)

        # Transform image
        im = self.transform(im)
        label = np.array(int(self.labels[idx]))

        sample = {'image': im.type(torch.cuda.FloatTensor),
                  'label': torch.from_numpy(label).type(torch.cuda.LongTensor)}

        return sample
#!/usr/bin/python3 import sys # From AT Keyboard Scan Codes (Set 2) scancodes = { # letters 0x001C: "KEY_A", 0x0032: "KEY_B", 0x0021: "KEY_C", 0x0023: "KEY_D", 0x0024: "KEY_E", 0x002B: "KEY_F", 0x0034: "KEY_G", 0x0033: "KEY_H", 0x0043: "KEY_I", 0x003B: "KEY_J", 0x0042: "KEY_K", 0x004B: "KEY_L", 0x003A: "KEY_M", 0x0031: "KEY_N", 0x0044: "KEY_O", 0x004D: "KEY_P", 0x0015: "KEY_Q", 0x002D: "KEY_R", 0x001B: "KEY_S", 0x002C: "KEY_T", 0x003C: "KEY_U", 0x002A: "KEY_V", 0x001D: "KEY_W", 0x0022: "KEY_X", 0x0035: "KEY_Y", 0x001A: "KEY_Z", # numbers 0x0045: "KEY_0", 0x0016: "KEY_1", 0x001E: "KEY_2", 0x0026: "KEY_3", 0x0025: "KEY_4", 0x002E: "KEY_5", 0x0036: "KEY_6", 0x003D: "KEY_7", 0x003E: "KEY_8", 0x0046: "KEY_9", # function keys 0x0005: "KEY_F1", 0x0006: "KEY_F2", 0x0004: "KEY_F3", 0x000C: "KEY_F4", 0x0003: "KEY_F5", 0x000B: "KEY_F6", 0x0083: "KEY_F7", 0x000A: "KEY_F8", 0x0001: "KEY_F9", 0x0009: "KEY_F10", 0x0078: "KEY_F11", 0x0007: "KEY_F12", # keypad 0x0070: "KEY_KP0", 0x0069: "KEY_KP1", 0x0072: "KEY_KP2", 0x007A: "KEY_KP3", 0x006B: "KEY_KP4", 0x0073: "KEY_KP5", 0x0074: "KEY_KP6", 0x006C: "KEY_KP7", 0x0075: "KEY_KP8", 0x007D: "KEY_KP9", 0xE04A: "KEY_KPSLASH", 0x007C: "KEY_KPASTERISK", 0x007B: "KEY_KPMINUS", 0x0079: "KEY_KPPLUS", 0xE05A: "KEY_KPENTER", 0x0071: "KEY_KPDOT", # insert cluster 0xE070: "KEY_INSERT", 0xE06C: "KEY_HOME", 0xE07D: "KEY_PAGEUP", 0xE07A: "KEY_PAGEDOWN", 0xE069: "KEY_END", 0xE071: "KEY_DELETE", # arrows 0xE075: "KEY_UP", 0xE072: "KEY_DOWN", 0xE06B: "KEY_LEFT", 0xE074: "KEY_RIGHT", # control keys 0x0012: "KEY_LEFTSHIFT", 0x0014: "KEY_LEFTCTRL", 0xE01F: "KEY_LEFTMETA", 0x0011: "KEY_LEFTALT", 0x0059: "KEY_RIGHTSHIFT", 0xE059: "KEY_RIGHTSHIFT", # BUG: Nexys A7 reports Right Shift as EO 59 0xE014: "KEY_RIGHTCTRL", 0xE027: "KEY_RIGHTMETA", 0xE011: "KEY_RIGHTALT", 0xE02F: "KEY_COMPOSE", 0x0076: "KEY_ESC", # symbols / punctuation 0x000E: "KEY_GRAVE", 0x004E: "KEY_MINUS", 0x0055: "KEY_EQUAL", 0x005D: "KEY_BACKSLASH", 0x0054: "KEY_LEFTBRACE", 0x005B: "KEY_RIGHTBRACE", 0x004C: "KEY_SEMICOLON", 0x0052: "KEY_APOSTROPHE", 0x0041: "KEY_COMMA", 0x0049: "KEY_DOT", 0x004A: "KEY_SLASH", # spaces 0x0066: "KEY_BACKSPACE", 0x000D: "KEY_TAB", 0x0029: "KEY_SPACE", 0x005A: "KEY_ENTER", # keyboard control 0x0058: "KEY_CAPSLOCK", 0x007E: "KEY_SCROLLLOCK", 0x0077: "KEY_NUMLOCK", } # from /usr/include/linux/input-event-codes.h virtual_keys = { "KEY_RESERVED": 0, "KEY_ESC": 1, "KEY_1": 2, "KEY_2": 3, "KEY_3": 4, "KEY_4": 5, "KEY_5": 6, "KEY_6": 7, "KEY_7": 8, "KEY_8": 9, "KEY_9": 10, "KEY_0": 11, "KEY_MINUS": 12, "KEY_EQUAL": 13, "KEY_BACKSPACE": 14, "KEY_TAB": 15, "KEY_Q": 16, "KEY_W": 17, "KEY_E": 18, "KEY_R": 19, "KEY_T": 20, "KEY_Y": 21, "KEY_U": 22, "KEY_I": 23, "KEY_O": 24, "KEY_P": 25, "KEY_LEFTBRACE": 26, "KEY_RIGHTBRACE": 27, "KEY_ENTER": 28, "KEY_LEFTCTRL": 29, "KEY_A": 30, "KEY_S": 31, "KEY_D": 32, "KEY_F": 33, "KEY_G": 34, "KEY_H": 35, "KEY_J": 36, "KEY_K": 37, "KEY_L": 38, "KEY_SEMICOLON": 39, "KEY_APOSTROPHE": 40, "KEY_GRAVE": 41, "KEY_LEFTSHIFT": 42, "KEY_BACKSLASH": 43, "KEY_Z": 44, "KEY_X": 45, "KEY_C": 46, "KEY_V": 47, "KEY_B": 48, "KEY_N": 49, "KEY_M": 50, "KEY_COMMA": 51, "KEY_DOT": 52, "KEY_SLASH": 53, "KEY_RIGHTSHIFT": 54, "KEY_KPASTERISK": 55, "KEY_LEFTALT": 56, "KEY_SPACE": 57, "KEY_CAPSLOCK": 58, "KEY_F1": 59, "KEY_F2": 60, "KEY_F3": 61, "KEY_F4": 62, "KEY_F5": 63, "KEY_F6": 64, "KEY_F7": 65, "KEY_F8": 66, "KEY_F9": 67, "KEY_F10": 68, "KEY_NUMLOCK": 69, "KEY_SCROLLLOCK": 70, "KEY_KP7": 71, "KEY_KP8": 72, "KEY_KP9": 73, "KEY_KPMINUS": 74, "KEY_KP4": 75, "KEY_KP5": 76, "KEY_KP6": 
77, "KEY_KPPLUS": 78, "KEY_KP1": 79, "KEY_KP2": 80, "KEY_KP3": 81, "KEY_KP0": 82, "KEY_KPDOT": 83, "KEY_F11": 87, "KEY_F12": 88, "KEY_KPENTER": 96, "KEY_RIGHTCTRL": 97, "KEY_KPSLASH": 98, "KEY_SYSRQ": 99, "KEY_RIGHTALT": 100, "KEY_HOME": 102, "KEY_UP": 103, "KEY_PAGEUP": 104, "KEY_LEFT": 105, "KEY_RIGHT": 106, "KEY_END": 107, "KEY_DOWN": 108, "KEY_PAGEDOWN": 109, "KEY_INSERT": 110, "KEY_DELETE": 111, "KEY_PAUSE": 119, "KEY_LEFTMETA": 125, "KEY_RIGHTMETA": 126, "KEY_COMPOSE": 127, } def main(*args): print("@0000") for scancode in range(0x00, 0x100): if scancode in scancodes: print("{0:02X}".format(virtual_keys[scancodes[scancode]])) else: print("{0:02X}".format(0x00)) for scancode in range(0xE000, 0xE100): if scancode in scancodes: print("{0:02X}".format(virtual_keys[scancodes[scancode]])) else: print("{0:02X}".format(0x00)) if __name__ == "__main__": main(sys.argv)
# list(map(int, input().split()))
# int(input())
def main(X, K, D):
    x = abs(X)
    k = K
    shou, amari = x // D, x % D  # quotient and remainder
    if shou >= k:
        # Cannot get all the way down to the neighbourhood of 0
        x -= k * D
        k = 0
    else:
        k -= (shou + 1)
        x -= (shou + 1) * D
    if k % 2:
        # Odd number of moves left
        print(abs(x + D))
    else:
        # Even number of moves left
        print(abs(x))


if __name__ == '__main__':
    X, K, D = list(map(int, input().split()))
    main(X, K, D)
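# Hedged usage sketch (not part of the original submission): with X=6, K=2, D=4
# the routine moves twice toward zero (6 -> 2 -> -2) and prints 2.
#
#     main(6, 2, 4)   # prints 2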
# -*- coding: utf-8 -*-
# @Time    : 2018/6/21 10:14 AM
# @Author  : Azrael.Bai
# @File    : table_reflect.py

import re

FILE_LIST = [
    # "ModelDef_GongShang_GSGW.py",
    "ModelDef_GongShang_HZ.py",
    # "ModelDef_GongShang_QCC.py",
    # "ModelDef_GongShang_QXB.py",
    # "ModelDef_SheSu_DFFY.py",
    # "ModelDef_SheSu_HZ.py",
    # "ModelDef_SheSu_QCC.py",
    # "ModelDef_SheSu_QXB.py",
    # "ModelDef_SheSu_ZGFY.py",
]

file_write = open("field_with_pojo.txt", 'w')
reg = re.compile(r"={\'DBColName\'.+?}")
model = ""

if __name__ == '__main__':
    for f in FILE_LIST:
        with open(f, 'r') as filecontent:
            for line in filecontent.readlines():
                if reg.findall(line):
                    field_def = line.split('=')[1].strip()
                    table_na = re.compile(r"\'DBColName\':\'(.+?)\'")
                    if table_na.findall(field_def)[0] == "bold_id":
                        model = line.split('=')[0].strip().replace("_bold_id", "")
                    pojo_na = re.compile(r"\'refPOJOColName\':\'(.+?)\'")
                    pojo_chin = re.compile(r"\'PGFieldLabel\':\'(.+?)\'")
                    file_write.write(table_na.findall(field_def)[0] + "," + pojo_na.findall(field_def)[0] + "\n")
    file_write.close()

# Exclude bold_id, OfCompany, DengJiRen, DengJiShiJian
import numpy as np from scipy.optimize import fsolve import matplotlib.pyplot as plt conserved_variables = ('Depth', 'Momentum') primitive_variables = ('Depth', 'Velocity') left, middle, right = (0, 1, 2) def pospart(x): return np.maximum(1.e-15,x) def primitive_to_conservative(h, u): hu = h*u return h, hu def conservative_to_primitive(h, hu): assert np.all(h>=0) # We should instead check that hu is zero everywhere that h is u = hu/pospart(h) return h, u def cons_to_prim(q): return conservative_to_primitive(*q) def exact_riemann_solution(q_l, q_r, grav=1., force_waves=None, primitive_inputs=False): """Return the exact solution to the Riemann problem with initial states q_l, q_r. The solution is given in terms of a list of states, a list of speeds (each of which may be a pair in case of a rarefaction fan), and a function reval(xi) that gives the solution at a point xi=x/t. The input and output vectors are the conserved quantities. """ if primitive_inputs: h_l, u_l = q_l h_r, u_r = q_r hu_l = h_l*u_l hu_r = h_r*u_r else: h_l, u_l = conservative_to_primitive(*q_l) h_r, u_r = conservative_to_primitive(*q_r) hu_l = q_l[1] hu_r = q_r[1] # Compute left and right state sound speeds c_l = np.sqrt(grav*h_l) c_r = np.sqrt(grav*h_r) # Define the integral curves and hugoniot loci # Avoid warnings due to negative depths in fsolve calls integral_curve_1 = lambda h: u_l + 2*(np.sqrt(grav*h_l) - np.sqrt(grav*np.maximum(h,0))) integral_curve_2 = lambda h: u_r - 2*(np.sqrt(grav*h_r) - np.sqrt(grav*np.maximum(h,0))) hugoniot_locus_1 = lambda h: (h_l*u_l + (h-h_l)*(u_l - np.sqrt(grav*h_l*(1 + (h-h_l)/h_l) * (1 + (h-h_l)/(2*h_l)))))/h hugoniot_locus_2 = lambda h: (h_r*u_r + (h-h_r)*(u_r + np.sqrt(grav*h_r*(1 + (h-h_r)/h_r) * (1 + (h-h_r)/(2*h_r)))))/h # Check whether the 1-wave is a shock or rarefaction def phi_l(h): if (h>=h_l and force_waves!='raref') or force_waves=='shock': return hugoniot_locus_1(h) else: return integral_curve_1(h) # Check whether the 2-wave is a shock or rarefaction def phi_r(h): if (h>=h_r and force_waves!='raref') or force_waves=='shock': return hugoniot_locus_2(h) else: return integral_curve_2(h) ws = np.zeros(4) wave_types = ['', ''] dry_velocity_l = u_l + 2*np.sqrt(grav*h_l) dry_velocity_r = u_r - 2*np.sqrt(grav*h_r) if dry_velocity_l < dry_velocity_r: # Dry middle state h_m = 0 # This is a bit arbitrary: u_m = 0.5*(dry_velocity_l + dry_velocity_r) hu_m = u_m * h_m ws[0] = u_l - c_l ws[1] = dry_velocity_l ws[2] = dry_velocity_r ws[3] = u_r + c_r elif h_l == 0: # Dry left state; 2-rarefaction only h_m = 0 u_m = dry_velocity_r hu_m = u_m * h_m ws[0] = 0 ws[1] = 0 ws[2] = dry_velocity_r ws[3] = u_r + c_r elif h_r == 0: # Dry right state; 1-rarefaction only h_m = 0 u_m = dry_velocity_l hu_m = u_m * h_m ws[0] = u_l - c_l ws[1] = dry_velocity_l ws[2] = 0 ws[3] = 0 else: phi = lambda h: phi_l(h)-phi_r(h) # Compute middle state h, hu by finding curve intersection guess = (u_l-u_r+2.*np.sqrt(grav)*(np.sqrt(h_l)+np.sqrt(h_r)))**2./16./grav h_m,info, ier, msg = fsolve(phi, guess, full_output=True, xtol=1.e-14) # For strong rarefactions, sometimes fsolve needs help if ier!=1: h_m,info, ier, msg = fsolve(phi, guess,full_output=True,factor=0.1,xtol=1.e-10) # This should not happen: if ier!=1: print('Warning: fsolve did not converge.') print(msg) u_m = phi_l(h_m) hu_m = u_m * h_m # Find shock and rarefaction speeds if (h_m>h_l and force_waves!='raref') or force_waves=='shock': wave_types[0] = 'shock' ws[0] = (hu_l - hu_m) / (h_l - h_m) ws[1] = ws[0] else: wave_types[0] = 'raref' c_m = np.sqrt(grav 
* h_m) ws[0] = u_l - c_l ws[1] = u_m - c_m if (h_m>h_r and force_waves!='raref') or force_waves=='shock': wave_types[1] = 'shock' ws[2] = (hu_r - hu_m) / (h_r - h_m) ws[3] = ws[2] else: wave_types[0] = 'raref' c_m = np.sqrt(grav * h_m) ws[2] = u_m + c_m ws[3] = u_r + c_r # Find solution inside rarefaction fans (in primitive variables) def raref1(xi): RiemannInvariant = u_l + 2*np.sqrt(grav*h_l) h = ((RiemannInvariant - xi)**2 / (9*grav)) u = (xi + np.sqrt(grav*h)) hu = h*u return h, hu def raref2(xi): RiemannInvariant = u_r - 2*np.sqrt(grav*h_r) h = ((RiemannInvariant - xi)**2 / (9*grav)) u = (xi - np.sqrt(grav*h)) hu = h*u return h, hu q_m = np.squeeze(np.array((h_m, hu_m))) states = np.column_stack([q_l,q_m,q_r]) speeds = [[], []] if wave_types[0] is 'shock': speeds[0] = ws[0] else: speeds[0] = (ws[0],ws[1]) if wave_types[1] is 'shock': speeds[1] = ws[2] else: speeds[1] = (ws[2],ws[3]) def reval(xi): """ Function that evaluates the Riemann solution for arbitrary xi = x/t. Sets the solution to nan in an over-turning rarefaction wave for illustration purposes of this non-physical solution. """ rar1 = raref1(xi) rar2 = raref2(xi) h_out = (xi<=ws[0])*h_l + \ (xi>ws[0])*(xi<=ws[1])*rar1[0] + \ (xi>ws[1])*(xi<=ws[0])*1e9 + \ (xi>ws[1])*(xi<=ws[2])*h_m + \ (xi>ws[2])*(xi<=ws[3])*rar2[0] + \ (xi>ws[3])*(xi<=ws[2])*1e9 + \ (xi>ws[3])*h_r h_out[h_out>1e8] = np.nan hu_out = (xi<=ws[0])*hu_l + \ (xi>ws[0])*(xi<=ws[1])*rar1[1] + \ (xi>ws[1])*(xi<=ws[0])*1e9 + \ (xi>ws[1])*(xi<=ws[2])*hu_m + \ (xi>ws[2])*(xi<=ws[3])*rar2[1] + \ (xi>ws[3])*(xi<=ws[2])*1e9 + \ (xi>ws[3])*hu_r hu_out[hu_out>1e8] = np.nan return h_out, hu_out return states, speeds, reval, wave_types def integral_curve(h, hstar, hustar, wave_family, g=1., y_axis='u'): """ Return u or hu as a function of h for integral curves through (hstar, hustar). """ ustar = hustar / pospart(hstar) if wave_family == 1: if y_axis == 'u': return ustar + 2*(np.sqrt(g*hstar) - np.sqrt(g*h)) else: return h*ustar + 2*h*(np.sqrt(g*hstar) - np.sqrt(g*h)) else: if y_axis == 'u': return ustar - 2*(np.sqrt(g*hstar) - np.sqrt(g*h)) else: return h*ustar - 2*h*(np.sqrt(g*hstar) - np.sqrt(g*h)) def hugoniot_locus(h, hstar, hustar, wave_family, g=1., y_axis='u'): """ Return u or hu as a function of h for the Hugoniot locus through (hstar, hustar). """ ustar = hustar / hstar alpha = h - hstar d = np.sqrt(g*hstar*(1 + alpha/hstar)*(1 + alpha/(2*hstar))) if wave_family == 1: if y_axis == 'u': return (hustar + alpha*(ustar - d))/pospart(h) else: return hustar + alpha*(ustar - d) else: if y_axis == 'u': return (hustar + alpha*(ustar + d))/pospart(h) else: return hustar + alpha*(ustar + d) def phase_plane_curves(hstar, hustar, state, g=1., wave_family='both', y_axis='u', ax=None, plot_unphysical=False): """ Plot the curves of points in the h - u or h-hu phase plane that can be connected to (hstar,hustar). state = 'qleft' or 'qright' indicates whether the specified state is ql or qr. wave_family = 1, 2, or 'both' indicates whether 1-waves or 2-waves should be plotted. Colors in the plots indicate whether the states can be connected via a shock or rarefaction. 
""" if ax is None: fig, ax = plt.subplots() h = np.linspace(0, hstar, 200) if wave_family in [1,'both']: if state == 'qleft' or plot_unphysical: u = integral_curve(h, hstar, hustar, 1, g, y_axis=y_axis) ax.plot(h,u,'b', label='1-rarefactions') if state == 'qright' or plot_unphysical: u = hugoniot_locus(h, hstar, hustar, 1, g, y_axis=y_axis) ax.plot(h,u,'--r', label='1-shocks') if wave_family in [2,'both']: if state == 'qleft' or plot_unphysical: u = hugoniot_locus(h, hstar, hustar, 2, g, y_axis=y_axis) ax.plot(h,u,'--r', label='2-shocks') if state == 'qright' or plot_unphysical: u = integral_curve(h, hstar, hustar, 2, g, y_axis=y_axis) ax.plot(h,u,'b', label='2-rarefactions') h = np.linspace(hstar, 3, 200) if wave_family in [1,'both']: if state == 'qright' or plot_unphysical: u = integral_curve(h, hstar, hustar, 1, g, y_axis=y_axis) ax.plot(h,u,'--b', label='1-rarefactions') if state == 'qleft' or plot_unphysical: u = hugoniot_locus(h, hstar, hustar, 1, g, y_axis=y_axis) ax.plot(h,u,'r', label='1-shocks') if wave_family in [2,'both']: if state == 'qright' or plot_unphysical: u = hugoniot_locus(h, hstar, hustar, 2, g, y_axis=y_axis) ax.plot(h,u,'r', label='2-shocks') if state == 'qleft' or plot_unphysical: u = integral_curve(h, hstar, hustar, 2, g, y_axis=y_axis) ax.plot(h,u,'--b', label='2-rarefactions') # plot and label the point (hstar, hustar) ax.set_xlabel('Depth (h)') if y_axis == 'u': ustar = hustar/hstar ax.set_ylabel('Velocity (u)') else: ustar = hustar # Fake it ax.set_ylabel('Momentum (hu)') ax.plot([hstar],[ustar],'ko',markersize=5) ax.text(hstar + 0.1, ustar - 0.2, state, fontsize=13) def make_axes_and_label(x1=-.5, x2=6., y1=-2.5, y2=2.5): plt.plot([x1,x2],[0,0],'k') plt.plot([0,0],[y1,y2],'k') plt.axis([x1,x2,y1,y2]) plt.legend() plt.xlabel("h = depth",fontsize=15) plt.ylabel("hu = momentum",fontsize=15) def phase_plane_plot(q_l, q_r, g=1., ax=None, force_waves=None, y_axis='u'): r"""Plot the Hugoniot loci or integral curves in the h-u or h-hu plane.""" # Solve Riemann problem states, speeds, reval, wave_types = \ exact_riemann_solution(q_l, q_r, g, force_waves=force_waves) # Set plot bounds if ax is None: fig, ax = plt.subplots() x = states[0,:] if y_axis == 'hu': y = states[1,:] else: y = states[1,:]/pospart(states[0,:]) if states[0,middle] == 0: dry_velocity_l = states[1,left]/pospart(states[0,left]) + 2*np.sqrt(g*states[0,left]) dry_velocity_r = states[1,right]/pospart(states[0,right]) - 2*np.sqrt(g*states[0,right]) y[1] = 1./(np.abs(np.sign(dry_velocity_l))+np.abs(np.sign(dry_velocity_r))) * \ (dry_velocity_l+dry_velocity_r) xmax, xmin = max(x), min(x) ymax = max(abs(y)) dx = xmax - xmin ymax = max(abs(y)) ax.set_xlim(0, xmax + 0.5*dx) ax.set_ylim(-1.5*ymax, 1.5*ymax) ax.set_xlabel('Depth (h)') if y_axis == 'u': ax.set_ylabel('Velocity (u)') else: ax.set_ylabel('Momentum (hu)') # Plot curves h_l = states[0,left] h1 = np.linspace(1.e-2,h_l) h2 = np.linspace(h_l,xmax+0.5*dx) if wave_types[0] == 'shock': hu1 = hugoniot_locus(h1, h_l, states[1,left], wave_family=1, g=g, y_axis=y_axis) hu2 = hugoniot_locus(h2, h_l, states[1,left], wave_family=1, g=g, y_axis=y_axis) ax.plot(h1,hu1,'--r') ax.plot(h2,hu2,'r') else: hu1 = integral_curve(h1, h_l, states[1,left], wave_family=1, g=g, y_axis=y_axis) hu2 = integral_curve(h2, h_l, states[1,left], wave_family=1, g=g, y_axis=y_axis) ax.plot(h1,hu1,'b') ax.plot(h2,hu2,'--b') h_r = states[0,right] h1 = np.linspace(1.e-2,h_r) h2 = np.linspace(h_r,xmax+0.5*dx) if wave_types[1] == 'shock': hu1 = hugoniot_locus(h1, states[0,right], 
states[1,right], wave_family=2, g=g, y_axis=y_axis) hu2 = hugoniot_locus(h2, states[0,right], states[1,right], wave_family=2, g=g, y_axis=y_axis) ax.plot(h1,hu1,'--r') ax.plot(h2,hu2,'r') else: hu1 = integral_curve(h1, states[0,right], states[1,right], wave_family=2, g=g, y_axis=y_axis) hu2 = integral_curve(h2, states[0,right], states[1,right], wave_family=2, g=g, y_axis=y_axis) ax.plot(h1,hu1,'b') ax.plot(h2,hu2,'--b') for xp,yp in zip(x,y): ax.plot(xp,yp,'ok',markersize=10) # Label states for i,label in enumerate(('Left', 'Middle', 'Right')): ax.text(x[i] + 0.025*dx,y[i] + 0.025*ymax,label) def plot_hugoniot_loci(plot_1=True,plot_2=False,y_axis='hu'): h = np.linspace(0.001,3,100) hstar = 1.0 legend = plot_1*['1-loci'] + plot_2*['2-loci'] for hustar in np.linspace(-4,4,15): if plot_1: hu = hugoniot_locus(h,hstar,hustar,wave_family=1,y_axis=y_axis) plt.plot(h,hu,'-',color='cornflowerblue') if plot_2: hu = hugoniot_locus(h,hstar,hustar,wave_family=2,y_axis=y_axis) plt.plot(h,hu,'-',color='lightblue') plt.axis((0,3,-3,3)) plt.xlabel('depth h') if y_axis=='hu': plt.ylabel('momentum hu') else: plt.ylabel('velocity u') plt.title('Hugoniot loci') plt.legend(legend,loc=1) plt.show() def make_demo_plot_function(h_l=3., h_r=1., u_l=0., u_r=0): from matplotlib.mlab import find import matplotlib.pyplot as plt from exact_solvers import shallow_water from utils import riemann_tools plt.style.use('seaborn-talk') g = 1. q_l = shallow_water.primitive_to_conservative(h_l,u_l) q_r = shallow_water.primitive_to_conservative(h_r,u_r) x = np.linspace(-1.,1.,1000) states, speeds, reval, wave_types = shallow_water.exact_riemann_solution(q_l,q_r,g) # compute particle trajectories: def reval_rho_u(x): q = reval(x) rho = q[0] u = q[1]/q[0] rho_u = np.vstack((rho,u)) return rho_u x_traj, t_traj, xmax = \ riemann_tools.compute_riemann_trajectories(states, speeds, reval_rho_u, wave_types, i_vel=1, xmax=2, rho_left=h_l/4., rho_right=h_r/4.) num_vars = len(primitive_variables) def plot_shallow_water_demo(t=0.5, fig=0): if t == 0: q = np.zeros((2,len(x))) q[0,:] = q_l[0]*(x<=0) + q_r[0]*(x>0) q[1,:] = q_l[1]*(x<=0) + q_r[1]*(x>0) else: q = np.array(reval(x/t)) if t<0.02: q[1] = np.where(x<0, q_l[1], q_r[1]) primitive = shallow_water.conservative_to_primitive(q[0],q[1]) if fig == 0: fig = plt.figure(figsize=(18,6)) show_fig = True else: show_fig = False axes = [0]*num_vars for i in range(num_vars): axes[i] = fig.add_subplot(1,num_vars,i+1) q = primitive[i] plt.plot(x,q,'-k',linewidth=3) plt.title(primitive_variables[i]) axes[i].set_xlim(-1,1) if i==0: # plot stripes only on depth plot n = find(t > t_traj) if len(n)==0: n = 0 else: n = min(n.max(), len(t_traj)-1) for i in range(1, x_traj.shape[1]-1): j1 = find(x_traj[n,i] > x) if len(j1)==0: j1 = 0 else: j1 = min(j1.max(), len(x)-1) j2 = find(x_traj[n,i+1] > x) if len(j2)==0: j2 = 0 else: j2 = min(j2.max(), len(x)-1) # set advected color for density plot: if x_traj[0,i]<0: # shades of red for fluid starting from x<0 if np.mod(i,2)==0: c = [1,0,0] else: c = [1,0.8,0.8] else: # shades of blue for fluid starting from x<0 if np.mod(i,2)==0: c = [0,0,1] else: c = [0.8,0.8,1] plt.fill_between(x[j1:j2],q[j1:j2],0,color=c) axes[0].set_ylim(0,3.5) axes[1].set_ylim(-1,1) if show_fig: plt.show() return plot_shallow_water_demo
import tf_semseg, eval
import tensorflow as tf


def test_upernet_vitb_ade20k():
    model = tf_semseg.model.pretrained.openmmlab.upernet_vitb_ade20k.create()
    predictor = lambda x: model(x, training=False)
    accuracy = eval.ade20k(predictor, tf_semseg.model.pretrained.openmmlab.upernet_vitb_ade20k.preprocess)
    print(f"Got accuracy {accuracy * 100.0}")
    assert accuracy > 0.909
x = 10
print(x)

# Change the value of x at any time
x = 12
print(x)
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import patch

from pypytranspy.cli import transpile_dir


def test_transpile_dir_excludes_out_path():
    with TemporaryDirectory() as tmp_dir:
        tmp_path = Path(tmp_dir)
        src_path = tmp_path / "src"
        out_path = tmp_path / "src" / "out"
        out_path.mkdir(parents=True)

        with patch(f"{transpile_dir.__module__}.{transpile_dir.__name__}") as m_transpile:
            transpile_dir(src_path, out_path)
            assert m_transpile.call_args is None
""" Views for VPN management API """ from django.http import HttpResponse from django.conf import settings from django.views.decorators.csrf import csrf_exempt import json from tempfile import mkstemp import os from sign import repository from models import State from utils import * @csrf_exempt def post_csr(request): if request.method == 'POST': try: State.objects.get(username=request.user.username).delete() except: pass session = State.objects.create(username=request.user.username) cert_repository = repository() cert_repository.prepare_repository() csr = request.REQUEST.get("csr") if csr is None: return HttpResponse(json.dumps({"success": False, "message": "Invalid request. Post CSR."})) filehandle, filename = mkstemp() os.close(filehandle) filehandle = open(filename, "w") filehandle.write(csr) filehandle.close() status, errors, fields = api_validate_csr(request.user.username, filename) if not status: os.remove(filename) return HttpResponse(json.dumps({"success": False, "message": "Invalid CSR"})) session.valid_csr = True session.csr_filename = filename session.save() status = api_gen_and_send_password(request.user.username) if not status.get("success"): return HttpResponse(json.dumps(status)) session.password = status.get("password") session.save() return HttpResponse(json.dumps({"success": True})) else: return HttpResponse(json.dumps({"success": False, "message": "Invalid request. Post CSR."})) @csrf_exempt def post_verification(request): try: session = State.objects.get(username=request.user.username) except: return HttpResponse(json.dumps({"success": False, "message": "No session available."})) if session.expired(): return HttpResponse(json.dumps({"success": False, "message": "Session expired."})) if not session.valid_csr: return HttpResponse(json.dumps({"success": False, "message": "No valid CSR uploaded."})) if request.method == 'POST': password = request.REQUEST.get("password") if password is None: return HttpResponse(json.dumps({"success": False, "message": "No password in request."})) valid_password = session.password csr_filename = session.csr_filename if password == valid_password: signing = api_sign_and_deploy(request.user.username, csr_filename) cn = signing.get_cn() if cn is None: return HttpResponse(json.dumps({"success": False, "message": "Internal error while signing."})) return HttpResponse(json.dumps({"success": True, "zip_url": settings.BASE_URL + "/api/zip/%s.zip" % cn})) return HttpResponse(json.dumps({"success": False, "message": "Wrong password"})) else: return HttpResponse(json.dumps({"success": False, "message": "Invalid request. Post password."}))
class Knife:
    length = "3in"
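# Hedged usage sketch (not part of the original snippet): `length` is a class
# attribute, so it is shared by the class and by every instance that does not
# shadow it with its own value.
#
#     k = Knife()
#     Knife.length   # "3in"
#     k.length       # "3in"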
from .Client import *

__copyright__ = 'Copyright (C) 2020 Atsushi Nakatsugawa'
__version__ = '1.0.0'
__license__ = 'MIT License'
__author__ = 'Atsushi Nakatsugawa'
__author_email__ = '[email protected]'
__url__ = 'https://github.com/goofmint/CustomersMailCloudPy'
__all__ = ['CustomersMailCloud']
multiplied_trees = 1
trees_hit = 0

with open("input.txt", "r") as puzzle_input:
    horizontal_index = 0
    for line in puzzle_input:
        line = line.strip()
        current_line = line * multiplied_trees
        if horizontal_index >= len(current_line):
            multiplied_trees += 1
            current_line = line * multiplied_trees
        if current_line[horizontal_index] == "#":
            trees_hit += 1
        horizontal_index += 3

print(trees_hit)
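# Hedged alternative sketch (not the original author's approach): the same
# right-3/down-1 traversal is often written with modulo indexing instead of
# repeating the line, assuming the grid pattern repeats to the right.
#
#     trees_hit = 0
#     with open("input.txt") as puzzle_input:
#         for row, line in enumerate(puzzle_input):
#             line = line.strip()
#             if line[(row * 3) % len(line)] == "#":
#                 trees_hit += 1
#     print(trees_hit)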
import os
from pathlib import Path

import boto3
import botocore
from moto import mock_s3
from pytest_mock import MockFixture

from ignite.engine import Engine

from smtools.torch.handlers import s3_copy


@mock_s3
def test_s3_copy(tmp_path: Path, mocker: MockFixture):
    # Setup
    # local files to upload
    names = ["model.tar.gz", "trainer_checkpoint_123.pth", "log.sqlite"]
    for name in names:
        (tmp_path / name).write_text(f"This is {name}.")

    # trainer mock
    trainer = mocker.MagicMock(spec=Engine)
    trainer.iteration = 123

    # S3 bucket mock
    s3 = boto3.resource("s3")
    bucket_name = "sample"
    s3.create_bucket(Bucket=bucket_name)

    # Execute
    s3_copy(
        trainer,
        pattern=f"{tmp_path}/model.tar.gz",
        bucket_name=bucket_name,
        key_prefix="{job_name}/model/iter_{trainer.iteration}",
    )
    s3_copy(
        trainer,
        pattern=f"{tmp_path}/trainer_checkpoint_*.pth",
        bucket_name=bucket_name,
        key_prefix="{job_name}/trainer/iter_{trainer.iteration}",
    )
    s3_copy(
        trainer,
        pattern=f"{tmp_path}/log.sqlite",
        bucket_name=bucket_name,
    )

    # Check
    try:
        # res = s3.Object(bucket_name, "model/iter_123/model.tar.gz").get()
        bucket = s3.Bucket(bucket_name)
        object_summaries = bucket.objects.all()
        keys = tuple(sorted([obj_sum.key for obj_sum in object_summaries]))
        job_name = os.uname()[1]
        assert keys == tuple(
            sorted(
                [
                    f"{job_name}/model/iter_123/model.tar.gz",
                    f"{job_name}/trainer/iter_123/trainer_checkpoint_123.pth",
                    f"{job_name}/log.sqlite",
                ]
            )
        )
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] == "404":
            assert False, "The file doesn't exist."
        else:
            assert False, e
# Copyright (c) 2019 NVIDIA Corporation
from app import app
from transformers import RobertaTokenizer, RobertaConfig
from fairseq.data.encoders.fastbpe import fastBPE
from fairseq.data import Dictionary
from vncorenlp import VnCoreNLP

from model.PhoBERT import PhoBERT
from train_phobert import *

import argparse

parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--folder_model', type=str, default='./PhoBERT_base_transformers/model.bin')
parser.add_argument('--config_path', type=str, default='./PhoBERT_base_transformers/config.json')
parser.add_argument('--dict_path', type=str, default='./PhoBERT_base_transformers/dict.txt')
parser.add_argument('--bpe_codes', type=str, default='./PhoBERT_base_transformers/bpe.codes')
parser.add_argument('--rdrsegmenter_path', type=str, default='./VnCoreNLP/VnCoreNLP-1.1.1.jar')
parser.add_argument('--path_train_data', type=str)
parser.add_argument('--path_test_data', type=str)
parser.add_argument('--load_data_from_pt', action='store_true')
parser.add_argument('--path_log_file', type=str)
parser.add_argument('--output_dir', type=str)
parser.add_argument('--max_seq_length', type=int, default=256)
parser.add_argument('--max_query_length', type=int, default=64)
parser.add_argument('--batch_size', type=int, default=20)
parser.add_argument('--num_labels', type=int, default=2)
parser.add_argument('--learning_rate', type=float, default=3e-5)
parser.add_argument('--gradient_accumulation_steps', type=int, default=5)
parser.add_argument('--weight_decay', type=float, default=0.0)
parser.add_argument('--adam_epsilon', type=float, default=1e-8)
parser.add_argument('--max_grad_norm', type=float, default=1.0)
parser.add_argument('--warmup_steps', type=int, default=0)
parser.add_argument('--output_hidden_states', type=bool, default=True)
parser.add_argument('--num_train_epochs', type=int, default=5)
parser.add_argument('--save_steps', type=int, default=60)
parser.add_argument('--device', type=str, default='cpu')
parser.add_argument('--seed', type=int, default=27)
parser.add_argument('--patience', type=int, default=20)

args = parser.parse_args()

bpe = fastBPE(args)

# Need to rebuild the config with a device option so training can run on another CUDA device
config = RobertaConfig.from_pretrained(args.config_path)
config = config.to_dict()
config.update({"device": args.device})
config.update({"output_hidden_states": args.output_hidden_states})
config = RobertaConfig.from_dict(config)

rdrsegmenter = VnCoreNLP(args.rdrsegmenter_path, annotators='wseg', max_heap_size='-Xmx500m')

vocab = Dictionary()
vocab.add_from_file(args.dict_path)

model = PhoBERT.from_pretrained(args.folder_model, config=config)
model = model.to(args.device)

train_qa(args, rdrsegmenter, bpe, vocab, model)
# -*- coding: utf-8 -*- """ PyElectrica |1.1.4| Modulo Python con funciones útiles para resolver problemas específicos en Ingeniería Eléctrica relativos a los Circuitos y Máquinas Eléctricas. Funciones integradas en el módulo PyElectrica: ---------------------------------------------- ANÁLISIS DE CIRCUITOS ELÉCTRICOS ---------------------------------------------- * leyOhm * vNodos * vNodosV * iLazos * iLazosV * bode * bodeNB * escalon * c_1orden ---------------------------------------------- ANÁLISIS DE MÁQUINAS ELÉCTRICAS ---------------------------------------------- * mLineal_CC * compCA_GenSinc * par_vel * cepTransformador ---------------------------------------------- CALCULOS EN INSTALACIONES ELÉCTRICAS ---------------------------------------------- * imonoF * cTensionMono ---------------------------------------------- CONSTANTES Y FUNCIONES MATEMÁTICAS ---------------------------------------------- * pi - Constante pi = 3.141592653589793 * exp - Constante e : Número de Euler * cos - Función coseno * sin - Función seno * sqrt - Raíz cuadrada """ __author__ = "Isai Aragón Parada" __copyright__ = "Copyright 2018, Isai Aragón" __credits__ = "Isai Aragón Parada" __license__ = "MIT" __version__ = "1.1.3" __maintainer__ = "Isai Aragón Parada" __email__ = "[email protected]" __status__ = "En constante desarrollo" # ----------------------------------------------------------------------------- # Se importan los modulos necesarios. #import pylab #import numpy as np #from scipy import signal #from numpy.linalg import solve #import matplotlib.pyplot as plt #from numpy import linspace, arange, pi, cos, sin, exp, array, sqrt # ----------------------------------------------------------------------------- # --------------------- CIRCUITOS ELÉCTRICOS ------------------------------- # ----------------------------------------------------------------------------- # Función para calcular la Ley de Ohm, en base al parámetro con incognita '?'. def leyOhm(**param): """ Función para calcular la Ley de Ohm, en base al parámetro con incognita '?'. Ejemplo: leyOhm(V='?', I=3, R=4) V = Valor de tensión I = Valor corriente R = Valor de resistencia IMPORTANTE: Se debe indicar dos valores numericos y el valor que se quiere calcular indicando su valor con la cadena de texto: '?' Ejemplo: # Para calcular el voltaje: leyOhm(V='?', I=3, R=4) # Para calcular la resistencia: leyOhm(V=24, I=3.5, R='?') # Para calcular la corriente: leyOhm(V=12, I='?', R=5) ** La función acepta números complejos ** """ # Se importan los módulos necesarios. import numpy as np # Se resuelve la ecuacion de la Ley de Ohm. if param['V'] == '?': print('V =', np.ma.round((param['I'] * param['R']), 3), 'V') elif param['I'] == '?': print('I =', np.ma.round((param['V'] / param['R']), 3), 'A') elif param['R'] == '?': print('R =', np.ma.round((param['V'] / param['I']), 3), 'Ω') else: print('¡No hay nada que calcular!') print(''' (#_#)''') print('Si quieres que calcule algo,') print('tienes que indicar la incognita \'?\' en algun parametro.') # ----------------------------------------------------------------------------- # Función para calcular los diagramas de bode. Versión Consola. def bode(num, den): """ Función que genera los diagramas de Bode para una función de transferencia, indicada por su numerador (num) y denominador(den). Ejemplo: bode(num, den) num = valores en formato de lista, que contiene lo valores del númerador de la fución de transferencia. 
den = valores en formato de lista, que contiene los valores del denominador de la función de transferencia. """ # Se importan los módulos necesarios. import pylab from scipy import signal import matplotlib.pyplot as plt # Se determina el tamaño de la gráfica #import pylab pylab.rcParams['figure.figsize'] = (9, 6.5) # Se declara la función de transferencia, frecuencia (w), magnitud (mag) # y la fase. sistema = signal.TransferFunction(num, den) w, mag, fase = signal.bode(sistema) # Se generan las gráficas de la lo diagramas de Bode. # Diagrama de Amplitud plt.subplot(2, 1, 1) plt.semilogx(w, mag) plt.title('Diagramas de Bode') plt.ylabel('Amplitud $(dB)$') plt.grid(True, which='both', axis='both') # Diagrama de Fase plt.subplot(2, 1, 2) plt.semilogx(w, fase) plt.ylabel('Fase $(°)$') plt.xlabel('$ \omega \ (rad/seg) $') plt.grid(which='both', axis='both') # Se muestran los diagrmas en pantalla plt.show() # ----------------------------------------------------------------------------- # Función para calcular los diagramas de Bode. Versión Jupyter Notebook. def bodeNb(num, den): """ Función que genera los diagramas de Bode para una función de transferencia, indicada por su numerador (num) y denominador(den). Ejemplo: bodeNb(num, den) num = valores en formato de lista, que contiene lo valores del númerador de la fución de transferencia. den = valores en formato de lista, que contiene los valores del denominador de la función de transferencia. """ # Se importan los modulos necesarios. import pylab from scipy import signal import matplotlib.pyplot as plt # Se determina el tamaño de la gráfica #import pylab pylab.rcParams['figure.figsize'] = (9, 6.5) # Se declara la función de transferencia, frecuencia (w), magnitud (mag) # y la fase. sistema = signal.TransferFunction(num, den) w, mag, fase = signal.bode(sistema) # Se generan las gráficas de la lo diagramas de Bode. # Diagrama de Amplitud plt.figure() plt.semilogx(w, mag) plt.title('Diagramas de Bode') plt.ylabel('Amplitud $(dB)$') plt.grid(True, which='both', axis='both') # Diagrama de Fase plt.figure() plt.semilogx(w, fase) plt.ylabel('Fase $(°)$') plt.xlabel('$ \omega \ (rad/seg) $') plt.grid(which='both', axis='both') # Se muestran los diagrmas en pantalla plt.show() # ----------------------------------------------------------------------------- # Función para generar la respuesta escalón de una función de transferencia. def escalon(num, den): """ Función escalón, para generar la respuesta escalón en base a una función de transferencia. Ejemplo: escalon(num, den) num = valores en formato de lista, que contiene lo valores del númerador de la fución de transferencia. den = valores en formato de lista, que contiene los valores del denominador de la función de transferencia. """ # Se importan los modulos necesarios. import pylab from scipy import signal import matplotlib.pyplot as plt # Se determina el tamaño de la gráfica pylab.rcParams['figure.figsize'] = (9, 6.5) # Se declara la función de transferencia, se genera el tiempo (t), y # la respuesta escalon y(t). sistema = signal.TransferFunction(num, den) t, y = signal.step(sistema) # Se declara la gráfica la respuesta escalón . plt.plot(t, y, 'r') plt.title('Respuesta escalón') plt.xlabel('Tiempo $(s)$') plt.ylabel('Amplitud') plt.grid() # Se imprime en pantalla la gráfica de la respuesta escalón. plt.show() # ----------------------------------------------------------------------------- # Función para resolver un sistema de ecuaciones en forma matricial. 
# Generadas por el analisis nodal de un circuito eléctrico. def vNodos(A, B): """ Función vNodos, que resuelve un sistema de ecuaciones en forma matricial y entrega los correspondientes voltajes de nodo en base al sistema de ecuaciones del circuito. Ejemplo: vNodos(A, B) A = lista que define la matriz de coeficiente. B = lista que define la matriz del vector solución. """ # Se importan los modulos necesarios. import numpy as np from numpy.linalg import solve # Se resuelve el sistema de ecuaciones. V = solve(A, B) print('Los voltajes de nodo del circuito son:\n') # Se genera una iteración sobre cada uno de los valores de la matriz V. # Para imprimir la designación de cada voltaje con su respectivo valor. num = 0 for v in V: num += 1 print('v' + str(num), '=', np.ma.round(v[0], 3), 'Volts') # Se genera función que imprime las corrientes de lazo en forma de lista # para que puedan ser manipulados. def vNodosV(A, B): """ Función vNodos, que resuelve un sistema de ecuaciones en forma matricial y entrega los correspondientes voltajes de nodo en base al sistema de ecuaciones del circuito. Ejemplo: vNodosV(A, B) A = lista que define la matriz de coeficiente. B = lista que define la matriz del vector solución. """ # Se importan los modulos necesarios. from numpy.linalg import solve # Se resuelve el sistema de ecuaciones. return solve(A, B) # ----------------------------------------------------------------------------- # Función para resolver un sistema de ecuaciones en forma matricial. # Generadas por el analisis nodal de un circuito eléctrico. def iLazos(A, B): """ Función iLazos, que resuelve un sistema de ecuaciones en forma matricial y entrega las correspondientes corrientes de lazo en base al sistema de ecuaciones del circuito. Ejemplo: iLazos(A, B) A = lista que define la matriz de coeficiente. B = lista que define la matriz del vector solución. """ # Se importan los modulos necesarios. import numpy as np from numpy.linalg import solve # Se resuelve el sistema de ecuaciones. I = solve(A, B) print('Las corrientes de lazo del circuito son:\n') # Se genera una iteración sobre cada uno de los valores de la matriz I. # Para imprimir la designación de cada corriente con su respectivo valor. num = 0 for i in I: num += 1 print('i' + str(num), '=', np.ma.round(i[0], 3), 'Amperes') # Se genera función que imprime las corrientes de lazo en forma de lista # para que puedan ser manipulados. def iLazosV(A, B): """ Función iLazosF (Entrega lista de valores), que resuelve un sistema de ecuaciones en forma matricial y entrega las correspondientes corrientes de lazo en base al sistema de ecuaciones del circuito. Ejemplo: iLazosV(A, B) A = lista que define la matriz de coeficiente. B = lista que define la matriz del vector solución. """ # Se importan los modulos necesarios. from numpy.linalg import solve # Se resuelve el sistema de ecuaciones. return solve(A, B) # ----------------------------------------------------------------------------- # Función "cpo_RC" para generar la gráfica de en función del voltaje para # un circuito eléctrico RC de primer orden sin fuente. def c_1orden(**kwarg): """ Función para calcular y dibujar la gráfica de la curva de respuesta en un circuito RC o RL sin fuente, tomando como base a los parámetros indicados en la función. 
Ejemplo1: c_1orden(Vi=18, R=4.5 C=0.1) * Para circuitos RC Ejemplo1: c_1orden(Ii=18, R=1.4, L=0.5) * Para circuitos RL Donde: R = Resistencia del circuito t = el tiempo máximo a tomar en cuenta para la grafica *(Para mejores efectos visuales: 1 > t < 10) Vi = Voltaje inicial en el capacitor Ii = Corriente inicial en el inductor C = Valor del capacitor L = Valor del inductor """ # Se importan los módulos necesarios. import pylab import matplotlib.pyplot as plt from numpy import exp, linspace # Se determina el tamaño de la gráfica pylab.rcParams['figure.figsize'] = (9, 6.5) # Se declara el código para generar la curva de respuesta de un # circuito de primer orden. try: # Código para la curva del circuito RC. if ('Vi' in kwarg) & ('C' in kwarg): tau = kwarg['R'] * kwarg['C'] # El rango de tiempo se determina segun la teoria, que indica # que en un circuito de primer orden se llega a su estado # estable cuando el tiempo es igual a 5 veces el valor de tau. t = linspace(0, 5 * tau, num=200) # Se declara la ecuación v(t). v_t = kwarg['Vi'] * exp(-t / tau) # Se declara la ecuación de v(t) a 1/2 de la constante de # tiempo. v_t_med = kwarg['Vi'] * exp(-t / (tau * 0.5)) # Se declara la ecuación de i(x) con el doble de constante de # tiempo. v_t_dob = kwarg['Vi'] * exp(-t / (tau * 2)) # Se genera la curva de respuesta del circuito RC. plt.plot(t, v_t_med, color='green', label=r'$v/V_i(t) \ \ \ \tau = 0.5$') plt.plot( t, v_t, color='blue', label=r'$v/V_i(t) \ \ \ \tau = 1$') plt.plot( t, v_t_dob, color='red', label=r'$v/V_i(t) \ \ \ \tau = 2$') plt.legend() plt.title('Curva de respuesta del circuito RC') plt.ylabel('Tensión $(V)$') plt.xlabel('Tiempo $(s)$') plt.grid() plt.show() # Código para la curva del circuito RL. elif ('Ii' in kwarg) & ('L' in kwarg): tau = kwarg['R'] / kwarg['L'] t = linspace(0, 5 * tau, num=200) # Se declara la ecuación v(t). v_t = kwarg['Ii'] * exp(-t / tau) # Se declara la ecuación de v(t) a 1/2 de la constante de # tiempo. v_t_med = kwarg['Ii'] * exp(-t / (tau * 0.5)) # Se declara la ecuación de i(x) con el doble de constante de # tiempo. v_t_dob = kwarg['Ii'] * exp(-t / (tau * 2)) # Se genera la curva de respuesta del circuito RC. plt.plot( t, v_t_med, color='green', label=r'$i/I_i(t) \ \ \ \tau = 0.5$') plt.plot( t, v_t, color='blue', label=r'$i/I_i(t) \ \ \ \tau = 1$') plt.plot( t, v_t_dob, color='red', label=r'$i/I_i(t) \ \ \ \tau = 2$') plt.legend() plt.title('Curva de respuesta del circuito RL') plt.ylabel('Corriente $(I)$') plt.xlabel('Tiempo $(s)$') plt.grid() plt.show() else: print('¡Error de definición!') print('No ingresaste los parametros correctos.') print('Revisa los parametros ingresados.') except KeyError: print('¡ERROR de parametros!') print('Falta declarar algun parametro del circuito.') print('Revisa los parametros ingresados.') # ----------------------------------------------------------------------------- # ----------------------- MÁQUINAS ELÉCTRICAS ------------------------------ # ----------------------------------------------------------------------------- # Función para encontrar la magnitud de la fuerza inducida en un alambre, te- # niendo como datos la corriente(i), la longitud (l) y la densidad de flujo de # campo. def mLineal_CD(Vb=120, R=0.5, l=1, B=0.5): """ Función \"mLineal_CD\", util para calcular el comportamiento de una máquina lineal CD en base a los parámetros declarados. 
Ejemplo: mLineal_CD(Vb, R, l, B) Vb = Voltaje de la batería R = Resistencia del diagrama de la máquina lineal CD l = longitud del conductor en el campo magnético B = Vector de densidad de flujo magnético """ # Se importan los modulos necesarios. import pylab import matplotlib.pyplot as plt from numpy import linspace # Se determina el tamaño de la gráfica #import pylab pylab.rcParams['figure.figsize'] = (9, 6.5) # Se declara el rango de fuerzas a aplicar F = linspace(0, 50, num=50) # Se Calcula la corriente en el motor i = F / (l * B) # Se calcula el voltaje inducido eind = Vb - (i * R) # Se calcula la velocidad de la barra Vel = eind / (l * B) # Se grafica la velocidad en función de la fuerza aplicada plt.plot(F, Vel, 'b', label='Velocidad') plt.plot(F, i, 'r', label='Corriente') plt.plot(F, eind, 'g', label='Voltaje inducido') plt.title('Comportamiento de la maquina lineal CD') plt.xlabel('Fuerza (N)') plt.ylabel('Velocidad barra (m/s) / Corriente (A) / $e_{ind}$ (V)') plt.legend(loc='best') plt.grid() plt.show() # ----------------------------------------------------------------------------- # Función "compCA_GenSinc" que genera una la gráfica de la componente AC # de la corriente de falla de un generador síncrono. def compCA_GenSinc(Sbase=(100 * 10**6), Vbase=(13.8 * 10**3), Xs=1.0, X1p=0.25, X2p=0.12, T1p=1.10, T2p=0.04): """ Función \"compCA_GenSinc\" para calcular la componente CA de la corriente de falla de un generador síncrono en base a los parámetros ingresados. Ejemplo: compCA_GenSinc(Sbase, Vbase, Xs, X1p, X2p, T1p, T2p) Donde: Sbase = Potencia aparente del generador síncrono Vbase = Voltaje base del generador síncrono Xs = Reactancia síncrona del generador síncrono X1p = Reactancia transitoria X2p = Reactancia subtrancitoria T1p = Constante de tiempo de la corriente transitoria T2p = Constante de tiempo de la corriente subtrancitoria """ # Se importan los modulos necesarios. import pylab import matplotlib.pyplot as plt from numpy import linspace, pi, sin, exp, sqrt # Se determina el tamaño de la gráfica #import pylab pylab.rcParams['figure.figsize'] = (9, 6.5) # Se calcula la componente ac de la corriente t = linspace(0.0, 5.0, num=155) Ibase = Sbase / (sqrt(3) * Vbase) I2p = (1.0 / X2p) * Ibase I1p = (1.0 / X1p) * Ibase Iss = (1.0 / Xs) * Ibase It = (I2p - I1p) * exp(-t / T2p) + (I1p - Iss) * exp(-t / T1p) + Iss Isen = It * sin(2 * pi * 60 * t) # Se grafica la componente ac de la corriente plt.plot(t, It, 'r') plt.plot(t, Isen, 'b') plt.plot(t, -It, 'r') plt.title('Componente CA de corriente de falla en generador síncrono') plt.xlabel('tiempo (s)') plt.ylabel('Corriente de corto circuito (A)') plt.grid() plt.show() # ----------------------------------------------------------------------------- # Función "par_vel" que genera una la gráfica de la curva Par-Velocidad # de un motor de inducción. def par_vel(Vn=460, Polos=4, R1=0.641, X1=1.106, R2=0.332, X2=0.464, Xm=26.3): """ Función \"par_vel\" para calcular y generar la gráfica de la curva Par-Velocidad de un motor de inducción con rotor devanado y/o rotor jaula de ardilla. Ejemplo: par_vel(Vn, Polos, R1, X1, R2, X2, Xm) Donde: Vn = Voltaje nominal del motor Polos = Número de polos del motor f = Frecuencia de operación del motor R1 = Resistencia del estator X1 = Reactancia del estator R2 = Resistencia del rotor X2 = Reactancia del rotor Xm = Reactancia de magnetización """ # Se importan los modulos necesarios. 
import pylab import matplotlib.pyplot as plt from numpy import arange, pi, sqrt # Se determina el tamaño de la gráfica #import pylab pylab.rcParams['figure.figsize'] = (9, 6.5) # Se preparan las variables para el cálculo Vfase = Vn / sqrt(3) f = 60 ns = 120 * f / Polos ws = ns * (2 * pi / 1) * (1 / 60) s = arange(0.001, 1.0, 0.001) # Se calcula el voltaje y la impedancia de Thevenin Vth = Vfase * (Xm / sqrt(R1**2 + (X1 + Xm)**2)) Zth = ((1j * Xm) * (R1 + 1j * X1)) / (R1 + 1j * (X1 + Xm)) Rth = Zth.real Xth = Zth.imag # Se calcula la característica par-velocidad nm = (1 - s) * ns # Se calcula el Par para la resistencia original del rotor t_ind = (3 * Vth**2 * R2 / s) / ( ws * ((Rth + R2 / s)**2 + (Xth + X2)**2)) # Se calcula el Par para el doble de la resistencia del rotor t_ind2 = (3 * Vth**2 * (2 * R2) / s) / ( ws * ((Rth + (2 * R2) / s)**2 + (Xth + X2)**2)) # Se generan las curvas Par-Velocidad plt.plot(nm, t_ind, 'b', label='$R_2 \ $ Original') plt.plot(nm, t_ind2, 'r-.', label='$R_2 \ $ Duplicada') plt.title('Curva Par-Velocidad del motor de inducción') plt.xlabel('$n_m$, $r/min$') plt.ylabel('$\\tau_{ind} $, $N*M$') plt.legend() plt.grid() plt.show() # ----------------------------------------------------------------------------- # Función "cepTransformador" que analiza y entrega el circuito equivalente en # el lado primario de un tranformador en base a sus parametros de las pruebas # de corto circuito y circuito abierto. def cepTransformador(Voc, Ioc, Poc, Vsc, Isc, Psc): """ Función \"cepTransformador\" para calcular el circuito equivalente de un transformador en el lado primário, en función de sus parametros de las pruebas de circuito abierto y corto circuito. Ejemplo: cepTransformador(Voc, Ioc, Poc, Vsc, Isc, Psc) Donde: Voc = Voltaje en prueba de circuito abierto en el lado primario. Ioc = Corriente en prueba de circuito abierto en el lado primario. Poc = Potencia en prueba de circuito abierto en el lado primario. Vsc = Voltaje en prueba de corto circuito en el lado primario. Isc = Corriente en prueba de corto circuito en el lado primario. Psc = Potencia en prueba de corto circuito en el lado primario. """ # Se importan los módulos necesarios. from cmath import rect from numpy import arccos, sqrt import SchemDraw as schem import SchemDraw.elements as e from numpy.ma import round as roundC # Análisis de circuito abierto. FP_oc = Poc / (Voc * Ioc) # FP de circuito abierto Ye = rect(Ioc / Voc, - arccos(FP_oc)) Rc = roundC(sqrt((1 / Ye.real * (1/1000))**2), 2) Xm = roundC(sqrt((1 / Ye.imag * (1/1000))**2), 2) # Análisis de cortocircuito. FP_sc = Psc / (Vsc * Isc) # FP de cortocircuito Zse = rect(Vsc / Isc, - arccos(FP_sc)) Req = roundC(sqrt(Zse.real**2), 2) Xeq = roundC(sqrt(Zse.imag**2), 2) # Se imprimen los valores en pantalla. print('Rc', '\t', '=', '\t', Rc, 'kOhms') print('Xm', '\t', '=', '\t', Xm, 'kOhms') print('Req', '\t', '=', '\t', Req, 'Ohms') print('Xeq', '\t', '=', '\t', Xeq, 'Ohms') # Se genera el diagrama del circuito equivalente. d = schem.Drawing() # Terminal voltaje primario positivo. Vp_mas = d.add(e.DOT_OPEN, label='+') d.add(e.LINE, d='right', l=6) # Nodo 1 D1 = d.add(e.DOT) # Resistencia y Reactancia equivalente en serie. 
d.add(e.LINE, d='right', l=2, xy=D1.start) eReq = d.add(e.RES, botlabel='{} $\Omega$'.format(Req)) eReq.add_label('$R_{eq}$', loc='top') d.add(e.LINE, d='right', l=1, xy=eReq.end) eXeq = d.add(e.INDUCTOR, botlabel='j{} $\Omega$'.format(Xeq)) eXeq.add_label('$jX_{eq}$', loc='top') d.add(e.LINE, d='right', l=2, xy=eXeq.end) # Terminal voltaje secundario positivo. Vs_mas = d.add(e.DOT_OPEN, label='+') # Nodo 2 d.add(e.LINE, d='down', l=3, xy=D1.start) D2 = d.add(e.DOT) d.add(e.LINE, d='right', l=3, xy=D2.start) d.add(e.LINE, d='down', l=1) # Reactancia Xm en paralelo. eXm = d.add(e.INDUCTOR, botlabel='$j{} \ k\Omega$'.format(Xm)) eXm.add_label('$jX_m$', loc='top') d.add(e.LINE, d='down', l=1) d.add(e.LINE, d='left', l=3) # Nodo 3 D3 = d.add(e.DOT) d.add(e.LINE, d='left', l=3) d.add(e.LINE, d='up', l=1) # Resistencia Rc en paralelo eRc = d.add(e.RES, botlabel='{} k$\Omega$'.format(Rc)) eRc.add_label('$R_c$', loc='top') d.add(e.LINE, d='up', l=1) d.add(e.LINE, d='right', l=3) # Nodo 4 d.add(e.LINE, d='down', l=3, xy=D3.start) D4 = d.add(e.DOT) # Terminal Voltaje secundario negativo. d.add(e.LINE, d='right', l=11, xy=D4.start) Vs_menos = d.add(e.DOT_OPEN, label='-') # Terminal Voltaje primario negativo. d.add(e.LINE, d='left', l=6, xy=D4.start) Vp_menos = d.add(e.DOT_OPEN, label='-') # Etiqueta invisible de terminal de voltaje primario. d.add(e.GAP_LABEL, label='$V_p$', endpts=[Vp_mas.start, Vp_menos.start]) # Etiqueta invisible de terminal de voltaje secundario. d.add(e.GAP_LABEL, label='$V_s$', endpts=[Vs_mas.start, Vs_menos.start]) # Se dibuja el diagrama. d.draw() # ----------------------------------------------------------------------------- # --------------------- INSTALACIONES ELÉCTRICAS --------------------------- # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Función para calcular la corriente de la carga de un circuito eléctrico en # en diseño de instalaciones eléctricas. def imonoF(VA, Vn): """ Función para calcular la corriente de un circuito eléctrico en el diseño de instalaciones eléctricas residenciales. La función también recomienda el calibre de cable adecuado para la corriente del circuito. Sintaxis: imonoF(VA, Vn) Donde: VA = Volt-Ampere de la carga o del circuito a calcular. Vn = Voltaje nominal de fase a neutro. """ i = VA / Vn def resultado(): # Imprimir los resultados en pantalla. print('\nPara una carga de {0} VA conectada a {1} V.'.format(VA, Vn)) print('La corriente calculada es de: {} A.'.format(round(i, 2))) print('El calibre de cable recomendado es del {}, tipo THW o similar.' .format(cable)) # Determinar el calibre de conductor. # Basado en capacidad de cables tipo THW y similares. if i <= 20: cable = '#14 AWG' resultado() elif i <= 25: cable = '#12 AWG' resultado() elif i <= 35: cable = '#10 AWG' resultado() elif i <= 50: cable = '#8 AWG' resultado() elif i > 50: cable = '' print('\nLa corriente calculada es de {} A.'.format(round(i, 2))) print('El calibre del conductor será demasiado grande.') print('Considera dividir la carga del circuito.') print('O considera cambiar la tensión del circuito.') # ----------------------------------------------------------------------------- # Función para calcular la caída de tensión en un conductor usando la formula # de caída de tensión. def cTensionMono(l, I, Vn, c): """ Función cTension para calcular la caída de tensión en un conductor, utilizando la formula de caída de tensión de instalaciones eléctricas. 
Sintaxis: cTensionMono(l, I, Vn, c) Donde: l = Longitud hasta la última carga del circuito. I = Corriente que pasara en el conductor. Vn = Tensión nominal a neutro. c = Calibre del conductor. """ if c == 14: s = 2.08 #mm2 elif c == 12: s = 3.31 #mm2 elif c == 10: s = 5.26 #mm2 elif c == 8: s = 8.37 #mm2 elif c == 6: s = 13.3 #mm2 else: s = 0 if s != 0: ex100 = 4 * l * I / (Vn * s) print('\nLos datos ingresados son los siguientes:') print('La longitud del circuito es de {} Mts.'.format(l)) print('La corriente en el circuito es de {} A.'.format(I)) print('El voltaje de operación del circuito es de {} V.'.format(Vn)) print('El calibre del conductor seleccionado es del #{} AWG.' .format(c)) print('La caída de tensión del circuito es de {} %.' .format(round(ex100,2))) else: print('\nEl calibre del conductor no esta incluido o normalizado.') # ----------------------------------------------------------------------------- if __name__ == "__main__": print('Este es el modulo \"PyElectrica\" para Python.') print('Útil en la solución de problemas de Ingeniería Eléctrica.') input('Presiona <Enter> para salir.')
import csv import json import os import datetime from dotenv import load_dotenv import requests load_dotenv() def to_usd(my_price): return "${0:,.2f}".format(my_price) api_key = os.environ.get("ALPHADVANTAGE_API_KEY") #print(api_key) def get_response(symbol): request_url = f"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol={symbol}&apikey={api_key}" response = requests.get(request_url) #print(type(response)) #print(response.status_code) #print(response.text) parsed_response = json.loads(response.text) return parsed_response def transform_response(parsed_response): tsd = parsed_response ["Time Series (Daily)"] rows = [] for date, daily_prices in tsd.items(): # see: https://github.com/prof-rossetti/georgetown-opim-243-201901/blob/master/notes/python/datatypes/dictionaries.md row = { "timestamp": date, "open": float(daily_prices["1. open"]), "high": float(daily_prices["2. high"]), "low": float(daily_prices["3. low"]), "close": float(daily_prices["4. close"]), "volume": int(daily_prices["5. volume"]) } rows.append(row) return rows def buy_sell(latest_close, recent_low): if (latest_close < ((.20*recent_low)+recent_low)): return "BUY" else: return "SELL" def rec_reason(latest_close, recent_low): if (latest_close < ((.20*recent_low)+recent_low)): return "Buy shares when the latest close price is less than 20 percent above the recent low price" else: return "Sell shares when the latest close price is more than 20 percent of recent low price" def write_to_csv(rows, csv_filepath): # rows should be a list of dictionaries # csv_filepath should be a string filepath pointing to where the data should be written csv_headers = ["timestamp", "open", "high", "low", "close", "volume"] with open(csv_filepath, "w") as csv_file: writer = csv.DictWriter(csv_file, fieldnames=csv_headers) writer.writeheader() # uses fieldnames set above for row in rows: writer.writerow(row) return True if __name__ == "__main__": time_now = datetime.datetime.now() #> datetime.datetime(2019, 3, 3, 14, 44, 57, 139564) #INFORMATION INPUTS symbol = input("Please specify a stock symbol (e.g. AMZN) and press enter: ") #validate input #options = [symbol] #if symbol not in options: # print("Invalid entry. Please input a valid stock symbol") # exit() parsed_response = get_response(symbol) if "Error Message" in parsed_response: print("Ivalid entry. Please select valid symbol") exit() last_refreshed = parsed_response["Meta Data"] ["3. Last Refreshed"] rows = transform_response(parsed_response) latest_close = rows[0]["close"] high_prices = [row["high"] for row in rows] # list comprehension for mapping purposes! low_prices = [row["low"] for row in rows] # list comprehension for mapping purposes! recent_high = max(high_prices) recent_low = min(low_prices) #latest_day = dates[0] #latest_close = tsd[latest_day]["4. close"] #high_prices = [] #low_prices =[] #for target_list in dates: # high_price =tsd[latest_day]["2. high"] # high_prices.append(float(high_price)) # low_price =tsd[latest_day]["3. 
low"] # low_prices.append(float(low_price)) # recent_high = max(high_prices) #recent_low = min(low_prices) #breakpoint() #dates = list(tsd.keys()) #breakpoint() #INFORMATION OUTPUTS #csv_file_path = "data/prices.csv" # a relative filepath #csv_file_path =os.path.join(os.path.dirname(__file__), "..", "data", "prices.csv") #csv_headers = ["timestamp", "open", "high", "low", "close", "volume"] #with open(csv_file_path, "w") as csv_file: # "w" means "open the file for writing" # writer = csv.DictWriter(csv_file, fieldnames=csv_headers) # writer.writeheader() # uses fieldnames set above # for date in dates: # daily_prices = tsd[date] # writer.writerow({ # "timestamp": date, # "open": daily_prices["1. open"], # "high": daily_prices["2. high"], # "low": daily_prices["3. low"], # "close": daily_prices["4. close"], # "volume": daily_prices["5. volume"] # }) csv_file_path = os.path.join(os.path.dirname(__file__), "..", "data", "prices.csv") formatted_time_now = time_now.strftime("%Y-%m-%d %H:%M:%S") #> '2019-03-03 14:45:27' csv_file_path = csv_file_path.split("..")[1] #> data/prices.csv print("-------------------------") print(f"SYMBOL: {symbol}") print("-------------------------") print("REQUESTING STOCK MARKET DATA...") print(f"REQUEST AT: {formatted_time_now}") print("-------------------------") print(f"LATEST DAY: {last_refreshed}") print(f"LATEST CLOSE: {to_usd(float(latest_close))}") print(f"RECENT HIGH: {to_usd(float(recent_high))}") print(f"RECENT LOW: {to_usd(float(recent_low))}") print("-------------------------") recommendation = buy_sell(latest_close, recent_low) print("RECOMMENDATION: " + str(recommendation)) reason = rec_reason(latest_close, recent_low) print("RECOMMENDATION REASON: " +str(reason)) print("-------------------------") print(f"WRITING DATA TO CSV: {csv_file_path}") print("-------------------------") print("HAPPY INVESTING!") print("-------------------------")
import numpy as np
import pylab

data = np.frombuffer(open('out.bin', 'rb').read(), dtype='uint8')
dt = 1.0 / 32000 * 1000  # msec
pylab.plot(np.arange(0, len(data) * dt, dt), data)
pylab.show()
import datetime as dt import pymms from pymms.sdc import mrmms_sdc_api as sdc from pymms.sdc import selections as sel from metaarray import metabase, metaarray, metatime from matplotlib import pyplot as plt import pathlib def time_to_orbit(time, sc='mms1', delta=10): ''' Identify the orbit in which a time falls. Parameters ---------- time : `datetime.datetime` Time within the orbit sc : str Spacecraft identifier delta : int Number of days around around the time of interest in which to search for the orbit. Should be the duration of at least one orbit. Returns ------- orbit : int Orbit during which `time` occurs ''' # sdc.mission_events filters by date, and the dates are right-exclusive: # [tstart, tstop). For it to return data on the date of `time`, `time` # must be rounded up to the next day. Start the time interval greater # than one orbit prior than the start time. The desired orbit should then # be the last orbit in the list tstop = dt.datetime.combine(time.date() + dt.timedelta(days=delta), dt.time(0, 0, 0)) tstart = tstop - dt.timedelta(days=2*delta) orbits = sdc.mission_events('orbit', tstart, tstop, sc=sc) orbit = None for idx in range(len(orbits['tstart'])): if (time > orbits['tstart'][idx]) and (time < orbits['tend'][idx]): orbit = orbits['start_orbit'][idx] if orbit is None: ValueError('Did not find correct orbit!') return orbit def get_sroi(start, sc='mms1'): ''' Get the start and stop times of the SROIs, the sub-regions of interest within the orbit. Parameters ---------- start : `datetime.datetime` or int Time within an orbit or an orbit number. If time, note that the duration of the SROIs are shorter than that of the orbit so it is possible that `start` is not bounded by the start and end of the SROIs themselves. sc : str Spacecraft identifier Returns ------- tstart, tend : `datetime.datetime` Start and end time of the SROIs ''' # Convert a time stamp to an orbit number if isinstance(start, dt.datetime): start = time_to_orbit(start, sc=sc) # Get the Sub-Regions of Interest sroi = sdc.mission_events('sroi', start, start, sc=sc) return sroi['tstart'], sroi['tend'] def plot_selections_in_sroi(sc, tstart, tstop=dt.datetime.now(), outdir=None, **kwargs): # Get orbit range start_orbit = time_to_orbit(tstart) stop_orbit = time_to_orbit(tstop) outdir = pathlib.Path(outdir) if not outdir.exists(): outdir.mkdir() fname_fmt = 'burst_selections_orbit-{0}_sroi-{1}.png' # Step through each orbit for offset in range(stop_orbit-start_orbit+1): # Get the SROI start and end times orbit = start_orbit + offset sroi = sdc.mission_events('sroi', int(orbit), int(orbit), sc=sc) for i in (0,2): try: fig, axes = plot_burst_selections(sc, sroi['tstart'][i], sroi['tend'][i], **kwargs ) except Exception as e: print('Failed on orbit-{0} SROI-{1}'.format(orbit, i+1)) print(e) continue # Update title and selections limits axes[0][0].set_title('{0} Orbit {1} SROI{2}' .format(sc.upper(), orbit, i+1)) # Save the figure if outdir is not None: plt.savefig(outdir / fname_fmt.format(orbit, i+1)) plt.close(fig) def plot_sroi(sc, tstart, sroi=1): tstart, tend = get_sroi(tstart, sc) fig, axes = plot_burst_selections(sc, tstart[sroi-1], tend[sroi-1]) #fig.set_size_inches(6.5, 8) plt.show() def plot_burst_selections(sc, start_date, end_date, outdir=(pymms.config['data_root'] + '/figures/burst_selections/'), sitl_file=None, abs_file=None, gls_file=None, img_fmt=None ): figsize=(5.5, 7) mode = 'srvy' level = 'l2' # FGM b_vname = '_'.join((sc, 'fgm', 'b', 'gse', mode, level)) api = sdc.MrMMS_SDC_API(sc, 'fgm', mode, 
level, start_date=start_date, end_date=end_date) files = api.download_files() files = sdc.sort_files(files)[0] fgm_data = metaarray.from_pycdf(files, b_vname, tstart=start_date, tend=end_date) # FPI DIS fpi_mode = 'fast' ni_vname = '_'.join((sc, 'dis', 'numberdensity', fpi_mode)) espec_i_vname = '_'.join((sc, 'dis', 'energyspectr', 'omni', fpi_mode)) api = sdc.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level, optdesc='dis-moms', start_date=start_date, end_date=end_date) files = api.download_files() files = sdc.sort_files(files)[0] ni_data = metaarray.from_pycdf(files, ni_vname, tstart=start_date, tend=end_date) especi_data = metaarray.from_pycdf(files, espec_i_vname, tstart=start_date, tend=end_date) # FPI DES ne_vname = '_'.join((sc, 'des', 'numberdensity', fpi_mode)) espec_e_vname = '_'.join((sc, 'des', 'energyspectr', 'omni', fpi_mode)) api = sdc.MrMMS_SDC_API(sc, 'fpi', fpi_mode, level, optdesc='des-moms', start_date=start_date, end_date=end_date) files = api.download_files() files = sdc.sort_files(files)[0] ne_data = metaarray.from_pycdf(files, ne_vname, tstart=start_date, tend=end_date) espece_data = metaarray.from_pycdf(files, espec_e_vname, tstart=start_date, tend=end_date) # Grab selections if abs_file is None: abs_data = sel.selections('abs', start_date, end_date, sort=True, combine=True, latest=True) else: abs_data = sel.read_csv(abs_file, start_time=start_date, stop_time=end_date) if sitl_file is None: sitl_data = sel.selections('sitl+back', start_date, end_date, sort=True, combine=True, latest=True) else: sitl_data = sel.read_csv(sitl_file, start_time=start_date, stop_time=end_date) if gls_file is None: gls_data = sel.selections('gls', start_date, end_date, sort=True, combine=True, latest=True) else: gls_data = sel.read_csv(gls_file, start_time=start_date, stop_time=end_date) # SITL data time series t_abs = [start_date] x_abs = [0] for selection in abs_data: t_abs.extend([selection.start_time, selection.start_time, selection.stop_time, selection.stop_time]) x_abs.extend([0, selection.fom, selection.fom, 0]) t_abs.append(end_date) x_abs.append(0) abs = metaarray.MetaArray(x_abs, x0=metatime.MetaTime(t_abs)) t_sitl = [start_date] x_sitl = [0] for selection in sitl_data: t_sitl.extend([selection.start_time, selection.start_time, selection.stop_time, selection.stop_time]) x_sitl.extend([0, selection.fom, selection.fom, 0]) t_sitl.append(end_date) x_sitl.append(0) sitl = metaarray.MetaArray(x_sitl, x0=metatime.MetaTime(t_sitl)) t_gls = [start_date] x_gls = [0] for selection in gls_data: t_gls.extend([selection.start_time, selection.start_time, selection.stop_time, selection.stop_time]) x_gls.extend([0, selection.fom, selection.fom, 0]) t_gls.append(end_date) x_gls.append(0) gls = metaarray.MetaArray(x_gls, x0=metatime.MetaTime(t_gls)) # Set attributes to make plot pretty especi_data.plot_title = sc.upper() especi_data.title = 'DEF' especi_data.x1.title = '$E_{ion}$\n(eV)' espece_data.title = 'DEF\n(keV/(cm^2 s sr keV))' espece_data.x1.title = '$E_{e-}$\n(eV)' fgm_data.title = 'B\n(nT)' fgm_data.label = ['Bx', 'By', 'Bz', '|B|'] ni_data.title = 'N\n($cm^{-3}$)' ne_data.title = 'N\n($cm^{-3}$)' abs.lim = (0, 200) abs.title = 'ABS' gls.lim = (0, 200) gls.title = 'GLS' sitl.lim = (0, 200) sitl.title = 'SITL' # Plot fig, axes = metabase.MetaCache.plot( (especi_data, espece_data, fgm_data, ni_data, abs, gls, sitl), figsize=figsize ) plt.subplots_adjust(left=0.2, right=0.80, top=0.93) # Save the figure if img_fmt is not None: # Make sure the output directory exists outdir = pathlib.Path(outdir) 
if not outdir.exists(): outdir.mkdir(parents=True) # Create the file name fname = ('burst_selections_{0}_{1}.{2}' .format(dt.datetime.strftime(start_date, '%Y%m%d%H%M%S'), dt.datetime.strftime(end_date, '%Y%m%d%H%M%S'), img_fmt) ) plt.savefig(outdir / fname) return fig, axes if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Plot burst selections.') parser.add_argument('sc', type=str, help='MMS spacecraft identifier') parser.add_argument('tstart', type=str, help='Start of time interval, formatted as ' \ '%%Y-%%m-%%dT%%H:%%M:%%S') parser.add_argument('tend', type=str, help='End of time interval, formatted as ' \ '%%Y-%%m-%%dT%%H:%%M:%%S') parser.add_argument('--sroi', action='store_true', help='Make one plot per SROI') parser.add_argument('-d', '--directory', type=str, help='Directory in which to save figures', default=(pymms.config['data_root'] + '/figures/burst_selections')) parser.add_argument('-s', '--sitl-file', type=str, help='CSV file containing SITL selections') parser.add_argument('-a', '--abs-file', type=str, help='CSV file containing ABS selections') parser.add_argument('-g', '--gls-file', type=str, help='CSV file containing GLS selections') parser.add_argument('-f', '--fig-type', type=str, help='Type of image to create (png, jpg, etc.)') args = parser.parse_args() start_date = dt.datetime.strptime(args.tstart, '%Y-%m-%dT%H:%M:%S') end_date = dt.datetime.strptime(args.tend, '%Y-%m-%dT%H:%M:%S') if args.sroi: func = plot_selections_in_sroi else: func = plot_burst_selections fig_axes = func(args.sc, start_date, end_date, outdir=args.directory, sitl_file=args.sitl_file, abs_file=args.abs_file, gls_file=args.gls_file, img_fmt=args.fig_type) plt.show()
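# A minimal programmatic call (an illustration, not part of the module),
# equivalent to what the argparse block above builds from the command line.
# The spacecraft and interval are placeholders, and the call downloads data
# from the MMS SDC, so it assumes a configured pymms installation.
import datetime as dt
fig, axes = plot_burst_selections('mms1',
                                  dt.datetime(2020, 1, 1),
                                  dt.datetime(2020, 1, 2))
plt.show()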
from typing import Callable, Optional, Any import flax.linen as nn import jax.numpy as jnp Dtype = Any class SimCLR(nn.Module): model_cls: Callable frontend_cls: Optional[Callable] = None embedding_dim: int = 512 dtype: Dtype = jnp.float32 @nn.compact def __call__(self, inputs, train: bool = True): """ Inputs must have even numbered batch size with top N/2 indices being the anchors and bottom N/2 being corresponding positives """ outputs = inputs if self.frontend_cls is not None: outputs = self.frontend_cls(dtype=jnp.float32, name="frontend")(outputs) outputs = outputs[Ellipsis, jnp.newaxis] outputs = outputs.astype(self.dtype) encoder = self.model_cls(num_classes=None, dtype=self.dtype, name='encoder') fc = nn.Dense(self.embedding_dim, use_bias=False, name='embedding_fc') encoded = encoder(outputs, train=train) embedding = fc(encoded) return embedding
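# The SimCLR module above only returns embeddings; the contrastive loss is
# applied outside it. Below is a minimal NT-Xent sketch (an assumption, not
# part of this codebase) for a batch laid out as the docstring describes:
# the first N/2 rows are anchors and the last N/2 rows their positives.
import jax
import jax.numpy as jnp

def nt_xent_loss(embeddings, temperature=0.5):
    n = embeddings.shape[0]
    z = embeddings / jnp.linalg.norm(embeddings, axis=1, keepdims=True)
    sim = z @ z.T / temperature
    sim = jnp.where(jnp.eye(n, dtype=bool), -1e9, sim)   # mask self-similarity
    targets = (jnp.arange(n) + n // 2) % n                # row i's positive pair
    log_probs = sim - jax.nn.logsumexp(sim, axis=1, keepdims=True)
    return -jnp.mean(log_probs[jnp.arange(n), targets])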
import os.path import glob from .util import split2list from .listdataset import ListDataset from random import shuffle def make_dataset(input_dir,split): plyfiles = [] for dirs in os.listdir(input_dir): tempDir = os.path.join(input_dir,dirs) for input in glob.iglob(os.path.join(tempDir,'*.npy')): input = os.path.basename(input) root_filename = input[:-4] plyinput = dirs + '/' + root_filename + '.npy' plyfiles.append([plyinput]) if split== None: return plyfiles else: return split2list(plyfiles, split, default_split=split) def shapenet(input_root, split, co_transforms= None, input_transforms = None,args=None,give_name=False): [train_list,valid_list] = make_dataset(input_root, split) train_dataset = ListDataset(input_root,train_list,co_transforms, input_transforms,args,mode='train',give_name=give_name) shuffle(valid_list) valid_dataset = ListDataset(input_root,valid_list,co_transforms, input_transforms,args,mode='valid',give_name=give_name) return train_dataset,valid_dataset
import importlib

from django.conf import settings


def update_dns_record(dns_record, ip):
    if dns_record.provider:
        config = settings.DYNAMICDNS_PROVIDERS[dns_record.provider]
        mod_path, mod_name = config['plugin'].rsplit('.', 1)
        DnsPlugin = getattr(importlib.import_module(mod_path, package=mod_name), mod_name)
        dns_plugin = DnsPlugin(dns_record.domain, config)
        dns_plugin.update(ip)
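# Hypothetical settings entry consumed by update_dns_record above. Only the
# 'plugin' key is actually read here (a dotted path whose last component is
# the plugin class); any other keys are whatever the plugin itself expects.
DYNAMICDNS_PROVIDERS = {
    'example': {
        'plugin': 'myproject.dns_plugins.ExamplePlugin',   # hypothetical path
        'api_token': '<token used by the plugin>',          # assumed key
    },
}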
# coding=utf-8
from numpy import mat, shape, zeros


# Forward algorithm for a hidden Markov model
def hmm_forward(A, PI, B, O):
    T = shape(O)[0]   # length of the observation sequence
    N = shape(A)[1]   # number of hidden states
    alpha = mat(zeros((T, N)))
    P = 0.0
    # Initialization: alpha_1(i) = pi_i * b_i(o_1)
    for i in range(N):
        alpha[0, i] = PI[i, 0] * B[i, 0]
    # Recursion: alpha_{t+1}(i) = [sum_j alpha_t(j) * a_{ji}] * b_i(o_{t+1})
    for t in range(T - 1):
        for i in range(N):
            temp_value = 0.0
            for j in range(N):
                temp_value += alpha[t, j] * A[j, i]
            index = 0 if O[t + 1, 0] == 0 else 1
            alpha[t + 1, i] = temp_value * B[i, index]
    # Termination: P(O|lambda) = sum_i alpha_T(i)
    for i in range(N):
        P += alpha[T - 1, i]
    return P, alpha


# Backward algorithm for a hidden Markov model
def hmm_backword(A, PI, B, O):
    N = shape(A)[0]   # number of hidden states
    T = shape(O)[0]   # length of the observation sequence
    beta = mat(zeros((T, N)))
    P = 0.0
    # Initialization: beta_T(i) = 1
    beta[T - 1, :] = 1
    # Recursion: beta_t(i) = sum_j a_{ij} * b_j(o_{t+1}) * beta_{t+1}(j)
    t = T - 2
    while t >= 0:
        for i in range(N):
            temp_value = 0.0
            for j in range(N):
                index = 0 if O[t + 1, 0] == 0 else 1
                temp_value += A[i, j] * B[j, index] * beta[t + 1, j]
            beta[t, i] = temp_value
        t -= 1
    # Termination: P(O|lambda) = sum_i pi_i * b_i(o_1) * beta_1(i)
    for i in range(N):
        index = 0 if O[0, 0] == 0 else 1
        P += PI[i, 0] * B[i, index] * beta[0, i]
    return P, beta


if __name__ == "__main__":
    A = mat([[0.5, 0.2, 0.3],
             [0.3, 0.5, 0.2],
             [0.2, 0.3, 0.5]])
    B = mat([[0.5, 0.5],
             [0.4, 0.6],
             [0.7, 0.3]])
    PI = mat([[0.2], [0.4], [0.4]])
    # Observations: red, white, red (0 = red, 1 = white)
    O = mat([[0], [1], [0]])

    P, alpha = hmm_forward(A, PI, B, O)
    print(P)
    print("--------------------------------------")
    P, beta = hmm_backword(A, PI, B, O)
    print(P)
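# Sanity check (not in the original script): with the matrices above, which
# appear to be the worked example from Li Hang's "Statistical Learning
# Methods", the forward and backward passes must agree on the likelihood,
# P(O|lambda) = 0.130218.
P_f, _ = hmm_forward(A, PI, B, O)
P_b, _ = hmm_backword(A, PI, B, O)
assert abs(P_f - P_b) < 1e-12
print(round(float(P_f), 6))   # 0.130218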
from django.urls import path

from . import views

app_name = 'cars'

# /cars/
urlpatterns = [
    path('', views.rental_review, name='rental_review'),
    path('thank_you/', views.thank_you, name='thank_you'),
]
"""The sample compares different boosting algorithms on 20newsgroups dataset """ __copyright__ = """ Copyright © 2017-2021 ABBYY Production LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ __license__ = 'Apache 2.0' import neoml import numpy as np import itertools import time # Get data from sklearn.datasets import fetch_20newsgroups_vectorized train_data = fetch_20newsgroups_vectorized(subset='train') test_data = fetch_20newsgroups_vectorized(subset='test') def accuracy(model, X, y): """Returns the accuracy of model on the given data""" correct = sum(1 for label, probs in zip(y, model.classify(X)) if label == np.argmax(probs)) return float(correct)/len(y) # These arguments will be used for every builder_type shared_kwargs = { 'loss' : 'binomial', 'iteration_count' : 100, 'learning_rate' : 0.1, 'subsample' : 1., 'subfeature' : 0.25, 'random_seed' : 1234, 'max_depth' : 6, 'max_node_count' : -1, 'l1_reg' : 0., 'l2_reg' : 1., 'prune' : 0., 'thread_count' : 1, } # Train and test boosting for every builder type for builder in ['full', 'hist', 'multi_full']: start = time.time() boost_kwargs = { **shared_kwargs, 'builder_type' : builder} classifier = neoml.GradientBoost.GradientBoostClassifier(**boost_kwargs) model = classifier.train(train_data.data, train_data.target) run_time = time.time() - start acc = accuracy(model, test_data.data, test_data.target) print(f'{builder} Accuracy: {acc:.4f} Time: {run_time:.2f} sec.')
from django.test import TestCase, Client from django.urls import reverse from django.test.utils import setup_test_environment from bs4 import BeautifulSoup import re import time from projects.models import * from projects.forms import * client = Client() # length of base template, used to test for empty pages LEN_BASE = 2600 class BaseWebsiteTestCase(TestCase): def setUp(self): super() def test_homepage_load(self): url = reverse("projects:home") response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_homepage_not_empty(self): url = reverse("projects:home") response = self.client.get(url) self.assertGreater(len(response.content), LEN_BASE) def test_project_list_load(self): url = reverse("projects:projects_list") response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_project_list_not_empty(self): url = reverse("projects:projects_list") response = self.client.get(url) self.assertGreater(len(response.content), LEN_BASE) def test_project_students_load(self): url = reverse("projects:students") response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_project_students_not_empty(self): url = reverse("projects:students") response = self.client.get(url) self.assertGreater(len(response.content), LEN_BASE) def test_project_educators_load(self): url = reverse("projects:educators") response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_project_educators_not_empty(self): url = reverse("projects:educators") response = self.client.get(url) self.assertGreater(len(response.content), LEN_BASE) def test_project_leaders_load(self): url = reverse("projects:leaders") response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_project_leaders_not_empty(self): url = reverse("projects:leaders") response = self.client.get(url) self.assertGreater(len(response.content), LEN_BASE) VERBOSE = False class TraverseLinksTest(TestCase): def setUp(self): # By default, login as superuser um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD", password="tompassword") self.client = Client() self.superuser = User.objects.get(username="tom") self.client.login(username="tom", password="tompassword") @classmethod def setUpTestData(cls): pm = OpenSUTDProjectManager() um = OpenSUTDUserManager() pm.create_project(project_uid="ACAD_00001", title="OpenSUTD Web Platform", caption="Sample project 1", category="ACAD", url="https://github.com/OpenSUTD/web-platform-prototype", poster_url="https://via.placeholder.com/150", featured_image="https://via.placeholder.com/150") um.create_user("dick", display_name="Dick Tan", display_picture="https://via.placeholder.com/150", graduation_year=2019, pillar="ISTD") um.create_user("jane", display_name="Jane Tan", display_picture="https://via.placeholder.com/150", graduation_year=2021, pillar="ESD") pm.create_project(project_uid="ACAD_00002", title="RandomZZZZZ", caption="Sample project 2", category="ACAD", url="https://github.com/OpenSUTD/web-platform-prototype", poster_url="https://via.placeholder.com/150", featured_image="https://via.placeholder.com/150") pm.set_project_status("ACAD_00001", "ACCEPT") pm.add_user_to_project("ACAD_00001", "dick") pm.add_user_to_project("ACAD_00001", "jane") pm.add_tag_to_project( "ACAD_00001", "rand1,rand2,education,student,policy") pm.add_user_to_project("ACAD_00002", "jane") pm.add_tag_to_project( "ACAD_00002", 
"rand1,rand2,education,student,policy") def test_traverse_urls(self): # Fill these lists as needed with your site specific URLs to check and to avoid to_traverse_list = ["/", "/projects/", "/students/", "/educators/", "/leaders/"] to_avoid_list = ["javascript:history\.back()", "https://*", "javascript:history\.go\(-1\)", "^mailto:.*"] done_list = [] error_list = [] source_of_link = dict() for link in to_traverse_list: source_of_link[link] = "initial" (to_traverse_list, to_avoid_list, done_list, error_list, source_of_link) = \ self.recurse_into_path( to_traverse_list, to_avoid_list, done_list, error_list, source_of_link) print("END REACHED\nStats:") if VERBOSE: print("\nto_traverse_list = " + str(to_traverse_list)) if VERBOSE: print("\nto_avoid_list = " + str(to_avoid_list)) if VERBOSE: print("\nsource_of_link = " + str(source_of_link)) if VERBOSE: print("\ndone_list = " + str(done_list)) print("Followed " + str(len(done_list)) + " links successfully") print("Avoided " + str(len(to_avoid_list)) + " links") if error_list: print("!! " + str(len(error_list)) + " error(s) : ") for error in error_list: print(str(error) + " found in page " + source_of_link[error[0]]) print("Errors found traversing links") assert False else: print("No errors") def recurse_into_path(self, to_traverse_list, to_avoid_list, done_list, error_list, source_of_link): """ Dives into first item of to_traverse_list Returns: (to_traverse_list, to_avoid_list, done_list, source_of_link) """ if to_traverse_list: url = to_traverse_list.pop() if not match_any(url, to_avoid_list): print("\nSurfing to " + str(url) + ", discovered in " + str(source_of_link[url])) response = self.client.get(url, follow=True) if response.status_code == 200: soup = BeautifulSoup(response.content, "html.parser") text = soup.get_text() for link in soup.find_all("a"): new_link = link.get("href") if VERBOSE: print(" Found link: " + str(new_link)) if match_any(new_link, to_avoid_list): if VERBOSE: print(" Avoiding it") elif new_link in done_list: if VERBOSE: print(" Already done, ignoring") elif new_link in to_traverse_list: if VERBOSE: print(" Already in to traverse list, ignoring") else: if VERBOSE: print( " New, unknown link: Storing it to traverse later") source_of_link[new_link] = url to_traverse_list.append(new_link) done_list.append(url) if VERBOSE: print("Done") else: error_list.append((url, response.status_code)) to_avoid_list.append(url) if VERBOSE: print("Diving into next level") return self.recurse_into_path(to_traverse_list, to_avoid_list, done_list, error_list, source_of_link) else: # Nothing to traverse if VERBOSE: print("Returning to upper level") return to_traverse_list, to_avoid_list, done_list, error_list, source_of_link def match_any(my_string, regexp_list): if my_string: combined = "(" + ")|(".join(regexp_list) + ")" return re.match(combined, my_string) else: # "None" as string always matches return True class SecuredPageTestCase(TestCase): def setUp(self): pm = OpenSUTDProjectManager() pm.create_project(project_uid="ACAD_00001", title="OpenSUTD Web Platform", caption="Sample project 1", category="ACAD", url="https://github.com/OpenSUTD/web-platform-prototype", poster_url="https://via.placeholder.com/150", featured_image="https://via.placeholder.com/150") um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD") def test_auth_approval_view(self): url = reverse("projects:approval") response = self.client.get(url) 
self.assertEqual(response.status_code, 302) def test_auth_submit_view(self): url = reverse("projects:submit_new") response = self.client.get(url) self.assertEqual(response.status_code, 302) def test_auth_submit_reject(self): url = reverse("projects:reject", args=("ACAD_00001",)) response = self.client.get(url) self.assertEqual(response.status_code, 302) def test_auth_submit_approve(self): url = reverse("projects:approve", args=("ACAD_00001",)) response = self.client.get(url) self.assertEqual(response.status_code, 302) def test_auth_user_edit(self): url = reverse("projects:user_edit", args=("tom",)) response = self.client.get(url) self.assertEqual(response.status_code, 302) def test_auth_project_edit(self): url = reverse("projects:project_edit", args=("ACAD_00001",)) response = self.client.get(url) self.assertEqual(response.status_code, 302) def test_auth_project_bypass(self): url = reverse("projects:project_page_bypass", args=("ACAD_00001",)) response = self.client.get(url) # actually a custom 404 page self.assertEqual(response.status_code, 200) class SubmissionFormTest(TestCase): def setUp(self): self.client = Client() um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD", password="tompassword") self.client.login(username="tom", password="tompassword") def test_submission_form_entry(self): response = self.client.get(reverse("projects:submit_new")) self.assertEqual(response.status_code, 200) # test submission mechanism form = SubmissionForm({"project_name": "test", "caption": "test caption", "category": "ACAD", "featured_image": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png", "github_url": "https://github.com/OpenSUTD/web-platform-prototype", "poster_url": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png"}) self.assertEqual(form.is_valid(), True) def test_submission_form_entry_invalid(self): response = self.client.get(reverse("projects:submit_new")) self.assertEqual(response.status_code, 200) # test submission mechanism form = SubmissionForm({"project_name": "", "caption": "", "category": "", "featured_image": "", "github_url": "", "poster_url": ""}) self.assertEqual(form.is_valid(), False) def test_submission_form_entry_not_github(self): response = self.client.get(reverse("projects:submit_new")) self.assertEqual(response.status_code, 200) # test submission mechanism form = SubmissionForm({"project_name": "test", "caption": "test caption", "category": "ACAD", "featured_image": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png", "github_url": "https://lolcats.com/OpenSUTD/web-platform-prototype", "poster_url": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png"}) self.assertEqual(form.is_valid(), False) class UserProfileFormTest(TestCase): def setUp(self): self.client = Client() um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD", password="tompassword") self.client.login(username="tom", password="tompassword") def test_submission_form_entry(self): # test user can actually get to the page response = self.client.get( reverse("projects:user_edit", args=("tom",))) self.assertEqual(response.status_code, 200) # test submission mechanism form = UserProfileForm({"display_name": "tom2", "display_picture": "http://pluspng.com/img-png/user-png-icon-male-user-icon-512.png", "graduation_year": 2019, "pillar": "ISTD", 
"bio": "Hi I am Tom", "contact_email": "[email protected]", "personal_links": "tlkh.design"}) self.assertEqual(form.is_valid(), True) def test_submission_form_entry_invalid(self): # test user can actually get to the page response = self.client.get( reverse("projects:user_edit", args=("tom",))) self.assertEqual(response.status_code, 200) # test submission mechanism form = UserProfileForm({"display_name": "", "display_picture": "", "graduation_year": 2019, "pillar": "", "bio": "", "contact_email": "", "personal_links": ""}) self.assertEqual(form.is_valid(), False) class ProjectEditFormTest(TestCase): def setUp(self): self.client = Client() um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD", password="tompassword") pm = OpenSUTDProjectManager() pm.create_project(project_uid="ACAD_00001", title="OpenSUTD Web Platform", caption="Sample project 1", category="ACAD", url="https://github.com/OpenSUTD/web-platform-prototype", poster_url="https://via.placeholder.com/150", featured_image="https://via.placeholder.com/150") pm.set_project_status("ACAD_00001", "ACCEPT") self.client.login(username="tom", password="tompassword") def test_submission_form_entry_invalid(self): # test user can actually get to the page response = self.client.get( reverse("projects:project_edit", args=("ACAD_00001",))) self.assertEqual(response.status_code, 200) # test submission mechanism form = ProjectEditForm({"title": "", "caption": "", "featured_image": "", "url": "", "poster_url": ""}) self.assertEqual(form.is_valid(), False) def test_submission_form_entry(self): # test user can actually get to the page response = self.client.get( reverse("projects:project_edit", args=("ACAD_00001",))) self.assertEqual(response.status_code, 200) # test submission mechanism form = ProjectEditForm({"title": "lalalal", "caption": "lalalal", "featured_image": "lalalal.com", "url": "https://github.com/OpenSUTD/web-platform-prototype", "poster_url": "lalalal.com"}) self.assertEqual(form.is_valid(), True) class LogintoSecuredPageTestCase(TestCase): def setUp(self): self.client = Client() um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD", password="tompassword") pm = OpenSUTDProjectManager() pm.create_project(project_uid="ACAD_00001", title="OpenSUTD Web Platform", caption="Sample project 1", category="ACAD", url="https://github.com/OpenSUTD/web-platform-prototype", poster_url="https://via.placeholder.com/150", featured_image="https://via.placeholder.com/150") self.client.login(username="tom", password="tompassword") def test_login_approval_view(self): response = self.client.get(reverse("projects:approval")) self.assertEqual(response.status_code, 200) def test_login_submission_view(self): response = self.client.get(reverse("projects:submit_new")) self.assertEqual(response.status_code, 200) def test_login_user_edit(self): url = reverse("projects:user_edit", args=("tom",)) response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_login_project_edit(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") url = reverse("projects:project_edit", args=("ACAD_00001",)) response = self.client.get(url) self.assertEqual(response.status_code, 200) class UserTestCase(TestCase): def setUp(self): um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", 
display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD") um.create_user("jane", display_name="Jane Tan", display_picture="https://via.placeholder.com/150", graduation_year=2021, pillar="ESD") def test_user_get_name(self): tom = User.objects.get(username="tom") self.assertEqual(tom.display_name, "Tom Magnanti") jane = User.objects.get(username="jane") self.assertEqual(jane.display_name, "Jane Tan") def test_user_get_year(self): tom = User.objects.get(username="tom") self.assertEqual(tom.graduation_year, 2018) jane = User.objects.get(username="jane") self.assertEqual(jane.graduation_year, 2021) def test_user_get_pillar(self): tom = User.objects.get(username="tom") self.assertEqual(tom.pillar, "ISTD") jane = User.objects.get(username="jane") self.assertEqual(jane.pillar, "ESD") # test user profile page contents def test_user_page_load(self): url = reverse("projects:user", args=("tom",)) response = self.client.get(url) self.assertEqual(response.status_code, 200) url = reverse("projects:user", args=("jane",)) response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_user_page_not_empty(self): url = reverse("projects:user", args=("tom",)) response = self.client.get(url) self.assertGreater(len(response.content), LEN_BASE) url = reverse("projects:user", args=("jane",)) response = self.client.get(url) self.assertGreater(len(response.content), LEN_BASE) def test_user_page_name(self): url = reverse("projects:user", args=("tom",)) response = str(self.client.get(url).content) self.assertEqual("Tom Magnanti" in response, True) url = reverse("projects:user", args=("jane",)) response = str(self.client.get(url).content) self.assertEqual("Jane Tan" in response, True) def test_user_page_year(self): url = reverse("projects:user", args=("tom",)) response = str(self.client.get(url).content) self.assertEqual("2018" in response, True) url = reverse("projects:user", args=("jane",)) response = str(self.client.get(url).content) self.assertEqual("2021" in response, True) def test_user_page_pillar(self): url = reverse("projects:user", args=("tom",)) response = str(self.client.get(url).content) self.assertEqual("ISTD" in response, True) url = reverse("projects:user", args=("jane",)) response = str(self.client.get(url).content) self.assertEqual("ESD" in response, True) def test_user_page_performance(self): start = time.time() for i in range(10): url = reverse("projects:user", args=("tom",)) response = self.client.get(url) url = reverse("projects:user", args=("jane",)) response = self.client.get(url) duration = time.time() - start self.assertLess(duration, 1.5) class ProjectShowcaseTestCase(TestCase): def setUp(self): pm = OpenSUTDProjectManager() pm.create_project(project_uid="ACAD_00001", title="OpenSUTD Web Platform", caption="Sample project 1", category="ACAD", url="https://github.com/OpenSUTD/web-platform-prototype", poster_url="https://via.placeholder.com/150", featured_image="https://via.placeholder.com/150") um = OpenSUTDUserManager() um.create_user("tom", display_name="Tom Magnanti", display_picture="https://via.placeholder.com/150", graduation_year=2018, pillar="ISTD") um.create_user("jane", display_name="Jane Tan", display_picture="https://via.placeholder.com/150", graduation_year=2021, pillar="ESD") def test_project_properties(self): proj = Project.objects.get(project_uid="ACAD_00001") self.assertEqual(proj.title, "OpenSUTD Web Platform") def test_add_user_project(self): pm = OpenSUTDProjectManager() pm.add_user_to_project("ACAD_00001", "tom") proj = 
Project.objects.get(project_uid="ACAD_00001") self.assertEqual(len(proj.users.all()), 1) pm.add_user_to_project("ACAD_00001", "jane") self.assertEqual(len(proj.users.all()), 2) def test_add_tag_project(self): pm = OpenSUTDProjectManager() pm.add_tag_to_project("ACAD_00001", "rand1,rand2") proj = Project.objects.get(project_uid="ACAD_00001") self.assertEqual(len(proj.tags.all()), 2) def test_add_del_user_project(self): tom = User.objects.get(username="tom") jane = User.objects.get(username="jane") proj = Project.objects.get(project_uid="ACAD_00001") proj.users.add(tom) proj.users.add(jane) proj.users.remove(jane) self.assertEqual(len(proj.users.all()), 1) def test_project_page_not_approved(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "REJECT") url = reverse("projects:project_page", args=("ACAD_00001",)) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertEqual("Error 404: Page Not Found!" in str( response.content), True) self.assertGreater(len(response.content), LEN_BASE) def test_project_page_approved(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") url = reverse("projects:project_page", args=("ACAD_00001",)) response = self.client.get(url) self.assertEqual(response.status_code, 200) self.assertGreater(len(response.content), LEN_BASE) def test_project_page_name(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") url = reverse("projects:project_page", args=("ACAD_00001",)) response = str(self.client.get(url).content) self.assertEqual("OpenSUTD Web Platform" in response, True) def test_project_tag(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") pm.add_tag_to_project("ACAD_00001", "tag1,tag2") url = reverse("projects:project_page", args=("ACAD_00001",)) response = str(self.client.get(url).content) self.assertEqual("tag1" in response, True) self.assertEqual("tag2" in response, True) def test_project_page_contents(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") url = reverse("projects:project_page", args=("ACAD_00001",)) response = str(self.client.get(url).content) # print(response) # test top and bottom of contents # this does not pass on Travis for Pull Request builds # due to them disabling env variables for security reasons #self.assertEqual("Prototype for the Eventual OpenSUTD Web Platform" in response, True) #self.assertEqual("Data Model" in response, True) self.assertGreater(len(response), LEN_BASE) def test_project_page_load(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") url = reverse("projects:project_page", args=("ACAD_00001",)) response = self.client.get(url) self.assertEqual(response.status_code, 200) def test_project_page_not_empty(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") url = reverse("projects:project_page", args=("ACAD_00001",)) response = str(self.client.get(url).content) self.assertGreater(len(response), LEN_BASE) def test_project_author_name(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") pm.add_user_to_project("ACAD_00001", "tom") url = reverse("projects:project_page", args=("ACAD_00001",)) response = str(self.client.get(url).content) self.assertEqual("Tom Magnanti" in response, True) def test_project_author_pillar(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") pm.add_user_to_project("ACAD_00001", "tom") url = 
reverse("projects:project_page", args=("ACAD_00001",)) response = str(self.client.get(url).content) self.assertEqual("ISTD" in response, True) def test_project_list_page(self): pm = OpenSUTDProjectManager() pm.set_project_status("ACAD_00001", "ACCEPT") url = reverse("projects:projects_list") response = str(self.client.get(url).content) self.assertEqual("OpenSUTD Web Platform" in response, True) self.assertEqual("Sample project 1" in response, True) def test_project_page_performance(self): start = time.time() for _ in range(10): url = reverse("projects:project_page", args=("ACAD_00001",)) response = self.client.get(url) duration = time.time() - start self.assertLess(duration, 1.5)
from django.core.exceptions import ObjectDoesNotExist from cctool.common.lib.analyses.controllability import control_analysis as CA_Analysis from cctool.common.lib.analyses.controllability import controllability_visualization as CA_Visualization from cctool.common.lib.analyses.downstream import downstream_analysis as DSA_Analysis from cctool.common.lib.analyses.downstream import downstream_visualization as DSA_Visualization from cctool.common.lib.analyses.network import network_analysis as NA_Analysis from cctool.common.lib.analyses.network import network_visualization as NA_Visualization from cctool.common.lib.analyses.upstream import upstream_analysis as USA_Analysis from cctool.common.lib.analyses.upstream import upstream_visualization as USA_Visualization from cctool.graphs.models.models import Graph, Analysis, Node, NodePlus from cctool.taskapp.celery import app as cctoolapp @cctoolapp.task(bind=True) def compute_controllability_analysis(self, graph_id, analysis_id): try: graph = Graph.objects.get(pk=graph_id) except ObjectDoesNotExist: raise Exception(f'Graph object with pk: {graph_id}, does not exist!') try: analysis = Analysis.objects.get(pk=analysis_id) except ObjectDoesNotExist: raise Exception(f'Analysis object with pk: {analysis_id}, does not exist!') nodes = graph.nodes.all().select_subclasses() edges = graph.edges.all().select_subclasses() number_of_nodes = len(nodes) # Analysis connections = dict() node_controllabilities = dict() for node in nodes: node_controllabilities[node.identifier] = node.controllability sources = node.sources.all() for edge in sources: connections.setdefault(node.identifier, set()).add(edge.target.identifier) analysis_data = dict() ranked_by_node_controllability = dict() (control_configurations, stems, frequencies) = CA_Analysis.find_controllability(connections, number_of_nodes) stems = {key:(dict(map(reversed,value.items()))) for (key,value) in stems.items()} analysis_data['controlConfigurations'] = control_configurations analysis_data['stems'] = stems analysis_data['frequencies'] = frequencies (ranked_control_configurations, ranked_stems) = CA_Analysis.rank_by_node_controllability(control_configurations, stems, node_controllabilities) ranked_by_node_controllability['controlConfigurations'] = ranked_control_configurations ranked_by_node_controllability['stems'] = ranked_stems analysis_data['rankedByNodeControllability'] = ranked_by_node_controllability analysis.data = analysis_data analysis.save() # Visualization graph_options = CA_Visualization.generate_graph_options() graph_structure = dict() nodes_data = list() for node in nodes: data = node.to_json(use_dict=True) vis = CA_Visualization.generate_node_options(node, analysis) nodes_data.append(dict(**data, **vis)) edges_data = list() for edge in edges: data = edge.to_json(use_dict=True) vis = CA_Visualization.generate_edge_options(edge, analysis) edges_data.append(dict(**data, **vis)) graph_structure['nodes'] = nodes_data graph_structure['edges'] = edges_data graph_legend = CA_Visualization.generate_legend() analysis.visualization.options = graph_options analysis.visualization.structure = graph_structure analysis.visualization.legend = graph_legend analysis.visualization.save() return @cctoolapp.task(bind=True) def compute_outcome_analysis(self, graph_id, analysis_id): try: graph = Graph.objects.get(pk=graph_id) except ObjectDoesNotExist: raise Exception(f'Graph object with pk: {graph_id}, does not exist!') try: analysis = Analysis.objects.get(pk=analysis_id) except ObjectDoesNotExist: raise 
Exception(f'Analysis object with pk: {analysis_id}, does not exist!') analysis_data = dict() graph_structure = dict() root_nodes = NodePlus.objects.filter(graph=graph, tags__contains='Outcome') analysis_data['root_nodes'] = [root_node.identifier for root_node in root_nodes] analysis_data['root_nodes_labels'] = [root_node.label for root_node in root_nodes] for root_node in root_nodes: root_node_analysis_data = dict() upstream_nodes_and_levels = USA_Analysis.find_upstream_nodes(graph, [root_node]) upstream_nodes = [entry['node'] for entry in upstream_nodes_and_levels.values()] subgraph = USA_Analysis.form_upstream_subgraph(graph, upstream_nodes) root_node_analysis_data['root_node'] = root_node.identifier root_node_analysis_data['upstream_nodes_and_levels'] = {key:value['level'] for key,value in upstream_nodes_and_levels.items()} root_node_analysis_data['upstream_nodes'] = [node.identifier for node in upstream_nodes] analysis_data[root_node.identifier] = root_node_analysis_data root_node_graph_structure = dict() nodes_data = list() for node in subgraph['nodes']: data = node.to_json(use_dict=True) vis = USA_Visualization.generate_node_options(node, root_node_analysis_data) nodes_data.append(dict(**data, **vis)) edges_data = list() for edge in subgraph['edges']: data = edge.to_json(use_dict=True) vis = USA_Visualization.generate_edge_options(edge, root_node_analysis_data) edges_data.append(dict(**data, **vis)) root_node_graph_structure['nodes'] = nodes_data root_node_graph_structure['edges'] = edges_data graph_structure[root_node.identifier] = root_node_graph_structure graph_options = USA_Visualization.generate_graph_options() graph_legend = USA_Visualization.generate_legend() analysis.data = analysis_data analysis.save() analysis.visualization.options = graph_options analysis.visualization.structure = graph_structure analysis.visualization.legend = graph_legend analysis.visualization.save() return @cctoolapp.task(bind=True) def compute_intervention_analysis(self, graph_id, analysis_id): try: graph = Graph.objects.get(pk=graph_id) except ObjectDoesNotExist: raise Exception(f'Graph object with pk: {graph_id}, does not exist!') try: analysis = Analysis.objects.get(pk=analysis_id) except ObjectDoesNotExist: raise Exception(f'Analysis object with pk: {analysis_id}, does not exist!') analysis_data = dict() graph_structure = dict() root_nodes = NodePlus.objects.filter(graph=graph, tags__contains='Intervention') analysis_data['root_nodes'] = [root_node.identifier for root_node in root_nodes] analysis_data['root_nodes_labels'] = [root_node.label for root_node in root_nodes] for root_node in root_nodes: root_node_analysis_data = dict() downstream_nodes_and_levels = DSA_Analysis.find_downstream_nodes(graph, [root_node]) downstream_nodes = [entry['node'] for entry in downstream_nodes_and_levels.values()] subgraph = DSA_Analysis.form_downstream_subgraph(graph, downstream_nodes) root_node_analysis_data['root_node'] = root_node.identifier root_node_analysis_data['downstream_nodes_and_levels'] = {key:value['level'] for key,value in downstream_nodes_and_levels.items()} root_node_analysis_data['downstream_nodes'] = [node.identifier for node in downstream_nodes] analysis_data[root_node.identifier] = root_node_analysis_data root_node_graph_structure = dict() nodes_data = list() for node in subgraph['nodes']: data = node.to_json(use_dict=True) vis = DSA_Visualization.generate_node_options(node, root_node_analysis_data) nodes_data.append(dict(**data, **vis)) edges_data = list() for edge in subgraph['edges']: data = 
edge.to_json(use_dict=True) vis = DSA_Visualization.generate_edge_options(edge, root_node_analysis_data) edges_data.append(dict(**data, **vis)) root_node_graph_structure['nodes'] = nodes_data root_node_graph_structure['edges'] = edges_data graph_structure[root_node.identifier] = root_node_graph_structure graph_options = DSA_Visualization.generate_graph_options() graph_legend = DSA_Visualization.generate_legend() analysis.data = analysis_data analysis.save() analysis.visualization.options = graph_options analysis.visualization.structure = graph_structure analysis.visualization.legend = graph_legend analysis.visualization.save() return @cctoolapp.task(bind=True) def compute_network_analysis(self, graph_id, analysis_id): try: graph = Graph.objects.get(pk=graph_id) except ObjectDoesNotExist: raise Exception(f'Graph object with pk: {graph_id}, does not exist!') try: analysis = Analysis.objects.get(pk=analysis_id) except ObjectDoesNotExist: raise Exception(f'Analysis object with pk: {analysis_id}, does not exist!') analysis_data = dict() graph_structure = dict() measures = ['degree', 'in-degree', 'out-degree', 'closeness', 'betweenness'] subjective_measures = ['controllability', 'vulnerability', 'importance'] analysis_data = NA_Analysis.find_network_analysis(graph, measures, subjective_measures) for measure in analysis_data: graph_structure[measure] = dict() nodes_data = list() for node in graph.nodes.all().select_subclasses(): data = node.to_json(use_dict=True) vis = NA_Visualization.generate_node_options(node, analysis_data[measure]) nodes_data.append(dict(**data, **vis)) edges_data = list() for edge in graph.edges.all().select_subclasses(): data = edge.to_json(use_dict=True) vis = NA_Visualization.generate_edge_options(edge, analysis_data[measure]) edges_data.append(dict(**data, **vis)) graph_structure[measure]['nodes'] = nodes_data graph_structure[measure]['edges'] = edges_data graph_options = NA_Visualization.generate_graph_options() graph_legend = NA_Visualization.generate_legend() analysis.data = analysis_data analysis.save() analysis.visualization.options = graph_options analysis.visualization.structure = graph_structure analysis.visualization.legend = graph_legend analysis.visualization.save() return @cctoolapp.task(bind=True) def compute_xcs_analysis(self, graph_id, analysis_id): graph = Graph.objects.get(pk=graph_id) return
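# Minimal sketch (not in the module) of queueing one of these Celery tasks
# from application code; the primary keys are placeholders for an existing
# Graph / Analysis pair.
compute_controllability_analysis.delay(graph_id=42, analysis_id=7)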
# coding:utf8
# def_function.py


def print_two(*args):                    # accepts a variable number of arguments
    arg1, arg2 = args                    # unpack the arguments
    print("arg1: %r, arg2: %r" % (arg1, arg2))   # pass the arguments along
    return


def print_two_again(arg1, arg2):
    print("arg1: %r, arg2: %r" % (arg1, arg2))
    return


def print_one(arg1):
    print("arg1: %r" % arg1)
    return


def print_none():
    print("I got nothing.")
    return


print_two("kevin", "bob")
print_two('kevin', 'bob')
print_one("hello")
print_none()
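# Because print_two unpacks exactly two values from *args, a call with any
# other argument count fails at the unpacking step (illustration only):
try:
    print_two("kevin", "bob", "alice")
except ValueError as err:
    print(err)   # too many values to unpack (expected 2)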
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 11 17:13:51 2019

@author: Ganesh Patil
"""

num = int(input("Enter a number = "))

if num > 1:
    for i in range(2, num):
        if num % i == 0:
            print(num, "is not a prime number")
            break
    else:
        # the loop finished without finding a divisor
        print(num, "is a prime number")
else:
    print(num, "is not a prime number")
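# An equivalent check (a sketch, not in the original) that only tests
# divisors up to the integer square root of the number:
import math

def is_prime(n):
    if n < 2:
        return False
    for i in range(2, math.isqrt(n) + 1):
        if n % i == 0:
            return False
    return True

print(is_prime(7919))   # True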
import traceback


def format_exception(e):
    return traceback.format_exception_only(type(e), e)[0].rstrip()
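# Illustration (not in the original) of what the helper returns: the final
# "ExceptionType: message" line, with the trailing newline stripped.
try:
    int("not a number")
except Exception as e:
    print(format_exception(e))
    # ValueError: invalid literal for int() with base 10: 'not a number'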
prsnt_game_credits = 0 prsnt_game_profile_banner_selection = 1 prsnt_game_custom_battle_designer = 2 prsnt_game_multiplayer_admin_panel = 3 prsnt_multiplayer_welcome_message = 4 prsnt_multiplayer_team_select = 5 prsnt_multiplayer_troop_select = 6 prsnt_multiplayer_item_select = 7 prsnt_multiplayer_message_1 = 8 prsnt_multiplayer_message_2 = 9 prsnt_multiplayer_message_3 = 10 prsnt_multiplayer_round_time_counter = 11 prsnt_multiplayer_team_score_display = 12 prsnt_multiplayer_flag_projection_display = 13 prsnt_multiplayer_flag_projection_display_bt = 14 prsnt_multiplayer_destructible_targets_display = 15 prsnt_multiplayer_respawn_time_counter = 16 prsnt_multiplayer_stats_chart = 17 prsnt_multiplayer_stats_chart_deathmatch = 18 prsnt_multiplayer_escape_menu = 19 prsnt_multiplayer_poll_menu = 20 prsnt_multiplayer_show_players_list = 21 prsnt_multiplayer_show_maps_list = 22 prsnt_multiplayer_show_factions_list = 23 prsnt_multiplayer_show_number_of_bots_list = 24 prsnt_multiplayer_poll = 25 prsnt_tutorial_show_mouse_movement = 26 prsnt_name_kingdom = 27 prsnt_change_color = 28 prsnt_banner_selection = 29 prsnt_custom_banner = 30 prsnt_banner_charge_positioning = 31 prsnt_banner_charge_selection = 32 prsnt_banner_background_selection = 33 prsnt_banner_flag_type_selection = 34 prsnt_banner_flag_map_type_selection = 35 prsnt_color_selection = 36 prsnt_marshall_selection = 37 prsnt_sliders = 38 prsnt_arena_training = 39 prsnt_retirement = 40 prsnt_budget_report = 41 prsnt_game_before_quit = 42 prsnt_multiplayer_duel_start_counter = 43 prsnt_dplmc_autoloot_upgrade_management = 44 prsnt_dplmc_policy_management = 45 prsnt_dplmc_peace_terms = 46 prsnt_jrider_faction_relations_report = 47 prsnt_dplmc_set_vassal_title = 48 prsnt_dplmc_auto_sell_options = 49 prsnt_dplmc_shopping_list_of_food = 50 prsnt_jrider_character_relation_report = 51 prsnt_redefine_keys = 52 prsnt_companion_overview = 53 prsnt_modify_slots = 54 prsnt_deposit_withdraw_money = 55 prsnt_order_display = 56 prsnt_formation_mod_option = 57 prsnt_battle = 58 prsnt_food_options = 59 prsnt_world_map = 60 prsnt_three_card = 61 prsnt_dices_game = 62 prsnt_bank = 63 prsnt_bank_quickview = 64 prsnt_customize_armor = 65 prsnt_mini_map = 66 prsnt_mini_map_bar = 67 prsnt_blackjack = 68 prsnt_cstm_start_name_kingdom = 69 prsnt_cstm_view_custom_troop_tree = 70 prsnt_cstm_customise_troop = 71 prsnt_taragoth_lords_report = 72 prsnt_tournament_options_panel = 73 prsnt_tpe_ranking_display = 74 prsnt_tpe_final_display = 75 prsnt_tpe_team_display = 76 prsnt_tpe_design_settings = 77 prsnt_tpe_credits = 78 prsnt_faction_troop_tree = 79 prsnt_troop_note = 80 prsnt_mod_option = 81 prsnt_custom_character_creation = 82 prsnt_c3_kingdom_finalize = 83 prsnt_recruit_volunteers = 84
# Libraries
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework import viewsets
from functools import partial

# Local modules.
from assessment.models import Question, Assessment
from assessment.middlewares.validators.field_validators import get_object_or_404
from assessment.helpers.permission import StaffAuthenticatedPermission, AllowedUserPermission
from assessment.serializers.question import QuestionSerializer, EagerLoadQuestionSerializer
from assessment.helpers.get_all_endpoints import get_paginated_and_query_filtered_data


class QuestionViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows questions to be viewed or edited.
    """
    permission_classes = (
        IsAuthenticated,
        partial(AllowedUserPermission, ['GET'], StaffAuthenticatedPermission),
    )
    serializer_class = QuestionSerializer
    queryset = Question.objects.all()  # pylint: disable=E1101

    list = get_paginated_and_query_filtered_data(Question, QuestionSerializer,
                                                 EagerLoadQuestionSerializer)

    def destroy(self, request, pk=None):
        # Deleting a question also subtracts its mark from the parent assessment.
        question = get_object_or_404(Question, pk)
        assessment_id = question.assessments_id
        assessment = get_object_or_404(Assessment, assessment_id)
        assessment.total_mark -= question.mark
        assessment.save()
        question.delete()
        return Response(None, 204)
#!/usr/bin/env python # -*- coding:utf-8 -*- from setuptools import setup, find_packages import opps REQUIREMENTS = [i.strip() for i in open("requirements.txt").readlines()] dependency_links = [ 'http://github.com/avelino/django-googl/tarball/master#egg=django-googl', 'http://github.com/opps/opps-piston/tarball/master#egg=opps-piston', ] classifiers = [ 'Development Status :: 5 - Production/Stable', 'Environment :: Web Environment', "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Framework :: Django", 'Programming Language :: Python', "Programming Language :: Python :: 2.7", "Operating System :: OS Independent", "Topic :: Internet :: WWW/HTTP :: Dynamic Content", 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: Libraries :: Python Modules'] try: long_description = open('README.rst').read() except: long_description = opps.__description__ setup(name='opps', version=opps.__version__, description=opps.__description__, long_description=long_description, classifiers=classifiers, keywords='opps cms django apps magazines websites', author=opps.__author__, author_email=opps.__email__, url='http://oppsproject.org', download_url="https://github.com/opps/opps/tarball/master", license=opps.__license__, packages=find_packages(exclude=('doc', 'docs', 'example')), namespace_packages=['opps'], package_dir={'opps': 'opps'}, install_requires=REQUIREMENTS, dependency_links=dependency_links, scripts=['opps/bin/opps-admin.py'], include_package_data=True, test_suite='runtests')
import csv import pandas as pd import math import json import numpy as np import itertools from . import dailydata_to_weeklydata as dw from .impose_none import impose_none as impn #TODO 因為天貓的main_info有兩個資訊 我想要在使用dw的時候可以一次把兩個資訊(Volume,Price)都處理完畢 #TODO 最大值 Max_info 有回傳出去 我想要增加最小值的回傳 #TODO 所以如果是在天貓的情況Max_info Min_info 會各有兩個資料,要決定哪一個index要填入Main_info的資訊 def get_csv(datasource, mainInfo, timeperiod="week"): if datasource == "Zhongguancun": Read_data = pd.read_csv("static/data/Zhongguancun_V2.csv", encoding="utf-8") # 如果現在不是在虛擬環境下的話路徑使用 # Read_data = pd.read_csv("static/data/Zhongguancun.csv") product_info = {} product_list = Read_data["Product"].unique() for product in product_list: temp = Read_data[Read_data['Product'] == product] df = pd.pivot_table( temp, index=["Timestamp"], values=["Dealprice"], ) df['Timestamp'] = df.index tempdata = df.to_dict(orient='list') tempdata = {product:tempdata} product_info.update(tempdata) # 中國 5G 手機的資料 elif datasource == "1H2020": Read_data = pd.read_csv("static/data/1H2020.csv", encoding="utf-8") # 如果現在不是在虛擬環境下的話路徑使用 # Read_data = pd.read_csv("static/data/Zhongguancun.csv") product_info = {} product_list = Read_data["Product"].unique() for product in product_list: temp = Read_data[Read_data['Product'] == product] df = pd.pivot_table( temp, index=["Timestamp"], values=["Dealprice"], ) df['Timestamp'] = df.index # orient 使用 ='list' 就是使用 list 去把資料存起來 然後外層包一個dictionary # ex: {'Dealprice': [4488, 4488, 4488],'Timestamp':['2020-04-14', '2020-04-15', '2020-04-16']} tempdata = df.to_dict(orient='list') # print(tempdata) tempdata = {product:tempdata} product_info.update(tempdata) else: Read_data = pd.read_csv("static/data/Tmall5g.csv", encoding="utf-8") product_info = {} product_list = Read_data["Product"].unique() for product in product_list: temp = Read_data[Read_data['Product'] == product] df = pd.pivot_table( temp, index=["Timestamp"], values=["Dealnumber", "Dealprice"], aggfunc={"Dealnumber": np.sum, "Dealprice": np.mean}, ) df['Dealnumber'] = df['Dealnumber'].diff(1) df['Timestamp'] = df.index df.dropna(inplace=True) tempdata = df.to_dict(orient='list') tempdata = {product:tempdata} # dictionary 資料擴展的方法 把每個產品一個一個加到裡面 product_info.update(tempdata) if timeperiod == "week": product_info = dw.dailydata_to_weeklydata(product_info, mainInfo) # print(product_info) # 如果數據是 後來才開始 Update 的話 前面的數字要補 Nan 才能將資料對齊到對的時間 # 我們使用 impn function 來做這件事 product_info = impn(product_info,'Dealprice') max_info = [] timestamp_index_in_list = [] for key,value in product_info.items(): max_info.append(value[mainInfo]) timestamp_index_in_list.append(value['Timestamp']) # Max 函數如果碰到mutlti dimension 會先以axis = 1 的方向sum # itertools 的用法 https://docs.python.org/zh-cn/3/library/itertools.html # itertools.chain.from_iterable() 會把目標陣列展開 (flatten array) # 試看看直接計算資料裡面有沒有 NaN try: # if datasource == "1H2020": # print(pd.core.common.flatten(max_info)) max_info = max(itertools.chain.from_iterable(max_info)) except: # 如果 要計算的 list 裡面 有 Nan 要先把list 轉成 nparray 再利用 np.nanmax來忽略 nan 進行計算 max_info = list(itertools.chain.from_iterable(max_info)) max_info = np.array(max_info, dtype=np.float64) max_info = np.nanmax(max_info) power = math.floor(math.log(max_info, 10)) # 找尋長度最大的陣列 timestamp_index_in_list = max(timestamp_index_in_list, key=len) product_info["X_axis"] = timestamp_index_in_list product_info["Max_info"] = math.ceil(max_info / (10 ** power)) * (10 ** power) product_info['Main_info'] = mainInfo product_info['Time_period'] = timeperiod product_info['Step'] = 10**power/2 # print(product_info) return 
json.dumps(product_info, default=str)
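# A minimal usage sketch for get_csv above, assuming the CSV files under static/data/
# are present and that this module is importable; the module name get_csv_data below
# is an assumption, not part of the original source.
import json

from get_csv_data import get_csv  # hypothetical import path

payload = json.loads(get_csv("Zhongguancun", "Dealprice", timeperiod="week"))
# Besides one entry per product, the payload carries the chart metadata assembled
# above: the shared x axis, the rounded-up axis maximum, the main info field,
# the time period and the suggested axis step.
print(payload["X_axis"][:3], payload["Max_info"], payload["Step"])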
#%% import libraries
import numpy as np
import pandas as pd
import re
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import Pipeline, FeatureUnion
import logging
import sys
import pickle
import sqlite3
import nltk
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')


# read data from SQLite
def read_data(db_name):
    conn = sqlite3.connect(db_name)
    df = pd.read_sql('SELECT * FROM messages', conn)
    return df


def tokenize(text):
    # porter = PorterStemmer()
    lemmatizer = WordNetLemmatizer()
    inner_text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    tokens = word_tokenize(inner_text)
    stop_words = stopwords.words('english')
    tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]
    return tokens


def train_model(df):
    pipeline = Pipeline([
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier(class_weight='balanced'), n_jobs=1)),
    ])

    parameters = {
        # 'vect__ngram_range': ((1, 1), (1, 2)),
        'tfidf__use_idf': (True, False)
    }
    model = GridSearchCV(pipeline, param_grid=parameters, cv=2, verbose=1)

    X = df['message']
    Y = df[df.columns[3:]]
    X_train, X_test, y_train, y_test = train_test_split(X, Y, random_state=42)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)

    logging.info('Classification Report')
    logging.info(classification_report(y_test, y_pred))
    logging.info("Accuracy Score:")
    logging.info(accuracy_score(y_test, y_pred))
    return model


def save_model(model, file_name):
    # save the model to disk
    pickle.dump(model, open(file_name, 'wb'))


def main():
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        handlers=[
                            logging.FileHandler("train_classifier.log"),
                            logging.StreamHandler()
                        ])
    try:
        db_name = sys.argv[1]
        file_name = sys.argv[2]
    except IndexError:
        # fall back to the default locations when no arguments are given
        db_name = r"..\data\disaster_tweets.db"
        file_name = 'model.pkl'

    logging.info('Training the model...')
    model = train_model(read_data(db_name))
    logging.info('Saving the model...')
    save_model(model, file_name)


if __name__ == "__main__":
    main()

# %%
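# A minimal sketch of calling the functions above directly instead of through main();
# the database path reuses the script's own default and the pickle name is arbitrary.
df = read_data(r"..\data\disaster_tweets.db")
clf = train_model(df)            # grid-search over tfidf__use_idf, then log metrics
save_model(clf, "model.pkl")     # persist the fitted GridSearchCV object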
# Copyright 2020,2021 Sony Corporation. # Copyright 2021 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import nnabla as nn import nnabla.parametric_functions as PF import nnabla.functions as F import nnabla.initializer as I import numpy as np from functools import partial from .base import ( pf_convolution, get_channel_axis, get_spatial_shape, ) from . import registry class MobileNetBase(object): def __init__(self, num_classes=1000, test=True, depth_mul=1.0, channel_last=False): self.num_classes = num_classes self.test = test self.depth_mul = depth_mul self.channel_last = channel_last self.act_map = dict( relu=partial(F.relu, inplace=True), relu6=F.relu6, hswish=self.hswish, linear=lambda x: x) def hswish(self, x): return x * F.relu6(x + 3.0) / 6.0 def conv_bn_act(self, x, maps=32, kernel=(3, 3), stride=(1, 1), group=1, act="linear", name="conv-bn"): conv_opts = dict(stride=stride, group=group, channel_last=self.channel_last, with_bias=False) axes = [get_channel_axis(self.channel_last)] with nn.parameter_scope(name): h = pf_convolution(x, maps, kernel, **conv_opts) h = PF.batch_normalization( h, axes, batch_stat=not self.test, decay_rate=0.99) h = self.act_map[act](h) return h def conv_act(self, x, maps=32, kernel=(3, 3), stride=(1, 1), group=1, act="linear", name="conv-bn"): conv_opts = dict(stride=stride, group=group, channel_last=self.channel_last, with_bias=False) with nn.parameter_scope(name): h = pf_convolution(x, maps, kernel, **conv_opts) h = self.act_map[act](h) return h def conv_bn_relu(self, x, maps=32, kernel=(3, 3), stride=(1, 1), group=1, name="conv-bn"): h = self.conv_bn_act(x, maps, kernel, stride=stride, group=group, act="relu", name=name) return h def conv_bn_relu6(self, x, maps=32, kernel=(3, 3), stride=(1, 1), group=1, name="conv-bn"): h = self.conv_bn_act(x, maps, kernel, stride=stride, group=group, act="relu6", name=name) return h def conv_bn(self, x, maps=32, kernel=(3, 3), stride=(1, 1), group=1, name="conv-bn"): h = self.conv_bn_act(x, maps, kernel, stride=stride, group=group, act="linear", name=name) return h class MobileNetV1(MobileNetBase): def __init__(self, num_classes=1000, test=True, depth_mul=1.0, channel_last=False): super(MobileNetV1, self).__init__( num_classes, test, depth_mul, channel_last) # TODO: where to multiply depth_mul def depthwise_separable_conv(self, x, maps=32, stride=(1, 1), name="conv-ds"): c = x.shape[get_channel_axis(self.channel_last)] with nn.parameter_scope(name): h = self.conv_bn_relu( x, c, (3, 3), stride=stride, group=c, name="conv-dw") h = self.conv_bn_relu(h, maps, (1, 1), stride=( 1, 1), group=1, name="conv-pw") return h def __call__(self, x): h = self.conv_bn_relu(x, 32, stride=(2, 2), name="first-conv") h = self.depthwise_separable_conv( h, 64, stride=(1, 1), name="conv-ds-1") h = self.depthwise_separable_conv( h, 128, stride=(2, 2), name="conv-ds-2") h = self.depthwise_separable_conv( h, 128, stride=(1, 1), name="conv-ds-3") h = self.depthwise_separable_conv( h, 256, stride=(2, 2), name="conv-ds-4") h = 
self.depthwise_separable_conv( h, 256, stride=(1, 1), name="conv-ds-5") h = self.depthwise_separable_conv( h, 512, stride=(2, 2), name="conv-ds-6") h = self.depthwise_separable_conv( h, 512, stride=(1, 1), name="conv-ds-7") h = self.depthwise_separable_conv( h, 512, stride=(1, 1), name="conv-ds-8") h = self.depthwise_separable_conv( h, 512, stride=(1, 1), name="conv-ds-9") h = self.depthwise_separable_conv( h, 512, stride=(1, 1), name="conv-ds-10") h = self.depthwise_separable_conv( h, 512, stride=(1, 1), name="conv-ds-11") h = self.depthwise_separable_conv( h, 1024, stride=(2, 2), name="conv-ds-12") h = self.depthwise_separable_conv( h, 1024, stride=(1, 1), name="conv-ds-13") pool_shape = get_spatial_shape(x.shape, self.channel_last) h = F.average_pooling(h, pool_shape, channel_last=self.channel_last) h = PF.affine(h, self.num_classes, w_init=I.NormalInitializer(0.01), name="linear") return h, {} class MobileNetV2(MobileNetBase): def __init__(self, num_classes=1000, test=True, depth_mul=1.0, channel_last=False): super(MobileNetV2, self).__init__( num_classes, test, depth_mul, channel_last) self.init_maps = 32 self.settings = [ # t, c, n, s: expansion factor, maps, num blocks, stride [1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2], [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2], [6, 320, 1, 1] ] def inverted_residual(self, x, maps=32, kernel=(3, 3), stride=(1, 1), ef=6, act="relu", name="inv-resblock"): h = x c = h.shape[get_channel_axis(self.channel_last)] hmaps = round(c * ef) omaps = maps def first_layer(h): with nn.parameter_scope(name): h = self.conv_bn_relu6( h, hmaps, kernel, stride=stride, group=hmaps, name="conv-dw") h = self.conv_bn(h, omaps, (1, 1), stride=( 1, 1), name="conv-pw-linear") return h def other_layer(h): h = self.conv_bn_relu6( h, hmaps, (1, 1), stride=(1, 1), name="conv-pw") h = self.conv_bn_relu6( h, hmaps, kernel, stride=stride, group=hmaps, name="conv-dw") h = self.conv_bn(h, omaps, (1, 1), stride=( 1, 1), name="conv-pw-linear") return h with nn.parameter_scope(name): if ef == 1: h = first_layer(h) else: h = other_layer(h) use_res_connect = (stride == (1, 1) and c == omaps) if use_res_connect: h = x + h return h def __call__(self, x): # First conv h = self.conv_bn_relu6(x, int(self.init_maps * self.depth_mul), stride=(2, 2), name="first-conv") # Inverted residual blocks for i, elm in enumerate(self.settings): t, c, n, s = elm # TODO: where to multiply depth_mul c = round(c * self.depth_mul) mbconv_s = partial(self.inverted_residual, maps=c, stride=(s, s), ef=t) mbconv_1 = partial(self.inverted_residual, maps=c, stride=(1, 1), ef=t) for j in range(n): name = "mbconv-{:02d}-{:02d}".format(i, j) h = mbconv_s(h, name=name) if j == 0 else mbconv_1( h, name=name) # Last conv h = self.conv_bn_relu6(h, int(1280 * self.depth_mul), kernel=(1, 1), name="last-conv") # Classifier if not self.test: h = F.dropout(h, 0.2) pool_shape = get_spatial_shape(x.shape, self.channel_last) h = F.average_pooling(h, pool_shape, channel_last=self.channel_last) h = PF.affine(h, self.num_classes, w_init=I.NormalInitializer(0.01), name="linear") return h, {} class MobileNetV3(MobileNetBase): def __init__(self, num_classes=1000, test=True, depth_mul=1.0, mode="large", channel_last=False): super(MobileNetV3, self).__init__( num_classes, test, depth_mul, channel_last) self.mode = mode if mode not in ["large", "small"]: raise ValueError( "mode should be in [{}, {}]".format("large", "small")) large_settings = [ # maps, kernel, stride, expansion factor, activation, se [16, (3, 3), (1, 1), 1, "relu", 
False], [24, (3, 3), (2, 2), 4, "relu", False], [24, (3, 3), (1, 1), 3, "relu", False], [40, (5, 5), (2, 2), 3, "relu", True], [40, (5, 5), (1, 1), 3, "relu", True], [40, (5, 5), (1, 1), 3, "relu", True], [80, (3, 3), (2, 2), 6, "hswish", False], [80, (3, 3), (1, 1), 2.5, "hswish", False], [80, (3, 3), (1, 1), 2.3, "hswish", False], [80, (3, 3), (1, 1), 2.3, "hswish", False], [112, (3, 3), (1, 1), 6, "hswish", True], [112, (3, 3), (1, 1), 6, "hswish", True], [160, (5, 5), (2, 2), 6, "hswish", True], [160, (5, 5), (1, 1), 6, "hswish", True], [160, (5, 5), (1, 1), 6, "hswish", True] ] small_settings = [ # maps, kernel, stride, expansion, activation, se [16, (3, 3), (2, 2), 1, "relu", True], [24, (3, 3), (2, 2), 4.5, "relu", False], [24, (3, 3), (1, 1), 3.66, "relu", False], [40, (5, 5), (2, 2), 4, "hswish", True], [40, (5, 5), (1, 1), 6, "hswish", True], [40, (5, 5), (1, 1), 6, "hswish", True], [48, (5, 5), (1, 1), 3, "hswish", True], [48, (5, 5), (1, 1), 3, "hswish", True], [96, (5, 5), (2, 2), 6, "hswish", True], [96, (5, 5), (1, 1), 6, "hswish", True], [96, (5, 5), (1, 1), 6, "hswish", True], ] self.maps0 = 16 self.maps1 = 960 if mode == "large" else 576 self.maps2 = 1280 if mode == "large" else 1024 self.settings = large_settings if mode == "large" else small_settings def squeeze_and_excite(self, x, rr=4, name="squeeze-and-excite"): s = x c = x.shape[get_channel_axis(self.channel_last)] cr = c // rr conv_opts = dict(channel_last=self.channel_last, with_bias=True) pool_shape = get_spatial_shape(x.shape, self.channel_last) h = F.average_pooling(x, pool_shape, channel_last=self.channel_last) with nn.parameter_scope(name): with nn.parameter_scope("fc1"): h = pf_convolution(h, cr, (1, 1), **conv_opts) h = F.relu(h) with nn.parameter_scope("fc2"): h = pf_convolution(h, c, (1, 1), **conv_opts) h = F.hard_sigmoid(h) h = h * s return h def inverted_residual(self, x, maps=32, kernel=(3, 3), stride=(1, 1), ef=6, act="relu", se=False, name="inv-resblock"): h = x c = h.shape[get_channel_axis(self.channel_last)] hmaps = round(c * ef) omaps = maps with nn.parameter_scope(name): h = self.conv_bn_act(h, hmaps, (1, 1), (1, 1), act=act, name="conv-pw") if ef != 1 else h h = self.conv_bn_act(h, hmaps, kernel, stride, group=hmaps, act=act, name="conv-dw") h = self.squeeze_and_excite( h, name="squeeze-and-excite") if se else h h = self.conv_bn(h, omaps, (1, 1), stride=( 1, 1), name="conv-pw-linear") use_res_connect = (stride == (1, 1) and c == omaps) if use_res_connect: h = x + h return h def __call__(self, x): # First conv h = self.conv_bn_act(x, int(self.maps0 * self.depth_mul), stride=(2, 2), act="hswish", name="first-conv") # Inverted residual blocks for i, elm in enumerate(self.settings): maps, kernel, stride, ef, act, se = elm maps = round(maps * self.depth_mul) name = "mbconv-{:03d}".format(i) h = self.inverted_residual( h, maps, kernel, stride, ef, act, se, name=name) # Conv -> Avepool -> Conv h = self.conv_bn_act(h, int(self.maps1 * self.depth_mul), (1, 1), act="hswish", name="last-conv-1") pool_shape = get_spatial_shape(x.shape, self.channel_last) h = F.average_pooling(h, pool_shape, channel_last=self.channel_last) h = self.conv_act(h, int(self.maps2 * self.depth_mul), (1, 1), act="hswish", name="last-conv-2") # Classifier if not self.test: h = F.dropout(h, 0.2) h = PF.affine(h, self.num_classes, w_init=I.NormalInitializer(0.01), name="linear") return h, {} def mobilenet_v1(x, num_classes=1000, test=True, depth_mul=1.0, channel_last=False): net = MobileNetV1(num_classes, test, depth_mul, 
channel_last) return net(x) def mobilenet_v2(x, num_classes=1000, test=True, depth_mul=1.0, channel_last=False): net = MobileNetV2(num_classes, test, depth_mul, channel_last) return net(x) def mobilenet_v3_large(x, num_classes=1000, test=True, depth_mul=1.0, channel_last=False): net = MobileNetV3(num_classes, test, depth_mul, "large", channel_last) return net(x) def mobilenet_v3_small(x, num_classes=1000, test=True, depth_mul=1.0, channel_last=False): net = MobileNetV3(num_classes, test, depth_mul, "small", channel_last) return net(x) # Register arch functions for v in ["v1", "v2", "v3_large", "v3_small"]: fn_name = 'mobilenet_{}'.format(v) registry.register_arch_fn(fn_name, eval(fn_name))
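# A minimal sketch of building one of the registered MobileNet graphs with NNabla,
# assuming this file is importable as models.mobilenet (the module path is an
# assumption); the input follows the usual ImageNet NCHW convention.
import nnabla as nn

from models.mobilenet import mobilenet_v2  # hypothetical import path

x = nn.Variable((1, 3, 224, 224))
# Each arch function returns the logits variable and an (empty) dict of extras.
y, _ = mobilenet_v2(x, num_classes=1000, test=True, depth_mul=1.0, channel_last=False)
print(y.shape)  # expected: (1, 1000)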
from ._neural_net_regressor import NN_Regressor from ._rf_regressor import RF_Regressor from ._rf_classifier import RF_Classifier
import sys import pathlib from django.core.management import execute_from_command_line from cobl import configure if __name__ == "__main__": ci, cfg_path = 0, None for i, arg in enumerate(sys.argv): if arg.endswith('.ini') and pathlib.Path(arg).exists(): ci = i break if ci: cfg_path = pathlib.Path(sys.argv.pop(ci)) configure(cfg_path) execute_from_command_line(sys.argv)
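# A small sketch of the argument handling above, assuming this file is the project's
# manage.py-style entry point (its actual file name is not shown here): the first
# argument that ends in ".ini" and exists on disk is popped from sys.argv and passed
# to configure(), so
#
#   python manage.py migrate dev.ini
#
# behaves like configure(pathlib.Path("dev.ini")) followed by
# execute_from_command_line(["manage.py", "migrate"]).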
# ------------------------------
# 560. Subarray Sum Equals K
#
# Description:
# Given an array of integers and an integer k, you need to find the total number of
# continuous subarrays whose sum equals to k.
#
# Example 1:
# Input: nums = [1,1,1], k = 2
# Output: 2
#
# Note:
# The length of the array is in range [1, 20,000].
# The range of numbers in the array is [-1000, 1000] and the range of the integer k
# is [-1e7, 1e7].
#
# Version: 1.0
# 10/29/19 by Jianfa
# ------------------------------

from typing import List


class Solution:
    def subarraySum(self, nums: List[int], k: int) -> int:
        preSum = 0
        count = 0
        sumDict = {0: 1}  # so a prefix whose sum is exactly k is counted via sumDict[0]
        for n in nums:
            preSum += n
            if preSum - k in sumDict:
                # If some prefix nums[:i] sums to preSum - k, then the subarray
                # between nums[i] and the current number sums to k.
                count += sumDict[preSum - k]
            sumDict[preSum] = sumDict.get(preSum, 0) + 1

        return count

# Used for testing
if __name__ == "__main__":
    test = Solution()

# ------------------------------
# Summary:
# Idea from: https://leetcode.com/problems/subarray-sum-equals-k/discuss/102106/Java-Solution-PreSum-%2B-HashMap
# I thought about the accumulated sum, but I didn't think about using a map to store
# previous accumulated sums, which makes it easy to check whether the difference
# between the current sum and some earlier sum equals k.
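# A short worked example of the prefix-sum + dict idea above, using the sample input
# from the problem statement.
#
# nums = [1, 1, 1], k = 2, sumDict starts as {0: 1}
#   n=1: preSum=1, 1-2=-1 not in sumDict,        sumDict={0:1, 1:1}
#   n=1: preSum=2, 2-2=0  in sumDict (count=1),  sumDict={0:1, 1:1, 2:1}
#   n=1: preSum=3, 3-2=1  in sumDict (count=2),  sumDict={0:1, 1:1, 2:1, 3:1}
# so count == 2, matching the expected output.
assert Solution().subarraySum([1, 1, 1], 2) == 2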
# from . import StorageMetadataDTO
#!/usr/bin/env python import re import subprocess import sys from typing import Final from .common_types import HtmlCode ## Depends on having Calibre's `ebook-convert` in your PATH. ## On MacOS, install Calibre then: ## export PATH="${PATH}:/Applications/calibre.app/Contents/MacOS" ## On Debian, `sudo apt install calibre` should do it. EBOOK_CONVERT: Final = "ebook-convert" def ebook_convert(src: str) -> None: with open(src) as f: contents = HtmlCode(f.read()) info: dict[str, str] = {} for field in ["authors", "comments", "title"]: m = re.search(rf"^{field}: (.*)$", contents, flags=re.M) if m: info[field] = m.group(1) name = re.sub(r"\.html$", "", src) for fmt in ["epub"]: # ["mobi", "epub"]: output = "%s.%s" % (name, fmt) options = [ "--chapter", "//*[name()='h1' or name()='h2' or name()='h3']", "--language", "en_us", ] for field, value in info.items(): options += ["--" + field, value] cmd = [EBOOK_CONVERT, src, output] + options print(cmd) subprocess.run(cmd, check=True) if __name__ == "__main__": for arg in sys.argv[1:]: ebook_convert(arg)
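# A minimal usage sketch for ebook_convert above; "book.html" is a hypothetical input
# file containing "title:", "authors:" and "comments:" metadata lines, and the call
# shells out to Calibre's ebook-convert to produce book.epub next to it.
ebook_convert("book.html")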
class Solution(object):
    # @param A : tuple of integers
    # @return an integer
    def maxSubArray(self, A):
        running_sum = 0
        max_sum = float("-inf")
        # Kadane's algorithm: scan every element (including A[0]), extending the
        # running sum and resetting it whenever it drops below zero.
        for i in range(len(A)):
            running_sum += A[i]
            if running_sum > max_sum:
                max_sum = running_sum
            if running_sum < 0:
                running_sum = 0
        return max_sum


A = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
s = Solution()
print(s.maxSubArray(A))
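# Worked check for the sample input above: the best contiguous subarray is
# [4, -1, 2, 1], so maxSubArray should return 6.
assert s.maxSubArray(A) == 6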
from django.db.models.signals import post_save from django.dispatch.dispatcher import receiver from plans.models import Order, Invoice, UserPlan, Plan from plans.signals import order_completed, activate_user_plan from accounts.models import User @receiver(post_save, sender=Order) def create_proforma_invoice(sender, instance, created, **kwargs): """ For every Order if there are defined billing_data creates invoice proforma, which is an order confirmation document """ if created: Invoice.create(instance, Invoice.INVOICE_TYPES['PROFORMA']) @receiver(order_completed) def create_invoice(sender, **kwargs): Invoice.create(sender, Invoice.INVOICE_TYPES['INVOICE']) @receiver(post_save, sender=Invoice) def send_invoice_by_email(sender, instance, created, **kwargs): if created: instance.send_invoice_by_email() @receiver(post_save, sender=User) def set_default_user_plan(sender, instance, created, **kwargs): """ Creates default plan for the new user but also extending an account for default grace period. """ if created: default_plan = Plan.get_default_plan() if default_plan is not None: UserPlan.objects.create(user=instance, plan=default_plan, active=False, expire=None) # Hook to django-registration to initialize plan automatically after user has confirm account @receiver(activate_user_plan) def initialize_plan_generic(sender, user, **kwargs): try: user.userplan.initialize() except UserPlan.DoesNotExist: return try: from registration.signals import user_activated @receiver(user_activated) def initialize_plan_django_registration(sender, user, request, **kwargs): try: user.userplan.initialize() except UserPlan.DoesNotExist: return except ImportError: pass # Hook to django-getpaid if it is installed try: from getpaid.signals import user_data_query @receiver(user_data_query) def set_user_email_for_getpaid(sender, order, user_data, **kwargs): user_data['email'] = order.user.email except ImportError: pass
# coding: utf-8 """ Notation and containers for multi-dimensional MIDI style data """ from functools import total_ordering from numpy import array, dot, exp, log from .temperament import JI_5LIMIT, mod_comma, canonize, canonize2, JI_ISLAND, JI_7LIMIT, JI_11LIMIT, JI_3_7, canonize_3_7, canonize2_3_7, canonize_7_11, JI_7_11 from .util import note_unicode, rwh_primes1, append_prime from hewmp.parser import parse_text, realize from hewmp.event import Note as HEWMPNote LYDIAN = ("F", "C", "G", "D", "A", "E", "B") LYDIAN_INDEX_A = LYDIAN.index("A") REFERENCE_OCTAVE = 4 def notate(threes, fives, twos=None, horogram="JI"): """ Gives the notation for a 5-limit pitch vector in terms of letter, sharp signs, arrows and octaves. """ if horogram == "JI": index = LYDIAN_INDEX_A + threes + fives*4 sharps = index // len(LYDIAN) letter = LYDIAN[index % len(LYDIAN)] arrows = -fives if twos is None: if horogram == "JI": return letter, sharps, arrows if horogram == "dicot": permutation = [0, 4, 1, 5, 2, 6, 3] num = fives + 2*threes return notate((num//len(permutation))*len(permutation) + permutation[num % len(permutation)], 0, horogram="JI") if horogram == "blackwood": threes = threes - ((threes + 1)//5)*5 if threes == 3: return notate(threes + 4*fives + 4, -1, horogram="JI") return notate(threes + 4*fives, 0, horogram="JI") if horogram == "magic": fifths_19edo = [0, 7, 14, 2, 9, 16, 4, 11, 18, 6, 13, 1, 8, 15, 3, 10, 17, 5, 12] index = fives + 5*threes edo19 = (threes*30 + fives*44) % 19 meantone = (fifths_19edo[edo19] + 9) % 19 - 9 arrows = index // 19 return notate(meantone + arrows*4, -arrows, horogram="JI") if horogram == "würschmidt": fifths_31edo = [0, 19, 7, 26, 14, 2, 21, 9, 28, 16, 4, 23, 11, 30, 18, 6, 25, 13, 1, 20, 8, 27, 15, 3, 22, 10, 29, 17, 5, 24, 12] index = fives + 8*threes edo31 = (threes*49 + fives*72) % 31 meantone = (fifths_31edo[edo31] + 11) % 31 - 11 arrows = index // 31 return notate(meantone + arrows*4, -arrows, horogram="JI") threes, fives = canonize(threes, fives, horogram=horogram) return notate(threes, fives, horogram="JI") if horogram == "JI": edo12 = twos*12 + threes*19 + fives*28 octaves = REFERENCE_OCTAVE + (edo12 + 9)//12 return letter, sharps, arrows, octaves if horogram == "magic": letter, sharps, arrows = notate(threes, fives, horogram="magic") index = fives + 5*threes corrections = [0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6] correction = 2*index + (index // 19)*6 + corrections[index % 19] return letter, sharps, arrows, twos + REFERENCE_OCTAVE + correction twos, threes, fives = canonize2(twos, threes, fives, horogram=horogram) return notate(threes, fives, twos=twos, horogram="JI") def notate_island(threes, supermajors, twos=None, horogram="JI", flatward=False): """ Gives the notation for a 2.3.13/5 subgroup pitch vector in terms of letter, (half) sharp signs, arrows and octaves. 
""" if horogram == "JI": if flatward: index = LYDIAN_INDEX_A + threes + supermajors*4 - ((supermajors+1)//2)*5 sharps = index // len(LYDIAN) - 0.5*(supermajors % 2) letter = LYDIAN[index % len(LYDIAN)] arrows = (supermajors + 1)//2 else: index = LYDIAN_INDEX_A + threes + supermajors*4 - (supermajors//2)*5 sharps = index // len(LYDIAN) + 0.5*(supermajors % 2) letter = LYDIAN[index % len(LYDIAN)] arrows = supermajors // 2 if twos is None: if horogram == "JI": return letter, sharps, arrows if horogram == "barbados": letter, sharps, arrows = notate_island(threes, supermajors, horogram="JI") return letter, sharps, 0 if horogram == "JI": edo24 = twos*24 + threes*38 + supermajors*33 octaves = REFERENCE_OCTAVE + (edo24 + 18)//24 return letter, sharps, arrows, octaves if horogram == "barbados": letter, sharps, arrows, octaves = notate_island(threes, supermajors, twos=twos, horogram="JI") return letter, sharps, 0, octaves raise ValueError("Unknown temperament") def notate_3_7(threes, sevens, twos=None, horogram="JI"): """ Gives the notation for a 2.3.7 subgroup pitch vector in terms of letter, sharp signs, (sagittal septimal) arrows and octaves. """ if horogram == "JI": index = LYDIAN_INDEX_A + threes - sevens*2 sharps = index // len(LYDIAN) letter = LYDIAN[index % len(LYDIAN)] arrows = -sevens if twos is None: if horogram == "JI": return letter, sharps, arrows if horogram == "slendric": fifths_5edo = [0, 2, 4, 1, 3] index = sevens -3*threes edo5 = (threes*8 + sevens*14) % 5 archy = (fifths_5edo[edo5] + 2) % 5 - 2 arrows = index // 5 return notate_3_7(archy - arrows*2, arrows, horogram="JI") threes, sevens = canonize_3_7(threes, sevens, horogram=horogram) return notate_3_7(threes, sevens, horogram="JI") if horogram == "JI": edo12 = twos*12 + threes*19 + sevens*34 octaves = REFERENCE_OCTAVE + (edo12 + 9)//12 return letter, sharps, arrows, octaves twos, threes, sevens = canonize2_3_7(twos, threes, sevens, horogram=horogram) return notate_3_7(threes, sevens, twos=twos, horogram="JI") DARK_24EDO = { 0: ("C", -0.0), 1: ("D", -1.5), 2: ("D", -1.0), 3: ("D", -0.5), 4: ("D", -0.0), 5: ("E", -1.5), 6: ("E", -1.0), 7: ("E", -0.5), 8: ("E", -0.0), 9: ("F", -0.5), 10: ("F", -0.0), 11: ("G", -1.5), 12: ("G", -1.0), 13: ("G", -0.5), 14: ("G", -0.0), 15: ("A", -1.5), 16: ("A", -1.0), 17: ("A", -0.5), 18: ("A", -0.0), 19: ("B", -1.5), 20: ("B", -1.0), 21: ("B", -0.5), 22: ("B", -0.0), 23: ("C", -0.5), } LIGHT_24EDO = { 0: ("C", 0.0), 1: ("C", 0.5), 2: ("C", 1.0), 3: ("C", 1.5), 4: ("D", 0.0), 5: ("D", 0.5), 6: ("D", 1.0), 7: ("D", 1.5), 8: ("E", 0.0), 9: ("E", 0.5), 10: ("F", 0.0), 11: ("F", 0.5), 12: ("F", 1.0), 13: ("F", 1.5), 14: ("G", 0.0), 15: ("G", 0.5), 16: ("G", 1.0), 17: ("G", 1.5), 18: ("A", 0.0), 19: ("A", 0.5), 20: ("A", 1.0), 21: ("A", 1.5), 22: ("B", 0.0), 23: ("B", 0.5), } def notate_7_11(sevens, elevens, twos=None, horogram="JI"): """ Gives the notation for a 2.7.11 subgroup pitch vector in terms of letter, (half) sharp signs, (frostburn?) arrows and octaves. """ # Note: Centers around C TODO: Consider centering around A if horogram == "JI": index = elevens + 4*sevens edo24 = 83*index if index < 0: stratum = 1 + index // 24 letter, sharps = DARK_24EDO[(edo24 - 4*stratum)%len(DARK_24EDO)] if stratum: # Preserve signed zeros sharps += 2*stratum else: stratum = index // 24 # Note: Theres some room to do {edo24 - 2*stratum; sharps += stratum} here as B# is unused, but not enough it turns out. 
letter, sharps = LIGHT_24EDO[(edo24 - 4*stratum)%len(LIGHT_24EDO)] sharps += 2*stratum arrows = sevens if twos is None: if horogram == "JI": return letter, sharps, arrows if horogram == "orga": superfourths_31edo = [0, 20, 9, 29, 18, 7, 27, 16, 5, 25, 14, 3, 23, 12, 1, 21, 10, 30, 19, 8, 28, 17, 6, 26, 15, 4, 24, 13, 2, 22, 11] index = sevens + 8*elevens edo31 = (sevens*87 + elevens*107) % 31 frostburn = (superfourths_31edo[edo31] + 11) % 31 - 11 arrows = index // 31 return notate_7_11(-arrows, frostburn + arrows*4, horogram="JI") sevens, elevens = canonize_7_11(sevens, elevens, horogram=horogram) return notate_7_11(sevens, elevens, horogram="JI") if horogram == "JI": edo24 = 24*twos + 83*elevens + 67*sevens # TODO: Figure out if this is right at all octaves = REFERENCE_OCTAVE + edo24//24 return letter, sharps, arrows, octaves raise ValueError("Unknown temperament") def note_unicode_5limit(threes, fives, twos=None, horogram="JI"): octaves = None if twos is None: letter, sharps, arrows = notate(threes, fives, horogram=horogram) else: letter, sharps, arrows, octaves = notate(threes, fives, twos=twos, horogram=horogram) return note_unicode(letter, sharps, arrows, octaves) PRIMES = rwh_primes1(100) class JustIntonation: def __init__(self, num_primes, base_freq=440): while len(PRIMES) < num_primes: append_prime(PRIMES) self.mapping = log(PRIMES[:num_primes]) self.base_freq = base_freq def pitch_to_freq_rads(self, pitch): return exp(dot(pitch, self.mapping)) * self.base_freq, 0 JI = {} class EqualTemperament: def __init__(self, divisions=12, divided=2, base_freq=440): self.divisions = divisions self.divided = divided self.base_freq = base_freq def pitch_to_freq_rads(self, pitch): return self.base_freq * self.divided ** (pitch / self.divisions) @total_ordering class Note: """ Notes carry pitch vector, duration, note on time, note on velocity and note off velocity data. 
A Note instance is associated with a tuning for interpreting pitch vectors as frequencies """ def __init__(self, pitch=None, duration=None, time=None, velocity=0.7, off_velocity=0.5, tuning=None): self.pitch = pitch self.duration = duration self.time = time self.velocity = velocity self.off_velocity = off_velocity if tuning is None and not (isinstance(pitch, int) or isinstance(pitch, float)): if len(pitch) not in JI: JI[len(pitch)] = JustIntonation(len(pitch)) tuning = JI[len(pitch)] self.tuning = tuning @property def off_time(self): if self.duration is None or self.time is None: return None return self.time + self.duration @off_time.setter def off_time(self, value): self.duration = value - self.time @property def freq(self): if self.pitch is None: return None return self.tuning.pitch_to_freq_rads(self.pitch)[0] @property def rads(self): if self.pitch is None: return None return self.tuning.pitch_to_freq_rads(self.pitch)[1] def __lt__(self, other): return self.freq < other.freq def __eq__(self, other): return self.tuning.equals(self.pitch, other.pitch) def __repr__(self): try: p = tuple(self.pitch) except TypeError: p = self.pitch return "{}({}, {}, {}, {}, {})".format(self.__class__.__name__, p, self.duration, self.time, self.velocity, self.off_velocity) def copy(self): return self.__class__(self.pitch[:], self.duration, self.time, self.velocity, self.off_velocity, self.tuning) class HEWMPWrapper: def __init__(self, base): self.base = base @property def freq(self): return float(self.base.real_frequency) @property def duration(self): return float(self.base.real_gate_length) @property def time(self): return float(self.base.real_time) @property def off_time(self): return self.time + self.duration @property def velocity(self): return float(self.base.velocity) @property def rads(self): return float(self.base.pitch.phase) def sonorities(notes, tolerance=1e-6): """ Break notes into groups that sound together. """ notes = list(sorted(notes, key=lambda n: n.time)) result = [] sonority = [] while notes: time = notes[0].time for note in sonority[:]: if note.off_time < time + tolerance: sonority.remove(note) while notes and abs(notes[0].time - time) < tolerance: sonority.append(notes.pop(0)) result.append((time, sonority[:])) if sonority: off_time = max(note.off_time for note in sonority) result.append((off_time, [])) return result def from_midi(filename): import mido midi = mido.MidiFile(filename) results = [] for track in midi.tracks: result = [] note_ons = {} current_time = 0 for message in track: current_time += message.time if message.type == "note_on": note_ons[(message.channel, message.note)] = (current_time, message.velocity) if message.type == "note_off": on_time, on_velocity = note_ons.pop((message.channel, message.note)) result.append(Note(message.note, current_time - on_time, on_time, on_velocity, message.velocity)) for key, value in note_ons.items(): channel, note = key on_time, on_velocity = value result.append(Note(note, current_time - on_time, on_time, on_velocity)) results.append(result) return results def from_hewmp(text): tracks, _ = parse_text(text) tracks = realize(tracks) notes = [] for track in tracks: for event in track.events: if isinstance(event, HEWMPNote): notes.append(HEWMPWrapper(event)) return notes
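# A small usage sketch for the 5-limit notation helpers above. The pitch vector
# (threes=0, fives=1) is the just major third 5/4 above the reference A; in this
# arrow notation it should come out as C sharp with one syntonic-comma arrow down.
letter, sharps, arrows = notate(0, 1, horogram="JI")
print(letter, sharps, arrows)        # expected: C 1 -1
print(note_unicode_5limit(0, 1))     # unicode rendering of the same pitch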
from .file_lock import FileLock import os import json from json import JSONDecodeError import threading import time DEBUG_MODE = False class ReadWrite: """ An object that allows many simultaneous read lock but one exclusive write lock""" def __init__(self, file_name): self.target_file = os.path.join(os.path.dirname(__file__), "%s.jsonl" % file_name) self.all_data_file = os.path.join(os.path.dirname(__file__), "%s.jsonl"% "all_data") self._readers = 0 def read(self, time_step, resp={}): """Acquire a lock""" read_flag = False while not read_flag: with FileLock(self.target_file, lock_file_contents="Owning thread reader"): if DEBUG_MODE: print("Reader acquire a lock at timestep: %s" % time_step) if os.path.getsize(self.target_file) != 0: with open(self.target_file, 'r') as data_file: read_info = json.loads(data_file.readline().rstrip('\n|\r')) if read_info["timestep"] == time_step: for key in read_info.keys(): resp[key] = read_info[key] data_file.close() # has data if len(resp.keys()) == 3: # clean up the file with open(self.target_file, 'w') as data_file: pass # write out the new data with open(self.all_data_file, 'a') as all_data: all_data.write("%s\n"%json.dumps(resp)) all_data.close() if DEBUG_MODE: print("Reader reading...") read_flag = True if DEBUG_MODE: print("Reader release a lock...") time.sleep(0.5) def write(self, time_step, thread_name, value): """ time_step = the time step of the record thread_name = model name value = dict contains the values """ write_flag = False while not write_flag: with FileLock(self.target_file, lock_file_contents="Owning thread %s"%(thread_name)): if DEBUG_MODE: print("writer: %s acquire a lock at timestep: %s"% (thread_name, time_step)) write_info = {} if os.path.getsize(self.target_file) == 0: # empty file, the process gain the leader status write_info['timestep'] = time_step write_info[thread_name] = value write_flag = True else: with open(self.target_file, 'r') as data_file: write_info = json.loads(data_file.readline().rstrip('\n|\r')) if write_info["timestep"] == time_step: write_info[thread_name] = value write_flag = True if write_flag: # write: if DEBUG_MODE: print("Writer %s writing at timestep: %s" %(thread_name, time_step)) with open(self.target_file, 'w') as data_file: write_str = json.dumps(write_info) # write back data_file.write("%s" % write_str) if DEBUG_MODE: print("writer: %s release a lock after write "% thread_name) else: if DEBUG_MODE: print("writer: %s release a lock "% thread_name) time.sleep(0.1)
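# A minimal sketch of how the ReadWrite helper above is meant to be used: each writer
# thread records its value for a time step, and the reader polls (under the file lock)
# until the record for that step is complete, then copies it into resp. The model
# names here are hypothetical; the reader expects the timestep key plus two writer
# entries before it considers the record finished.
rw = ReadWrite("shared_state")

rw.write(time_step=0, thread_name="model_a", value={"reward": 1.0})
rw.write(time_step=0, thread_name="model_b", value={"reward": 0.5})

resp = {}
rw.read(time_step=0, resp=resp)
print(resp)  # {'timestep': 0, 'model_a': {...}, 'model_b': {...}}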
from .cityscapes import CitySegmentation from .coco import COCOSegmentation from .person import PersonSegmentation datasets = { 'citys': CitySegmentation, 'coco': COCOSegmentation, 'person': PersonSegmentation, } def get_segmentation_dataset(name, **kwargs): """Segmentation Datasets""" return datasets[name.lower()](**kwargs)
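# A minimal usage sketch for the registry above; whatever keyword arguments the chosen
# dataset class accepts are forwarded unchanged, so the split name shown here is an
# assumption about CitySegmentation's constructor rather than part of this file.
train_set = get_segmentation_dataset('citys', split='train')
print(len(train_set))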
# Generated by Django 2.0.6 on 2018-06-05 05:45 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Ticket', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('sku', models.CharField(default='dfAL1arLkac', help_text='Unique code for refrence to supervisors', max_length=15)), ('title', models.CharField(max_length=20)), ('message', models.TextField(max_length=526)), ('priority', models.CharField(choices=[('1', 'Low'), ('2', 'Normal'), ('3', 'High'), ('4', 'Urgant')], default=('1', 'Low'), max_length=1)), ('status', models.CharField(choices=[('0', 'Closed'), ('1', 'Open')], default=('0', 'Closed'), max_length=1)), ('order_date', models.DateTimeField(auto_now=True)), ], options={ 'ordering': ('-priority', '-status', '-order_date'), }, ), migrations.CreateModel( name='TicketAttachment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50, null=True)), ('file_address', models.CharField(max_length=526, null=True)), ], ), migrations.CreateModel( name='TicketDepartment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('department_name', models.CharField(max_length=20, null=True)), ('users', models.ManyToManyField(help_text='list of users in a department', to=settings.AUTH_USER_MODEL, verbose_name='users in dept')), ], ), migrations.CreateModel( name='TicketReply', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField(max_length=526, null=True)), ('request_type', models.CharField(choices=[('202', 'Answered'), ('201', 'Request')], max_length=3, null=True)), ('submit_date', models.DateTimeField(auto_now=True)), ('attachments', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ticket.TicketAttachment')), ('submited_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), ], options={ 'verbose_name': 'Ticket Reply Message', 'verbose_name_plural': 'Ticket Reply Messages', 'ordering': ('request_type', 'submit_date'), }, ), migrations.AddField( model_name='ticketattachment', name='reply_attachment', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='ticket.TicketReply'), ), migrations.AddField( model_name='ticketattachment', name='ticket_attachment', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='ticket.Ticket'), ), migrations.AddField( model_name='ticket', name='department', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ticket.TicketDepartment'), ), migrations.AddField( model_name='ticket', name='reply', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ticket.TicketReply'), ), migrations.AddField( model_name='ticket', name='submited_by', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL), ), ]
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import logging import os import sys import time from gpu_tests import gpu_integration_test from gpu_tests import pixel_test_pages from gpu_tests import skia_gold_integration_test_base from telemetry.util import image_util test_harness_script = r""" var domAutomationController = {}; domAutomationController._proceed = false; domAutomationController._readyForActions = false; domAutomationController._succeeded = undefined; domAutomationController._finished = false; domAutomationController._originalLog = window.console.log; domAutomationController._messages = ''; domAutomationController.log = function(msg) { domAutomationController._messages += msg + "\n"; domAutomationController._originalLog.apply(window.console, [msg]); } domAutomationController.send = function(msg) { domAutomationController._proceed = true; let lmsg = msg.toLowerCase(); if (lmsg == "ready") { domAutomationController._readyForActions = true; } else { domAutomationController._finished = true; // Do not squelch any previous failures. Show any new ones. if (domAutomationController._succeeded === undefined || domAutomationController._succeeded) domAutomationController._succeeded = (lmsg == "success"); } } window.domAutomationController = domAutomationController; """ class PixelIntegrationTest( skia_gold_integration_test_base.SkiaGoldIntegrationTestBase): """GPU pixel tests backed by Skia Gold and Telemetry.""" test_base_name = 'Pixel' @classmethod def Name(cls): """The name by which this test is invoked on the command line.""" return 'pixel' @classmethod def GenerateGpuTests(cls, options): cls.SetParsedCommandLineOptions(options) namespace = pixel_test_pages.PixelTestPages pages = namespace.DefaultPages(cls.test_base_name) pages += namespace.GpuRasterizationPages(cls.test_base_name) pages += namespace.ExperimentalCanvasFeaturesPages(cls.test_base_name) pages += namespace.PaintWorkletPages(cls.test_base_name) # pages += namespace.NoGpuProcessPages(cls.test_base_name) # The following pages should run only on platforms where SwiftShader is # enabled. They are skipped on other platforms through test expectations. # pages += namespace.SwiftShaderPages(cls.test_base_name) if sys.platform.startswith('darwin'): pages += namespace.MacSpecificPages(cls.test_base_name) # Unfortunately we don't have a browser instance here so can't tell # whether we should really run these tests. They're short-circuited to a # certain degree on the other platforms. pages += namespace.DualGPUMacSpecificPages(cls.test_base_name) if sys.platform.startswith('win'): pages += namespace.DirectCompositionPages(cls.test_base_name) pages += namespace.LowLatencySwapChainPages(cls.test_base_name) pages += namespace.HdrTestPages(cls.test_base_name) for p in pages: yield(p.name, skia_gold_integration_test_base.GPU_RELATIVE_PATH + p.url, (p)) def RunActualGpuTest(self, test_path, *args): page = args[0] # Some pixel tests require non-standard browser arguments. Need to # check before running each page that it can run in the current # browser instance. self.RestartBrowserIfNecessaryWithArgs(self._AddDefaultArgs( page.browser_args)) url = self.UrlOfStaticFilePath(test_path) # This property actually comes off the class, not 'self'. 
tab = self.tab tab.Navigate(url, script_to_evaluate_on_commit=test_harness_script) tab.action_runner.WaitForJavaScriptCondition( 'domAutomationController._proceed', timeout=300) do_page_action = tab.EvaluateJavaScript( 'domAutomationController._readyForActions') try: if do_page_action: # The page action may itself signal test failure via self.fail(). self._DoPageAction(tab, page) self._RunSkiaGoldBasedPixelTest(do_page_action, page) finally: test_messages = self._TestHarnessMessages(tab) if test_messages: logging.info('Logging messages from the test:\n' + test_messages) if do_page_action or page.restart_browser_after_test: self._RestartBrowser( 'Must restart after page actions or if required by test') if do_page_action and self._IsDualGPUMacLaptop(): # Give the system a few seconds to reliably indicate that the # low-power GPU is active again, to avoid race conditions if the next # test makes assertions about the active GPU. time.sleep(4) def GetExpectedCrashes(self, args): """Returns which crashes, per process type, to expect for the current test. Args: args: The list passed to _RunGpuTest() Returns: A dictionary mapping crash types as strings to the number of expected crashes of that type. Examples include 'gpu' for the GPU process, 'renderer' for the renderer process, and 'browser' for the browser process. """ # args[0] is the PixelTestPage for the current test. return args[0].expected_per_process_crashes def _RunSkiaGoldBasedPixelTest(self, do_page_action, page): """Captures and compares a test image using Skia Gold. Raises an Exception if the comparison fails. Args: do_page_action: a bool indicating if an action was run on the page. page: the GPU PixelTestPage object for the test. """ tab = self.tab # Actually run the test and capture the screenshot. if not tab.EvaluateJavaScript('domAutomationController._succeeded'): self.fail('page indicated test failure') screenshot = tab.Screenshot(5) if screenshot is None: self.fail('Could not capture screenshot') dpr = tab.EvaluateJavaScript('window.devicePixelRatio') if page.test_rect: screenshot = image_util.Crop( screenshot, int(page.test_rect[0] * dpr), int(page.test_rect[1] * dpr), int(page.test_rect[2] * dpr), int(page.test_rect[3] * dpr)) build_id_args = self._GetBuildIdArgs() # Compare images against approved images/colors. if page.expected_colors: # Use expected colors instead of hash comparison for validation. self._ValidateScreenshotSamplesWithSkiaGold( tab, page, screenshot, dpr, build_id_args) return image_name = self._UrlToImageName(page.name) self._UploadTestResultToSkiaGold( image_name, screenshot, tab, page, build_id_args=build_id_args) def _DoPageAction(self, tab, page): getattr(self, '_' + page.optional_action)(tab, page) # Now that we've done the page's specific action, wait for it to # report completion. tab.action_runner.WaitForJavaScriptCondition( 'domAutomationController._finished', timeout=300) def _TestHarnessMessages(self, tab): return tab.EvaluateJavaScript('domAutomationController._messages') def _AssertLowPowerGPU(self): if self._IsDualGPUMacLaptop(): if not self._IsIntelGPUActive(): self.fail('Low power GPU should have been active but wasn\'t') def _AssertHighPerformanceGPU(self): if self._IsDualGPUMacLaptop(): if self._IsIntelGPUActive(): self.fail('High performance GPU should have been active but wasn\'t') # # Optional actions pages can take. # These are specified as methods taking the tab and the page as # arguments. # def _CrashGpuProcess(self, tab, page): # Crash the GPU process. 
# # This used to create a new tab and navigate it to # chrome://gpucrash, but there was enough unreliability # navigating between these tabs (one of which was created solely # in order to navigate to chrome://gpucrash) that the simpler # solution of provoking the GPU process crash from this renderer # process was chosen. tab.EvaluateJavaScript('chrome.gpuBenchmarking.crashGpuProcess()') def _SwitchTabs(self, tab, page): if not tab.browser.supports_tab_control: self.fail('Browser must support tab control') dummy_tab = tab.browser.tabs.New() dummy_tab.Activate() # Wait for 2 seconds so that new tab becomes visible. dummy_tab.action_runner.Wait(2) tab.Activate() def _RunTestWithHighPerformanceTab(self, tab, page): if not self._IsDualGPUMacLaptop(): # Short-circuit this test. logging.info('Short-circuiting test because not running on dual-GPU Mac ' 'laptop') tab.EvaluateJavaScript('initialize(false)') tab.action_runner.WaitForJavaScriptCondition( 'domAutomationController._readyForActions', timeout=30) tab.EvaluateJavaScript('runToCompletion()') return # Reset the ready state of the harness. tab.EvaluateJavaScript('domAutomationController._readyForActions = false') high_performance_tab = tab.browser.tabs.New() high_performance_tab.Navigate(self.UrlOfStaticFilePath( skia_gold_integration_test_base.GPU_RELATIVE_PATH + 'functional_webgl_high_performance.html'), script_to_evaluate_on_commit=test_harness_script) high_performance_tab.action_runner.WaitForJavaScriptCondition( 'domAutomationController._finished', timeout=30) # Wait a few seconds for the GPU switched notification to propagate # throughout the system. time.sleep(5) # Switch back to the main tab and quickly start its rendering, while the # high-power GPU is still active. tab.Activate() tab.EvaluateJavaScript('initialize(true)') tab.action_runner.WaitForJavaScriptCondition( 'domAutomationController._readyForActions', timeout=30) # Close the high-performance tab. high_performance_tab.Close() # Wait for ~15 seconds for the system to switch back to the # integrated GPU. time.sleep(15) # Run the page to completion. tab.EvaluateJavaScript('runToCompletion()') def _RunLowToHighPowerTest(self, tab, page): is_dual_gpu = self._IsDualGPUMacLaptop() tab.EvaluateJavaScript('initialize(' + ('true' if is_dual_gpu else 'false') + ')') # The harness above will take care of waiting for the test to # complete with either a success or failure. def _RunOffscreenCanvasIBRCWebGLTest(self, tab, page): self._AssertLowPowerGPU() tab.EvaluateJavaScript('setup()') # Wait a few seconds for any (incorrect) GPU switched # notifications to propagate throughout the system. time.sleep(5) self._AssertLowPowerGPU() tab.EvaluateJavaScript('render()') def _RunOffscreenCanvasIBRCWebGLHighPerfTest(self, tab, page): self._AssertLowPowerGPU() tab.EvaluateJavaScript('setup(true)') # Wait a few seconds for any (incorrect) GPU switched # notifications to propagate throughout the system. time.sleep(5) self._AssertHighPerformanceGPU() tab.EvaluateJavaScript('render()') @classmethod def ExpectationsFiles(cls): return [ os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_expectations', 'pixel_expectations.txt')] def load_tests(loader, tests, pattern): del loader, tests, pattern # Unused. return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
#!/usr/bin/env python import os, runpy this_dir = os.path.dirname(os.path.abspath(__file__)) runpy.run_path(f"{this_dir}/a.py") from nspawn.build import * name = "bionic" version = "18.04" image_url = f"file://localhost/tmp/nspawn/repo/ubuntu/base/{name}-{version}.tar.gz" booter_url = f"https://cloud-images.ubuntu.com/minimal/releases/{name}/release/ubuntu-{version}-minimal-cloudimg-amd64-root.tar.xz" # declare image identity IMAGE(image_url) # provision dependency image PULL(booter_url) # configure container profile WITH( Boot="yes", # auto-find image init program Quiet="yes", # suppress "press to escape" message KeepUnit="yes", # use service unit as nspawn scope Register="yes", # expose service unit with machinectl ) # copy local resources COPY("/etc") COPY(path="/root") SH("rm -rf /etc/resolv.conf /etc/securetty") SH("apt-get update") SH("apt-get install -y mc htop") SH("apt-get install -y iputils-ping iproute2") SH("apt-get install -y openssh-server") SH("apt-get purge -y unattended-upgrades") SH("systemctl disable networkd-dispatcher") SH("systemctl enable systemd-networkd") SH("systemctl enable systemd-resolved") SH("systemctl enable ssh") # publish image PUSH()
import os, sys, time, argparse

from utils import get_info, error, warn, success, display_info_dict


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', help='Pass district name, it should match with what you have added in input.json, it is case sensitive.')
    args = parser.parse_args()

    if args.d:
        district = args.d
        filefolder = os.getcwd()
        filepath = os.path.join(filefolder, "input.json")
        if os.path.exists(filepath):
            input_json = get_info(filepath)
            if "districts" in input_json and district in input_json["districts"]:
                district_info = input_json["districts"][district]
                display_info_dict(district_info)
                if isinstance(district_info.get("refresh_rate"), int):
                    refresh_rate = int(district_info["refresh_rate"])
                else:
                    # Set default refresh_rate as 5 seconds
                    refresh_rate = 5
            else:
                error(f"Missing {district} details in input.json")
                error("Exiting script...")
                sys.exit(1)
        else:
            print(f"Missing {filepath}")
            print("Exiting script...")
            sys.exit(1)

        cmd = f"python -u api.py -d {district}"
        while True:
            os.system(cmd)
            for i in range(refresh_rate, 0, -1):
                msg = f"Next update in {i} seconds.."
                print(msg, end="\r", flush=True)
                sys.stdout.flush()
                time.sleep(1)
    else:
        error("District name argument is missing. Pass a district name; it must match what you added in input.json and is case sensitive.")
        error("Try -h or --help for more details.")
        error("Exiting script...")
        sys.exit(1)


if __name__ == '__main__':
    main()
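# A sketch of the input.json layout the script above expects: a top-level "districts"
# mapping whose keys are the case-sensitive district names passed via -d, each holding
# at least an integer refresh_rate in seconds. The district name and value below are
# made up for illustration.
import json

example_input = {
    "districts": {
        "Pune": {
            "refresh_rate": 10
        }
    }
}
with open("input.json", "w") as f:
    json.dump(example_input, f, indent=2)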
import my_setting.utils as utils import pandas as pd import os from enum import IntEnum import csv import abc from abc import ABCMeta import re from multiprocessing import Pool from functools import partial, reduce import time import chinese_converter as cc from collections import Counter class DatasetType(IntEnum): LABELED: int = 0 UNLABELED: int = 1 TEST: int = 2 SENTIMENTRELEVENTCORPUS: int = 3 class DataCleaningStep(metaclass=ABCMeta): def __init__(self, *args): start_time = time.time() self.run(args[0]) end_time = time.time() # Unlabeled数据集不应该执行且输出LabelCheck信息 if (len(args[0].columns) == 7 or args[1] != 'LabelCheck'): print(f'已执行数据清洗步骤:{self.__doc__.strip()},用时:{round(end_time - start_time, 2)}s') @abc.abstractmethod def run(self, dataset: pd.DataFrame, n_cores: int = 8): pass def _regexp_sub(self, regexp: str, x: list, substr: str = ''): """ 清除满足根据正则表达式的列表x中的内容 :param regexp: 这则表达式 :param x: 内容列表 :return: 清理后的内容列表 """ data = [] for content in x: new_content = re.sub(regexp, substr, str(content), flags=re.U) data.append(new_content) return data def _regexp_findall(self, regexp, x: list): """ 返回满足正则表达式的字符 :param regexp: :param x: :return: """ return [re.findall(regexp, pattern) for pattern in x] def _to_simplified(self, x): return [cc.to_simplified(proc) for proc in x] def _unqualified_label(self, x): ''' :param x:[index, series] :return: 不合格index, [index] ''' res = [] for index, row in x: if ((row[6] == '0') | (row[6] == '1') | (row[6] == '-1') | (row[6] == 0) | (row[6] == 1) | (row[6] == -1)): continue else: res.append(index) return res class Batch: """ 数据分块,用户多进程加速 """ def __init__(self, n_group, data): """ 构造函数 :param n_group: 待分组数 :param data: 待分数据集 """ self._n_group = n_group self._data = data n_data = len(self._data) groups = [] if int(n_data / self._n_group) == 0: groups.append((0, n_data)) else: b = int(n_data / self._n_group) for i in range(self._n_group): if i != self._n_group - 1: groups.append((i * b, i * b + b)) else: groups.append((i * b, n_data)) self._groups = groups def __iter__(self): for s in self._groups: yield self._data[s[0]: s[1]] class DataCleaningToolSet: """ 数据清理步骤的集合 """ def __init__(self): # self._tools = [tool for tool in dir(self) if # not tool.startswith('__') and not tool.endswith('__') and tool[0].isupper()] # 显式排序,配合Dataset中tool_whether_do的值 self._tools = ['LabelCheck', 'DropExtraContentInTheEnd', 'DropHashtagAndAtReply', 'TraditionalChineseToSimplifiedChinese'] def __getitem__(self, item): return getattr(self, item) class LabelCheck(DataCleaningStep): ''' Label有各种噪音,暂时舍弃 ''' # TODO 等待讨论噪音标签的处理方法 def run(self, dataset: pd.DataFrame, n_cores=10): # 如果是test,则直接pass if len(dataset.columns) == 7: # 非比赛数据,即自己搜集的数据label被解析成int,因此加入int类型判断 # TODO 查找为什么会被解析成int print('---开始清洗Label noise---') with Pool(n_cores) as p: res = p.map(self._unqualified_label, Batch(n_cores, list(dataset.iterrows()))) res = reduce(lambda x, y: x + y, res) dataset.drop(res, inplace=True) dataset.set_index(['id'], inplace=True) dataset.sentiment = dataset.sentiment.astype(int) # 分类训练时,n_class >=0 & n_class <= max_classes # 因此把-1映射到0,0映射到1,1映射到2 dataset.sentiment = dataset.sentiment + 1 class DropExtraContentInTheEnd(DataCleaningStep): """ 去除微博内容最后的"?"和"展开全文c" """ def run(self, dataset: pd.DataFrame, n_cores=10): with Pool(n_cores) as p: exp = '\?展开全文c$|\?$|O网页链接' res = p.map(partial(self._regexp_sub, exp), Batch(n_cores, dataset.content.to_list())) res = reduce(lambda x, y: x + y, res) dataset.drop('content', inplace=True, axis=1) dataset.insert(2, 'content', res) class 
DropHashtagAndAtReply(DataCleaningStep): """ 去除"@账号名称"与"#hashtag"中的内容 """ def run(self, dataset: pd.DataFrame, n_cores=8): with Pool(n_cores) as p: exp = r'(#.*#)|(//@.*:)|(【.*】)' res = p.map(partial(self._regexp_sub, exp), Batch(n_cores, dataset.content.to_list())) res = reduce(lambda x, y: x + y, res) dataset.drop('content', inplace=True, axis=1) dataset.insert(2, 'content', res) class TraditionalChineseToSimplifiedChinese(DataCleaningStep): """ 繁体中文转为简体中文 """ def run(self, dataset: pd.DataFrame, n_cores: int = 8): with Pool(n_cores) as p: res = p.map(self._to_simplified, Batch(n_cores, dataset.content.to_list())) res = reduce(lambda x, y: x + y, res) dataset.drop('content', inplace=True, axis=1) dataset.insert(2, 'content', res) @property def tools(self): return self._tools class Dataset(pd.DataFrame): def __init__(self, path: str, type_: int, tool_whether_do=4): """ 数据文件类初始化函数,直接继承自DataFrame :param path: 数据文件路径 :param type_: 数据文件类型 :param tool_whether_do: 数据清洗执行到哪步 依次是'LabelCheck', 'DropExtraContentInTheEnd', 'DropHashtagAndAtReply', 'TraditionalChineseToSimplifiedChinese' """ # 以下为读取数据部分 pd.DataFrame.__init__(self) assert os.path.exists(path), '数据文件路径错误!' if type_ == DatasetType.SENTIMENTRELEVENTCORPUS: self._data = pd.read_csv(path, usecols=[1, 2, 3, 4, 5], )._data print('已读入标注数据集') self.index.name = 'ID' else: dateparser = lambda x: pd.datetime.strptime(x, '%m月%d日 %H:%M') if type_ == DatasetType.LABELED: df = pd.read_csv(path, usecols=[0, 1, 2, 3, 4, 5, 6], parse_dates=['微博发布时间'], date_parser=dateparser) # 配合动态设置batch_size使用,如果不启用功能,则注释该行 df.sort_values(by='微博发布时间', inplace=True) self._data = df._data print('已读入标注数据集') self.columns = ['id', 'datetime', 'poster', 'content', 'image', 'video', 'sentiment'] self.index.name = 'ID' else: # 若要区分train unlabel test,则可基于本else代码段重新改写 df = pd.read_csv(path, index_col=['微博id'], parse_dates=['微博发布时间'], date_parser=dateparser) # 配合动态设置batch_size使用,如果不启用功能,则注释该行 df.sort_values(by='微博发布时间', inplace=True) self._data = df._data print('已读入无标注数据集') self.columns = ['datetime', 'poster', 'content', 'image', 'video'] self.index.name = 'ID' self._cleaned_data = None self._cat_hashtags = None # 以下为注册要执行的数据清理工具部分 self.tool_set = DataCleaningToolSet() self.registered_tools = [] self.register_data_clean_tools(self.tool_set.tools, tool_whether_do) def register_data_clean_tools(self, tools: list, flag: int): cnt = 1 for tool in tools: assert tool in self.tool_set.tools, f"清洗工具{tool}不存在" if cnt > flag: break self.registered_tools.append(tool) cnt += 1 @property def cleaned_data(self): """ 获取清洗过的数据,原数据集中的内容不变。若数据没有清洗,则进行清洗;若数据清洗过,则直接返回清洗过的数据。 :return: """ if self._cleaned_data is not None: return self._cleaned_data else: self._cleaned_data = self.copy(deep=True) for tool in self.registered_tools: self.tool_set[tool](self._cleaned_data, tool) return self._cleaned_data def _find_hashtags(self, x): return [re.findall('#(.+?)#', str(content)) for content in x] def _list_reduce(self, l): res = [] for l1 in l: for l2 in l1: if len(l2) != 0: res.extend(l2) return res @property def stat_hashtags(self, n_core=8): """ 获取数据集的hashtag :return: """ p = Pool(n_core) if self._cat_hashtags is not None: return self._cat_hashtags else: print('正在统计hashtag...', sep='') p = Pool(processes=n_core) res = p.map(self._find_hashtags, Batch(n_core, self.content.to_list())) res = p.map(self._list_reduce, Batch(3, res)) res = reduce(lambda x, y: x + y, res) self._cat_hashtags = Counter(res) print('完毕') return self._cat_hashtags class LabeledDataset(Dataset): def __init__(self, flag: int = 2, 
path: str = utils.cfg.get('ORIGINAL_DATA', 'train_labeled_path')): Dataset.__init__(self, path, DatasetType.LABELED, flag) class UnlabeledDataset(Dataset): def __init__(self, flag: int = 1, path: str = utils.cfg.get('ORIGINAL_DATA', 'train_unlabeled_path')): Dataset.__init__(self, path, DatasetType.UNLABELED, flag) class TestDataset(Dataset): def __init__(self, flag: int = 2, path: str = utils.cfg.get('ORIGINAL_DATA', 'test_path')): Dataset.__init__(self, path, DatasetType.TEST, flag) def submit(self, path: str = utils.cfg.get('PROCESSED_DATA', 'submit_csv_path')): """ 生成排行榜提交文件 :param path: 排行榜文件的输出路径 :return: """ with open(path, 'w') as f: writer = csv.DictWriter(f, ['id', 'y']) writer.writeheader() for idx, row in self.iterrows(): item = {'id': str(idx), 'y': str(row['sentiment'] - 1)} # -1 是因为预测时标签为自然数,而提交结果却是-1,0,1 writer.writerow(item) def fill_result(self, res: list): """ 填充预测结果 :param res: 结果 :return: """ if 'sentiment' in self.columns: self.drop('sentiment', inplace=True) # 原始self.insert(-1,,,)报错unbounded slice # http://sofasofa.io/forum_main_post.php?postid=1003010 self.insert(self.shape[1], 'sentiment', res) def sentiment_relevent_corpus(): ''' 处理情感分析领域相关语料 :return: ''' # https://zhuanlan.zhihu.com/p/80029681 # region weibo_senti_100k数据集 weibo_senti_100k = 1 if weibo_senti_100k == 1: senti = pd.read_csv( '/home/zhw/PycharmProjects/nCovSentimentAnalysis/Data/SentimentRelevantCorpus/unzip/chineseNIP_weibo_senti_100k.csv', encoding='utf-8') columns_titles = ['review', 'label'] senti = senti.reindex(columns=columns_titles) # 比赛数据 label是str类型,而不是nt类型 senti['label'] = senti['label'].apply(lambda x: '-1' if x == 0 else '1') senti.columns = ['content', 'sentiment'] # 插空列,保持和比赛数据格式一致 senti = senti.reindex(columns=['datetime', 'poster', 'content', 'image', 'video', 'sentiment']) senti.to_csv('relevent_senti_100k.csv', index=False) # endregion # region simplifyweibo_4_moods senti = pd.read_csv( '/home/zhw/PycharmProjects/nCovSentimentAnalysis/Data/SentimentRelevantCorpus/unzip/simplifyweibo_4_moods.csv', encoding='utf-8') columns_titles = ['review', 'label'] senti = senti.reindex(columns=columns_titles) senti['label'] = senti['label'].apply(lambda x: '-1' if x != 0 else '1') senti.columns = ['content', 'sentiment'] senti = senti.reindex(columns=['datetime', 'poster', 'content', 'image', 'video', 'sentiment']) senti.to_csv('simplify_weibo_360k.csv', index=False) # endregion def sample_add_sentiment(): ''' 数据的输入是用模型打好了伪标签的900k csv(testdataset.submit函数生成)和原始900k csv 目的是按一定比例抽样,获取900k csv中的一部分映射了标签的数据,送入模型和100k结合再训练 参考https://stackoverflow.com/questions/37047420/how-to-drop-rows-of-pandas-dataframe-with-same-value-based-on-condition-in-diffe ''' # TODO 由于UnlabelDataset初始化得先读入900k等一系列操作,耗时大,暂时不放在UnlabelDataset,等待未来优化 # TODO 随机采样,有一定概率在train_unlabel_sample.insert报错Length of values does not match length of index # 暂时解决方案:重试。等待完善删除所有重复 assert os.path.exists(utils.cfg.get('PROCESSED_DATA', 'unlabel_pseudo_path')), 'unlabel_pseudo文件路径错误或不存在或命名错误!' 
sentiment_polar = pd.read_csv(utils.cfg.get('PROCESSED_DATA', 'unlabel_pseudo_path'), encoding='utf-8') train_unlabel = pd.read_csv(utils.cfg.get('ORIGINAL_DATA', 'train_unlabel_path'), encoding='utf-8') train_unlabel.columns = ['ID', 'datetime', 'poster', 'content', 'image', 'video'] train_unlabel.insert(loc=6, column='sentiment', value=sentiment_polar['y'].to_list()) train_unlabel_sample = train_unlabel.sample(frac=0.1) # frac是抽样比例 train_unlabel_sample.to_csv(utils.cfg.get('PROCESSED_DATA', 'unlabel_sample_path'), index=False) # 合并文件 li = [] label = pd.read_csv(utils.cfg.get('ORIGINAL_DATA', 'train_labeled_path'), encoding='utf-8', header=0) label.columns = ['ID', 'datetime', 'poster', 'content', 'image', 'video', 'sentiment'] li.append(label), li.append(train_unlabel_sample) mix_lable_unlabel = pd.concat(li, axis=0, ignore_index=True) # 适配Dataset label 判断里index_col='微博id' mix_lable_unlabel.columns = ['微博id', 'datetime', 'poster', 'content', 'image', 'video', 'sentiment'] mix_lable_unlabel.to_csv(utils.cfg.get('PROCESSED_DATA', 'mix_label_unlabel_path'), index=False) # if __name__ == '__main__': # # testset = LabeledDataset() # # print(testset.cleaned_data) # # sample_add_sentiment()
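A hedged usage sketch for the module above (my addition, mirroring the commented-out main block): it assumes flag=4 is the intended way to run all four registered cleaning steps and that inspecting the hashtag counter is a typical use; neither is stated in the original file.

if __name__ == '__main__':
    # flag=4 enables all four cleaning tools registered in DataCleaningToolSet (assumption).
    labeled = LabeledDataset(flag=4)
    cleaned = labeled.cleaned_data          # runs the registered cleaning steps on a copy
    print(cleaned.head())
    # stat_hashtags is a collections.Counter over hashtags found in the content column.
    print(labeled.stat_hashtags.most_common(10))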
from picsellia.client import Client from picsellia_yolov5.utils import to_yolo, find_matching_annotations, edit_model_yaml, generate_yaml, Opt, setup_hyp from picsellia_yolov5.utils import send_run_to_picsellia from picsellia_yolov5.yolov5.train import train import argparse import sys import os import subprocess import yaml import torch from picsellia.pxl_exceptions import AuthenticationError os.chdir('picsellia') if 'api_token' not in os.environ: raise AuthenticationError("You must set an api_token to run this image") api_token = os.environ['api_token'] if "host" not in os.environ: host = "https://app.picsellia.com/sdk/v2/" else: host = os.environ["host"] if "experiment_id" in os.environ: experiment_id = os.environ['experiment_id'] experiment = Client.Experiment(api_token=api_token, host=host) exp = experiment.checkout(experiment_id, tree=True, with_file=True) else: if "experiment_name" in os.environ and "project_token" in os.environ: project_token = os.environ['project_token'] experiment_name = os.environ['experiment_name'] experiment = Client.Experiment(api_token=api_token, project_token=project_token, host=host) exp = experiment.checkout(experiment_name, tree=True, with_file=True) else: raise AuthenticationError("You must either set the experiment id or the project token + experiment_name") experiment.dl_annotations() experiment.dl_pictures() experiment.generate_labelmap() experiment.log('labelmap', experiment.label_map, 'labelmap', replace=True) YOLODIR = 'YOLO-{}'.format(experiment_name) train_set, test_set = to_yolo(pxl_annotations_dict=exp.dict_annotations,labelmap=exp.label_map, base_imgdir=exp.png_dir, targetdir=YOLODIR, prop=0.7, copy_image=False) train_split = { 'x': train_set["categories"], 'y': train_set["train_repartition"], 'image_list': train_set["image_list"], } experiment.log('train-split', train_split, 'bar', replace=True) test_split = { 'x': test_set["categories"], 'y': test_set["train_repartition"], 'image_list': test_set["image_list"], } experiment.log('test-split', test_split, 'bar', replace=True) generate_yaml(yamlname=experiment_name, targetdir=YOLODIR, labelmap=exp.label_map) cfg = edit_model_yaml(label_map=exp.label_map, experiment_name=experiment_name, config_path=exp.config_dir) hyp, opt, device = setup_hyp(experiment_name, cfg, exp.checkpoint_dir, exp.get_data('parameters'), exp.label_map) train(hyp, opt, opt.device, pxl=exp) send_run_to_picsellia(exp, YOLODIR)
#!/usr/bin/env python3
# Project Euler : Problem 3
# Largest prime factor

from math import sqrt


# Largest prime divisor of n
def PGPD(n):
    pMax = n
    div = 2
    while n != 1:
        if n % div == 0:
            pMax = div
            while n % div == 0:
                n //= div
        div += 1
    return pMax


print(PGPD(600851475143))
# Solution : 6857

# Notes :
# Using the prime factorisation of a number,
# n = p_1^a_1 * p_2^a_2 * ... * p_m^a_m
# the first number that divides n is its smallest prime factor, here p_1.
# After dividing n by it a_1 times, we are left with:
# n' = p_2^a_2 * ... * p_m^a_m
# Repeating the method until n reaches 1, the last factor found is the
# largest prime factor dividing n.
# Here the trial divisor is incremented by 1. This could be improved so that
# mostly prime candidates are tested; a simple improvement would be to
# increment by 2 starting from div = 3.
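The closing note suggests skipping even trial divisors. A minimal sketch of that variant (my addition; the function name is hypothetical, not from the original solution):

# Sketch of the suggested improvement: strip out the factor 2 first,
# then test odd divisors only.
def largest_prime_factor(n):
    largest = 1
    while n % 2 == 0:
        largest = 2
        n //= 2
    div = 3
    while n != 1:
        if n % div == 0:
            largest = div
            while n % div == 0:
                n //= div
        div += 2
    return largest


# largest_prime_factor(600851475143) == 6857, matching the solution above.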
INITIAL_GUID = _GUID class _GUID(INITIAL_GUID): def __init__(self, Data1=None, Data2=None, Data3=None, Data4=None, name=None, strid=None): data_tuple = (Data1, Data2, Data3, Data4) self.name = name self.strid = strid if all(data is None for data in data_tuple): return super(_GUID, self).__init__() if any(data is None for data in data_tuple): raise ValueError("All or none of (Data1, Data2, Data3, Data4) should be None") super(_GUID, self).__init__(Data1, Data2, Data3, Data4) def __repr__(self): notpresent = object() # Handle IID created without '__init__' (like ctypes-ptr deref) if getattr(self, "strid", notpresent) is notpresent: self.strid = self.to_string() if self.strid is None: return super(_GUID, self).__repr__() if getattr(self, "name", notpresent) is notpresent: self.name = None if self.name is None: return '<IID "{0}">'.format(self.strid.upper()) return '<IID "{0}({1})">'.format(self.strid.upper(), self.name) def to_string(self): data4_format = "{0:02X}{1:02X}-" + "".join("{{{i}:02X}}".format(i=i + 2) for i in range(6)) data4_str = data4_format.format(*self.Data4) return "{0:08X}-{1:04X}-{2:04X}-".format(self.Data1, self.Data2, self.Data3) + data4_str def update_strid(self): new_strid = self.to_string() self.strid = new_strid @classmethod def from_string(cls, iid): part_iid = iid.split("-") datas = [int(x, 16) for x in part_iid[:3]] datas.append(int(part_iid[3][:2], 16)) datas.append(int(part_iid[3][2:], 16)) for i in range(6): datas.append(int(part_iid[4][i * 2:(i + 1) * 2], 16)) return cls.from_raw(*datas, strid=iid) @classmethod def from_raw(cls, Data1, Data2, Data3, Data41, Data42, Data43, Data44, Data45, Data46, Data47, Data48, **kwargs): return cls(Data1, Data2, Data3, (BYTE*8)(Data41, Data42, Data43, Data44, Data45, Data46, Data47, Data48), **kwargs) def __eq__(self, other): if not isinstance(other, (_GUID, INITIAL_GUID)): return NotImplemented return (self.Data1, self.Data2, self.Data3, self.Data4[:]) == (other.Data1, other.Data2, other.Data3, other.Data4[:])
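A small usage sketch for the class above (my addition; it assumes the surrounding module's ctypes definitions such as INITIAL_GUID and BYTE are available). The GUID string is the well-known IUnknown IID, used here purely as an example value:

# Round-trip an IID through the helpers defined above.
iid = _GUID.from_string("00000000-0000-0000-C000-000000000046")
print(repr(iid))  # <IID "00000000-0000-0000-C000-000000000046">
assert iid.to_string() == "00000000-0000-0000-C000-000000000046"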
expected_output = { "local_as": 100, "total_established_peers": 3, "total_peers": 4, "vrf": { "default": { "router_id": "10.1.1.1", "vrf_peers": 4, "vrf_established_peers": 3, "local_as": 100, "neighbor": { "10.51.1.101": { "last_write": "never", "state": "idle", "notifications_received": 0, "last_flap": "00:30:22", "notifications_sent": 2, "remote_port": 0, "local_port": 0, "last_read": "never", "remote_as": 300, "connections_dropped": 2 }, "2001:db8:4:1::1:1": { "last_write": "00:00:04", "state": "established", "notifications_received": 0, "last_flap": "12:11:57", "notifications_sent": 0, "remote_port": 179, "local_port": 30942, "last_read": "0.599405", "remote_as": 100, "connections_dropped": 1 }, "2001:db8:1900:1::1:101": { "last_write": "00:00:13", "state": "established", "notifications_received": 0, "last_flap": "02:29:16", "notifications_sent": 5, "remote_port": 32874, "local_port": 179, "last_read": "00:00:15", "remote_as": 300, "connections_dropped": 5 }, "192.168.4.1": { "last_write": "00:00:20", "state": "established", "notifications_received": 0, "last_flap": "12:12:01", "notifications_sent": 0, "remote_port": 37583, "local_port": 179, "last_read": "00:00:04", "remote_as": 100, "connections_dropped": 1 } } } } }
from fastapi import APIRouter, Body, BackgroundTasks import logging import aiohttp import asyncio from typing import Dict, Any from pydantic import BaseModel import uuid from src.api_composition_proxy.configurations import ServiceConfigurations from src.api_composition_proxy import helpers from src.jobs import store_data_job from src.helper import get_job_id logger = logging.getLogger(__name__) router = APIRouter() class Data(BaseModel): data: Any = None async def _get_redirect(session, url: str, alias: str) -> Dict[str, Any]: async with session.get(url) as response: response_json = await response.json() resp = {alias: {"response": response_json, "status_code": response.status}} logger.info(f"response: {resp}") return resp @router.get("/{redirect_path:path}") async def get_redirect(redirect_path: str) -> Dict[str, Any]: logger.info(f"GET redirect to: /{redirect_path}") async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=2)) as session: tasks = [ asyncio.ensure_future( _get_redirect(session, helpers.customized_redirect_builder(k, v, redirect_path, ServiceConfigurations.customized_redirect_map), k) ) for k, v in ServiceConfigurations.urls.items() ] responses = await asyncio.gather(*tasks) logger.info(f"responses: {responses}") return responses async def _post_redirect(session, url: str, data: Dict[Any, Any], alias: str) -> Dict[str, Any]: async with session.post(url, json=data) as response: response_json = await response.json() resp = {alias: {"response": response_json, "status_code": response.status}} logger.info(f"response: {resp}") return resp @router.post("/{redirect_path:path}") async def post_redirect(redirect_path: str, data: Data, background_tasks: BackgroundTasks) -> Dict[str, Any]: data.data["job_id"] = get_job_id() logger.info(f'POST redirect to: /{redirect_path} as {data.data["job_id"]}') if ServiceConfigurations.enqueue: store_data_job._save_data_job(data.data, data.data["job_id"], background_tasks, True) async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=2)) as session: tasks = [ asyncio.ensure_future( _post_redirect(session, helpers.customized_redirect_builder(k, v, redirect_path, ServiceConfigurations.customized_redirect_map), data.data, k) ) for k, v in ServiceConfigurations.urls.items() ] responses = await asyncio.gather(*tasks) logger.info(f"responses: {responses}") return responses
from django.core.management.base import BaseCommand
from money.models import Post, Tarif
from django.contrib.auth.models import User
from logs.models import Logs
from userprofile.models import UserProfile
import requests
import lxml.html
from local_site.settings import TYPE_CHOICES, LOGSTATUSYES, LOGSTATUSNO

# python manage.py moneyparse


def money_check(check_user):
    """
    Requires the following to be imported:
    UserProfile, User, Post, Logs,
    requests, lxml.html
    """
    pr_user = UserProfile.objects.get(user_id=check_user.id)
    try:
        r = requests.post(
            'http://www.wimagic.com.ua/1.php',
            # 'http://www.wimagicss.com.ua/1.php',
            data={
                'login': check_user.username,
                'pass': pr_user.pwd
            }
        )
        html = lxml.html.fromstring(r.text)
        money_r = html.xpath("/html/body/table/tr[1]/td[4]/text()")[0]
        new = Post(money=money_r, user=check_user)
        new.save()
        log = Logs(log_user=check_user, log_type=1, log_status=LOGSTATUSYES)
        log.save()
        # return 1
    except Exception:
        log = Logs(log_user=check_user, log_type=1, log_status=LOGSTATUSNO)
        log.save()
        # return 0


class Command(BaseCommand):
    # Set the help text that is shown when running
    # python manage.py moneyparse --help
    help = 'Auto money check for all users'

    def handle(self, *args, **options):
        money_pay_users = User.objects.filter(is_superuser=False)
        for i in money_pay_users:
            money_check(i)
            # self.stdout.write(i.username)
"""Top-level package for django-clone.""" __author__ = """Tonye Jack""" __email__ = "[email protected]" __version__ = "2.5.3" from model_clone.admin import CloneModelAdmin, CloneModelAdminMixin from model_clone.mixins.clone import CloneMixin from model_clone.utils import create_copy_of_instance __all__ = [ "CloneMixin", "CloneModelAdmin", "CloneModelAdminMixin", "create_copy_of_instance", ]
import subprocess import os import platform import pytest def _run_test(testname): dirname = os.path.split(__file__)[0] exename = os.path.join(dirname, 'bin', 'Python.DomainReloadTests.exe') args = [exename, testname] if platform.system() != 'Windows': args = ['mono'] + args proc = subprocess.Popen(args) proc.wait() assert proc.returncode == 0 @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_rename_class(): _run_test('class_rename') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_rename_class_member_static_function(): _run_test('static_member_rename') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_rename_class_member_function(): _run_test('member_rename') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_rename_class_member_field(): _run_test('field_rename') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_rename_class_member_property(): _run_test('property_rename') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_rename_namespace(): _run_test('namespace_rename') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_field_visibility_change(): _run_test("field_visibility_change") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_method_visibility_change(): _run_test("method_visibility_change") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_property_visibility_change(): _run_test("property_visibility_change") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_class_visibility_change(): _run_test("class_visibility_change") @pytest.mark.skip(reason='FIXME: Domain reload fails when Python points to a .NET object which points back to Python objects') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_method_parameters_change(): _run_test("method_parameters_change") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_method_return_type_change(): _run_test("method_return_type_change") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_field_type_change(): _run_test("field_type_change") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') @pytest.mark.xfail(reason="Events not yet serializable") def test_rename_event(): _run_test('event_rename') @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') @pytest.mark.xfail(reason="newly instanced object uses PyType_GenericAlloc") def test_construct_removed_class(): _run_test("construct_removed_class") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_out_to_ref_param(): _run_test("out_to_ref_param") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_ref_to_out_param(): _run_test("ref_to_out_param") 
@pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_ref_to_in_param(): _run_test("ref_to_in_param") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_in_to_ref_param(): _run_test("in_to_ref_param") @pytest.mark.skipif(platform.system() == 'Darwin', reason='FIXME: macos can\'t find the python library') def test_nested_type(): _run_test("nested_type")
# Generated by Django 2.0 on 2018-04-22 21:23 import django.db.models.deletion import django.utils.timezone from django.db import migrations, models import imagekit.models.fields class Migration(migrations.Migration): dependencies = [("events", "0027_add_category_topic_slug")] operations = [ migrations.CreateModel( name="Presentation", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "status", models.SmallIntegerField( choices=[(-1, "Declined"), (0, "Proposed"), (1, "Accepted")], db_index=True, default=0, ), ), ( "start_time", models.DateTimeField( blank=True, db_index=True, null=True, verbose_name="Start Time" ), ), ( "created_time", models.DateTimeField( db_index=True, default=django.utils.timezone.now ), ), ( "created_by", models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to="events.UserProfile", ), ), ( "event", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="presentations", to="events.Event", ), ), ], ), migrations.CreateModel( name="Speaker", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "avatar", imagekit.models.fields.ProcessedImageField( blank=True, upload_to="avatars", verbose_name="Photo Image" ), ), ("title", models.CharField(blank=True, max_length=256, null=True)), ("bio", models.TextField(blank=True)), ], ), migrations.CreateModel( name="SpeakerRequest", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ( "event", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="events.Event" ), ), ], ), migrations.CreateModel( name="Talk", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("title", models.CharField(max_length=256)), ("abstract", models.TextField()), ( "talk_type", models.SmallIntegerField( choices=[ (0, "Presentation"), (1, "Workshop"), (2, "Panel"), (3, "Roundtable"), (4, "Q & A"), (5, "Demonstration"), ], default=0, verbose_name="Type", ), ), ( "web_url", models.URLField(blank=True, null=True, verbose_name="Website"), ), ], ), migrations.AlterModelOptions( name="category", options={"verbose_name_plural": "Categories"} ), migrations.AlterModelOptions( name="country", options={"ordering": ("name",), "verbose_name_plural": "Countries"}, ), migrations.AlterModelOptions( name="eventseries", options={"verbose_name_plural": "Event series"} ), migrations.AlterField( model_name="category", name="slug", field=models.CharField(blank=True, max_length=256), ), migrations.AlterField( model_name="topic", name="description", field=models.TextField(blank=True) ), migrations.AlterField( model_name="topic", name="slug", field=models.CharField(blank=True, max_length=256), ), migrations.AddField( model_name="talk", name="category", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, to="events.Category", ), ), migrations.AddField( model_name="talk", name="speaker", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="events.Speaker" ), ), migrations.AddField( model_name="talk", name="topics", field=models.ManyToManyField(blank=True, to="events.Topic"), ), migrations.AddField( model_name="speakerrequest", name="topics", field=models.ManyToManyField(blank=True, to="events.Topic"), ), migrations.AddField( model_name="speaker", name="categories", field=models.ManyToManyField(blank=True, to="events.Category"), ), 
migrations.AddField( model_name="speaker", name="topics", field=models.ManyToManyField(blank=True, to="events.Topic"), ), migrations.AddField( model_name="speaker", name="user", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="events.UserProfile" ), ), migrations.AddField( model_name="presentation", name="talk", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.SET_NULL, related_name="presentations", to="events.Talk", ), ), ]
import csv from PatientEntry import PatientEntry from Recipient import Recipient from EmailHandler import EmailHandler from FormattedBodyText import FormattedBodyText def get_data(file: str) -> list: """extract our data from our source file Args: file (str): filename Returns: list: list of PatientEntry objects """ data = [] with open(file) as f: csv_reader = csv.reader(f, delimiter=",") # get the index for each heading (allow for changing order) # NB: the next function moves the CSV reader on 1 row, so don't need to allow for heading skipping in loop heading_indices = get_heading_indices(next(csv_reader)) for row in csv_reader: patient_entry = PatientEntry(heading_indices, row) if patient_entry.consent: data.append(patient_entry) return data def get_email_addresses(file: str) -> dict: """get a dictionary mapping recipient names to email addresses Args: file (str): filename of our CSV file with email addresses Returns: dict: dictionary mapping recipient names to email addresses """ data = dict() with open(file) as f: csv_reader = csv.reader(f, delimiter=",") next(csv_reader) for row in csv_reader: data[row[0]] = row[1] return data def get_heading_indices(row: list) -> dict: """generates a dictionary mapping desired headings to row indices to allow for changing order of columns in source data Args: row (list): row of data from CSV file Returns: dict: dictionary of heading matched with row index """ headings = [ "Date Dispensed", "Patient Name", "Street", "Town or City", "Birth Date", "PPSN No", "Gender", "Qty", "Script Dispensed As", "Directions Expanded", "Contract GP Name", "Contract GP Address", ] heading_indices = dict() for heading in headings: heading_indices[heading] = row.index(heading) return heading_indices def create_recipient_dict(data: list, email_addresses: dict) -> dict: """create our dictionary of recipients based on the data we have. 
use existing email address data to populate each recipient if available Args: data (list): list of PMR entries Returns: dict: dictionary of recipients and their matching PMR entries """ recipient_dict = dict() for entry in data: if entry.consent: recipient = entry.gp entry_summary = entry.entry_summary() if recipient in recipient_dict: # if our recipient is in the dictionary, add the entry to their section recipient_dict[recipient].add_patient_entry(entry_summary) else: # if our recipient is not in the dictionary, add them to the dictionary # check if we have an email address saved for the recipient already email = "" if recipient in email_addresses: email = email_addresses[recipient] recipient_dict[recipient] = Recipient( entry.gp, entry.gp_address, email, entry_summary, ) return recipient_dict def compose_email_details( recipient_dict: dict, ) -> list: # TODO: remove debug print output """for each recipient in list, create email details Args: recipient_dict (dict): dictionary of recipients and their matching PMR entries Returns: list: list of lists of email details """ email_list = [] for key in recipient_dict: recipient = recipient_dict[key] name = recipient.name surname = recipient.surname patients = recipient.patients email = recipient.email print("Composing email for {} ({} patients)".format(name, len(patients))) body_object = FormattedBodyText("email_template.html") body = body_object.format_body(surname, patients) subject = "Vaccine Report - {}".format(name) email_list.append([email, subject, body]) return email_list def email_list_iterate(account_str: str, email_list: list): """trigger email creation for each item in our list Args: account_str (str): term to search Outlook account for when sending email email_list (list): list of individual email details """ for mail_details in email_list: email_object = EmailHandler(account_str) email_object.create_email(mail_details) def main(): # find the file with data data = get_data("Flu Vacc Report.csv") # get existing email contacts email_addresses = get_email_addresses("Healthmail Addresses.csv") # generate list of recipients recipient_dict = create_recipient_dict(data, email_addresses) # create email details for all recipients email_list = compose_email_details(recipient_dict) # send all emails from the specified account email_list_iterate("healthmail", email_list) if __name__ == "__main__": main()
import os import time from pathlib import Path from urllib.parse import urlparse import attr import falcon.testing import psycopg2 import pytest from psycopg2.extras import execute_values from brightsky.db import get_connection, migrate @pytest.fixture(scope='session') def data_dir(): return Path(os.path.dirname(__file__)) / 'data' @pytest.fixture(scope='session') def _database(): if not os.getenv('BRIGHTSKY_DATABASE_URL'): pytest.skip('See README for running database-based tests.') url = urlparse(os.getenv('BRIGHTSKY_DATABASE_URL')) postgres_url = f'postgres://{url.netloc}' db_name = url.path.lstrip('/') assert db_name with psycopg2.connect(postgres_url) as conn: conn.autocommit = True with conn.cursor() as cur: cur.execute(f'DROP DATABASE IF EXISTS {db_name}') cur.execute(f'CREATE DATABASE {db_name}') migrate() yield if hasattr(get_connection, '_pool'): get_connection._pool.closeall() with psycopg2.connect(postgres_url) as conn: conn.autocommit = True with conn.cursor() as cur: cur.execute(f'DROP DATABASE {db_name}') @attr.s class TestConnection: """Wrapper for database connection with some rough convenience functions""" conn = attr.ib() def insert(self, table, rows): with self.cursor() as cur: fields = tuple(rows[0]) field_placeholders = [f'%({field})s' for field in fields] execute_values( cur, f"INSERT INTO {table} ({', '.join(fields)}) VALUES %s", rows, template=f"({', '.join(field_placeholders)})") self.conn.commit() def fetch(self, sql): with self.conn.cursor() as cur: cur.execute(sql) rows = cur.fetchall() self.conn.commit() return rows def table(self, name): return self.fetch(f'SELECT * FROM {name}') def __getattr__(self, name): return getattr(self.conn, name) @pytest.fixture def db(_database): with get_connection() as conn: yield TestConnection(conn) with conn.cursor() as cur: cur.execute(""" DELETE FROM parsed_files; DELETE FROM synop; DELETE FROM weather; DELETE FROM sources; REFRESH MATERIALIZED VIEW current_weather; """) @pytest.fixture(scope='session') def api(): from brightsky.web import app return falcon.testing.TestClient(app) def pytest_configure(config): # Dirty mock so we don't download the station list on every test run from brightsky.utils import _converter # Must contain all stations that we use in test data _converter.dwd_to_wmo = { 'XXX': '01028', 'YYY': '01049', '01766': '10315', '04911': '10788', '05484': 'M031', } _converter.wmo_to_dwd = dict( reversed(x) for x in _converter.dwd_to_wmo.items()) _converter.last_update = time.time()
from django.core.management.base import NoArgsCommand, CommandError; from ldap_login.ldapUtils import ldapManager; from ldap_login.models import user,group,Role; from datetime import datetime; import traceback; class Command(NoArgsCommand): """Import LDAP users from Active Directory. Uses the ldapUtils backend. Creates the users in our databases else uses existings users. Updates group bindings and full names for existing and new users also. """ args = None; help = "imports LDAP users from Active Directory into database" can_import_settings = True exclusion_list = ['exam','Domain Controllers']; #list of OUs we do not want to handle at all. def handle_noargs(self, **options): #**options is a dictionary of keyword parameters beyond those defined try: l = ldapManager(); groups = l.getGroups() for g in groups: if g in self.exclusion_list : continue; print "-" * 60 print '\nProcessing group %s' % g; #does this group exist in our database ? try: groupObj = group.objects.get(name=g); print "Using existing group %s" % g except group.DoesNotExist: groupObj = group(name=g,created_on=datetime.now()); groupObj.save(); print "Created group %s" % g; finally: users = l.getUsers(ou=g,attrs=['sAMAccountName','displayName']); for u in users: print "-" * 20 username = u['sAMAccountName'][0]; #because we get a dictionary of lists from ldap! print '\nSearching for existing user with username : %s ' % username; try: userObj = user.objects.get(pk=username) print "Using existing user %s " % userObj except user.DoesNotExist: userObj = user(pk=username); userObj.created_on = datetime.now(); print "Created user %s " % userObj; except Exception as e: print 'An unknown exception occured! '; print e; print traceback.print_exc(); finally: #so that we update these properties for all user if 'displayName' in u: userObj.fullname = u['displayName'][0] #because it's a dictionary of lists! else: userObj.fullname = userObj.pk #Don't forget to assign role! if username.startswith('0') or username.startswith('1'): userObj.role = Role.objects.get_or_create(name='student')[0] else: userObj.role = Role.objects.get_or_create(name='faculty')[0] userObj.save(); #the following must be done after saving #refer: http://stackoverflow.com/questions/7837033/valueerror-cannot-add-instance-is-on-database-default-value-is-on-databas userObj.groups.add(groupObj); #add this user to the group; except KeyError as e: print 'KeyError happened in the structure :' print e.message print 'Structure:', u print except Exception as e: print 'Some unexpected exception occured!'; print e; print traceback.print_exc()
def is_palindrome(n):
    s = str(n)
    tens = len(s) - 1
    i = 0
    while i <= tens // 2:
        if s[i] != s[tens - i]:
            return False
        i += 1
    return True


total = 0
for n in range(1000000):
    if is_palindrome(n):
        base_2 = int(bin(n)[2:])
        if is_palindrome(base_2):
            total += n
print(total)
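A shorter, equivalent check (my addition, not part of the original script) compares each representation against its reverse and produces the same total:

def is_palindrome_str(s):
    # A string is a palindrome iff it equals its reverse.
    return s == s[::-1]


total = sum(
    n for n in range(1000000)
    if is_palindrome_str(str(n)) and is_palindrome_str(bin(n)[2:])
)
print(total)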
from bs4 import BeautifulSoup
import requests
import yaml
import os

# Channel RSS feed to scrape for video data
url = "https://www.youtube.com/feeds/videos.xml?channel_id=UC964lfWIMojN48cA6FK2Fhg"

html = requests.get(url)
soup = BeautifulSoup(html.text, "lxml")

vidlist = []
for entry in soup.find_all("entry"):
    video = {}
    # We want just the title and the video ID
    for title in entry.find_all("title"):
        print(title.text)
        video["title"] = title.text
    for id in entry.find_all("yt:videoid"):
        print(id.text)
        video["ytid"] = id.text
    vidlist.append(video)

# Saved to vidlist yaml file that is read by the website generator
vidlist_path = os.path.join(os.path.dirname(__file__), "_data", "vidlist.yml")
yaml.dump(vidlist, open(vidlist_path, "w"))
arr = []
with open("input", "r") as f:
    for line in f.readlines():
        arr.append(int(line.rstrip("\n")))

length = len(arr)
found = False
for i in range(0, length):
    for j in range(i + 1, length):
        for k in range(j + 1, length):
            if arr[i] + arr[j] + arr[k] == 2020:
                print("Result = ", arr[i] * arr[j] * arr[k])
                found = True
                break
        if found:
            break
    if found:
        break
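A more direct sketch of the same search (my addition): itertools.combinations guarantees three distinct entries and the loop stops at the first match.

from itertools import combinations

with open("input") as f:
    entries = [int(line) for line in f]

for a, b, c in combinations(entries, 3):
    if a + b + c == 2020:
        print("Result = ", a * b * c)
        break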
# Copyright (c) 2018 Marco Giusti

import os.path
from cffi import FFI


curdir = os.path.abspath(os.path.dirname(__file__))
c_source = r'''
#include "ace.h"
'''

ffibuilder = FFI()
ffibuilder.set_source(
    '_ace',
    c_source,
    libraries=['ace'],
    include_dirs=[curdir]
)
ffibuilder.cdef(open(os.path.join('src', 'ace.h')).read())


if __name__ == '__main__':
    ffibuilder.compile(verbose=True)
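Once ffibuilder.compile() has produced the extension, the generated module is imported in cffi's usual out-of-line API form. A hypothetical usage sketch (my addition; the wrapped call is an invented placeholder, the real callables are whatever ace.h declares):

from _ace import ffi, lib

# print(lib.ace_version())  # placeholder name, not taken from ace.h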
""" Restore ckpt from meta file and JUST display the shape/name of all the layers """ import tensorflow as tf import numpy as np config = tf.ConfigProto( log_device_placement=True, allow_soft_placement=True ) with tf.Session(config=config) as sess: new_saver = tf.train.import_meta_graph( '../save_model/seperate/onet/onet-850000.meta') # load graph for var in tf.trainable_variables(): #get the param names print(var.name) #print parameters' names new_saver.restore(sess, tf.train.latest_checkpoint( '../save_model/seperate/onet/')) # find the newest training result all_vars = tf.trainable_variables() for v in all_vars: v_4d = np.array(sess.run(v)) #get the real parameters print(v_4d.shape) print("finished!")
#!/usr/bin/env python # -*- coding: utf-8 -*- """base.py: Contains toml parser and basic data class.""" from mruns import __version__ from pathlib import Path from dataclasses import dataclass from dataclasses_json import dataclass_json from typing import Optional, Callable, List, Dict, Tuple, Any, ClassVar, Iterator from mreports import NB from mbf_genomes import EnsemblGenome from mbf_externals import ExternalAlgorithm from pandas import DataFrame from .util import ( filter_function, read_toml, df_to_markdown_table, fill_incoming, assert_uniqueness, ) from pprint import PrettyPrinter from mbf_genomics.genes.anno_tag_counts import _NormalizationAnno, _FastTagCounter import dataclasses import pandas as pd import pypipegraph as ppg import tomlkit import logging import sys import inspect import json import mbf_genomes import mbf_externals import mbf_align import mbf_genomics import mbf_comparisons __author__ = "Marco Mernberger" __copyright__ = "Copyright (c) 2020 Marco Mernberger" __license__ = "mit" _logger = logging.getLogger(__name__) _required_fields = { "project": ["name", "analysis_type", "run_ids"], "samples": ["df_samples", "reverse_reads", "kit", "stranded"], "alignment": ["species", "revision", "aligner"], "genes": [], "comparison": [], "reports": [], } @dataclass class Analysis: run_toml: Path project: Dict[str, Any] samples: Dict[str, Any] alignment: Dict[str, Any] genes: Dict[str, Any] comparison: Dict[str, Any] downstream: Dict[str, Any] reports: Dict[str, Any] combination: Dict[str, Any] __allowed_types: ClassVar[List[str]] = ["RNAseq"] __known_kits: ClassVar[List[str]] = ["QuantSeq", "NextSeq"] __accepted_species: ClassVar[List[str]] = ["Homo_sapiens", "Mus_musculus"] @property def incoming(self): """ The incoming folder with all external data. """ if "incoming" in self.project: return Path(self.project["incoming"]) return Path("incoming") @property def analysis_type(self): """ The analysis type to perform. """ return self.project["analysis_type"] @property def name(self): """ The project name. """ return self.project["name"] @property def run_ids(self): """ A list of all run_ids from the sequencer. """ return self.project["run_ids"] @property def path_to_samples_df(self): return self.incoming / self.samples["df_samples"] @property def path_to_combination_df(self): if "set_operations" in self.combination: return self.incoming / self.combination["set_operations"] else: return None @classmethod def get_comparison_methods(cls): module = sys.modules["mbf_comparisons.methods"] ret = {} for name, obj in inspect.getmembers(module): if inspect.isclass(obj): ret[name] = obj return ret def __post_init__(self): """ Cleans up after initialization. """ self.get_fastqs() self._verify() if ppg.inside_ppg(): self.set_genome() self.verify_samples() self.comparisons_to_do = self.parse_comparisons() def set_genome(self): self._genome = mbf_genomes.EnsemblGenome( self.alignment["species"], self.alignment["revision"] ) def parse_single_comparisons( self, comparison_group: str, method_name: str, comparison_type: str ): comparisons_to_do = {} seen = set() path = self.comparison[comparison_group][method_name]["path"] df_in = pd.read_csv(path, sep="\t") method, options = self.comparison_method(comparison_group, method_name) for _, row in df_in.iterrows(): comparison_name = f"{row['comparison_name']}({method_name})" if comparison_name in seen: raise ValueError( "Duplicate comparison name {comparison_name} in {path}." 
) comparisons_to_do[comparison_name] = { "type": comparison_type, "cond1": row["a"], "cond2": row["b"], "method": method, "method_name": method_name, "options": options, } return comparisons_to_do def parse_multi_comparisons( self, comparison_group: str, method_name: str, comparison_type: str ): df_factor_path = self.comparison[comparison_group][method_name]["path"] multi_comparisons_to_do = {} df_factor = pd.read_csv(df_factor_path, sep="\t") method, options = self.comparison_method(comparison_group, method_name) for multi_group, df_factor_group in df_factor.groupby("comparison_group"): for multi_comp_name, df_comp in df_factor_group.groupby("comparison"): main_factor = df_comp["main"].values[0] comparison_name = f"{multi_comp_name}({method_name})" assert len(df_comp["main"].unique()) == 1 interactions = None if "interaction" in df_comp.columns: interactions = df_comp["interaction"].values[0] factors = [x for x in df_comp.columns.values if ":" in x] rename = {} factor_reference = {} other_factor = None for factor_ref in factors: factor, ref = factor_ref.split(":") factor_reference[factor] = ref rename[factor_ref] = factor if factor != main_factor: other_factor = factor if other_factor is None: raise ValueError("Only one factor in {df_factor_path}.") df_factors = df_comp.rename(columns=rename) ##### # for main_level in df_comp[main_factor].unique(): # if main_level == factor_reference[main_factor]: # continue # print("main", main_level) # for genotype in df_comp[other_factor].unique(): # if genotype == factor_reference[other_factor]: # continue # print("other", genotype) # column_prefix_effect = f"{main_level}:{factor_reference[main_factor]}({main_factor}) effect for {genotype}:{factor_reference[other_factor]}({other_factor})" # column_prefixes = [column_prefix_effect] # if len(interaction) > 0: # column_prefix_diff = f"{main_level}:{factor_reference[main_factor]}({main_factor}) effect difference for {genotype}:{factor_reference[other_factor]}({other_factor})" # column_prefixes.append(column_prefix_diff) ##### multi_comparisons_to_do[comparison_name] = { "type": comparison_type, "main_factor": main_factor, "factor_reference": factor_reference, "df_factor": df_factors, "interaction": interactions, "method": method, "options": options, "method_name": method_name, "multi_comp_name": multi_comp_name } return multi_comparisons_to_do def parse_comparisons(self): comparisons_to_do = {} for group in self.comparison: comparisons_to_do[group] = {} for method_name in self.comparison[group]: comp_type = self.comparison[group][method_name]["type"] if comp_type == "ab": comparisons_to_do[group].update( self.parse_single_comparisons(group, method_name, comp_type) ) elif comp_type == "multi": comparisons_to_do[group].update( self.parse_multi_comparisons(group, method_name, comp_type) ) else: raise ValueError(f"Don't know what to do with type {comp_type}.") return comparisons_to_do def _verify(self): """ Verifies the information given in the ru.toml file. This checks the presence of certain required fields and performs some checks on the values provided within the sections. This is called directly after instantiation if Analysis. Raises ------ ValueError If required fields in the toml file sections are missing. ValueError If the toml file does not contain a required section. NotImplementedError If an analysis type is specified, that is currently not supported. FileNotFoundError If a run folder is not present in incoming. FileNotFoundError If no samples table exists in incoming. 
ValueError If the specified kit is not supported. ValueError If the species specified is not supported. """ # assert all required fields are present for key in _required_fields: if hasattr(self, key): attr_dict = getattr(self, key) for field in _required_fields[key]: if field not in attr_dict: raise ValueError( f"Required field '{field}' in section {key} is missing." ) else: raise ValueError( f"Required section '{key}' missing from {str(self.run_toml)}." ) # assert the type if self.analysis_type not in self.__allowed_types: raise NotImplementedError(f"{self.analysis_type} is not yet implemented.") # check run ids for run_id in self.run_ids: if not (self.incoming / run_id).exists(): print(self.incoming.absolute()) raise FileNotFoundError( f"Folder {run_id} not present in '{str(self.incoming)}'." ) # TODO automatically pulliong the data from rose/incoming ... # check samples if not Path(self.path_to_samples_df).exists(): raise FileNotFoundError( f"No samples.tsv in {self.incoming}. Please create it." ) kit = self.samples["kit"] if kit not in self.__known_kits: raise ValueError( f"Provided kit {kit} is not known, currently supported are {str(self.__known_kits)}." ) # check alignment species = self.alignment["species"] if species not in self.__accepted_species: # for some donwstream analysis we can only handle mouse and human automatically raise ValueError( f"Provided species {species} not in {str(self.__accepted_species)}." ) @property def genome(self) -> EnsemblGenome: """ Returns an instance of the specified ensembl genome. Species and revision are obtained from the run.toml and the appropriate genome is instantiated here. Returns ------- EnsemblGenome The genome to use. """ return self._genome def aligner(self) -> ExternalAlgorithm: """ Returns an instance of the specified aligner and parameters for the run. This looks up the aligner classes in mbf_externals.aligners and returns an instance of the specified aligner, if such a class exists. Returns ------- ExternalAlgorithm The aligner to be used. dict the aligner parameters. Raises ------ ValueError If the aligner name does not match to a Class in the module. """ module = sys.modules["mbf_externals.aligners"] aligner_name = self.alignment["aligner"] if not hasattr(module, aligner_name): raise ValueError( f"No aligner named {aligner_name} found in mbf_externals.aligners.py." ) aligner_ = getattr(module, aligner_name) aligner = aligner_() params = {} if "parameters" in self.alignment: params = self.alignment["parameter"] return aligner, params def sample_df(self) -> DataFrame: """ Reads and returns the DataFrame containing the samples to be analyzed. Returns ------- DataFrame DataFrame with samples to be analyzed. """ df_samples = pd.read_csv(self.path_to_samples_df, sep="\t") return df_samples def combination_df(self): if self.path_to_combination_df is not None: return pd.read_csv(self.path_to_combination_df, sep="\t") return None def verify_samples(self): """ Checks the samples and groups tables that are supposed to be in incoming. Checks for the correct columns to be present and the file existence. Raises ------ ValueError If the samples table does not contain all required columns. ValueError If vids are assigned twice. ValueError If no groups are specified for the samples. FileNotFoundError If the group table file specified by the group column in samples table does not exist. ValueError If the group table is missing required columns. 
""" df_samples = self.sample_df() columns = ["number", "sample", "prefix", "comment", "vid"] not_present = set(columns).difference(set(df_samples.columns.values)) if len(not_present) > 0: raise ValueError( f"The samples table {self.path_to_samples_df} does not contain the following required columns {not_present}." ) group_columns = [] vids = df_samples["vid"].dropna() duplicate_vids = vids[vids.duplicated()].values if len(duplicate_vids) > 0: raise ValueError( f"The following vids where assigned twice: {list(duplicate_vids)}." ) for c in df_samples.columns: if c.startswith("group"): group_columns.append(c) if len(group_columns) == 0: raise ValueError( f"No column starting with 'group' in {self.path_to_samples_df}. This is needed to define groups for comparisons." ) for col in group_columns: fpath = self.incoming / f"{col}.tsv" if not fpath.exists(): raise FileNotFoundError( "Group column {col} specified, but no file {str(fpath)} found." ) df_groups = pd.read_csv(fpath, sep="\t") columns = ["a", "b", "comparison_name", "comment"] not_present = set(columns).difference(set(df_groups.columns.values)) if len(not_present) > 0: raise ValueError( f"The groups table {str(fpath)} does not contain the following required columns {not_present}." ) def fastq_processor(self) -> Any: """ Returns an appropriate fastq processor. This is based on the kit provided. Returns ------- Any mbf_align.fastq2. class instance. """ kit = self.samples["kit"] if kit == "QuantSeq": fastq_processor = mbf_align.fastq2.UMIExtractAndTrim( umi_length=6, cut_5_prime=4, cut_3_prime=0 ) return fastq_processor elif kit == "NextSeq": return mbf_align.fastq2.Straight() else: raise NotImplementedError # TODO: read processor from toml for more fine-grained control def raw_counter(self) -> _FastTagCounter: kit = self.samples["kit"] stranded = self.samples["stranded"] if kit == "QuantSeq": if stranded: return mbf_genomics.genes.anno_tag_counts.ExonSmartStrandedRust else: raise NotImplementedError elif kit == "NextSeq": if stranded: return mbf_genomics.genes.anno_tag_counts.ExonSmartStrandedRust else: raise NotImplementedError else: raise NotImplementedError # TODO: to toml for more fine-grained control def norm_counter(self) -> _NormalizationAnno: kit = self.samples["kit"] stranded = self.samples["stranded"] if stranded: if kit == "QuantSeq": return mbf_genomics.genes.anno_tag_counts.NormalizationCPM elif kit == "NextSeq": return mbf_genomics.genes.anno_tag_counts.NormalizationTPM else: raise NotImplementedError # TODO: to toml for more fine-grained control else: raise NotImplementedError def report(self) -> NB: """ Generates a NB to collect plots with a name given in the run.toml. Returns ------- NB The NB instance to use. """ dependencies = [ ppg.FunctionInvariant("FI_ana", self.summary_markdown), ppg.FunctionInvariant("FI_ana", self.summary_markdown), ] if "name" in self.reports: nb = NB(self.reports["name"], dependencies=dependencies) else: nb = NB("run_report", dependencies=dependencies) return nb def has_gene_filter_specified(self) -> bool: """ Wether the genes should be filtered prior to DEG analysis. If a genes.filter section is given in the run.toml, this is true. Filter conditions specified in this section are used to create a filter function to use. Returns ------- bool True, if gene.filter is supplied in run.toml. """ return "filter" in self.genes def genes_filter(self) -> Callable: """ Returns a filter function getter that takes a list of expression columns to filter by. 
The filter function is generated based on the run.toml genes.filter subsection. Returns ------- Callable Callable that returns a DataFrame filter function and takes a list of columns to filter by. Raises ------ ValueError If no filter criteria for genes have been specified but this is called. """ if not self.has_gene_filter_specified(): raise ValueError("No filters have been specified in run.toml.") filter_spec = self.genes["filter"] threshold = filter_spec.get("cpm_threshold", None) canonical = filter_spec.get("canonical", True) canonical_chromosomes = None if canonical: canonical_chromosomes = self._genome.get_true_chromosomes() biotypes = filter_spec.get("biotypes", None) at_least = filter_spec.get("at_least", 1) return filter_function(threshold, at_least, canonical_chromosomes, biotypes) def comparison_method(self, group_name: str, method: str) -> Any: """ Returns an instance of a comparison method. This is intended as a parameter for mbf_comparisons.Comparison.__init__. This looks up the method classes in mbf_comparions.methods and returns an instance of the specified method, if such a class exists. Optional paramaters can be specified in run.toml with 'parameter' key. Parameters ---------- method : str Name of the method to be used. Returns ------- Any A class from mbf_comparisons.methods. Raises ------ ValueError If the method is not found in the module. """ module = sys.modules["mbf_comparisons.methods"] if not hasattr(module, method): raise ValueError( f"No method named {method} found in mbf_comparisons.methods.py." ) method_ = getattr(module, method) options = { "laplace_offset": self.comparison[group_name][method].get( "laplace_offset", 0 ), "include_other_samples_for_variance": self.comparison[group_name][ method ].get("include_other_samples_for_variance", True), } if "parameters" in self.comparison[group_name][method]: parameters = self.comparison[group_name][method]["parameters"] return method_(**parameters), options else: return method_(), options def deg_filter_expressions(self, condition_group: str, method: str) -> List[Any]: """ Returns the filter expression used to filter the DE genes after runnning the comparison. This defaults to selecting the logFC >= 1 and FDR <= 0.05. Parameters ---------- method : str The method for which the expression is used. Returns ------- List[Any] List of filter expressions. """ default = [[["FDR", "<=", 0.05], ["log2FC", "|>", 1]]] if "filter_expressions" in self.comparison[condition_group][method]: expr = self.comparison[condition_group][method]["filter_expressions"] return expr else: return default @classmethod def deg_filter_expression_as_str(self, filter_expr: List[List[Any]]) -> str: """ Turns the filter expression into a str used as suffix for filtered genes names. Parameters ---------- filter_expr : List[List[Any]] The expression to be stringified. Returns ------- str string representation of filter_expr. """ x = "_".join(["".join([str(x) for x in exp]) for exp in filter_expr]) return x def pretty(self) -> str: """ Returns a pretty string for the class instance. Returns ------- str String representation with indent. """ pp = PrettyPrinter(indent=4) d = vars(self).copy() d["genome"] = d["_genome"] del d["_genome"] return "Analysis(\n" + pp.pformat(d) + "\n)" # def summary(self) -> str: # """ # Generates a run report summary and returns it as string. # # This is intended for double checking the analysis settings and # should countain all the information inferred from the run.toml. # # Returns # ------- # str # Summary of run settings. 
# """ # pp = PrettyPrinter(indent=4) # report_header = f"Analysis from toml file '{self.run_toml}'\n\n" # report_header += "Specification\n-------------\n" + self.pretty() + "\n\n" # report_header += f"Genome used: {self.genome.name}\n" # aligner, aligner_params = self.aligner() # report_header += ( # f"Aligner used: {aligner.name} with parameter {aligner_params}\n" # ) # report_header += f"Run-IDs: {pp.pformat(self.run_ids)}\n" # report_header += ( # f"Fastq-Processor: {self.fastq_processor().__class__.__name__}\n" # ) # raw_counter = self.raw_counter() # norm_counter = self.norm_counter() # report_header += f"Raw counter: {raw_counter.__name__}\n" # report_header += f"Norm counter: {norm_counter.__name__}\n" # report_header += "\nSamples\n-------\n" # df_samples = self.sample_df() # report_header += pp.pformat(df_samples) # conditions = [x for x in df_samples.columns if x.startswith("group")] # report_header += "\n\nComparisons requested\n---------------------\n" # comparisons_to_do: Dict[str, List] = {} # for condition in conditions: # comparisons_to_do[condition] = [] # df_in = pd.read_csv(f"incoming/{condition}.tsv", sep="\t") # for _, row in df_in.iterrows(): # comparisons_to_do[condition].append( # (row["a"], row["b"], row["comparison_name"]) # ) # report_header += f"Comparison group: '{condition}'\n" # report_header += pp.pformat(df_in) + "\n" # report_header += f"\nGenes\n-----\n" # genes_used_name = f"Genes_{self.genome.name}" # if self.has_gene_filter_specified(): # report_header += "Genes filtered prior to DE analysis by: \n" # if ( # "canonical" in self.genes["filter"] # and self.genes["filter"]["canonical"] # ): # report_header += "- canonical chromosomes only\n" # genes_used_name += "_canonical" # if "biotypes" in self.genes["filter"]: # at_least = self.genes["filter"].get("at_least", 1) # report_header += ( # f"- biotype in {pp.pformat(self.genes['filter']['biotypes'])}\n" # ) # genes_used_name += "_biotypes" # if "cpm_threshold" in self.genes["filter"]: # threshold = self.genes["filter"]["cpm_threshold"] # report_header += f"- at least {at_least} samples with normalized expression >= {threshold}\n" # genes_used_name += f"_{at_least}samples>={threshold}" # report_header += f"Genes used: {genes_used_name}\n" # report_header += f"\nComparisons\n-----------\n" # for condition_group in self.comparison: # report_header += f"From '{condition_group}':\n" # comp_type = self.comparison[condition_group]["type"] # if comp_type == "ab": # for comparison_name in self.comparisons_to_do[condition_group]: # params = self.comparisons_to_do[condition_group][comparison_name] # if params["options"]["include_other_samples_for_variance"]: # x = "fit on all samples" # else: # x = "fit on conditions" # desc = f"- compare {params['cond1']} vs {params['cond2']} using {params['method_name']} (offset={params['options']['laplace_offset']}, {x}) \n" # report_header += desc # else: # for comparison_name in self.comparisons_to_do[condition_group]: # params = self.comparisons_to_do[condition_group][comparison_name] # factors = ",".join( # [f"{x}({y})" for x, y in params["factor_reference"].items()] # ) # desc = f"- compare {comparison_name} with {factors} using {params['method_name']} (offset={params['options']['laplace_offset']}, {x}) \n" # report_header += desc # report_header += f"\nDownstream Analysis\n-------------------\n" # for downstream in self.downstream: # if downstream == "pathway_analysis": # for pathway_method in self.downstream[downstream]: # if pathway_method == "ora": # collections = ["h"] 
# if "collections" in self.downstream[downstream][pathway_method]: # collections = self.downstream[downstream][pathway_method][ # "collections" # ] # report_header += f"Over-Representation Analysis (ORA)\n" # report_header += f"Collections used: {collections}\n" # if pathway_method == "gsea": # collections = ["h"] # if "collections" in self.downstream[downstream][pathway_method]: # collections = self.downstream[downstream][pathway_method][ # "collections" # ] # parameter = {"permutations": 1000} # if "parameter" in self.downstream[downstream][pathway_method]: # parameter = self.downstream[downstream][pathway_method][ # "parameter" # ] # report_header += "Gene Set Enrichment Analysis (GSEA)\n" # report_header += f"Collections: {collections}\n" # report_header += f"Parameters: {parameter}\n" # combination_df = self.combination_df() # if combination_df is not None: # report_header += ( # f"\nSet operations on comparisons\n-----------------------------\n" # ) # report_header += pp.pformat(combination_df) # return report_header def summary_markdown(self) -> str: """ Generates a run report summary and returns it as markdown string. This is intended for double checking the analysis settings and should countain all the information inferred from the run.toml. Returns ------- str Summary of run settings. """ def __comp_header(comp_type): if comp_type == "ab": return "Comparisons pairwise: \n" elif comp_type == "multi": return "Comparisons multi-factor: \n" else: raise ValueError( f"Don't know what to do with comparison type {comp_type}." ) pp = PrettyPrinter(indent=4) report_header = f"## Analysis from toml file '{self.run_toml}'\n" report_header += f"Genome used: {self.genome.name} \n" aligner, aligner_params = self.aligner() report_header += ( f"Aligner used: {aligner.name} with parameter {aligner_params} \n" ) report_header += f"Run-IDs: {pp.pformat(self.run_ids)} \n" report_header += ( f"Fastq-Processor: {self.fastq_processor().__class__.__name__} \n" ) raw_counter = self.raw_counter() norm_counter = self.norm_counter() report_header += f"Raw counter: {raw_counter.__name__} \n" report_header += f"Norm counter: {norm_counter.__name__} \n" report_header += "\n### Samples \n \n" df_samples = self.sample_df() report_header += df_to_markdown_table(df_samples) report_header += "\n\n### Comparisons requested \n" for group_name in self.comparison: report_header += f"Comparison group: '{group_name} \n'" for method_name in self.comparison[group_name]: report_header += f"\nMethod: {method_name}" comp_type = self.comparison[group_name][method_name]["type"] if comp_type == "ab": report_header += "(a vs b) \n\n" elif comp_type == "multi": report_header += "(multi) \n\n" else: raise ValueError("Don't know what to do with type {comp_type}.") filepath = self.comparison[group_name][method_name][ "path" ] ## ensure field df_in = pd.read_csv(filepath, sep="\t") report_header += df_to_markdown_table(df_in) + "\n" report_header += f"\n### Genes \n" genes_used_name = f"Genes_{self.genome.name}" if self.has_gene_filter_specified(): report_header += "Genes filtered prior to DE analysis by: \n" if ( "canonical" in self.genes["filter"] and self.genes["filter"]["canonical"] ): report_header += "- canonical chromosomes only\n" genes_used_name += "_canonical" if "biotypes" in self.genes["filter"]: at_least = self.genes["filter"].get("at_least", 1) report_header += ( f"- biotype in {pp.pformat(self.genes['filter']['biotypes'])}\n" ) genes_used_name += "_biotypes" if "cpm_threshold" in self.genes["filter"]: threshold = 
self.genes["filter"]["cpm_threshold"] report_header += f"- at least {at_least} samples with normalized expression >= {threshold}\n" genes_used_name += f"_{at_least}samples>={threshold}" report_header += f"Genes used: {genes_used_name} \n" report_header += f"\n### Comparisons \n" for condition_group in self.comparison: report_header += f"From '{condition_group}': \n" for comparison_name in self.comparisons_to_do[condition_group]: params = self.comparisons_to_do[condition_group][comparison_name] comp_type = params["type"] if comp_type == "ab": if params["options"]["include_other_samples_for_variance"]: x = "fit on all samples" else: x = "fit on conditions" desc = f"- compare {params['cond1']} vs {params['cond2']} using {params['method_name']} (offset={params['options']['laplace_offset']}, {x}) \n" report_header += desc else: factors = ",".join( [f"{x}({y})" for x, y in params["factor_reference"].items()] ) desc = f"- compare {comparison_name} with {factors} using {params['method_name']} (offset={params['options']['laplace_offset']}, {x}) \n" report_header += desc report_header += f"\n### Downstream Analysis \n" for downstream in self.downstream: if downstream == "pathway_analysis": for pathway_method in self.downstream[downstream]: if pathway_method == "ora": collections = ["h"] if "collections" in self.downstream[downstream][pathway_method]: collections = self.downstream[downstream][pathway_method][ "collections" ] report_header += f"\nOver-Representation Analysis (ORA) \n" report_header += f"Collections used: {collections} \n" if pathway_method == "gsea": collections = ["h"] if "collections" in self.downstream[downstream][pathway_method]: collections = self.downstream[downstream][pathway_method][ "collections" ] parameter = {"permutations": 1000} if "parameter" in self.downstream[downstream][pathway_method]: parameter = self.downstream[downstream][pathway_method][ "parameter" ] report_header += "\nGene Set Enrichment Analysis (GSEA) \n" report_header += f"Collections: {collections} \n" report_header += f"Parameters: {parameter} \n" combination_df = self.combination_df() if combination_df is not None: report_header += f"\n### Set operations on comparisons \n" report_header += df_to_markdown_table(combination_df) return report_header def specification(self): report_spec = "\n### Specification \n" + self.pretty() return report_spec def combinations(self) -> Iterator: df_combinations = self.combination_df() if df_combinations is not None: for _, row in df_combinations.iterrows(): condition_group = row["condition_group"] new_name_prefix = row["combined_name"] comparisons_to_add = row["comparison_names"].split(",") assert_uniqueness(comparisons_to_add) if row["operation"] == "difference": operations = "Set difference" def generator(new_name, genes_to_combine): return mbf_genomics.genes.genes_from.FromDifference( new_name, genes_to_combine[0], genes_to_combine[1:], sheet_name="Differences", ) elif row["operation"] == "intersection": operations = "Intersection" def generator(new_name, genes_to_combine): return mbf_genomics.genes.genes_from.FromIntersection( new_name, genes_to_combine, sheet_name="Intersections" ) elif row["operation"] == "union": operations = "Union" def generator(new_name, genes_to_combine): return mbf_genomics.genes.genes_from.FromAny( new_name, genes_to_combine, sheet_name="Unions" ) yield condition_group, new_name_prefix, comparisons_to_add, generator, operations def get_fastqs(self): fill_incoming(self.run_ids) def analysis(req_file: Path = Path("run.toml")) -> Analysis: """ 
    Returns a new Analysis instance from a given toml file.

    Parameters
    ----------
    req_file : Path, optional
        Path to toml file, by default Path("run.toml").

    Returns
    -------
    Analysis
        A new Analysis instance.
    """
    return Analysis(req_file, **read_toml(req_file))
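# Hedged usage sketch (not part of the module): how the pieces above might be driven from
# a small script. Assumes a valid run.toml next to the script (the file name is taken from
# the default argument); everything else is illustrative.
if __name__ == "__main__":
    ana = analysis(Path("run.toml"))
    print(ana.summary_markdown())
    print(ana.specification())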
# A program that reads an athlete's age and shows their classification
idade = int(input('Quantos anos você tem? '))
if idade <= 9:
    print('Sua classificação é: mirim')
elif idade <= 14:
    print('Sua classificação é: infantil')
elif idade <= 19:
    print('Sua classificação é: junior')
elif idade <= 20:
    print('Sua classificação é: senior')
else:
    print('Sua classificação é: master')
import os import sys from secret import FLAG from Crypto.Cipher import AES def pad(text): padding = 16 - (len(text) % 16) return text + bytes([padding] * padding) def encrypt(text): cipher = AES.new(key, AES.MODE_OFB, iv) return cipher.encrypt(pad(text)) def nonce(): randtext = list(os.urandom(len(FLAG.encode()))) randtext = list(map(lambda x: (x + 0x7F) % 0xFF, randtext)) randtext = bytes(randtext) return encrypt(randtext).hex() if __name__ == '__main__': iv = os.urandom(16) key = os.urandom(32) print(f'flag: {encrypt(FLAG.encode()).hex()}') while True: print('> nonce') print('> server.py') print('> exit') cmd = input('> Command: ') if cmd == 'exit': sys.exit(1) elif cmd == 'nonce': print(nonce()) elif cmd == 'server.py': print(open('./server.py', 'r').read()) else: print(encrypt(b'Bad hacker').hex())
from uuid import uuid4 from django.contrib.auth.models import User from django.db import models from django.db.models import fields from django.db.models.expressions import F, Value from tastypie.models import create_api_key from django.forms.models import model_to_dict # Create your models here. class TimestampMixin(models.Model): class Meta: abstract = True created = fields.DateTimeField(auto_now_add=True) updated = fields.DateTimeField(auto_now=True) class Product(TimestampMixin, models.Model): class Meta: app_label = 'trackbuild' unique_together = ('user', 'name') ordering = ['user', 'name'] get_latest_by = 'created' name = fields.CharField(max_length=50) #: user user = models.ForeignKey(User) def get_latest_release(self, major=None, minor=None, patch=None): z = lambda v : v if v and v != "+" else None opts = dict(major=z(major), minor=z(minor), patch=z(patch)) opts = { k:v for k,v in opts.iteritems() if v} try: return self.releases.filter(**opts).latest() except Release.DoesNotExist: pass try: return self.releases.latest() except: return None def __unicode__(self): return u"%s %s" % (self.user, self.name or 'unknown') class Release(TimestampMixin, models.Model): class Meta: app_label = 'trackbuild' unique_together = ('user', 'product', 'name', 'major', 'minor', 'patch') ordering = ['user', 'product', 'name'] get_latest_by = 'created' #: product product = models.ForeignKey(Product, related_name='releases') #: release name name = fields.CharField(max_length=50) #: major version major = fields.IntegerField(default=0) #: minor version minor = fields.IntegerField(default=0) #: patch version patch = fields.IntegerField(default=0) #: user user = models.ForeignKey(User) #: predecessor release previous = models.ForeignKey('Release', related_name='followers', null=True) @classmethod def from_release(self, release, name=None, major=0, minor=0, patch=0): """ create a new release """ opts=dict(name=name or release.name, product=release.product, user=release.user) new_release = Release.objects.create(**opts) # check for changes to major, minor, patch if major == "+": new_release.major = release.major + 1 elif major and (isinstance(major, int) or major.isdigit()): new_release.major = major else: new_release.major = release.major if minor == "+": new_release.minor = release.minor + 1 elif minor and (isinstance(minor, int) or minor.isdigit()): new_release.minor = int(minor) else: new_release.minor = release.minor if patch == "+": new_release.patch = release.patch + 1 elif patch and (isinstance(patch, int) or patch.isdigit()): new_release.patch = int(patch) else: new_release.patch = release.patch new_release.save() return new_release @property def text(self): return self.__unicode__() def __unicode__(self): return u"%s-%s %d.%d.%d" % (self.product, self.name, self.major, self.minor, self.patch) def save(self, *args, **kwargs): if not self.name: self.name = self.product.name if self.product else uuid4() super(Release, self).save(*args, **kwargs) class Build(TimestampMixin, models.Model): class Meta: app_label = 'trackbuild' unique_together = ('release', 'buildid') ordering = ['updated'] get_latest_by = 'updated' release = models.ForeignKey(Release, related_name='builds') buildid = models.CharField(max_length=100, default=uuid4) tag = models.CharField(max_length=100) user = models.ForeignKey(User) buildno = models.IntegerField(default=0) @property def max_count(self): """ get the count of builds for a given release + 1 this supports concurrent access 
https://docs.djangoproject.com/en/1.7/ref/models/queries/#f-expressions """ counter, created = BuildCounter.objects.get_or_create(release=self.release) counter.count = F('count') + 1 counter.save() return BuildCounter.objects.get(release=self.release).count def save(self, *args, **kwargs): if not self.buildno: self.buildno = self.max_count super(Build, self).save(*args, **kwargs) def __unicode__(self): return u'%s %s' % (self.release, self.buildno) class BuildCounter(models.Model): """ A simple build counter This tracks the number of builds for each release on behalf of the Build model. """ class Meta: app_label = 'trackbuild' release = models.ForeignKey(Release, related_name='buildcounter') count = models.IntegerField(default=0) models.signals.post_save.connect(create_api_key, sender=User)
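# Illustrative sketch only (not runnable outside a configured Django project with migrations
# applied; the user, product and names below are made up). It shows the version-bump semantics
# implemented in Release.from_release above:
#
#   base = Release.objects.create(product=product, user=user, name="stable",
#                                 major=1, minor=2, patch=3)
#   Release.from_release(base, minor="+")   # -> 1.3.3 (minor bumped, rest inherited)
#   Release.from_release(base, patch=7)     # -> 1.2.7 (explicit patch, rest inherited)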
nose mock ipython oauth2==1.9.0.post1 urllib3==1.25 httplib2==0.10.3
#!/bin/bash import os import shlex import subprocess import sys from pathlib import Path def start_setup(): python_version = None pip_version = None # get os os_name = sys.platform print("os is %s" % (os_name)) if os_name.startswith("win"): raise Exception("Try on linux system") else: try: python_version = subprocess.check_output(["python3", "-V"]).decode("utf-8") except: python_version = "" if python_version.startswith('Python'): python_version = "python3" pip_version = "pip3" else: try: python_version = subprocess.check_output(["python", "-V"]).decode("utf-8") except: print("No python found please install Python3.5 or greater") if python_version.startswith('Python 3'): python_version = "python" else: raise Exception("Required Python 3, found %s" % (sys.version_info)) print("Proceeding with python - %s and pip - %s" % (python_version, pip_version)) # install virtualenv subprocess.call(["%s" % (pip_version), "install", "virtualenv"]) init_command = "%s -m venv .venv" % (python_version) if not os_name.startswith('win'): if not os_name.startswith('linux'): activator_string = '''source .venv/bin/activate ;''' else: activator_string = "" # requirements = [" --upgrade pip","bcrypt", "certifi", "cffi", # "chardet", "cryptography", "Django==2.2", "python_on_whales", # "django-rest-swagger==2.2.0", "djangorestframework==3.12.4", # "idna==2.10", "paramiko==2.7.2", "pycparser", # "PyNaCl", "pytz", "PyYAML", "requests", "six", # "sqlparse", "uritemplate==3.0.1", "urllib3==1.26.4", # "django-cors-headers", "docker"] os.system(init_command) # for req in requirements: if not os_name.startswith('win'): if not os_name.startswith('linux'): activator_string += "pip install --upgrade pip;" activator_string += "pip install -r fs-server/requirements.txt;" else: activator_string += ".venv/bin/pip install --upgrade pip;" activator_string += '''.venv/bin/pip install -r fs-server/requirements.txt;''' print(activator_string) os.system(activator_string) os.system("touch .setup_complete") # run_server() def run_server(): print("Server starting...") os_name = sys.platform cmdline = "0.0.0.0:8000" if len(sys.argv) > 1: cmdline = " ".join(map(shlex.quote, sys.argv[1:])) #start server if not os.name.startswith('win'): if not os_name.startswith('linux'): os.system("source .venv/bin/activate;python fs-server/manage.py makemigrations;python fs-server/manage.py migrate;python fs-server/manage.py runserver %s;" % cmdline) else: os.system(".venv/bin/python fs-server/manage.py makemigrations;.venv/bin/python fs-server/manage.py migrate;.venv/bin/python fs-server/manage.py runserver %s;" % cmdline) if __name__ == "__main__": if not (Path(".setup_complete").is_file() and Path(".venv").is_dir()): start_setup() run_server()
"""Methods to perform regular database maintenance.""" from backend.lib.database.postgres import connect from backend.lib.timer import timed from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT def _execute_outside_of_transaction_block(query): """ Execute a SQL statement outside of a transaction block. Bypasses the transaction start enforced by the Python DB-API. """ engine = connect.create_db_engine() connection = engine.raw_connection() connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) with connection.cursor() as cur: cur.execute(query) connection.close() @timed def vacuum(): """ Vacuum all tables to remove ghost rows, then gather helpful statistics for the query optimizer. This should be run after any substantial INSERT, UPDATE, or DELETE statements. """ _execute_outside_of_transaction_block('VACUUM ANALYZE') @timed def cluster(): """Re-organize all physical tables to have records in the order of their clustered indexes.""" # FIXME: Include specific indexes / columns to cluster on. The current implementation only # re-clusters tables on existing clustered indexes. _execute_outside_of_transaction_block('CLUSTER')
import base64 from django.conf import settings from django.http import JsonResponse from healthpoint.registry import get_health_checks def _is_authenticated_request(request): # Superusers and staff members are always correctly authenticated. user = getattr(request, "user", None) if user is not None and (user.is_staff or user.is_superuser): return True # Requests with basic authentication credentials are only # authenticated when they match the settings. ba_username = getattr(settings, "HEALTHPOINT_BASICAUTH_USERNAME", None) ba_password = getattr(settings, "HEALTHPOINT_BASICAUTH_PASSWORD", None) authorization = request.META.get("HTTP_AUTHORIZATION") if ba_username and ba_password and authorization: method, _, auth = authorization.partition(" ") if method.lower() == "basic": auth = base64.b64decode(auth.strip()).decode("utf8") username, _, password = auth.partition(":") return username == ba_username and password == ba_password return False def health(request): tests = set(request.GET.getlist("test")) tests_left = set(tests) data = {"success": {}, "error": {}} status = 200 is_authenticated_request = _is_authenticated_request(request) authentication_required = getattr(settings, "HEALTHPOINT_AUTH_REQUIRED", False) if authentication_required and not is_authenticated_request: # The settings require a succesfully authenticated request # Abort the health checks if this is not the case... status = 403 data = {} else: # Perform the actual health checks if the authentication is successful # or not required. for health_check in get_health_checks(): func = ".".join([health_check.__module__, health_check.__qualname__]) if tests and func not in tests: continue tests_left.discard(func) success, detail = health_check() data["success" if success else "error"][func] = detail if not success: status = 503 if tests_left: if status == 200: status = 404 for test in tests_left: data["error"][test] = "Unknown health check" # Only successfully authenticated requests are allowed to see the # full details of the results. if not is_authenticated_request: data = {} # Return the response return JsonResponse(data, status=status)
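# Hedged client-side sketch: the Authorization header accepted by the basic-auth branch in
# _is_authenticated_request above. The credentials are placeholders for whatever the
# HEALTHPOINT_BASICAUTH_USERNAME / HEALTHPOINT_BASICAUTH_PASSWORD settings hold, and the URL
# is an assumption about how the view is routed.
#
#   import base64, requests
#   token = base64.b64encode(b"monitor:secret").decode("ascii")
#   requests.get("https://example.org/health/", headers={"Authorization": "Basic " + token})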
"""2018 - Day 4 Part 1: Repose Record. You've sneaked into another supply closet - this time, it's across from the prototype suit manufacturing lab. You need to sneak inside and fix the issues with the suit, but there's a guard stationed outside the lab, so this is as close as you can safely get. As you search the closet for anything that might help, you discover that you're not the first person to want to sneak in. Covering the walls, someone has spent an hour starting every midnight for the past few months secretly observing this guard post! They've been writing down the ID of the one guard on duty that night - the Elves seem to have decided that one guard was enough for the overnight shift - as well as when they fall asleep or wake up while at their post (your puzzle input). For example, consider the following records, which have already been organized into chronological order: [1518-11-01 00:00] Guard #10 begins shift [1518-11-01 00:05] falls asleep [1518-11-01 00:25] wakes up [1518-11-01 00:30] falls asleep [1518-11-01 00:55] wakes up [1518-11-01 23:58] Guard #99 begins shift [1518-11-02 00:40] falls asleep [1518-11-02 00:50] wakes up [1518-11-03 00:05] Guard #10 begins shift [1518-11-03 00:24] falls asleep [1518-11-03 00:29] wakes up [1518-11-04 00:02] Guard #99 begins shift [1518-11-04 00:36] falls asleep [1518-11-04 00:46] wakes up [1518-11-05 00:03] Guard #99 begins shift [1518-11-05 00:45] falls asleep [1518-11-05 00:55] wakes up Timestamps are written using year-month-day hour:minute format. The guard falling asleep or waking up is always the one whose shift most recently started. Because all asleep/awake times are during the midnight hour (00:00 - 00:59), only the minute portion (00 - 59) is relevant for those events. Visually, these records show that the guards are asleep at these times: Date ID Minute 000000000011111111112222222222333333333344444444445555555555 012345678901234567890123456789012345678901234567890123456789 11-01 #10 .....####################.....#########################..... 11-02 #99 ........................................##########.......... 11-03 #10 ........................#####............................... 11-04 #99 ....................................##########.............. 11-05 #99 .............................................##########..... The columns are Date, which shows the month-day portion of the relevant day; ID, which shows the guard on duty that day; and Minute, which shows the minutes during which the guard was asleep within the midnight hour. (The Minute column's header shows the minute's ten's digit in the first row and the one's digit in the second row.) Awake is shown as ., and asleep is shown as #. Note that guards count as asleep on the minute they fall asleep, and they count as awake on the minute they wake up. For example, because Guard #10 wakes up at 00:25 on 1518-11-01, minute 25 is marked as awake. If you can figure out the guard most likely to be asleep at a specific time, you might be able to trick that guard into working tonight so you can have the best chance of sneaking in. You have two strategies for choosing the best guard/minute combination. Strategy 1: Find the guard that has the most minutes asleep. What minute does that guard spend asleep the most? In the example above, Guard #10 spent the most minutes asleep, a total of 50 minutes (20+25+5), while Guard #99 only slept for a total of 30 minutes (10+10+10). 
Guard #10 was asleep most during minute 24 (on two days, whereas any other minute the guard was asleep was only seen on one day). While this example listed the entries in chronological order, your entries are in the order you found them. You'll need to organize them before they can be analyzed. What is the ID of the guard you chose multiplied by the minute you chose? (In the above example, the answer would be 10 * 24 = 240.) """ from __future__ import annotations import datetime as dt import re from collections import defaultdict from dataclasses import dataclass from enum import Enum from operator import itemgetter from typing import DefaultDict from typing import List from typing import Optional from typing import Tuple RECORD_PATTERN = r"\[(.*)\] (?:(G)uard #(\d+)|(f)|(w))" class Event(Enum): """Possible guard events.""" NEW = 1 # New guard on duty AWAKE = 2 # A guard awakes ASLEEP = 3 # A guard falls asleep @dataclass(frozen=True) class Record: """Guard event record item.""" time: dt.datetime event: Event guard: Optional[int] = None # If new guard is on duty @classmethod def parse(cls, line: str) -> Record: """Convert one line into Event instance.""" match = re.match(RECORD_PATTERN, line) if match: stamp, new, guard, asleep, awake = match.groups() else: raise ValueError("Unknown record format") time = dt.datetime.strptime(stamp, "%Y-%m-%d %H:%M") if new: event = Event.NEW elif asleep: event = Event.ASLEEP elif awake: event = Event.AWAKE else: raise ValueError("Unknown event") return cls(time, event, int(guard) if guard else None) @classmethod def parse_all(cls, data: str) -> List[Record]: """Convert a bunch of lines into Record instances.""" records = [cls.parse(line) for line in data.strip().split("\n")] return sorted(records, key=lambda x: x.time) @classmethod def parse_task(cls, task: str) -> DefaultDict[int, List[int]]: """Parse the task into a dict of guard_id: list of minutes.""" guard, start, end = 0, 0, 0 minutes: DefaultDict[int, List[int]] = defaultdict(lambda: [0] * 60) records = cls.parse_all(task) for record in records: if record.event == Event.NEW: assert record.guard # Should not be None guard = record.guard elif record.event == Event.ASLEEP: start = record.time.minute elif record.event == Event.AWAKE: end = record.time.minute for minute in range(start, end): minutes[guard][minute] += 1 return minutes def total_minutes(guard_minutes: Tuple[int, List[int]]) -> int: """Sum all sleepy minutes of the given guard.""" _, minutes = guard_minutes return sum(minutes) def solve(task: str) -> int: """Find most sleepy guard and his most sleepy minute value.""" minutes = Record.parse_task(task) guard, _ = max(minutes.items(), key=total_minutes) minute, _ = max(enumerate(minutes[guard]), key=itemgetter(1)) return guard * minute
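# Quick self-check against the worked example from the puzzle text above: the sample schedule
# should pick guard #10 and minute 24, i.e. 10 * 24 = 240.
if __name__ == "__main__":
    SAMPLE = """
[1518-11-01 00:00] Guard #10 begins shift
[1518-11-01 00:05] falls asleep
[1518-11-01 00:25] wakes up
[1518-11-01 00:30] falls asleep
[1518-11-01 00:55] wakes up
[1518-11-01 23:58] Guard #99 begins shift
[1518-11-02 00:40] falls asleep
[1518-11-02 00:50] wakes up
[1518-11-03 00:05] Guard #10 begins shift
[1518-11-03 00:24] falls asleep
[1518-11-03 00:29] wakes up
[1518-11-04 00:02] Guard #99 begins shift
[1518-11-04 00:36] falls asleep
[1518-11-04 00:46] wakes up
[1518-11-05 00:03] Guard #99 begins shift
[1518-11-05 00:45] falls asleep
[1518-11-05 00:55] wakes up
"""
    assert solve(SAMPLE) == 240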
import itertools from amplifier import Amplifier, one_amplifier_running puzzle_input = [] with open('day-7/input.txt', 'r') as file: puzzle_input = [int(i.strip()) for i in file.read().split(',')] def part_1(puzzle_input): permutations = list(itertools.permutations(range(0, 5), 5)) highest_output = 0 for option in permutations: amplifiers = [] for i in range(5): amp = Amplifier(puzzle_input) amp.append_input(option[i]) amplifiers.append(amp) for i in range(4): amplifiers[i].set_next( amplifiers[i + 1] ) amplifiers[0].append_input(0) for amp in amplifiers: amp.execute() highest_output = max( highest_output, *amplifiers[-1].saved_output ) return highest_output def part_2(puzzle_input): permutations = list(itertools.permutations(range(5, 10), 5)) highest_output = 0 for option in permutations: amplifiers = [] for i in range(5): amp = Amplifier(puzzle_input) amp.append_input(option[i]) amplifiers.append(amp) for i in range(5): amplifiers[i].set_next( amplifiers[(i + 1) % 5] ) amplifiers[0].append_input(0) i = 0 while one_amplifier_running(amplifiers): amplifiers[i].execute() i = (i + 1) % 5 highest_output = max( highest_output, *amplifiers[-1].saved_output ) return highest_output print(part_1(puzzle_input)) print(part_2(puzzle_input))
from ...isa.inst import * import numpy as np class Fcvt_wu_s(Inst): name = 'fcvt.wu.s' def golden(self): if 'val1' in self.keys(): if self['val1'] < 0 or np.isneginf(self['val1']): return 0 if self['val1'] > ((1<<32)-1) or np.isposinf(self['val1']) or np.isnan(self['val1']): return (1<<32)-1 return int(self['val1'])
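# Behaviour sketch for golden() above (illustrative values describing the code as written,
# not the full RISC-V rounding-mode semantics):
#   val1 = -3.7          -> 0             (negatives and -inf saturate to 0)
#   val1 = float('nan')  -> 2**32 - 1     (NaN, +inf and overflow return the maximum)
#   val1 = 12.9          -> 12            (int() truncates toward zero)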
import socket import json from flask import Flask, request, render_template, Response from camera import VideoCamera from flask import jsonify # from flask_cors import CORS, cross_origin import car import logging # Import SDK packages from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient import os from iot_connection import IotConnection app = Flask(__name__, static_url_path='') con = IotConnection() # CORS(app) my_car = [None] * 1 def gen(camera): while True: frame = camera.get_frame() yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n') @app.route('/video_feed') def video_feed(): return Response(gen(VideoCamera()), mimetype='multipart/x-mixed-replace; boundary=frame') @app.route("/<int:signal>") def move(signal): ret = my_car[0].move(signal) con.publish({'signal': signal}) return jsonify({'status': ret}) @app.route("/") def control(): my_car[0] = car.Car() return render_template('charlie.html', title='Car Control', ip=_get_ip()) def _get_ip(): s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) ip = s.getsockname()[0] s.close() return ip if __name__ == '__main__': app.run(host='0.0.0.0', port=5002)
import numpy as np def donor_acceptor_direction_vector(molecule, feat_type, atom_indx, coords, conformer_idx): """ Compute the direction vector for an H bond donor or H bond acceptor feature Parameters ---------- molecule : rdkit.Chem.rdchem.Mol Molecule that contains the feature which direction vector will be computed. feat_type : str Type of feature. Wheter is donor or acceptor. atom_indx : int Index of the H bond acceptor or donor atom. coords : numpy.ndarray; shape(3,) Coordiantes of the H bond acceptor or donor atom. conformer_idx : int Index of the conformer for which the direction vector will be computed. Returns ------- direction : numpy.ndarray; shape(3,) Coordinates of the direction vector. """ direction = np.zeros((3,)) atom = molecule.GetAtomWithIdx(atom_indx) for a in atom.GetNeighbors(): if a.GetSymbol() == "H": continue position = molecule.GetConformer(conformer_idx).GetAtomPosition(a.GetIdx()) direction[0] += position.x - coords[0] direction[1] += position.y - coords[1] direction[2] += position.z - coords[2] if feat_type == "Donor": direction = -direction return direction def aromatic_direction_vector(molecule, atom_indxs, conformer_idx): """ Compute the direction vector for an aromatic feature. Parameters ---------- molecule : rdkit.Chem.rdchem.Mol Molecule that contains the feature which direction vector will be computed. atom_indxs : tuple of int Indices of the aromatic atoms. conformer_idx : int Index of the conformer for which the direction vector will be computed. Returns ------- direction : numpy.ndarray; shape(3,) Coordinates of the direction vector. """ coords = np.zeros((3, 3)) # Take just the first three atoms for j, idx in enumerate(atom_indxs[0:3]): position = molecule.GetConformer(conformer_idx).GetAtomPosition(idx) coords[j, 0] = position.x coords[j, 1] = position.y coords[j, 2] = position.z # Find the vector normal to the plane defined by the three atoms u = coords[1, :] - coords[0, :] v = coords[2, :] - coords[0, :] direction = np.cross(u, v) return direction
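# Hedged usage sketch (requires rdkit; not part of this module's API): computes the ring
# normal of benzene with aromatic_direction_vector above. Molecule construction and embedding
# are standard rdkit calls; the SMILES and seed are arbitrary.
if __name__ == "__main__":
    from rdkit import Chem
    from rdkit.Chem import AllChem

    mol = Chem.AddHs(Chem.MolFromSmiles("c1ccccc1"))
    AllChem.EmbedMolecule(mol, randomSeed=42)         # creates conformer 0
    ring_atoms = mol.GetRingInfo().AtomRings()[0]     # indices of the aromatic ring
    print(aromatic_direction_vector(mol, ring_atoms, conformer_idx=0))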
''' TODO: checkpoint/restart * for each .map() call: * log initial psets * record results to disk in chunks as they come in * on restart, rerun missing or provide previous results * handle multiple map() calls in a single user program ''' import datetime import os import sys import socket logger_filename = None saw_traceback = False def atomic_create_ish(filenames): ''' Figure out which on this list of filenames does not already exist. Not safe on NFS filesystems. ''' for f in filenames: try: fd = open(f, 'xt') global logger_filename logger_filename = f except FileExistsError: continue break else: raise ValueError('unable to open a unique logfile, rerun') return fd logfd = None def init(pslogger_prefix='.paramsurvey-', pslogger_fd=None, **kwargs): # always log if pslogger_fd is set # otherwise, never log within pytest. if pslogger_fd is None and 'PYTEST_CURRENT_TEST' in os.environ: return middle = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S') middleplus = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S.%f') middles = (middle, middleplus, middleplus+'a') global logfd if pslogger_fd: logfd = pslogger_fd global logger_filename logger_filename = '(internal)' else: logfd = atomic_create_ish([pslogger_prefix+m+'.log' for m in middles]) print('paramsurvey starttime', middleplus, file=logfd) print('hostname', socket.gethostname(), file=logfd) print('command line', repr(sys.argv), file=logfd) for e in sorted(['PYTHONPATH', 'VIRTUAL_ENV', 'CONDA_DEFAULT_ENV', 'CONDA_PREFIX']): if e in os.environ: print(e, os.environ[e], file=logfd) for p in sorted(['PARAMSURVEY', 'SINGULARITY']): s = repr([x for x in os.environ if x.startswith(p)]) if s != '[]': print(p, 'env vars', s, file=logfd) print('python sys.version', ' '.join(sys.version.splitlines()), file=logfd) print('python modules:', file=logfd) for k in sorted(sys.modules): v = sys.modules[k] ver = getattr(v, '__version__', None) if ver is not None: print(' ', k, ver, file=logfd) logfd.flush() def log(*args, stderr=True): if logfd: print(*args, file=logfd) logfd.flush() if stderr: print(*args, file=sys.stderr) sys.stderr.flush() def traceback(what): global saw_traceback saw_traceback = what def finalize(): now = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S') if logfd: print('paramsurvey endtime', now, file=logfd) print('paramsurvey endtime', now, file=sys.stderr) global saw_traceback if saw_traceback and logger_filename: print('Exceptions with tracebacks from your code are in', logger_filename, file=sys.stderr) saw_traceback = False # only print once, even if paramsurvey.map() is called multiple times
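# Minimal usage sketch, assuming a normal script run (not under pytest, so init() actually
# creates a .paramsurvey-<timestamp>.log file in the working directory):
if __name__ == "__main__":
    init()
    log("starting a survey")
    try:
        raise RuntimeError("boom")                 # stand-in for an exception in user code
    except RuntimeError:
        traceback(True)                            # remember it so finalize() points at the log
        log("caught an exception from user code")
    finalize()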
#!/usr/bin/env python # -*- coding:utf-8 -*- import os, sys, shutil import xml.dom.minidom import json import urllib def decodeXML(xmlFiledata): dom = xml.dom.minidom.parseString(xmlFiledata) items = dom.getElementsByTagName("item") result = [] for item in items: dictstr, wordstr, explainstr = decodeItem(item) result.append((dictstr, wordstr, explainstr)) # return valus is "list of 3-tuples" return result def decodeItem(item): dict = item.getElementsByTagName("dict")[0] word = item.getElementsByTagName("word")[0] explain = item.getElementsByTagName("explain")[0] dictstr = dict.childNodes[0].data wordstr = word.childNodes[0].data explainstr = HexStringToString(explain.childNodes[0].data) return dictstr, wordstr, explainstr def HexStringToString(hexString): # convert hex string to utf8 string # example: "%2c%e3%80" -> "\x2C\xE3\x80" bytes = [] hexStr = ''.join( hexString.split("%") ) for i in range(0, len(hexStr), 2): bytes.append( chr( int (hexStr[i:i+2], 16 ) ) ) # decode as utf8 try: string = ''.join( bytes ).decode("utf-8") except UnicodeDecodeError: return hexString return string if __name__ == '__main__': srcXmlDir = os.path.join(os.path.dirname(__file__), 'pali-dict-software-web1version/xml/') srcJsonFile1 = os.path.join(os.path.dirname(__file__), '../gae/libs/json/dicPrefixWordLists.json') srcJsonFile2 = os.path.join(os.path.dirname(__file__), '../gae/libs/json/dicPrefixGroup.json') dstAppEngDir = os.path.join(os.path.dirname(__file__), 'app-engine-json/') if not os.path.exists(srcXmlDir): print(srcXmlDir + ' does not exist!') sys.exit(1) if not os.path.exists(srcJsonFile1): print(srcJsonFile1 + ' does not exist!') sys.exit(1) if not os.path.exists(srcJsonFile2): print(srcJsonFile2 + ' does not exist!') sys.exit(1) # If old deployment folders exist, delete them. if os.path.exists(dstAppEngDir): # remove all dirs and sub-dirs shutil.rmtree(dstAppEngDir) with open(srcJsonFile1, 'r') as f: dicPrefixWordLists = json.loads(f.read()) with open(srcJsonFile2, 'r') as f: dicPrefixGroup = json.loads(f.read()) for firstCharOfWord in dicPrefixGroup: groupNum = dicPrefixGroup[firstCharOfWord] dstSubDir = os.path.join(dstAppEngDir, urllib.quote('jsons%d/json/%s' % (groupNum, firstCharOfWord.encode('utf-8'))).replace('%', 'Z') ) if not os.path.exists(dstSubDir): os.makedirs(dstSubDir) # for all words start with the same first char for word in dicPrefixWordLists[firstCharOfWord]: srcFilePath = os.path.join( os.path.join( srcXmlDir, firstCharOfWord ), word + u'.xml') dstFilePath = os.path.join( dstSubDir, urllib.quote(word.encode('utf-8') + '.json').replace('%', 'Z') ) # covert xml to json, andi then save them. with open(srcFilePath, 'r') as fsrc: with open(dstFilePath, 'w') as fdst: fdst.write(json.dumps(decodeXML(fsrc.read()))) # generate app.yaml for each version dstAppYamlPath = os.path.join(dstAppEngDir, u'jsons%d/app.yaml' % groupNum) # if app.yaml already exists, continue forloop if os.path.exists(dstAppYamlPath): continue with open(dstAppYamlPath, 'w') as f: f.write(u'application: palidictionary\n') f.write(u'version: jsons%d\n' % groupNum) f.write(u'runtime: python27\n') f.write(u'api_version: 1\n') f.write(u'threadsafe: true\n') f.write(u'\n') f.write(u'handlers:\n') f.write(u'- url: /json\n') f.write(u' static_dir: json\n') f.write(u' mime_type: application/json\n') f.write(u' http_headers:\n') f.write(u' Access-Control-Allow-Origin: "*"\n')
ngsi_data= \ { 'originator':u'', 'subscriptionId': '195bc4c6-882e-40ce-a98f-e9b72f87bdfd', 'contextResponses': [ { 'contextElement': {'attributes': [ { 'contextValue': 'ford5', 'type': 'string', 'name': 'brand40' }, { 'contextValue': 'ford6', 'type': 'string', 'name': 'brand50' } ], 'entityId': { 'type': 'Car', 'id': 'Car31', 'isPattern':True }, 'domainMetadata': [ { 'type': 'point', 'name': 'location', 'value': { 'latitude': 49.406393, 'longitude': 8.684208 } } ] }, 'statusCode': { 'code': 200, 'reasonPhrase': 'OK' } } ] } convert_data_output= \ { '@context': [ 'https://forge.etsi.org/gitlab/NGSI-LD/NGSI-LD/raw/master/coreContext/ngsi-ld-core-context.jsonld', { 'Car': 'http://example.org/Car', 'brand40': 'http://example.org/brand40', 'brand50': 'http://example.org/brand50' } ], 'brand50': { 'type': 'Property', 'value': 'ford6' }, 'brand40': { 'type': 'Property', 'value': 'ford5' }, 'type': 'Vehicle', 'id': 'urn:ngsi-ld:Car31' }
""" Generated by CHARMM-GUI (http://www.charmm-gui.org) omm_readinputs.py This module is for reading inputs in OpenMM. Correspondance: [email protected] or [email protected] Last update: March 29, 2017 """ from simtk.unit import * from simtk.openmm import * from simtk.openmm.app import * class _OpenMMReadInputs(): def __init__(self): self.mini_nstep = 0 # Number of steps for minimization self.mini_Tol = 1.0 # Minimization energy tolerance self.gen_vel = 'no' # Generate initial velocities self.gen_temp = 300.0 # Temperature for generating initial velocities (K) self.gen_seed = None # Seed for generating initial velocities self.nstep = 0 # Number of steps to run self.dt = 0.002 # Time-step (ps) self.nstout = 100 # Writing output frequency (steps) self.nstdcd = 0 # Wrtiing coordinates trajectory frequency (steps) self.coulomb = PME # Electrostatic cut-off method self.ewald_Tol = 0.0005 # Ewald error tolerance self.vdw = 'Force-switch' # vdW cut-off method self.r_on = 1.0 # Switch-on distance (nm) self.r_off = 1.2 # Switch-off distance (nm) self.temp = 300.0 # Temperature (K) self.fric_coeff = 1 # Friction coefficient for Langevin dynamics self.pcouple = 'no' # Turn on/off pressure coupling self.p_ref = 1.0 # Pressure (Pref or Pxx, Pyy, Pzz; bar) self.p_type = 'membrane' # MonteCarloBarotat type self.p_scale = True, True, True # For MonteCarloAnisotropicBarostat self.p_XYMode = MonteCarloMembraneBarostat.XYIsotropic # For MonteCarloMembraneBarostat self.p_ZMode = MonteCarloMembraneBarostat.ZFree # For MonteCarloMembraneBarostat self.p_tens = 0.0 # Sulface tension for MonteCarloMembraneBarostat (dyne/cm) self.p_freq = 15 # Pressure coupling frequency (steps) self.cons = HBonds # Constraints method self.rest = 'no' # Turn on/off restraints self.fc_bb = 0.0 # Positional restraint force constant for protein backbone (kJ/mol/nm^2) self.fc_sc = 0.0 # Positional restraint force constant for protein side-chain (kJ/mol/nm^2) self.fc_mpos = 0.0 # Positional restraint force constant for micelle lipids (kJ/mol/nm^2) self.fc_lpos = 0.0 # Positional restraint force constant for lipids (kJ/mol/nm^2) self.fc_ldih = 0.0 # Dihedral restraint force constant for lipids (kJ/mol/rad^2) self.fc_cdih = 0.0 # Dihedral restraint force constant for carbohydrates (kJ/mol/rad^2) def read(self, inputFile): for line in open(inputFile, 'r'): if line.find('#') >= 0: line = line.split('#')[0] line = line.strip() if len(line) > 0: segments = line.split('=') input_param = segments[0].upper().strip() try: input_value = segments[1].strip() except: input_value = None if input_value: if input_param == 'MINI_NSTEP': self.mini_nstep = int(input_value) if input_param == 'MINI_TOL': self.mini_Tol = float(input_value) if input_param == 'GEN_VEL': if input_value.upper() == 'YES': self.gen_vel = 'yes' if input_value.upper() == 'NO': self.gen_vel = 'no' if input_param == 'GEN_TEMP': self.gen_temp = float(input_value) if input_param == 'GEN_SEED': self.gen_seed = int(input_value) if input_param == 'NSTEP': self.nstep = int(input_value) if input_param == 'DT': self.dt = float(input_value) if input_param == 'NSTOUT': self.nstout = int(input_value) if input_param == 'NSTDCD': self.nstdcd = int(input_value) if input_param == 'COULOMB': if input_value.upper() == 'NOCUTOFF': self.coulomb = NoCutoff if input_value.upper() == 'CUTOFFNONPERIODIC': self.coulomb = CutoffNonPeriodic if input_value.upper() == 'CUTOFFPERIODIC': self.coulomb = CutoffPeriodic if input_value.upper() == 'EWALD': self.coulomb = Ewald if input_value.upper() == 'PME': 
self.coulomb = PME if input_param == 'EWALD_TOL': self.ewald_Tol = float(input_value) if input_param == 'VDW': if input_value.upper() == 'FORCE-SWITCH': self.vdw = 'Force-switch' if input_param == 'R_ON': self.r_on = float(input_value) if input_param == 'R_OFF': self.r_off = float(input_value) if input_param == 'TEMP': self.temp = float(input_value) if input_param == 'FRIC_COEFF': self.fric_coeff = float(input_value) if input_param == 'PCOUPLE': if input_value.upper() == 'YES': self.pcouple = 'yes' if input_value.upper() == 'NO': self.pcouple = 'no' if input_param == 'P_REF': if input_value.find(',') < 0: self.p_ref = float(input_value) else: Pxx = float(input_value.split(',')[0]) Pyy = float(input_value.split(',')[1]) Pzz = float(input_value.split(',')[2]) self.p_ref = Pxx, Pyy, Pzz if input_param == 'P_TYPE': if input_value.upper() == 'ISOTROPIC': self.p_type = 'isotropic' if input_value.upper() == 'MEMBRANE': self.p_type = 'membrane' if input_value.upper() == 'ANISOTROPIC': self.p_type = 'anisotropic' if input_param == 'P_SCALE': scaleX = True scaleY = True scaleZ = True if input_value.upper().find('X') < 0: scaleX = False if input_value.upper().find('Y') < 0: scaleY = False if input_value.upper().find('Z') < 0: scaleZ = False self.p_scale = scaleX, scaleY, scaleZ if input_param == 'P_XYMODE': if input_value.upper() == 'XYISOTROPIC': self.p_XYMode = MonteCarloMembraneBarostat.XYIsotropic if input_value.upper() == 'XYANISOTROPIC': self.p_XYMode = MonteCarloMembraneBarostat.XYAnisotropic if input_param == 'P_ZMODE': if input_value.upper() == 'ZFREE': self.p_ZMode = MonteCarloMembraneBarostat.ZFree if input_value.upper() == 'ZFIXED': self.p_ZMode = MonteCarloMembraneBarostat.ZFixed if input_value.upper() == 'CONSTANTVOLUME': self.p_ZMode = MonteCarloMembraneBarostat.ConstantVolume if input_param == 'P_TENS': self.p_tens = float(input_value) if input_param == 'P_FREQ': self.p_freq = int(input_value) if input_param == 'CONS': if input_value.upper() == 'NONE': self.cons = None if input_value.upper() == 'HBONDS': self.cons = HBonds if input_value.upper() == 'ALLBONDS': self.cons = AllBonds if input_value.upper() == 'HANGLES': self.cons = HAngles if input_param == 'REST': if input_value.upper() == 'YES': self.rest = 'yes' if input_value.upper() == 'NO': self.rest = 'no' if input_param == 'FC_BB': self.fc_bb = float(input_value) if input_param == 'FC_SC': self.fc_sc = float(input_value) if input_param == 'FC_MPOS': self.fc_mpos = float(input_value) if input_param == 'FC_LPOS': self.fc_lpos = float(input_value) if input_param == 'FC_LDIH': self.fc_ldih = float(input_value) if input_param == 'FC_CDIH': self.fc_cdih = float(input_value) return self def read_inputs(inputFile): return _OpenMMReadInputs().read(inputFile)
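# Hedged usage sketch: a tiny input file in the "param = value  # comment" format the parser
# above understands. The file name and parameter values are illustrative only.
if __name__ == '__main__':
    with open('example.inp', 'w') as f:
        f.write("mini_nstep = 1000   # minimization steps\n")
        f.write("nstep      = 50000  # production steps\n")
        f.write("temp       = 310.0  # K\n")
        f.write("pcouple    = yes\n")
        f.write("p_type     = membrane\n")
    inputs = read_inputs('example.inp')
    print(inputs.mini_nstep, inputs.nstep, inputs.temp, inputs.pcouple, inputs.p_type)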
# run test with # python -m tests\test_echomodel.py import importlib import unittest import echo2rasa.tools.echomodel as echomodel class TestEchoModel(unittest.TestCase): def get_restaurant_intent(self, model): intents = model.model["interactionModel"]["languageModel"]["intents"] for intent in intents: if intent['name'] == 'request_restaurant': return intent return None def test_invocation_name(self): model = echomodel.EchoModel("test", None, None, None) self.assertEqual( model.model["interactionModel"]["languageModel"]["invocationName"], "test") def test_intents(self): model = echomodel.EchoModel("test", ".\\tests\\resources\\domain.yml", None, None) model._import_domain() intents = model.model["interactionModel"]["languageModel"]["intents"] self.assertIn({'name': 'request_restaurant', 'samples': []}, intents) def test_utterances(self): model = echomodel.EchoModel("test", ".\\tests\\resources\\domain.yml", ".\\tests\\resources\\nlu.md", None) model._import_domain() model._import_nlu_file() restaurant_intent = self.get_restaurant_intent(model) self.assertIn( "i am looking for any place that serves {cuisine} food for {num_people}", model._intent_dir['request_restaurant']['samples']) def test_slot_type_mapping(self): model = echomodel.EchoModel("test", ".\\tests\\resources\\domain.yml", ".\\tests\\resources\\nlu.md", ".\\tests\\resources\\echo_domain.yml") model._import_domain() model._import_nlu_file() model._add_echo_conf() self.assertEqual( "AMAZON.NUMBER", model._slots_dir["num_people"]["type"]) def test_intent_slots(self): model = echomodel.EchoModel("test", ".\\tests\\resources\\domain.yml", ".\\tests\\resources\\nlu.md", ".\\tests\\resources\\echo_domain.yml") model._import_domain() model._import_nlu_file() model._add_echo_conf() model._update_intent_slotlist() restaurant_intent = self.get_restaurant_intent(model) cuisine_slot = None for slot in restaurant_intent['slots']: if slot["name"] == "cuisine": cuisine_slot = slot break self.assertEqual("cuisine", cuisine_slot["type"]) def test_entity_types(self): # check for example entry: # { # "name": { # "value": "gastropub", # "synonyms": [ # "gastro pub" # ] # } # }, model = echomodel.EchoModel("test", ".\\tests\\resources\\domain.yml", ".\\tests\\resources\\nlu.md", ".\\tests\\resources\\echo_domain.yml") model._import_domain() model._import_nlu_file() model._add_echo_conf() model._update_intent_slotlist() model._import_entity_definitions() types = model.model["interactionModel"]["languageModel"]["types"] gastropub_type = None for type_ in types: if type_["name"] == "cuisine": for value in type_["values"]: if value["name"]["value"] == "gastropub": gastropub_type = value break print(gastropub_type) self.assertIn("gastro pub", gastropub_type["name"]["synonyms"]) if __name__ == '__main__': unittest.main()
""" 用户常量 """ class UserConst: BLOGGER_ID: int = 1 # 博主id SILENCE: int = 1 # 禁言状态
#!/usr/bin/env python3 import re from typing import Any, Dict, Generator import lxml.html from venues.abstract_venue import AbstractVenue class Yotalo(AbstractVenue): def __init__(self): super().__init__() self.url = "https://yo-talo.fi/ohjelma/" self.name = "Yo-talo" self.city = "Tampere" self.country = "Finland" def normalize_string(self, s: str): rm_newlines = s.replace("\n", " ") rm_spaces = re.sub("\s+", " ", rm_newlines) rm_left_padding = re.sub("^\s+", "", rm_spaces) rm_right_padding = re.sub("\s+$", "", rm_left_padding) return rm_right_padding def month_to_number(self, month: str) -> int: if month == "tammi": return 1 elif month == "helmi": return 2 elif month == "maalis": return 3 elif month == "huhti": return 4 elif month == "touko": return 5 elif month == "kesä": return 6 elif month == "heinä": return 7 elif month == "elo": return 8 elif month == "syys": return 9 elif month == "loka": return 10 elif month == "marras": return 11 elif month == "joulu": return 12 raise ValueError(f"Month mapping for '{month}' not implemented") def parse_date(self, tag: lxml.html.HtmlElement) -> str: day = "".join(tag[0].xpath('./div[@class="event-day"]/text()')) day = int(day) month = "".join(tag[0].xpath('./div[@class="event-month"]/text()')) month = self.month_to_number(month) year = "".join(tag[0].xpath('./div[@class="event-year"]/text()')) year = int(year) return f"{year:04d}-{month:02d}-{day:02d}" def parse_info(self, event: lxml.html.HtmlElement) -> str: title = "".join(event[0].xpath('./div[@class="event-title"]/h3/text()')) title = self.normalize_string(title) description = "".join(event[0].xpath('./div[@class="event-details"]')[0].text_content()) description = self.normalize_string(description) return f"{title}: {description}" def parse_event(self, event: lxml.html.HtmlElement) -> Dict[str, Any]: date = self.parse_date(event.xpath('./div[@class="event-date"]/div[@class="start-date"]')) event_name = self.parse_info(event.xpath('./div[contains(@class, "event-info single-day")]')) price = self.parse_price(event_name) return {"venue": self.get_venue_name(), "date": date, "name": event_name, "price": price} def parse_events(self, data: bytes) -> Generator[Dict[str, Any], None, None]: doc = lxml.html.fromstring(data) for item in doc.xpath('/html/body//div[@class="event-list"]/ul[@class="event-list-view"]' '/li[contains(@class, "event live")]'): yield self.parse_event(item) if __name__ == '__main__': import requests y = Yotalo() r = requests.get(y.url) for e in y.parse_events(r.content): for k, v in e.items(): print(f"{k:>10s}: {v}") print()
''' For the masked peptides, see if we can find them in the MS data; ''' import glob, sys, os, numpy, regex from glbase3 import * import matplotlib.pyplot as plot sys.path.append('../../') import shared res = {} all_matches = glload('../results_gene.glb') pep_hit = [] for pep in all_matches: if pep['insideTE'] != 'None': if 'LTR:ERVK:HERVK' in pep['insideTE']: pep_hit.append(pep['peptide_string']) print('Found {0} HERVK peptides'.format(len(pep_hit))) pep_hit = list(set(pep_hit)) print('Found {0} unique HERVK peptides'.format(len(pep_hit))) print(pep_hit) phoenix = genelist('phoenix.fa', format=format.fasta) # See if simple matches will do it first. finds = {seq['name']: [] for seq in phoenix} finds['Not Found'] = [] for p in pep_hit: found = False for seq in phoenix: if p in seq['seq']: finds[seq['name']].append({'pep_seq': p, 'num_mismatch': 0}) found = True continue # try n bp mismatch until pass: for num_mismatch in (1,2,3): # There are no useful matches after 3 m = regex.findall("(%s){e<=%s}" % (p, num_mismatch), seq['seq']) if m: # In testing, only ever 1 hit; finds[seq['name']].append({'pep_seq': p, 'num_mismatch': num_mismatch}) found = True break if not found: # Do another round looking for way wilder ones: for seq in phoenix: # try n bp mismatch until pass: for num_mismatch in (4,5,6,7, 8, 9, 10, 11, 12, 13, 14, 15, 16): m = regex.findall("(%s){s<=%s}" % (p, num_mismatch), seq['seq']) if m: # In testing, only ever 1 hit; if found: # We have a find already: if num_mismatch < found['num_mismatch']: # Only replace if a better match found = {'typ': seq['name'], 'pep_seq': p, 'num_mismatch': num_mismatch} else: found = {'typ': seq['name'], 'pep_seq': p, 'num_mismatch': num_mismatch} break #if found: # break again # break # Add the best match: if found: typ = found['typ'] del found['typ'] finds[typ].append(found) else: finds['Not Found'].append({'pep_seq': p, 'num_mismatch': -1}) # Save a summary table: oh = open('pep_matches.tsv', 'wt') oh.write('{0}\n'.format('\t'.join(['HERVK', 'peptide', 'num_mismatch']))) for typ in finds: for hit in finds[typ]: oh.write('{0}\n'.format('\t'.join([typ, hit['pep_seq'], str(hit['num_mismatch'])]))) oh.close() print('Found {0}/{1} ({2:.1f}%) peptides in the HERVK seq'.format(len(sum(finds.values(), [])), len(pep_hit), len(sum(finds.values(), []))/len(pep_hit) * 100)) print(finds)
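# Small self-contained demo of the fuzzy-matching syntax used above (third-party 'regex'
# module, not the stdlib 're'): "{s<=N}" allows up to N substitutions, "{e<=N}" up to N errors
# of any kind. Printed rather than asserted, since fuzzy matches can pick slightly
# unintuitive spans.
print(regex.findall("(BANANA){s<=1}", "BANANA BANXNA BXNXNA"))  # exact and 1-substitution hits
print(regex.findall("(BANANA){e<=2}", "BXNXNA"))                # up to 2 errors allowed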
# Copyright 2012-2013 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import os from congressclient import exceptions def env(*vars, **kwargs): """Search for the first defined of possibly many env vars Returns the first environment variable defined in vars, or returns the default defined in kwargs. """ for v in vars: value = os.environ.get(v, None) if value: return value return kwargs.get('default', '') def import_class(import_str): """Returns a class from a string including module and class :param import_str: a string representation of the class name :rtype: the requested class """ mod_str, _sep, class_str = import_str.rpartition('.') mod = importlib.import_module(mod_str) return getattr(mod, class_str) def get_client_class(api_name, version, version_map): """Returns the client class for the requested API version :param api_name: the name of the API, e.g. 'compute', 'image', etc :param version: the requested API version :param version_map: a dict of client classes keyed by version :rtype: a client class for the requested API version """ try: client_path = version_map[str(version)] except (KeyError, ValueError): msg = "Invalid %s client version '%s'. must be one of: %s" % ( (api_name, version, ', '.join(version_map.keys()))) raise exceptions.UnsupportedVersion(msg) return import_class(client_path) def format_long_dict_list(data): """Return a formatted string. :param data: a list of dicts :rtype: a string formatted to {a:b, c:d}, {e:f, g:h} """ newdata = [str({str(key): str(value) for key, value in d.iteritems()}) for d in data] return ',\n'.join(newdata) + '\n' def format_dict(data): """Return a formatted string. :param data: a dict :rtype: a string formatted to {a:b, c:d} """ if not isinstance(data, dict): return str(data) return str({str(key): str(value) for key, value in data.items()}) def format_list(data): """Return a formatted strings :param data: a list of strings :rtype: a string formatted to a,b,c """ return ', '.join(data) def get_dict_properties(item, fields, mixed_case_fields=[], formatters={}): """Return a tuple containing the item properties. :param item: a single dict resource :param fields: tuple of strings with the desired field names :param mixed_case_fields: tuple of field names to preserve case :param formatters: dictionary mapping field names to callables to format the values """ row = [] for field in fields: if field in mixed_case_fields: field_name = field.replace(' ', '_') else: field_name = field.lower().replace(' ', '_') data = item[field_name] if field_name in item else '' if field in formatters: row.append(formatters[field](data)) else: row.append(data) return tuple(row) def get_resource_id_from_name(name, results): # FIXME(arosen): move to common lib and add tests... 
name_match = None id_match = None double_name_match = False for result in results['results']: if result['id'] == name: id_match = result['id'] if result['name'] == name: if name_match: double_name_match = True name_match = result['id'] if not double_name_match and name_match: return name_match if double_name_match and not id_match: # NOTE(arosen): this should only occur is using congress # as admin and multiple tenants use the same datsource name. raise exceptions.Conflict( "Multiple resources have this name %s. Please specify id." % name) if id_match: return id_match raise exceptions.NotFound("Resource %s not found" % name)
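# Hedged usage sketch for get_dict_properties(): requested field names are normalised to
# lower-case snake_case keys of the item, and per-field formatters are applied. The data
# below is made up.
if __name__ == '__main__':
    item = {'id': 'abc123', 'name': 'policy1', 'driver_config': {'a': 'b'}}
    row = get_dict_properties(item, ('ID', 'Name', 'Driver Config'),
                              formatters={'Driver Config': format_dict})
    print(row)  # ('abc123', 'policy1', "{'a': 'b'}")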
# Code generated by `typeddictgen`. DO NOT EDIT. """V2beta2PodsMetricSourceDict generated type.""" from typing import TypedDict from kubernetes_typed.client import V2beta2MetricIdentifierDict, V2beta2MetricTargetDict V2beta2PodsMetricSourceDict = TypedDict( "V2beta2PodsMetricSourceDict", { "metric": V2beta2MetricIdentifierDict, "target": V2beta2MetricTargetDict, }, total=False, )
# -*- coding: utf-8 -*- from django.conf import settings from django.http import HttpResponse from django_distill import distill_url, distill_path, distill_re_path def test_no_param_view(reqest): return HttpResponse(b'test', content_type='application/octet-stream') def test_positional_param_view(reqest, param): return HttpResponse(b'test' + param.encode(), content_type='application/octet-stream') def test_named_param_view(reqest, param=None): return HttpResponse(b'test' + param.encode(), content_type='application/octet-stream') def test_no_param_func(): return None def test_positional_param_func(): return ('12345',) def test_named_param_func(): return [{'param': 'test'}] urlpatterns = [ distill_url(r'^url/$', test_no_param_view, name='url-no-param', distill_func=test_no_param_func, distill_file='test'), distill_url(r'^url/([\d]+)$', test_positional_param_view, name='url-positional-param', distill_func=test_positional_param_func), distill_url(r'^url/(?P<param>[\w]+)$', test_named_param_view, name='url-named-param', distill_func=test_named_param_func), ] if settings.HAS_RE_PATH: urlpatterns += [ distill_re_path(r'^re_path/$', test_no_param_view, name='re_path-no-param', distill_func=test_no_param_func, distill_file='test'), distill_re_path(r'^re_path/([\d]+)$', test_positional_param_view, name='re_path-positional-param', distill_func=test_positional_param_func), distill_re_path(r'^re_path/(?P<param>[\w]+)$', test_named_param_view, name='re_path-named-param', distill_func=test_named_param_func), ] if settings.HAS_PATH: urlpatterns += [ distill_path('path/', test_no_param_view, name='path-no-param', distill_func=test_no_param_func, distill_file='test'), distill_path('path/<int>', test_positional_param_view, name='path-positional-param', distill_func=test_positional_param_func), distill_path('path/<str:param>', test_named_param_view, name='path-named-param', distill_func=test_named_param_func), ] # eof
import sys, string, os


def separator():
    print("=========================================")


separator()
print(" ==> RandomX Mining Presets Wizard <== ")
separator()
print(" /===========\\ /==\\")
print(" | [-----] | | |")
print(" | | | | | | |==|")
print(" | [-----] | | | /==/ |==|")
print(" | /========/ | |/ / ")
print(" | | /=========\\ | / / /=========\\ /========\\ |==|")
print(" | | | /---\\ | | \\ \\ | /---\\ | / _____/ | |")
print(" | | | | | | | |\\ \\ | \\---/ | | /_____ | |")
print(" | | | \\---/ | | | \\ \\ | ______/ |___ / | |")
print(" |==| \\=========/ | | \\ \\ \\=========\\ \\=======/ |==|")
separator()
separator()
print("What currency do you want to mine? (full name, no spaces) >>> ")
currency = input("")
for file in os.listdir("/Users/darren/Desktop/test"):
    if file.startswith("00 " + currency):
        f = file
        os.system(f)
# coding=utf-8
import os
import json

global_config = {
    "gpu_id": "0,1,4,7,9,15",
    "async_loading": True,
    "shuffle": True,
    "data_aug": True,
    "num_epochs": 3000,
    "img_height": 320,
    "img_width": 320,
    "num_channels": 3,
    "batch_size": 96,
    "dataloader_workers": 1,
    "learning_rate_g": 1e-4,
    "learning_rate_decay_g": 0.9,
    "learning_rate_decay_epoch_g": 100,
    "learning_rate_d": 1e-4,
    "learning_rate_decay_d": 0.9,
    "learning_rate_decay_epoch_d": 100,
    "every_d": 3,
    "every_g": 1,
    "save_path": "your_save_path",
    "save_name": "model.pth",
}

if __name__ == '__main__':
    config = global_config
    print(config['gpu_id'])
    print('done')
from typing import List


class Solution:
    def nthSuperUglyNumber(self, n: int, primes: List[int]) -> int:
        cnt, ans = 1, [1]
        dp = [0 for _ in range(len(primes))]
        while cnt < n:
            next_ugly, min_index = -1, -1
            for i in range(len(primes)):
                if primes[i] * ans[dp[i]] == ans[-1]:
                    dp[i] += 1
                if next_ugly == -1 or next_ugly > primes[i] * ans[dp[i]]:
                    min_index = i
                    next_ugly = primes[i] * ans[dp[i]]
            dp[min_index] += 1
            ans.append(next_ugly)
            cnt += 1
        return ans[-1]
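# Quick check against the classic example for this problem: with primes [2, 7, 13, 19] the
# first 12 super ugly numbers are 1, 2, 4, 7, 8, 13, 14, 16, 19, 26, 28 and 32.
if __name__ == "__main__":
    print(Solution().nthSuperUglyNumber(12, [2, 7, 13, 19]))  # expected: 32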
#!/usr/bin/python import os, sys from PIL import ImageFile import Image def thumb(infile, ext): size = 250, 250 outfile = os.path.splitext(infile)[0] + "_thumb" + ext types = {'.jpg' : 'JPEG', '.jpeg' : 'JPEG', '.png' : 'PNG', '.gif' : 'GIF'} im = Image.open(infile) im = im.convert('RGB') im.thumbnail(size, Image.ANTIALIAS) if ext.lower() == '.jpg' or ext.lower() == '.jpeg': try: im.save(outfile, types[ext.lower()], quality=80, optimize=True, progressive=True) except IOError: ImageFile.MAXBLOCK = im.size[0] * im.size[1] im.save(outfile, types[ext.lower()], quality=80, optimize=True, progressive=True) else: try: im.save(outfile, types[ext.lower()], quality=85) except IOError, e: print e thumb(sys.argv[1], sys.argv[2])
#!/usr/bin/env python import re from setuptools import setup READMEFILE = 'README.rst' VERSIONFILE = 'pytest_wholenodeid.py' VSRE = r"""^__version__ = ['"]([^'"]*)['"]""" def get_version(): version_file = open(VERSIONFILE, 'rt').read() return re.search(VSRE, version_file, re.M).group(1) setup( name='pytest-wholenodeid', version=get_version(), description='pytest addon for displaying the whole node id for failures', long_description=open(READMEFILE).read(), license='Simplified BSD License', author='Will Kahn-Greene', author_email='[email protected]', keywords='py.test pytest', url='https://github.com/willkg/pytest-wholenodeid', zip_safe=True, py_modules=['pytest_wholenodeid'], entry_points={ 'pytest11': [ 'wholenodeid = pytest_wholenodeid' ] }, install_requires=[ 'pytest>=2.0' ], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'License :: OSI Approved :: BSD License', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Operating System :: Unix', 'Topic :: Software Development :: Testing', 'Topic :: Software Development :: Libraries', 'Topic :: Utilities', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', ] )
# MIT License
# Copyright (c) 2020 Ali Ghadirzadeh
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import numpy as np
import math


class TorchDataset(Dataset):
    def __init__(self, data_in, data_out):
        self.data_in = torch.clone(data_in)
        self.data_out = torch.clone(data_out)

    def __len__(self):
        return len(self.data_in)

    def __getitem__(self, idx):
        return self.data_in[idx], self.data_out[idx]


class FullyConnectedNetwork(nn.Module):
    def __init__(self, dim_input, dim_output, num_neurons=64):
        super(FullyConnectedNetwork, self).__init__()
        self.dim_input = dim_input
        self.dim_output = dim_output
        self.fc1 = nn.Linear(dim_input, num_neurons)
        self.fc2 = nn.Linear(num_neurons, num_neurons)
        self.fc3 = nn.Linear(num_neurons, num_neurons)
        self.fc4 = nn.Linear(num_neurons, num_neurons)
        self.fc5 = nn.Linear(num_neurons, dim_output)

    def forward(self, x):
        x = x.view(-1, self.dim_input)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = self.fc5(x)
        return x


class SupervisedLearner(nn.Module):
    def __init__(self, dim_input, dim_output, num_neurons=64, device='cpu'):
        super(SupervisedLearner, self).__init__()
        self.device = device
        self.net = FullyConnectedNetwork(dim_input, dim_output, num_neurons).to(self.device)
        self.init_param()

    def train_model(self, train_data_loader, test_data_loader, num_epoch=10000, learning_rate=0.001):
        self.train()
        optimizer = optim.Adam(self.net.parameters(), learning_rate)
        for epoch in range(num_epoch):
            sum_loss = 0.0
            for x, y in train_data_loader:
                optimizer.zero_grad()
                yhat = self.net.forward(x)
                loss = F.mse_loss(y, yhat)
                loss.backward()
                optimizer.step()
                sum_loss += loss.item() / (x.size(0) * x.size(1))
            avg_loss = sum_loss / len(train_data_loader)
            if epoch % 100 == 0 or epoch == (num_epoch - 1):
                train_loss = math.sqrt(avg_loss)
                test_loss = self.evaluate_model(test_data_loader)
                print('{:d} train loss: {:.6e} \t test loss: {:.6e}'.format(epoch, train_loss, test_loss))
                self.train()

    def init_param(self):
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                torch.nn.init.normal_(m.weight, std=0.01)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm1d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                torch.nn.init.normal_(m.weight, std=1e-3)
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)

    def evaluate_model(self, data_loader):
        self.eval()
        sum_loss = 0.0
        for x, y in data_loader:
            yhat = self.net.forward(x)
            loss = F.mse_loss(y, yhat)
            sum_loss += loss.item() / (x.size(0) * x.size(1))
        avg_loss = sum_loss / len(data_loader)
        return math.sqrt(avg_loss)
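# --- Minimal usage sketch (assumption, not part of the original module) ---
# Trains the learner on random tensors just to show the expected call pattern;
# all shapes and hyperparameters below are illustrative only.
if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    x = torch.randn(256, 10, device=device)  # 256 samples, 10 input features
    y = torch.randn(256, 2, device=device)   # 2 regression targets
    train_loader = DataLoader(TorchDataset(x[:200], y[:200]), batch_size=32, shuffle=True)
    test_loader = DataLoader(TorchDataset(x[200:], y[200:]), batch_size=32)
    learner = SupervisedLearner(dim_input=10, dim_output=2, device=device)
    learner.train_model(train_loader, test_loader, num_epoch=200, learning_rate=1e-3)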
from .api import UserSerializer

from django.contrib.auth import authenticate, login
from django.shortcuts import render
from django.contrib.auth import get_user_model

from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework import status


def app(request):
    return render(request, "app.html", {})


@api_view(['POST'])
def auth_signinup(request):
    """Simple API view to register or authenticate/log in."""
    valid_fields = [f.name for f in get_user_model()._meta.fields]
    serialized = UserSerializer(data=request.data)
    provided_data = {
        field: data for (field, data) in request.data.items()
        if field in valid_fields
    }
    if serialized.is_valid():
        # Create user
        user = get_user_model().objects.create_user(**provided_data)
        user = authenticate(**provided_data)
        # Also log user in
        login(request, user)
        return Response(
            UserSerializer(instance=user).data,
            status=status.HTTP_201_CREATED
        )
    else:
        # Try to authenticate
        if 'email' in provided_data and 'password' in provided_data:
            user = authenticate(**provided_data)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return Response(
                        {'auth': 'Logged in!'},
                        status=status.HTTP_200_OK
                    )
                else:
                    return Response(
                        {'auth': 'Inactive account.'},
                        status=status.HTTP_401_UNAUTHORIZED
                    )
            else:
                return Response(
                    {'auth': 'Email/password combo is incorrect.'},
                    status=status.HTTP_401_UNAUTHORIZED
                )
        # Otherwise, just return the validation errors
        return Response(
            serialized.errors,
            status=status.HTTP_400_BAD_REQUEST
        )
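# --- Usage sketch (assumption, not part of the original views module) ---
# Exercises the view directly with DRF's APIRequestFactory; the route "/auth/"
# and the credentials are hypothetical, and a session is attached manually
# because login() needs one outside the normal middleware stack.
from django.contrib.sessions.middleware import SessionMiddleware
from rest_framework.test import APIRequestFactory

factory = APIRequestFactory()
request = factory.post('/auth/', {'email': 'user@example.com', 'password': 'secret'}, format='json')
SessionMiddleware(lambda req: None).process_request(request)  # give the request a session
response = auth_signinup(request)
print(response.status_code, response.data)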