#!/usr/bin/python3
import sys
import os
import subprocess
import numpy as np
import nibabel as nib
import colormaps
import time
FIXED_OUT_N4 = 'n4out-fixed.nii.gz'
FLOATING_OUT_N4 = 'n4out-floating.nii.gz'
FIXED_OUT_BET = 'betout-fixed.nii.gz'
FLOATING_OUT_BET = 'betout-floating.nii.gz'
REG_OUT = 'warped.nii.gz'
REG_MAT = 'out0GenericAffine.mat'
def vistarsier_compare(c, p, min_val=-1., max_val=5., min_change=0.8, max_change=3.):
""" VisTarsier's compare operation
Parameters
----------
c : ndarray
The current volume
p : ndarray
The prior volume
min_val : float
The minimum value (measured in standard deviations) to consider
max_val : float
The maximum value (measured in standard deviations) to consider
min_change : float
The minimum change of value (measured in standard deviations) to consider
max_change : float
The maximum change of value (measured in standard deviations) to consider
Returns
-------
change : ndarray
The relevant change in signal.
"""
print('Starting VisTarsier comparison...')
# Get standard deviations for current and prior
pstd = p.std()
cstd = c.std()
# Align prior standard deviation to current
    p = ((p - p.mean()) / pstd) * cstd + c.mean()
# Here we could plot a histogram (which should show rough alignment)
#minrange = np.min((p.min(), c.min()))
#maxrange = np.max((p.max(), c.max()))
#phist = np.histogram(p, 256, (minrange,maxrange))
#chist = np.histogram(c, 256, (minrange,maxrange))
#Calculate change
change = c - p
    # Ignore change outside of minimum and maximum values
change[c < min_val*cstd] = 0
change[p < min_val*cstd] = 0
change[c > max_val*cstd] = 0
change[p > max_val*cstd] = 0
change[np.abs(change) < min_change*cstd] = 0
change[np.abs(change) > max_change*cstd] = 0
print('...VisTarsier comparison complete.')
return change
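# Illustrative sketch (not part of the original script): a tiny synthetic
# check of vistarsier_compare. 'demo_compare' is a hypothetical helper added
# purely for illustration; it is never called by the pipeline below.
def demo_compare():
    rng = np.random.default_rng(0)
    prior = rng.normal(0.0, 1.0, (4, 4, 4))
    current = prior.copy()
    current[2, 2, 2] += 2.0  # inject one focal signal increase
    change = vistarsier_compare(current, prior)
    print('non-zero change voxels:', np.count_nonzero(change))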
def pre_process(floating, fixed):
"""Pre processes the nifti images using:
ANTs N4BiasFieldCorrection -> FSL bet -> ANTs antsRegistration
Parameters
----------
floating : string
Path to the floating nifti image
fixed : string
Path to the fixed nifti image
Returns
-------
(floating, fixed) : tuple
floating : string
Path to the pre-processed floating image
fixed : string
        Path to the pre-processed fixed image
"""
    print('Preprocessing...')
# ANTS N4 Bias correction
print('N4BiasFieldCorrection for fixed volume started...')
p1 = subprocess.Popen(['N4BiasFieldCorrection', '-i', fixed, '-o', FIXED_OUT_N4])
print('N4BiasFieldCorrection for floating volume started...')
p2 = subprocess.Popen(['N4BiasFieldCorrection', '-i', floating, '-o', FLOATING_OUT_N4])
p1.wait()
print('...N4BiasFieldCorrection for fixed volume complete.')
p2.wait()
print('...N4BiasFieldCorrection for floating volume complete.')
print('FSL BET2 for fixed volume started...')
    p1 = subprocess.Popen(['fsl5.0-bet', FIXED_OUT_N4, FIXED_OUT_BET, '-f','0.4','-R'])
    print('FSL BET2 for floating volume started...')
    p2 = subprocess.Popen(['fsl5.0-bet', FLOATING_OUT_N4, FLOATING_OUT_BET, '-f','0.4','-R'])
p1.wait()
print('...FSL BET2 for fixed volume complete.')
p2.wait()
print('...FSL BET2 for floating volume complete.')
os.remove(FLOATING_OUT_N4)
os.remove(FIXED_OUT_N4)
print('Starting antsRegistration...')
subprocess.run([
'antsRegistration',
'--dimensionality','3', # Run ANTS on 3 dimensional image
'--float', '1',
'--interpolation', 'Linear',
'--use-histogram-matching', '0',
'--initial-moving-transform', f'[{FIXED_OUT_BET},{FLOATING_OUT_BET},1]',
'--transform', 'Affine[0.1]',
        '--metric', f'MI[{FIXED_OUT_BET},{FLOATING_OUT_BET},1,32,Regular,0.25]', # Use mutual information (we're not normalizing intensity)
'--convergence', '[1000x500x250x100,1e-6,10]',
'--shrink-factors', '8x4x2x1',
'--smoothing-sigmas', '3x2x1x0vox',
'--output', f'[out,{REG_OUT}]'
])
print('...antsRegistration complete.')
os.remove(FLOATING_OUT_BET)
os.remove(REG_MAT)
return (REG_OUT,FIXED_OUT_BET)
def display_change(current, change):
current = current.copy()
current -= np.min(current)
current /= np.max(current)
current *= 255
current = colormaps.greyscale()[current.astype('int')]
# Get increase and decrease
inc_change = change.clip(0, float('inf'))
    dec_change = change.clip(float('-inf'), 0)
# Convert to color values
inc_change -= np.min(inc_change)
if np.max(inc_change) != 0:
inc_change /= np.max(inc_change)
inc_change *= 255
inc_change = colormaps.redscale()[inc_change.astype('int')]
# Convert to color values
dec_change -= np.min(dec_change)
if np.max(dec_change) != 0:
dec_change /= np.max(dec_change)
dec_change *= 255
dec_change = colormaps.reverse_greenscale()[dec_change.astype('int')]
# Apply increased signal colour
inc_out = current.copy().astype('float64')
inc_change = inc_change.astype('float64')
inc_out[:,:,:,0] = inc_change[:,:,:,0]*inc_change[:,:,:,1]/255 + (255-inc_change[:,:,:,0])*current[:,:,:,0]/255
inc_out[:,:,:,1] = inc_change[:,:,:,0]*inc_change[:,:,:,2]/255 + (255-inc_change[:,:,:,0])*current[:,:,:,1]/255
inc_out[:,:,:,2] = inc_change[:,:,:,0]*inc_change[:,:,:,3]/255 + (255-inc_change[:,:,:,0])*current[:,:,:,2]/255
# Apply decreased signal colour
dec_out = current.copy().astype('float64')
dec_change = dec_change.astype('float64')
dec_out[:,:,:,0] = dec_change[:,:,:,0]*dec_change[:,:,:,1]/255 + (255-dec_change[:,:,:,0])*current[:,:,:,0]/255
dec_out[:,:,:,1] = dec_change[:,:,:,0]*dec_change[:,:,:,2]/255 + (255-dec_change[:,:,:,0])*current[:,:,:,1]/255
dec_out[:,:,:,2] = dec_change[:,:,:,0]*dec_change[:,:,:,3]/255 + (255-dec_change[:,:,:,0])*current[:,:,:,2]/255
return (inc_out.astype('uint8'), dec_out.astype('uint8'))
def cleanup():
if os.path.exists(FIXED_OUT_N4): os.remove(FIXED_OUT_N4)
if os.path.exists(FLOATING_OUT_N4): os.remove(FLOATING_OUT_N4)
if os.path.exists(FIXED_OUT_BET): os.remove(FIXED_OUT_BET)
if os.path.exists(FLOATING_OUT_BET): os.remove(FLOATING_OUT_BET)
if os.path.exists(REG_OUT): os.remove(REG_OUT)
if os.path.exists(REG_MAT): os.remove(REG_MAT)
def save_in_color(data, q_form, path):
# Create a datatype that nibabel can understand and save...
rgb_dtype = np.dtype([('R', 'u1'), ('G', 'u1'), ('B', 'u1')])
# Apply the datatype
data = data.copy().view(dtype=rgb_dtype).reshape(data.shape[0:3])
img = nib.Nifti1Image(data, q_form)
nib.save(img, path)
if __name__ == '__main__':
start = time.process_time()
#parse args
if len(sys.argv) < 3:
print("Vistarsier requires at least a current and prior study.")
print("Usage: vistarsier.py [prior.nii] [current.nii] [output-prefix](optional)")
exit(100)
try:
# Initialise variables
prior_path = sys.argv[1]
current_path = sys.argv[2]
output_prefix = ""
if len(sys.argv) > 3:
output_prefix = sys.argv[3]
print('Using:')
print(' Prior : ', prior_path)
print(' Current : ', current_path)
print('Out prefix : ', output_prefix)
print('*****************************************************************')
print('')
# Run biascorrection | skull stripping | registration
prior_proc, current_proc = pre_process(prior_path, current_path)
# Load pre-processed images
pimg = nib.load(prior_proc)
cimg = nib.load(current_proc)
# Calculate change
change = vistarsier_compare(cimg.get_fdata(), pimg.get_fdata())
# Apply colourmaps
print('Applying colormaps...')
inc_output, dec_output = display_change(cimg.get_fdata(), change)
# Save everything
save_in_color(inc_output, cimg.header.get_qform(), f"{output_prefix}vt-increase.nii.gz")
save_in_color(dec_output, cimg.header.get_qform(), f"{output_prefix}vt-decrease.nii.gz")
print('...ALL DONE!')
print(time.process_time() - start)
finally:
# Get rid of temp files
cleanup()
|
# Python 3
# Polygonal spiral
import turtle
t = turtle.Pen()
# gui integer input + default value
sides = int(turtle.numinput("Number of sides", "How many sides your spiral have?", 4))
for m in range(5, 75):
t.left(360/sides + 5)
t.width(m//25+1)
t.penup()
t.forward(m*4)
t.pendown()
# draw in even corners circles
if (m % 2 == 0):
for n in range(sides):
t.circle(m/3)
t.right(360/sides)
else:
# and rects on odds
for n in range(sides):
t.forward(m)
t.right(360/sides)
|
import time
from threading import Condition
class TooManyErrorsOccured(Exception):
pass
class Counter:
def __init__(
self,
total: int,
progress_interval: int = 1,
threshold_errors: "int | None" = None,
) -> None:
self._processed = 0
self._success = 0
self._log_interval = progress_interval
self._total = total
self._start_at = time.perf_counter()
self._cond_processed = Condition()
self._cond_success = Condition()
self._threshold_errors = threshold_errors or int(total / 2)
@property
def processed(self) -> int:
return self._processed
@property
def success(self) -> int:
return self._success
@property
def start_at(self) -> float:
return self._start_at
@property
def total(self) -> int:
return self._total
@property
def percent(self) -> float:
return self.rate * 100
@property
def rate(self) -> float:
return self.processed / self._total
@property
def throughput(self) -> float:
return self.processed / self.elapsed_sec
@property
def elapsed_sec(self) -> float:
return time.perf_counter() - self.start_at
@staticmethod
def make_sec_readable(sec: int) -> str:
ss = int((sec % 60))
mm = int((sec / 60) % 60)
hh = int(sec // (60 * 60))
return f"{hh:02}:{mm:02}:{ss:02}"
@property
def elapsed(self) -> str:
return self.make_sec_readable(self.elapsed_sec)
@property
def eta(self) -> str:
eta = (self._total - self.processed) / self.throughput
return f"ETA={self.make_sec_readable(eta)}"
@property
def progress(self) -> str:
return f"{self.percent:.1f}[%]={self.processed}/{self._total}"
def __repr__(self) -> str:
return f"{self.progress}, " f"{1/self.throughput:.3f}[sec/iter], " f"{self.eta}"
def log_progress(self, log_fn=print) -> None:
if self.processed % self._log_interval == 0 or self.processed == self.total:
log_fn(self)
def count_processed(self, d: int) -> None:
with self._cond_processed:
self._processed += d
def count_success(self, d: int) -> None:
with self._cond_success:
self._success += d
def raise_for_many_errors(self) -> None:
if self.processed - self.success > self._threshold_errors:
raise TooManyErrorsOccured
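# Illustrative sketch (not part of the original module): minimal use of
# Counter inside a worker loop. 'items' and 'process' are hypothetical names.
def _demo_counter(items, process):
    counter = Counter(total=len(items), progress_interval=10)
    for item in items:
        ok = False
        try:
            process(item)
            ok = True
        except Exception:
            pass  # treat any exception as a failed item and keep going
        counter.count_processed(1)
        if ok:
            counter.count_success(1)
        counter.log_progress()
        counter.raise_for_many_errors()  # abort once too many items have failed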
|
import serial
import struct
import time
import threading
import warnings
from .message import Message
from enums.PTPMode import PTPMode
from enums.CommunicationProtocolIDs import CommunicationProtocolIDs
from enums.ControlValues import ControlValues
class Dobot:
def __init__(self, port, verbose=False):
threading.Thread.__init__(self)
self._on = True
self.verbose = verbose
self.lock = threading.Lock()
self.ser = serial.Serial(port,
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS)
is_open = self.ser.isOpen()
if self.verbose:
print('pydobot: %s open' % self.ser.name if is_open else 'failed to open serial port')
self._set_queued_cmd_start_exec()
self._set_queued_cmd_clear()
self._set_ptp_joint_params(200, 200, 200, 200, 200, 200, 200, 200)
self._set_ptp_coordinate_params(velocity=200, acceleration=200)
self._set_ptp_jump_params(10, 200)
self._set_ptp_common_params(velocity=100, acceleration=100)
self._get_pose()
"""
Gets the current command index
"""
def _get_queued_cmd_current_index(self):
msg = Message()
msg.id = CommunicationProtocolIDs.GET_QUEUED_CMD_CURRENT_INDEX
response = self._send_command(msg)
idx = struct.unpack_from('L', response.params, 0)[0]
return idx
"""
Gets the real-time pose of the Dobot
"""
def _get_pose(self):
msg = Message()
msg.id = CommunicationProtocolIDs.GET_POSE
response = self._send_command(msg)
self.x = struct.unpack_from('f', response.params, 0)[0]
self.y = struct.unpack_from('f', response.params, 4)[0]
self.z = struct.unpack_from('f', response.params, 8)[0]
self.r = struct.unpack_from('f', response.params, 12)[0]
self.j1 = struct.unpack_from('f', response.params, 16)[0]
self.j2 = struct.unpack_from('f', response.params, 20)[0]
self.j3 = struct.unpack_from('f', response.params, 24)[0]
self.j4 = struct.unpack_from('f', response.params, 28)[0]
if self.verbose:
print("pydobot: x:%03.1f \
y:%03.1f \
z:%03.1f \
r:%03.1f \
j1:%03.1f \
j2:%03.1f \
j3:%03.1f \
j4:%03.1f" %
(self.x, self.y, self.z, self.r, self.j1, self.j2, self.j3, self.j4))
return response
def _read_message(self):
time.sleep(0.1)
b = self.ser.read_all()
if len(b) > 0:
msg = Message(b)
if self.verbose:
print('pydobot: <<', msg)
return msg
return
def _send_command(self, msg, wait=False):
self.lock.acquire()
self._send_message(msg)
response = self._read_message()
self.lock.release()
if not wait:
return response
expected_idx = struct.unpack_from('L', response.params, 0)[0]
if self.verbose:
print('pydobot: waiting for command', expected_idx)
while True:
current_idx = self._get_queued_cmd_current_index()
if current_idx != expected_idx:
time.sleep(0.1)
continue
if self.verbose:
print('pydobot: command %d executed' % current_idx)
break
return response
def _send_message(self, msg):
time.sleep(0.1)
if self.verbose:
print('pydobot: >>', msg)
self.ser.write(msg.bytes())
"""
Executes the CP Command
"""
def _set_cp_cmd(self, x, y, z):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_CP_CMD
msg.ctrl = ControlValues.THREE
msg.params = bytearray(bytes([0x01]))
msg.params.extend(bytearray(struct.pack('f', x)))
msg.params.extend(bytearray(struct.pack('f', y)))
msg.params.extend(bytearray(struct.pack('f', z)))
msg.params.append(0x00)
return self._send_command(msg)
"""
Sets the status of the gripper
"""
def _set_end_effector_gripper(self, enable=False):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_END_EFFECTOR_GRIPPER
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray([0x01]))
if enable is True:
msg.params.extend(bytearray([0x01]))
else:
msg.params.extend(bytearray([0x00]))
return self._send_command(msg)
"""
Sets the status of the suction cup
"""
def _set_end_effector_suction_cup(self, enable=False):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_END_EFFECTOR_SUCTION_CUP
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray([0x01]))
if enable is True:
msg.params.extend(bytearray([0x01]))
else:
msg.params.extend(bytearray([0x00]))
return self._send_command(msg)
"""
Sets the velocity ratio and the acceleration ratio in PTP mode
"""
def _set_ptp_joint_params(self, v_x, v_y, v_z, v_r, a_x, a_y, a_z, a_r):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_JOINT_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', v_x)))
msg.params.extend(bytearray(struct.pack('f', v_y)))
msg.params.extend(bytearray(struct.pack('f', v_z)))
msg.params.extend(bytearray(struct.pack('f', v_r)))
msg.params.extend(bytearray(struct.pack('f', a_x)))
msg.params.extend(bytearray(struct.pack('f', a_y)))
msg.params.extend(bytearray(struct.pack('f', a_z)))
msg.params.extend(bytearray(struct.pack('f', a_r)))
return self._send_command(msg)
"""
Sets the velocity and acceleration of the Cartesian coordinate axes in PTP mode
"""
def _set_ptp_coordinate_params(self, velocity, acceleration):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_COORDINATE_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', velocity)))
msg.params.extend(bytearray(struct.pack('f', velocity)))
msg.params.extend(bytearray(struct.pack('f', acceleration)))
msg.params.extend(bytearray(struct.pack('f', acceleration)))
return self._send_command(msg)
"""
Sets the lifting height and the maximum lifting height in JUMP mode
"""
def _set_ptp_jump_params(self, jump, limit):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_JUMP_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', jump)))
msg.params.extend(bytearray(struct.pack('f', limit)))
return self._send_command(msg)
"""
Sets the velocity ratio, acceleration ratio in PTP mode
"""
def _set_ptp_common_params(self, velocity, acceleration):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_GET_PTP_COMMON_PARAMS
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray(struct.pack('f', velocity)))
msg.params.extend(bytearray(struct.pack('f', acceleration)))
return self._send_command(msg)
"""
Executes PTP command
"""
def _set_ptp_cmd(self, x, y, z, r, mode, wait):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_PTP_CMD
msg.ctrl = ControlValues.THREE
msg.params = bytearray([])
msg.params.extend(bytearray([mode]))
msg.params.extend(bytearray(struct.pack('f', x)))
msg.params.extend(bytearray(struct.pack('f', y)))
msg.params.extend(bytearray(struct.pack('f', z)))
msg.params.extend(bytearray(struct.pack('f', r)))
return self._send_command(msg, wait)
"""
Clears command queue
"""
def _set_queued_cmd_clear(self):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_QUEUED_CMD_CLEAR
msg.ctrl = ControlValues.ONE
return self._send_command(msg)
"""
Start command
"""
def _set_queued_cmd_start_exec(self):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_QUEUED_CMD_START_EXEC
msg.ctrl = ControlValues.ONE
return self._send_command(msg)
"""
Stop command
"""
def _set_queued_cmd_stop_exec(self):
msg = Message()
msg.id = CommunicationProtocolIDs.SET_QUEUED_CMD_STOP_EXEC
msg.ctrl = ControlValues.ONE
return self._send_command(msg)
def close(self):
self._on = False
self.lock.acquire()
self.ser.close()
if self.verbose:
print('pydobot: %s closed' % self.ser.name)
self.lock.release()
def go(self, x, y, z, r=0.):
warnings.warn('go() is deprecated, use move_to() instead')
self.move_to(x, y, z, r)
def move_to(self, x, y, z, r, wait=False):
self._set_ptp_cmd(x, y, z, r, mode=PTPMode.MOVL_XYZ, wait=wait)
def suck(self, enable):
self._set_end_effector_suction_cup(enable)
def grip(self, enable):
self._set_end_effector_gripper(enable)
def speed(self, velocity=100., acceleration=100.):
self._set_ptp_common_params(velocity, acceleration)
self._set_ptp_coordinate_params(velocity, acceleration)
def pose(self):
response = self._get_pose()
x = struct.unpack_from('f', response.params, 0)[0]
y = struct.unpack_from('f', response.params, 4)[0]
z = struct.unpack_from('f', response.params, 8)[0]
r = struct.unpack_from('f', response.params, 12)[0]
j1 = struct.unpack_from('f', response.params, 16)[0]
j2 = struct.unpack_from('f', response.params, 20)[0]
j3 = struct.unpack_from('f', response.params, 24)[0]
j4 = struct.unpack_from('f', response.params, 28)[0]
return x, y, z, r, j1, j2, j3, j4
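# Illustrative sketch (not part of the original module): typical use of the
# wrapper above. The serial port name is an assumption and depends on the OS
# and on how the Dobot is attached.
def _demo_dobot():
    device = Dobot(port='/dev/ttyUSB0', verbose=True)  # hypothetical port
    x, y, z, r, j1, j2, j3, j4 = device.pose()
    device.move_to(x + 20, y, z, r, wait=True)  # queued PTP move, block until executed
    device.move_to(x, y, z, r, wait=True)       # return to the starting pose
    device.close()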
|
from pyecharts.charts import Bar
from pyecharts.render import make_snapshot
from pyecharts import options as opts
from pyecharts.globals import ThemeType
from snapshot_selenium import snapshot
# When using Pandas/NumPy, make sure numeric values are converted to native Python int/float (e.g. int rather than numpy.int32).
bar = (
Bar(init_opts=opts.InitOpts(theme=ThemeType.LIGHT))
.add_xaxis(["衬衫", "羊毛衫", "雪纺衫", "裤子", "高跟鞋", "袜子"])
.add_yaxis("商家A", [5, 20, 36, 10, 75, 90])
.add_yaxis("商家B", [15, 6, 45, 20, 35, 66])
# .set_global_opts(title_opts=opts.TitleOpts(title="主标题", subtitle="副标题"))
.set_global_opts(title_opts={"text": "主标题", "subtext": "副标题"})
)
# bar.render('./charts/bar.html')
make_snapshot(snapshot, bar.render(), "./charts/bar.pdf")
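# Illustrative helper (not part of the original snippet): per the note above,
# NumPy/Pandas numbers should be coerced to native Python types before being
# passed to add_yaxis. 'values' is a hypothetical sequence of numpy scalars.
def to_native(values):
    return [int(v) for v in values]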
|
# -*- coding: utf-8 -*-
"""
@project : WechatTogether
@Time : 2020/9/9 14:21
@Auth : AJay13
@File :interface_article_list.py
@IDE :PyCharm
@Motto:ABC(Always Be Coding)
"""
# Article management interfaces: article list, delete article, edit article, add article to the weekly report, hide article on the front end, bulk delete articles
# Official-account management interfaces: community-submitted accounts, account tasks, account crawling
## Community-submitted accounts: list community accounts, add community account, delete community account, edit community account, approve community account
## Account tasks: list accounts, add account, delete account, sync account, revive zombie accounts, monitor account, edit account
## Account crawling: list accounts, delete account
__all__ = ['InterfaceAccountList','InterfaceArticleFlag']
from flask import views
from sqlalchemy import and_
import config
from exts import db
from apis.common import response_code
from apis.common.api_version import api_version
from apis.common.auth import login_required
from apis.v1.account.verify_account import AccountListForm,ArticleFlagForm
from apps.admin.models import WechatArticle, WechatArticleList,WechatAccount
## Account crawling: list accounts
class InterfaceAccountList(views.MethodView):
    '''
    Account-management interface that lists official accounts
    '''
    @api_version
    # @login_required  # handles authentication automatically
    def get(self, version):
        form = AccountListForm().validate_for_api()  # validate the form
page = int(form.page.data)
limit = int(form.limit.data)
account_name =form.account_name.data
start = (page - 1) * limit
end = start + limit
        # conditional query
account_data = []
account_obj = WechatAccount.query
if account_name:
account_search = account_obj.filter(
and_(WechatAccount.account.like("%" + account_name + "%"),
)).order_by(
WechatAccount.spider_time.desc())
else:
account_search = account_obj.order_by(WechatAccount.spider_time.desc())
accounts = account_search.slice(start, end)
total = account_search.count()
        # turn every account currently in the monitoring queue into a dict
for i in accounts:
account = {}
account['id'] = i.id
account['account_name'] = i.account
account['account_id'] = getattr(i,'__biz')
account['head_url'] = i.head_url
account['summary'] = i.summary
account['qr_code'] = i.qr_code
account['verify'] = i.verify
account['spider_time'] = str(i.spider_time)
account_data.append(account)
return response_code.LayuiSuccess(message='查询成功!', data=account_data, count=total)
class InterfaceArticleFlag(views.MethodView):
    '''
    flag == 1 marks a featured article; otherwise it is a normal article
    '''
    @api_version
    @login_required  # handles authentication automatically
    def post(self, version):
        form = ArticleFlagForm().validate_for_api()  # validate the form
id =form.id.data
flag =form.flag.data
wechat_article = WechatArticle.query.get(id)
if wechat_article:
if wechat_article.flag != flag:
wechat_article.flag = flag
db.session.commit()
return response_code.LayuiSuccess(message='文章:“{}”修改成功!'.format(wechat_article.title))
return response_code.ParameterException(message='已经被被人修改,刷新看看!!')
return response_code.ParameterException(message='修改失败!')
|
from datetime import datetime
from random import randint
class Pessoa:
ano_atual = int(datetime.strftime(datetime.now(), '%Y'))
def __init__(self, nome, idade, comendo=False, falando=False):
self.nome = nome
self.idade = idade
self.comendo = comendo
self.falando = falando
def comer(self,alimento):
if self.comendo:
print(f'{self.nome} já está comendo.')
return
if self.falando:
print(f'{self.nome} não pode comer falando')
return
print(f'{self.nome} está comendo {alimento}.')
self.comendo=True
def parar_comer(self):
if not self.comendo:
print(f'{self.nome} não está comendo.')
return
print(f'{self.nome} parou de comer.')
self.comendo=False
def falar(self, assunto):
if self.comendo:
print(f'{self.nome} não pode falar comendo.')
return
if self.falando:
print(f'{self.nome} já está falando.')
return
print(f'{self.nome} está falando sobre {assunto}')
self.falando=True
def parar_falar(self):
if not self.falando:
print(f'{self.nome} não está falando.')
return
print(f'{self.nome} parou de falar.')
self.falando=False
def get_ano_de_nascimento(self):
return self.ano_atual-self.idade
@classmethod
def por_ano_de_nascimento(cls, nome, ano_de_nascimenro):
idade = cls.ano_atual - ano_de_nascimenro
return cls(nome, idade)
@staticmethod
def gera_id():
rand = randint(10000, 19999)
return rand
#class BlocoDeNota:
# def __init__(self,tipodefolha,folhas,altura,largura,pagina,destacavel=True):
# self.folhas=folhas
# self.tipodefolha=str(tipodefolha)
# self.destacavel=destacavel
# self.altura=altura
# self.largura=largura
# self.pagina=int(pagina)
# def destacar(self):
# if self.destacavel:
# self.folhas-1
# print(f'Folha destacada, agora restam {self.folhas}')
# if not self.destacavel:
# print(f'Este bloco não é destacavel')
# return
#
# def escrever(self):
# if self.pagina==0:
# print('O bloco está fechado, você não pode escrever.')
# return
# if self.pagina !=0:
# print(f'Você está escrevendo na {self.pagina}° página')
# def abrir(self,pag):
# if self.pagina == pag:
# print(f'O bloco já se encontra na {pag}° página.')
# return
# if self.pagina != pag:
# print(f'Abrindo o bloco na {pag}° pagina.')
# self.pagina==pag
# return
# def fechar(self):
# if self.pagina==0:
# print(f'O bloco já se encontra fechado.')
# return
# if self.pagina != 0:
# print(f'Fechando o bloco.')
# self.pagina==0
class Produto:
def __init__(self, nome, preco):
self.nome = nome
self.preco = preco
def desconto(self, percentual):
self.preco = self.preco - (self.preco*(percentual/100))
#Getter
@property
def preco(self):
return self._preco
#Setter
@preco.setter
def preco(self,valor):
if isinstance(valor,str):
valor=float(valor.replace('R$',''))
self._preco = valor
@property
def nome(self):
return self._nome
@nome.setter
def nome(self, valor):
self._nome = valor.title()
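# Illustrative sketch (not part of the original file): the preco setter strips
# the 'R$' prefix from strings, so a price can be passed either way.
def _demo_produto():
    p = Produto('camiseta', 'R$50')
    p.desconto(10)
    print(p.nome, p.preco)  # Camiseta 45.0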
class A:
vc =123
def __init__(self):
self.vc = 222
|
# Generated by Django 2.1.7 on 2019-03-25 18:56
from django.db import migrations
def make_nodes(apps, schema_editor):
from treebeard.mp_tree import MP_Node
from froide.helper.tree_utils import _inc_path, add_children
def get_children(node):
return GeoRegion.objects.filter(part_of=node).order_by("name")
GeoRegion = apps.get_model("georegion", "GeoRegion")
root_regions = GeoRegion.objects.filter(part_of__isnull=True).order_by("name")
last_root = None
for georegion in root_regions:
if last_root is None:
newpath = MP_Node._get_path(None, 1, 1)
else:
newpath = _inc_path(last_root.path)
georegion.depth = 1
georegion.path = newpath
georegion.save()
last_root = georegion
for georegion in root_regions:
add_children(georegion, get_children)
class Migration(migrations.Migration):
dependencies = [
("georegion", "0007_auto_20190325_1956"),
]
operations = [migrations.RunPython(make_nodes)]
|
import sys
import attr
from redis import Redis
def main():
redis_url = sys.argv[1]
basename = sys.argv[2]
friendly_name = sys.argv[3]
redis = Redis.from_url(redis_url)
redis.set("character:" + basename + ":name", friendly_name)
if __name__ == "__main__":
main()
|
import datetime
import transaction
from nthuion.models import Comment, Issue, Solution, Tag
from .common import ManyUserTest
from .base import WebTest
class BaseCommentTest(ManyUserTest):
def create_issue(self, user_id, is_anonymous=False):
"""
create an issue with the given user id and is_anonymous
returns the id of the created issue
"""
with transaction.manager:
issue = Issue(
author_id=user_id,
is_anonymous=is_anonymous,
tags=Tag.from_names(self.session, ['tag']),
title='title',
content='content'
)
self.session.add(issue)
self.session.flush()
return issue.id
def create_solution(self, user_id, issue_id=None):
"""
create a solution with the given user id and issue id,
returns the id of the created solution
"""
with transaction.manager:
solution = Solution(
author_id=user_id,
tags=Tag.from_names(self.session, ['tag']),
title='title',
content='content'
)
self.session.add(solution)
self.session.flush()
return solution.id
def create_comment(self, user_id, parent_id, content):
"""
create a comment with the given user id, parent id and content
returns the id of the created comment
"""
with transaction.manager:
comment = Comment(
author_id=user_id,
content=content,
parent_id=parent_id
)
self.session.add(comment)
self.session.flush()
return comment.id
class CommentListTest(WebTest):
def test_no_listing_available(self):
self.app.options('/api/comments', status=404)
class CommentViewTest(BaseCommentTest):
def test_get_comment_on_solution_comment(self):
sid = self.create_solution(self.u1)
cid = self.create_comment(self.u2, sid, 'comment content')
data = self.app.get(
'/api/comments/{}'.format(cid),
).json
assert cid == data['id']
assert 'comment content' == data['content']
assert self.u2 == data['author']['id']
assert 'ctime' in data
assert data['ctime'] is not None
assert 'mtime' in data
assert data['mtime'] is None
assert data['user_vote'] == 0
def test_get_comment_on_issue_comment(self):
sid = self.create_solution(self.u2)
cid = self.create_comment(self.u3, sid, 'cc')
data = self.app.get(
'/api/comments/{}'.format(cid)
).json
assert cid == data['id']
assert 'cc' == data['content']
assert self.u3 == data['author']['id']
def test_get_comment_on_comment_comment(self):
sid = self.create_solution(self.u3)
c1id = self.create_comment(self.u1, sid, 'cc1')
c2id = self.create_comment(self.u2, c1id, 'cc2')
data = self.app.get(
'/api/comments/{}'.format(c2id)
).json
assert c2id == data['id']
assert 'cc2' == data['content']
assert self.u2 == data['author']['id']
def _test_update(self, cid, token, key, value):
self.app.put_json(
'/api/comments/{}'.format(cid),
            {key: value},
headers=self.make_token_header(token)
)
def _test_put_comment_on(self, parent_id):
cid = self.create_comment(self.u2, parent_id, 'contentx')
assert ('contentx', self.u2, parent_id) == self.session.query(
Comment.content, Comment.author_id, Comment.parent_id).filter(
Comment.id == cid).first()
res = self.app.put_json(
'/api/comments/{}'.format(cid),
{'content': 'updated content'},
headers=self.make_token_header(self.tok2)
)
assert 'updated content' == res.json['content']
assert 'updated content' == \
self.session.query(Comment).get(cid).content
assert 'ctime' in res.json
assert res.json['ctime'] is not None
assert 'mtime' in res.json
assert res.json['mtime'] is not None
self.app.put_json(
'/api/comments/{}'.format(cid),
{'content': 'not allowed to access'},
headers=self.make_token_header(self.tok1),
status=403
)
self.app.put_json(
'/api/comments/{}'.format(cid),
{'content': 'not logged in'},
status=401
)
def test_put_comment_on_solution(self):
self._test_put_comment_on(self.create_solution(self.u2))
def test_put_comment_on_issue(self):
self._test_put_comment_on(self.create_issue(self.u3))
def test_put_comment_on_comment(self):
self._test_put_comment_on(
self.create_comment(
self.u1,
self.create_solution(
self.u2
),
'content'
)
)
def test_comment_on_comment(self):
pcid = self.create_comment(self.u1, self.create_issue(self.u2), 'xxx')
data = self.app.post_json(
'/api/comments/{}/comments'.format(pcid),
{'content': 'my content'},
headers=self.make_token_header(self.tok3)
).json
assert pcid == data['parent']['id']
assert 'comment' == data['parent']['type']
assert self.u3 == data['author']['id']
assert 0 == data['votes']
class CommentVoteTest(BaseCommentTest):
"""Test vote on comment, copied from IssueVoteTest"""
def setUp(self):
super().setUp()
self.cid = self.create_comment(
self.u1,
self.create_issue(self.u2),
'content'
)
self.token_header = self.make_token_header(self.tok1)
def assertVoteValue(self, value):
resp = self.app.get(
'/api/comments/{}/vote'.format(self.cid),
headers=self.token_header
)
self.assertEqual(
{
'value': value
},
resp.json
)
def voteUp(self, after):
res = self.app.put_json(
'/api/comments/{}/vote'.format(self.cid),
{'value': 1},
headers=self.token_header
)
self.assertEqual(
after,
res.json['votes'],
)
def voteDown(self, after):
res = self.app.put_json(
'/api/comments/{}/vote'.format(self.cid),
{'value': -1},
headers=self.token_header
)
self.assertEqual(
after,
res.json['votes'],
)
def unvote(self, after):
res = self.app.delete(
'/api/comments/{}/vote'.format(self.cid),
headers=self.token_header
)
self.assertEqual(
after,
res.json['votes'],
)
def test_vote_zero(self):
self.assertVoteValue(0)
def test_vote_up(self):
self.voteUp(1)
self.assertVoteValue(1)
def test_vote_down(self):
self.voteDown(-1)
self.assertVoteValue(-1)
def test_vote_multiple(self):
self.assertVoteValue(0)
self.voteDown(-1)
self.assertVoteValue(-1)
self.unvote(0)
self.assertVoteValue(0)
self.voteUp(1)
self.assertVoteValue(1)
self.voteDown(-1)
self.assertVoteValue(-1)
class CommentCommentQueryTest(BaseCommentTest):
def setUp(self):
super().setUp()
sid = self.create_solution(self.u3)
self.cid = self.create_comment(self.u1, sid, 'cc1')
for i in range(10):
with transaction.manager:
self.app.post_json(
'/api/comments/{}/comments'.format(self.cid),
{
'content': str(i)
},
headers=self.make_token_header(self.tok3)
)
with transaction.manager:
self.session.query(Comment).filter(Comment.content == '9')\
.one().ctime -= datetime.timedelta(days=1)
def test_ordering(self):
jobj = self.app.get(
'/api/comments/{}/comments'.format(self.cid)
).json
assert 10 == len(jobj['data'])
assert '9' == jobj['data'][0]['content']
for i, comment in enumerate(jobj['data'][1:]):
assert str(i) == comment['content']
def test_limit(self):
jobj = self.app.get(
'/api/comments/{}/comments?limit=1'.format(self.cid)
).json
assert 1 == len(jobj['data'])
assert '9' == jobj['data'][0]['content']
def test_offset(self):
jobj = self.app.get(
'/api/comments/{}/comments?offset=2'.format(self.cid)
).json
assert 8 == len(jobj['data'])
for i, comment in enumerate(jobj['data'], start=1): # skip 9, 0
assert str(i) == comment['content']
def test_limit_and_offset(self):
jobj = self.app.get(
'/api/comments/{}/comments?limit=3&offset=8'.format(self.cid)
).json
assert 2 == len(jobj['data'])
assert '7' == jobj['data'][0]['content']
assert '8' == jobj['data'][1]['content']
|
#!/usr/bin/env python
from __future__ import print_function, division
import numpy as np
# SAA, which includes the SouthWest Polar Horn region
saacols = np.loadtxt('saa_lonlat.txt')
# For .reg file need to convert lon to [0,360)
lon = saacols[:,0]
lon[lon<0]+= 360.0
saacols[:,0] = lon
outfile = open('saa.reg','w')
print("-polygon(",file=outfile,end="")
coordstring = ", ".join(["{0}".format(x) for x in saacols.flatten()])
print(coordstring,file=outfile,end="")
print(")",file=outfile)
outfile.close()
outfile = open('polarhorns.reg','w')
## Northern Polar Horn
nphcols = np.loadtxt('nph_lonlat.txt')
# For .reg file need to convert lon to [0,360)
lon = nphcols[:,0]
lon[lon<0]+= 360.0
nphcols[:,0] = lon
print("-polygon(",file=outfile,end="")
coordstring = ", ".join(["{0}".format(x) for x in nphcols.flatten()])
print(coordstring,file=outfile,end="")
print(")",file=outfile)
# North-Eastern Polar Horn
nephcols = np.loadtxt('neph_lonlat.txt')
# For .reg file need to convert lon to [0,360)
lon = nephcols[:,0]
lon[lon<0]+= 360.0
nephcols[:,0] = lon
print("-polygon(",file=outfile,end="")
coordstring = ", ".join(["{0}".format(x) for x in nephcols.flatten()])
print(coordstring,file=outfile,end="")
print(")",file=outfile)
# Southern Polar Horn
sphcols = np.loadtxt('sph_lonlat.txt')
# For .reg file need to convert lon to [0,360)
lon = sphcols[:,0]
lon[lon<0]+= 360.0
sphcols[:,0] = lon
print("-polygon(",file=outfile,end="")
coordstring = ", ".join(["{0}".format(x) for x in sphcols.flatten()])
print(coordstring,file=outfile,end="")
print(")",file=outfile)
outfile.close()
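# Illustrative sketch (not part of the original script): the four nearly
# identical blocks above could be collapsed into a single helper like this
# ('write_polygon' is a hypothetical name).
def write_polygon(outfile, lonlat_file):
    cols = np.loadtxt(lonlat_file)
    lon = cols[:, 0]
    lon[lon < 0] += 360.0  # .reg files need longitude in [0, 360)
    cols[:, 0] = lon
    coordstring = ", ".join("{0}".format(x) for x in cols.flatten())
    print("-polygon(", coordstring, ")", sep="", file=outfile)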
|
#!/usr/bin/python
# script preparing tab file to plot in R for small RNA profiling
# version 1 29-1-2012
# Usage plotter.py <bowtie input> <min size> <max size> <normalization factor> <tabular output>
import sys
def acquisition (file2parse, sizerange):
F = open (file2parse)
plus_table = {}
minus_table = {}
for line in F:
field = line.split()
coordinate = int( field[3] )
strand = field[1]
sequence = field[4]
size = len (sequence )
if strand == "+" and size in sizerange:
plus_table[coordinate] = plus_table.get(coordinate, 0) + 1
if strand == "-" and size in sizerange:
coordinate = coordinate + size -1 # 23-11-2012 : this line was missing ! it is a BUG that probably altered the Nature maps :-((
minus_table[coordinate] = minus_table.get(coordinate, 0) + 1
return plus_table, minus_table
def output_table (plus_table, minus_table, Nfactor, output):
Nfactor = float(Nfactor)
plus_coordinates = set( plus_table.keys() )
minus_coordinates = set( minus_table.keys() )
coords = sorted (plus_coordinates.union (minus_coordinates) )
    ## added 23-2-2013 to have, instead, exhaustive coordinates
## coords = range (min(coords), max(coords) + 1)
##
OUT = open (output, "w")
print >> OUT, "coord\tplus\tminus"
for coordinate in coords :
print >> OUT, "%s\t%s\t%s" % ( coordinate, plus_table.get(coordinate, 0)*Nfactor, - minus_table.get(coordinate, 0)*Nfactor )
def sizing (minsize, maxsize) :
size_range = range ( int (minsize), int (maxsize) + 1 )
return size_range
plus_table, minus_table = acquisition (sys.argv[1], sizing ( sys.argv[2], sys.argv[3] ) )
output_table ( plus_table, minus_table, sys.argv[4], sys.argv[5] )
|
import json
from tests.base_test import BaseTestCase
class UpdateQuestionTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self.url = '/questions/'
def add_question(self):
"""
Create a dummy question
"""
user = self.create_user(self.user)
question = self.create_question(self.question, user.id)
return self.get_user_token(user), question.id
def test_update_question(self):
"""
Test updating a question
"""
user_token, question_id = self.add_question()
headers = self.get_request_header(user_token)
data = json.dumps(self.update_question)
url = f'{self.url}{question_id}'
response = self.test_client.put(url, headers=headers, data=data)
data = json.loads(response.data.decode())
self.assertEqual(response.status_code, 200)
self.assertEqual(data['question']['id'], question_id)
def test_update_question_invalid_id(self):
"""
Test updating a question with invalid ID
"""
user_token, _ = self.add_question()
headers = self.get_request_header(user_token)
data = json.dumps(self.update_question)
url = f'{self.url}0'
response = self.test_client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 400)
def test_update_question_invalid_request(self):
"""
Test updating a question with invalid request body
"""
user_token, question_id = self.add_question()
headers = self.get_request_header(user_token)
data = json.dumps(self.invalid_update_question)
url = f'{self.url}{question_id}'
response = self.test_client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 400)
def test_update_question_not_authenticated(self):
"""
Test updating a question when not logged in
"""
_, quesion_id = self.add_question()
headers = self.get_request_header()
data = json.dumps(self.update_question)
url = f'{self.url}{quesion_id}'
response = self.test_client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 401)
def test_update_question_not_authorized(self):
"""
        Test updating another user's questions
"""
_, quesion_id = self.add_question()
user = self.create_user(self.other_user)
user_token = self.get_user_token(user)
headers = self.get_request_header(user_token)
data = json.dumps(self.update_question)
url = f'{self.url}{quesion_id}'
response = self.test_client.put(url, headers=headers, data=data)
self.assertEqual(response.status_code, 403)
|
from crhelper import CfnResource
import json
import boto3
helper = CfnResource()
fwclient = boto3.client('network-firewall')
ec2client = boto3.client('ec2')
@helper.create
@helper.update
def addRouteTableToIGW(event, _):
routes = event['ResourceProperties']['Routes']
print(routes)
firewallName = event['ResourceProperties']['FirewallName']
print("FirewallName: "+firewallName)
fw = fwclient.describe_firewall(FirewallName=firewallName)
fwstate = fw['FirewallStatus']['SyncStates']
gatewayId=event['ResourceProperties']['GatewayId']
print(json.dumps(fwstate,indent=2))
routeTable = ec2client.create_route_table(
VpcId=event['ResourceProperties']['VpcId'],
TagSpecifications=[ {
'ResourceType': 'route-table',
'Tags': [
{
'Key': 'Name',
'Value': '3x3-igw'
}
]
}
]
)
routeTableId = routeTable['RouteTable']['RouteTableId']
for route in routes:
az = route['AvailabilityZone']
protectedSubnet = route['ProtectedSubnet']
vpce = fwstate[az]['Attachment']['EndpointId']
print("VPCE: "+vpce)
ec2subnet = ec2client.describe_subnets(
Filters=[
{
'Name': 'subnet-id',
'Values': [protectedSubnet]
}
]
)
protectedCidrBlock = ec2subnet['Subnets'][0]['CidrBlock']
print("Protected Cidr Block: "+protectedCidrBlock)
ec2client.create_route(
RouteTableId=routeTableId,
DestinationCidrBlock=protectedCidrBlock,
VpcEndpointId=vpce,
)
print("Associating....")
ec2client.associate_route_table(
RouteTableId=routeTableId,
GatewayId=gatewayId
)
print("Associated...")
return routeTableId
@helper.delete
def deleteRouteTable(event, _):
toDelete = event['PhysicalResourceId']
print("Deleting route table: "+toDelete)
tables = ec2client.describe_route_tables(
Filters=[
{
'Name': 'route-table-id',
'Values': [
toDelete
]
}
]
)
    for table in tables['RouteTables']:
        for route in table['Routes']:
            # the local route cannot be deleted, so skip it
            if route.get('GatewayId') == 'local':
                continue
            ec2client.delete_route(
                RouteTableId=toDelete,
                DestinationCidrBlock=route['DestinationCidrBlock']
            )
def handler(event, context):
print(event)
    helper(event, context)
|
import time
import pygame as pg
import sys
from game import Game
from screens.settings_screen import SettingsScreen
from screens.game_screen import GameScreen
from settings import *
OPTION_COLOR = (128, 135, 239)
SELECTED_OPTION_COLOR = (255, 255, 255)
INITIAL_V_GAP = 140
V_SPACING = 5
class Menu(GameScreen):
def __init__(self, menu, display):
super().__init__(display)
self.menu = menu
self.menu_rects = {}
self.last_axis_motion = 0.0
def run(self):
self.draw() # draw first time to ignore self.updated
while self.playing:
self.dt = self.clock.tick(FPS) / 1000
self.events()
self.update()
self.draw()
def events(self):
self.updated = False
action = None
for event in pg.event.get():
if event.type == pg.QUIT:
quit_game(self)
# keyboard
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
quit_game(self)
if event.key == pg.K_DOWN:
action = 'down'
if event.key == pg.K_UP:
action = 'up'
if event.key == pg.K_RETURN:
action = 'enter'
# mouse
if event.type == pg.MOUSEMOTION:
self.mousex, self.mousey = pg.mouse.get_pos()
for i in range(len(self.menu_rects.items())):
if self.menu_rects[i].collidepoint(self.mousex, self.mousey):
self.menu['selected_option'] = i
self.updated = True
break
if event.type == pg.MOUSEBUTTONDOWN:
for i in range(len(self.menu_rects.items())):
if self.menu_rects[i].collidepoint(self.mousex, self.mousey):
action = 'enter'
break
# joystick
if event.type == pg.JOYBUTTONDOWN:
if event.button == J_BUTTONS['A']:
action = 'enter'
if event.type == pg.JOYAXISMOTION:
if event.dict['axis'] == 1:
if time.time() >= self.last_axis_motion + 0.3:
if event.dict['value'] < -JOYSTICK_THRESHOLD:
action = 'up'
self.last_axis_motion = time.time()
elif event.dict['value'] > JOYSTICK_THRESHOLD:
action = 'down'
self.last_axis_motion = time.time()
if action == 'down':
self.menu["selected_option"] += 1
self.menu["selected_option"] %= len(self.menu["options"])
self.updated = True
elif action == 'up':
self.menu["selected_option"] -= 1
self.menu["selected_option"] %= len(self.menu["options"])
self.updated = True
elif action == 'enter':
self.menu["options"][self.menu["selected_option"]]["func"](self)
def update(self):
pass
def draw(self):
if self.updated:
self.display.fill(BG_COLOR)
self.draw_game_title()
self.draw_options()
pg.display.flip()
def draw_options(self):
count = 0
x_offset = 0
for option in self.menu["options"]:
if self.menu["selected_option"] == count:
color = SELECTED_OPTION_COLOR
else:
color = OPTION_COLOR
rend = self.font.render(option["name"], True, color)
if x_offset == 0:
x_offset = SCREEN_WIDTH // 2 - rend.get_width() // 2
rect = rend.get_rect().move(
x_offset,
INITIAL_V_GAP + (rend.get_height() + V_SPACING) * count)
self.menu_rects[count] = rect
self.display.blit(rend, rect)
count += 1
def draw_game_title(self):
surface = self.font.render(GAME_TITLE, True, (255, 255, 255))
x = SCREEN_WIDTH // 2 - surface.get_width() // 2
y = 40
self.display.blit(surface, (x, y))
def new_game(self):
self.playing = False
game = Game(self.display)
game.load()
game.run()
def quit_game(self):
pg.quit()
sys.exit()
def load_game(self):
print("LOAD GAME")
# TODO: finish load game option
def settings(self):
print("SETTINGS")
SettingsScreen(self.display).run()
self.updated = True
def main_menu(display):
return Menu({
"selected_option": 0,
"options": [
{
"name": "NEW GAME",
"func": new_game
},
{
"name": "LOAD GAME",
"func": load_game
},
{
"name": "SETTINGS",
"func": settings
},
{
"name": "QUIT",
"func": quit_game
},
]
}, display)
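# Illustrative sketch (not part of the original module): how the menu is
# typically created and run from a game entry point. SCREEN_WIDTH and
# SCREEN_HEIGHT come from settings (imported above via 'from settings import *').
def _demo_run_menu():
    pg.init()
    display = pg.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    main_menu(display).run()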
|
#objective: look for all 3-regular graphs of size 8.
import numpy as np
from collections import Counter
from itertools import permutations
import igraph
# How does the loop work?
# I have a list called Astack; each element is a list of two things:
# 1st is a matrix; 2nd an integer
# integer: -1 means it's no use to keep working on the matrix as it can't work (not enough room for 1s for instance)
# 1 means it is a correct case of application
# 0 means we can't tell yet
#
# a while loop is running as long as we have some 0s inside.
#
# TREATMENT:
# will do as:
# while flag = true:
# flag = false
# flag_i, A, Message= (treatment fctn)
# A = Sym(A)
# error_dect = check_col, check_liin(A)
# if error_dect :
# flag = true
#
def main():
A,message = init()
print(message)
Astack = [[A, 0]]
worktodoB, index = worktodo(Astack)
while worktodoB:
print("wokr: ", worktodoB)
Bstack = treatment(Astack[index][0])
statestack = [Astack[i][1] for i in range(len(Astack))]
if Bstack[0][1] == 0:
Astack = Astack + Bstack
else:
Astack[index] = Bstack[0]
worktodoB, index = worktodo(Astack)
#print(Astack)
print(Astack)
c=0
goodStack = []
for AX in Astack:
A = AX[0]
if np.count_nonzero(A==1) == 24:
c+=1
goodStack.append(post_treatment(A))
print(" a total of " + str(c) + " matrixes shoud be observed.")
for i in range(len(goodStack)):
path = "Internship-GSCOP21/images/k-reg-all-3-8/"+str(i+1)+".png"
print(path)
if i == 9:
print(goodStack[i])
#graph = igraph.Graph.Adjacency( np.ndarray.tolist((goodStack[i] > 0)), mode='undirected')
#out = igraph.plot(graph, path)
def post_treatment(A):
'''
returns A post-treated; ie all the -1 are 0 now.
output format: A.
'''
for i in range(8):
for j in range(8):
if A[i,j]==-1:
A[i,j]=0
return A
def worktodo(Astack):
'''
determines if the stack requires more work to do or not.
return bool, index
    if bool is True, index is the position of a matrix where there is work to be done.
'''
for i in range(len(Astack)):
if Astack[i][1] == 0:
return True, i
return False, -1
def treatment(A):
'''
treat the matrix A as much as possible using availables functions.
returns [[A, int]].
int = -1 : the matrix is no use.
int = 1: the matrix is done
    int = 0: the result is a list (a stack) of elements, as we had to make a choice.
'''
flag = True
while flag == True:
flag = False
flag1, flag2, A, message = forbid(A)
A, flag11 = sym(A)
print(message)
#A, flag2 , message = fill_lin(A)
#A = sym(A)
#print(message)
A, flag3 , message = fill_col(A)
A, flag12 = sym(A)
print(message)
flag = flag2 or flag3
flag_col, message = check_col(A)
print(message)
if flag_col == False or flag12 == False or flag11 == False or flag1 == False:
return([[A, -1]])
    if np.count_nonzero(A == 1) == 24: # in that case the matrix has enough 1s, and not too many, as it passed the col/lin checks
return([[A, 1]])
else:
for i in range(8):
if np.count_nonzero(A[i]==0)>0:
print("Choice made on col "+ str(i))
Astack = choice(A, i)
out = []
for x in range(len(Astack)):
out.append([Astack[x], 0])
return out
def sym(A):
'''
    Symmetrize the matrix A.
Returns A, bool
'''
for i in range(8):
for j in range(8):
if A[i,j]*A[j,i] <0 : #ie they have been assigned to different signs (not 0) which is a failure
return A, False
else:
d = A[i,j]+A[j,i]
A[i,j], A[j,i] = int(d/max(1, abs(d))), int(d/max(1, abs(d)))
return A, True
def check_col(A):
'''
check if all the cols of A are valid; ie no more than 5 -1 and no more than 3 1.
'''
for i in range(8):
counter = Counter(np.ndarray.tolist(A[i]))
m1, p1 = counter[-1], counter[1]
if m1 > 5 or p1 > 3:
return False, "Col. error: "+ str(i) + "."
return True, "Col check: ok."
def check_lin(A):
'''
check if all the lines of A are valid; ie no more than 5 -1 and no more than 3 1.
'''
for i in range(8):
counter = Counter(np.ndarray.tolist(A[i]))
m1, p1 = counter[-1], counter[1]
if m1 > 5 or p1 > 3:
return False, "Lin. error: " + str(i) + "."
return True, "Lin. check: ok."
def init():
'''
return a matrix of size 8 corresponding to the correct size, with correct initial settings.
format output: A, message
'''
A = np.zeros((8,8))
A[0, 1] = 1
A[0,2] = 1
A[0,3] = 1
A[1, 4]= 1
A[1,5] = 1
for i in range(8):
A[i,i] = -1
A, flag = sym(A)
return A, "Matrix initialiazed."
def forbid(A):
'''
on every line, then every col, every time there is two 1s, check the associated numbers are not together (to prevent triangles) and put "-1" where needed.
    returns: bool, bool, A, message. If an error is detected (ie there should be a -1 but there is a 1), immediately return False, A, str.
1st bool is error detection; second is modification
'''
c0 = np.count_nonzero(A == 0)
if c0 == 0:
return False, False, A, "no modification made by forbid as A is already complete."
for i in range(8):
L = []
flag2 = False
for j in range(8):
if A[i,j]==1:
L.append(j)
if len(L)==2:
a=L[0]
b=L[1]
if A[a,b] == 1 or A[b,a] == 1:
return False, False, A, "Forbid error: "+ str(a)+", "+str(b)+'.'
else:
A[a,b] = -1
A[b,a] = -1
flag2 = True
index = i
elif len(L) == 3:
a,b,c = L[0], L[1], L[2]
if A[a,b] == 1 or A[b,a] == 1 or A[a,c] == 1 or A[c,a] == 1 or A[b,c] == 1 or A[c,b] == 1:
return False,False, A, "Forbid error: "+ str(a)+", "+str(b)+ ', '+ str(c)+'.'
else:
A[a,b]= -1
A[b,a]= -1
A[a,c]= -1
A[c,a]= -1
A[b,c]= -1
A[c,b]= -1
flag2 = True
index = i
if flag2:
message ="Forbid did modify A at index " + str(index)
else:
message = 'Forbid did no modification.'
return True, flag2, A, message
def mod(A,i,j, val=1):
'''
Set i,j at val. val shall be 1 or -1.
'''
A[i,j] = val
return A
def fill_col(A):
'''
If possible, will fill a col.
    flag: bool indicating whether a modification was made.
'''
flag = False
message = 'filled cols: '
for i in range(8):
counter = Counter(np.ndarray.tolist(A[i]))
m1, p1 = counter[-1], counter[1]
if m1 == 5 and p1 != 3:
flag = True
message += str(i) + ' '
for j in range(8):
if A[i,j] == 0:
mod(A, i,j)
elif p1 == 3 and m1 !=5:
flag = True
message += str(i) + ' '
for j in range(8):
if A[i,j] == 0:
mod(A, i,j, -1)
if flag:
return A, flag, message
return A, flag, 'no modification by fill_col. (flag value: ' + str(flag) + ')'
def fill_lin(A):
'''
If possible, will fill a lin.
    flag: bool indicating whether a modification was made.
'''
flag = False
message = 'filled lins: '
    for j in range(8):
        m1 = 0
        p1 = 0
        for i in range(8):
            if A[i,j] == -1:
                m1 += 1
            elif A[i,j] == 1:
                p1 += 1
        if m1 == 5:
            flag = True
            message += str(j) + ' '
            for i in range(8):
                if A[i,j] == 0:
                    mod(A, i,j)
        if p1 == 3:
            flag = True
            message += str(j) + ' '
            for i in range(8):
                if A[i,j] == 0:
                    mod(A, i,j, -1)
if flag:
return A, flag, message
return A, flag, 'no modification by fill_lin.'
def choice(A, i):
'''
makes a choice for the i-th line.
return a list a matrix. The i-th line must have at least an ambiguity, for instance 4 -1 and 2 1.
The ambiguity is not checked in this function.
'''
L= []
m1 = 0
p1 = 0
L = np.ndarray.tolist(A[i])
counter= Counter(L)
perm_list = [1]*(3-counter[1]) + [-1]*(5-counter[-1])
p = [list(p) for p in permutations(perm_list)]
# remove duplicates
new_p = []
for elem in p:
if elem not in new_p:
new_p.append(elem)
p = new_p
out_lists = []
for x in range(len(p)):
out_lists.append([int(i) if i!=0 else p[x].pop() for i in L])
n = len(out_lists)
#return(out_lists)
Astack=[]
for x in range(n):
A[i] = np.array(out_lists[x])
Astack.append(np.array(A, copy=True))
return Astack
main()
|
from flask import Flask, flash, render_template, abort, session, redirect, request
from flask_sqlalchemy import SQLAlchemy
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from datetime import datetime
import sqlalchemy
from flask_mail import Mail, Message
from config import mail_username, mail_password
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///posts.db"
app.config['SECRET_KEY'] = "RAJATSAXENA14"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['MAIL_SERVER'] = "smtp-mail.outlook.com"
app.config['MAIL_PORT'] = 587
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
app.config['MAIL_USERNAME'] = mail_username
app.config['MAIL_PASSWORD'] = mail_password
mail = Mail(app)
db = SQLAlchemy(app)
admin = Admin(app)
class Posts(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255), nullable=False)
subtitle = db.Column(db.String(255), nullable=True)
content = db.Column(db.Text, nullable=False)
author = db.Column(db.String(255))
date_posted = db.Column(db.DateTime)
slug = db.Column(db.String(255))
views = db.Column(db.Integer,default=0)
comments = db.Column(db.Integer,default=0)
class Comments(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(200), unique=False, nullable=False)
email = db.Column(db.String(200), unique=False, nullable=False)
message = db.Column(db.Text, nullable=False)
post_id = db.Column(db.Integer, db.ForeignKey('posts.id', ondelete='CASCADE'), nullable=False)
posts = db.relationship('Posts', backref=db.backref('posts',lazy=True, passive_deletes=True))
date_pub = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
class SecureModelView(ModelView):
def is_accessible(self):
if "logged_in" in session:
return True
else:
abort(403)
admin.add_view(SecureModelView(Posts, db.session))
admin.add_view(SecureModelView(Comments, db.session))
@app.route("/")
def homepage():
posts = Posts.query.all()
return render_template("index.html", posts=posts)
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/post/<string:slug>", methods=['POST','GET'])
def post(slug):
try:
post = Posts.query.filter_by(slug=slug).one()
comment = Comments.query.filter_by(post_id=post.id).all()
post.views = post.views + 1
db.session.commit()
Thanks = ""
if request.method == "POST":
post_id = post.id
name = request.form.get('name')
email = request.form.get('email')
message = request.form.get('message')
comment = Comments(name=name, email=email, message=message, post_id=post_id)
db.session.add(comment)
post.comments = post.comments + 1
db.session.commit()
            flash('Posted successfully!', 'success')
return redirect(request.url)
return render_template("post.html", post=post, comment=comment, Thanks=Thanks)
except sqlalchemy.orm.exc.NoResultFound:
abort(404)
@app.route("/contact", methods=['GET', 'POST'])
def contact():
if request.method == "POST":
name = request.form.get('name')
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
msg = Message(
subject=f"Mail from {name}", body=f"Name: {name}\nE-Mail: {email}\nPhone: {phone}\n\n\n{message}", sender=mail_username, recipients=['[email protected]'])
mail.send(msg)
return render_template("contact.html", success=True)
return render_template("contact.html")
@app.route("/auth", methods=["GET", "POST"])
def login():
if request.method == "POST":
if request.form.get("username") == "admin" and request.form.get("password") == "rajatsaxena":
session['logged_in'] = True
return redirect("/admin")
else:
return render_template("login.html", failed=True)
return render_template("login.html")
@app.route("/logout")
def logout():
session.clear()
return redirect("/")
if __name__ == "__main__":
app.run(debug=True)
|
# https://leetcode.com/problems/count-number-of-teams
from typing import List  # needed when running outside the LeetCode environment
class Solution:
def numTeams(self, rating: List[int]) -> int:
N = len(rating)
if N < 3:
return 0
res = 0
for i in range(N):
for j in range(i + 1, N):
if rating[i] < rating[j]:
for k in range(j + 1, N):
if rating[j] < rating[k]:
res += 1
for j in range(i + 1, N):
if rating[i] > rating[j]:
for k in range(j + 1, N):
if rating[j] > rating[k]:
res += 1
return res
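# Illustrative sketch (not part of the original solution): an O(n^2)
# alternative that fixes the middle soldier j and counts smaller/larger
# ratings on each side (the problem guarantees distinct ratings).
class SolutionQuadratic:
    def numTeams(self, rating: List[int]) -> int:
        n = len(rating)
        res = 0
        for j in range(n):
            less_left = sum(rating[i] < rating[j] for i in range(j))
            less_right = sum(rating[k] < rating[j] for k in range(j + 1, n))
            greater_left = j - less_left
            greater_right = n - j - 1 - less_right
            res += less_left * greater_right + greater_left * less_right
        return res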
|
#!/usr/bin/python3
import sys, re, math, time
#The main function, runs the program, takes inputFile as a parameter
def main(inputFile):
#Cities is a dictionary that contains all the information from the inputFile
cities = inputFunction(inputFile)
#Start time will begin the timer in order to track how long the process takes
startTime = time.time()
#This line will build the initial route using the nearest neighbor algorithm
trip = buildRoute(cities)
#This line will optimize the solution using 2-OPT
trip = twoOpt(cities, trip)
#End time will mark the end of the timer
endTime = time.time()
#Total time will be how ever long the inputFile took to process and optimize
totalTime = endTime - startTime
#The outputFunction will output a .tour file
outputFunction(inputFile, trip)
#The run time is printed for ease of use
print("Run-time: " + str(totalTime), 's')
#The inputFunction takes the inputFile and reads all the data, placing it into a dictionary called cities
def inputFunction(inputFile):
#The file is opened
file = open(inputFile, 'r')
#The first line is read which contains the number of cities
line = file.readline()
#Another temp var named lines is used for the while loop bound
lines = line
#A temp var i is used to track how many lines have been read
i = 0
#The cities dictionary is declared
cities = dict()
#In the while loop, information about the cities is read
while i < int(lines):
#A new line is read from the inputFile
line = file.readline()
#The entire line is parsed
line_parse = re.findall(r'[^,;\s]+', line)
#The key is set to the city number and the x, y coords are set as the values
cities[int(line_parse[0])] = [int(line_parse[1]), int(line_parse[2])]
#i is increased to keep track of how many lines have been read
i += 1
#The file is closed
file.close()
#Return the cities dictionary
return cities
#The buildRoute function utilizes the nearest neighbor algorithm to setup a route that will be optimized
def buildRoute(cities):
currentCity = 0
route = [currentCity]
#This unvisited var will keep track of the currently unvisited cities
unvisited = {x for x in cities.keys()}
#The first city is removed because it is the currentCity
unvisited.remove(currentCity)
    #Total distance is initialized to 0
totalDist = 0
    #While there are still unvisited cities, this while loop will run
while (len(unvisited) > 0):
#We want to choose the city with the lowest edge weight
        #the minDist var starts at infinity so the first computed distance always becomes the new minimum
        minDist = float('inf')
#For loop that checks all unvisited cities
for city in unvisited:
#The distance between the currentCity and the next city is calculated
distance = distanceBetween(cities, currentCity, city)
#If the calculated distance is less than the minDist var, enter this if
if distance < minDist:
#The minDist is set to distance because that is the new minDist
minDist = distance
#nextCity is set to continue the for loop and comparisons
nextCity = city
        #The totalDist val is updated to include the most recent leg of the trip
totalDist += minDist
#Set currentCity to move to the next unvisited city
currentCity = nextCity
#Add the currentCity to the tour
route += [currentCity]
#Remove the currentCity from the unvisited var
unvisited.remove(currentCity)
#Add the last leg of the tour to the totalDist
totalDist += distanceBetween(cities, route[0], route[len(route) - 1])
#Return the total distance
return [totalDist] + route
#The twoOpt function will optimize the route built by the nearest neighbor algorithm
def twoOpt(cities, route):
#The first distance is declared
distance = route[0]
#The route is set to the rest of the values in cities
route = route[1:]
#The outer for loop runs 2 values less than cities is long
for i in range(len(cities) - 2):
#The inner for loop runs 1 less time than cities is long
for j in range(i + 1, len(cities) - 1):
#dist1 is the current distance between a pair of points
dist1 = distanceBetween(cities, route[i], route[i + 1]) + distanceBetween(cities, route[j], route[j + 1])
#dist2 is the distance when those points are swapped
dist2 = distanceBetween(cities, route[i], route[j]) + distanceBetween(cities, route[i + 1], route[j + 1])
#If the swapped point is better, we want to use it
if dist2 < dist1:
                #This performs the 2-opt move: the segment between i+1 and j is reversed to form the new route
newRoute = route[:i + 1] + route[j:i:-1] + route[j + 1:]
#The distance is calculated
distance = distance - dist1 + dist2
route = newRoute
#The distance is returned
return [distance] + route
#the distanceBetween function will find the distance between two cities
def distanceBetween(cities, city1, city2):
#This is the y coord
citiesY = cities.get(city1)[1] - cities.get(city2)[1]
#This is the x coord
citiesX = cities.get(city1)[0] - cities.get(city2)[0]
#the distance between them is returned
return int(round(math.sqrt(citiesX * citiesX + citiesY * citiesY)))
#the outputFunction will output a new file with the name inputFile + ".tour"
def outputFunction(inputFileName, trip):
#Add .tour to the fileName
    fileName = inputFileName + ".tour"
#Write each visited city into the new .tour file
with open(fileName, 'w') as f:
#For loop cycles through all of the cities that were visited
for value in trip:
f.write(str(value) + '\n')
#Close the file
f.close()
#Calls the main function
if __name__ == '__main__':
main(sys.argv[1]) |
"""
python-dirtt - Directory Tree Templater
(c) 2012 Robert Moggach and contributors
Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
dirtt is a standalone tool and library used to generate
directory and file structures from xml templates that describe
repeatedly used filesystem layouts such as project structures
or elements therein.
It provides a subclassed implementation of xml.sax.handler ContentHandler
with internal methods that read, parse, render, and execute builds of
user defined XML directory tree templates.
"""
__all__ = ['general','io','looper','template','introspection']
from general import *
import io
import looper
import template
|
from faker import Factory
from homes_to_let.models import LettingNote
from homes_to_let.factories.letting_factory import LettingFactory
import factory
fake = Factory.create('en_GB')
class LettingNoteFactory(factory.DjangoModelFactory):
class Meta:
model = LettingNote
property = factory.SubFactory(LettingFactory)
text = fake.text(100)
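# Note: fake.text(100) is evaluated once at class-definition time, so every LettingNote built by this
# factory shares the same text; wrapping it in factory.LazyFunction(lambda: fake.text(100)) would draw
# a fresh value per instance.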
|
# -*- coding: utf-8 -*-
"""
test_djangocms_highlightjs
------------
Tests for the `djangocms_highlightjs` models module.
"""
from __future__ import absolute_import, print_function, unicode_literals
from cms.views import details
from django.contrib.auth.models import AnonymousUser
from djangocms_highlightjs.models import HighlightText
from . import BaseTest
class TestHighlightjsModels(BaseTest):
example_text = """
def print_hello():
print(\"hello world!\")
"""
def test_add_plugin(self):
from cms.api import add_plugin
page_1, page_2 = self.get_pages()
data = {
'filename': 'test.py',
'body': self.example_text,
'theme': 'dark'
}
placeholder = page_1.placeholders.get(slot='content')
add_plugin(placeholder, 'HighlightPlugin', 'en', **data)
page_1.set_as_homepage()
page_1.publish('en')
# Get published page
public = page_1.get_public_object()
# plugin is the plugin instance we're going to render
plugin = public.placeholders.get(slot='content').get_plugins_list()[0]
request = self.get_page_request(public, AnonymousUser())
response = details(request, '')
self.assertContains(response, '<link rel="stylesheet" href="/static/djangocms_highlightjs/themes/dark.css">')
self.assertContains(response, """<pre id="highlight-%s" class="highlight-js">\n\t<strong>test.py</strong>\n\t<code>
def print_hello():
print("hello world!")
</code>
</pre>""" % plugin.pk)
self.assertContains(response, '<script src="/static/djangocms_highlightjs/js/highlight.pack.js"></script>')
def test_model_save(self):
plugin = HighlightText()
plugin.body = self.example_text
plugin.theme = 'arta'
plugin.save()
self.assertEqual(self.example_text, plugin.body)
def test_model_str(self):
plugin = HighlightText()
plugin.body = self.example_text
plugin.theme = 'arta'
plugin.save()
self.assertEqual('def print_hello():', str(plugin))
def test_model_str_filename(self):
plugin = HighlightText()
plugin.body = self.example_text
plugin.theme = 'arta'
plugin.filename = 'my_file.py'
plugin.save()
self.assertEqual(plugin.filename, str(plugin))
|
import os
import json
from uuid import uuid4
import logging
import shutil
import threading
from filelock import FileLock
from apluslms_file_transfer.server.utils import (get_update_files,
whether_allow_renew,
tempdir_path,)
from apluslms_file_transfer.exceptions import (GetFileUpdateError,
PublishError,
error_print,)
logger = logging.getLogger(__name__)
def files_to_update(upload_dir, course_name, upload_file_type, manifest_client, data):
""" Compare the files between the server and the client, and then get the files to update.
:param upload_dir: the directory path where the course directory located
:param str course_name: the name of the course to upload/update
:param str upload_file_type: the file type of the uploaded files
:param dict manifest_client: the file manifest in the client side
:param data: the initial dictionary containing info to send back to the client
:return: the dictionary that contains the manifest of the updated files to send back to the client
:rtype: dict
"""
course_dir = os.path.join(upload_dir, course_name)
# if the course has not been uploaded yet, upload all the files
if not os.path.exists(course_dir) or not os.path.isdir(course_dir):
data['exist'] = False
files_update = {'files_new': manifest_client,
'files_update': {},
'files_keep': {},
'files_remove': {}}
# else if the course already exists
else:
with open(os.path.join(course_dir, 'manifest.json'), 'r') as manifest_srv_file:
manifest_srv = json.load(manifest_srv_file)
if not whether_allow_renew(manifest_srv, manifest_client, upload_file_type):
raise GetFileUpdateError('Abort: the client version is older than server version')
data['exist'] = True # indicate the course exists in the server
# compare the files between the client side and the server side
# get list of files to upload / update
files_update = get_update_files(manifest_srv, manifest_client)
# get a unique process id for this uploading process
process_id = str(uuid4())
data['process_id'] = process_id
# create a temp directory where the files will be uploaded to
temp_dir = tempdir_path(upload_dir, course_name, process_id)
os.mkdir(temp_dir)
    # Store the files that will be updated in a temp json file
with open(os.path.join(temp_dir, 'files_to_update.json'), 'w') as f:
# f.write(json.dumps(files_to_update, sort_keys=True, indent=4))
json.dump(files_update, f, sort_keys=True, indent=4)
data['files_new'], data['files_update'] = files_update['files_new'], files_update['files_update']
return data
def publish_files(upload_dir, course_name, file_type, temp_course_dir, res_data):
""" Publish the uploaded files into the server
:param upload_dir: the directory path where the course directory located
:param course_name: the name of the course to upload/update
:param file_type: the file type of the uploaded files
:param temp_course_dir: the temporary directory where the uploaded files located
    :param res_data: the initial dictionary that contains the info to send back to the client
:return: the dictionary that contains the info to send back to the client
:rtype: dict
"""
    # if the course does not exist yet, rename the temp dir into place
course_dir = os.path.join(upload_dir, course_name)
if not os.path.exists(course_dir):
os.rename(temp_course_dir, course_dir)
    # if the course already exists
else:
manifest_srv_file = os.path.join(course_dir, 'manifest.json')
manifest_client_file = os.path.join(temp_course_dir, 'manifest.json')
lock_f = os.path.join(upload_dir, course_name + '.lock')
lock = FileLock(lock_f)
try:
with open(manifest_client_file, 'r') as f:
manifest_client = json.load(f)
with lock.acquire(timeout=1):
with open(manifest_srv_file, 'r') as f:
manifest_srv = json.load(f)
if not whether_allow_renew(manifest_srv, manifest_client, file_type):
                    raise PublishError('Abort: the client version is older than the server version')
os.rename(course_dir, course_dir + '_old')
os.rename(temp_course_dir, course_dir)
shutil.rmtree(course_dir + '_old')
os.remove(lock_f)
        except Exception:
logger.debug(error_print())
os.remove(lock_f)
raise PublishError(error_print())
res_data['msg'] = 'The course is successfully uploaded'
return res_data
def start_cleanup(static_path, cleanup_time):
""" The function for cleaning up the redundant directories in the server.
    The redundant directories are those that fail to be published.
    :param static_path: the path in which the course directories are located
:param cleanup_time: the time interval (seconds) of the action execution
"""
def cleanup():
dirs = next(os.walk(static_path))[1]
for temp_dir in [d for d in dirs if d.startswith('temp')]:
shutil.rmtree(os.path.join(static_path, temp_dir))
# Set the next thread to happen
global cleanup_thread
cleanup_thread = threading.Timer(cleanup_time, cleanup)
cleanup_thread.daemon = True
cleanup_thread.start()
global cleanup_thread
cleanup_thread = threading.Timer(cleanup_time, cleanup)
cleanup_thread.daemon = True
cleanup_thread.start()
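# A minimal usage sketch (hypothetical paths; the manifest dictionaries come from the client request):
# data = files_to_update(upload_dir='/srv/uploads', course_name='course-x',
#                        upload_file_type='html', manifest_client=manifest_from_client, data={})
# # ...the client then uploads the files listed in data['files_new'] / data['files_update']
# # into the temp directory created above, after which the upload is published:
# res = publish_files('/srv/uploads', 'course-x', 'html', temp_course_dir, res_data={})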
|
import numpy as np
import torch
class FAN(object):
def __init__(self):
import face_alignment
self.model = face_alignment.FaceAlignment(
face_alignment.LandmarksType._2D, flip_input=False,device='cpu')
def run(self, image):
'''
image: 0-255, uint8, rgb, [h, w, 3]
return: detected box list
'''
out = self.model.get_landmarks(image)
if out is None:
return [0]
else:
kpt = out[0].squeeze()
left = np.min(kpt[:, 0])
right = np.max(kpt[:, 0])
top = np.min(kpt[:,1])
bottom = np.max(kpt[:,1])
bbox = [left,top,right,bottom]
return bbox,'kpt68'
class MTCNN(object):
def __init__(self, device = 'cpu'):
'''
https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb
'''
from facenet_pytorch import MTCNN as mtcnn
self.device = device
self.model = mtcnn(keep_all=True)
def run(self, input):
'''
image: 0-255, uint8, rgb, [h, w, 3]
return: detected box
'''
out = self.model.detect(input[None,...])
if out[0][0] is None:
return [0]
else:
bbox = out[0][0].squeeze()
return bbox, 'bbox'
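# A minimal usage sketch (assumes `image` is an RGB uint8 array of shape [h, w, 3] loaded elsewhere):
# detector = MTCNN(device='cpu')
# result = detector.run(image)   # (bbox, 'bbox') on success, or [0] if no face is found
# fan = FAN()
# result = fan.run(image)        # (bbox, 'kpt68'), the box derived from 68 detected landmarks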
|
from flask import render_template, flash, Blueprint, url_for, redirect
from flaskwebsite.sarcasm_classifier.forms import TextInputForm
from flaskwebsite.sarcasm_classifier.function import load_model, predict
sarcasm_classifier = Blueprint("sarcasm_classifier", __name__)
@sarcasm_classifier.route("/sarcasm", methods=['GET', 'POST'])
def sarcasm():
form = TextInputForm()
if form.validate_on_submit():
text = form.text.data
learn = load_model()
result = predict(text, learn)
if result == 0:
flash("This is not sarcastic", "success")
elif result == 1:
flash("This is sarcastic", "danger")
else:
flash("An error has occurred. Please try again")
return redirect(url_for('sarcasm_classifier.sarcasm'))
return render_template("sarcasm.html", title="Sarcasm Detection", form=form)
|
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.gridspec import GridSpec
import numpy as np
import torch
def toy_plot(model, x, Y, weights, components):
fig = plt.figure(tight_layout=True, figsize=(15, 10))
gs = GridSpec(2, 6)
recon_axes = [fig.add_subplot(gs[0, i * 2 : (i + 1) * 2]) for i in range(3)]
comp_ax = fig.add_subplot(gs[1, :3])
weight_ax = fig.add_subplot(gs[1, 3:])
with torch.no_grad():
recon = model.reconstruct(model.H, model.W).cpu()
ax = recon_axes[0]
ax.plot(x, Y[0, :], label="Truth")
ax.plot(x, recon[0, :].data.numpy(), label="Reconstruct")
ax = recon_axes[1]
ax.plot(x, Y[Y.shape[0] // 2, :], label="Truth")
ax.plot(x, recon[recon.shape[0] // 2, :].data.numpy(), label="Reconstruct")
ax = recon_axes[2]
ax.plot(x, Y[-1, :], label="Truth")
ax.plot(x, recon[-1, :].data.numpy(), label="Reconstruct")
for ax in recon_axes:
ax.legend()
H = model.H.data.cpu()
ax = comp_ax
for i in range(H.shape[0]):
ax.plot(x, H[i, :], label=f"Learned Component {i}")
ax.plot(x, components[i, :], "--k", label=f"True Component {i}")
ax.set_title("Learned Components")
ax.legend()
W = model.W.data.cpu()
ax = weight_ax
for i in range(W.shape[1]):
ax.plot(W[:, i], label=f"Learned Weights {i}")
ax.plot(weights.T[:, i], "--k", label=f"True Weights {i}")
ax.set_title("Learned Weights")
ax.legend()
return fig
def decomp_plot(nmf, T, axes=None):
H = nmf.H.data.numpy()
W = nmf.W.data.numpy()
fig, axes = plt.subplots(1, 3, figsize=(15, 4))
ax = axes[0]
for i in range(H.shape[0]):
ax.plot(H[i, :] / H[i, :].max() + i)
ax.set_title("Stacked Normalized Components")
ax = axes[1]
for i in range(W.shape[1]):
ax.plot(T, W[:, i])
ax.set_title("Weights")
ax = axes[2]
for i in range(W.shape[1]):
ax.plot(T, W[:, i] / W[:, i].max())
ax.set_title("Normalized Weights")
return fig, axes
def waterfall(ax, xs, ys, alphas, color="k", sampling=1, offset=0.2, **kwargs):
    indices = range(0, xs.shape[0])[::sampling]
    for plt_i, idx in enumerate(indices):
y = ys[idx, :] + plt_i * offset
x = xs[idx, :]
ax.plot(x, y, color=color, alpha=alphas[idx], **kwargs)
return ax
def summary_plot(
sub_Q,
sub_I,
alphas,
axes=None,
sax=None,
components=None,
comax=None,
cmap="tab10",
alt_ordinate=None,
offset=1.0,
summary_fig=False,
):
"""
Example plotting of NMF results. Not necessarily for Bluesky deployment
Parameters
----------
sub_Q: array
Q to plot in I(Q)
sub_I: array
I to plot in I(Q)
alphas: array
transparencies of multiple repeated plots of I(Q)
axes: optional existing axes for waterfalls
sax: optional axes for summary figure
cmap: mpl colormap
alt_ordinate: array
Array len sub_I.shape[0], corresponding to an alternative labeled dimension for which to order the stacked plots
summary_fig: bool
Whether to include separate figure of alphas over the ordinate
Returns
-------
fig, axes
"""
n_components = alphas.shape[1]
cmap = mpl.cm.get_cmap(cmap)
norm = mpl.colors.Normalize(vmin=0, vmax=n_components)
# Create alternative ordinate for the waterfall/stacking
if alt_ordinate is not None:
idxs, labels = list(
zip(*sorted(zip(range(sub_I.shape[0]), alt_ordinate), key=lambda x: x[1]))
)
else:
idxs = list(range(sub_I.shape[0]))
xs = sub_Q[idxs, :]
ys = sub_I[idxs, :]
alphas = alphas[idxs, :]
# Order by proxy center of mass of class in plot regime. Makes the plots feel like a progression not random.
alpha_ord = np.argsort(np.matmul(np.arange(alphas.shape[0]), alphas))
if axes is None:
fig, axes = plt.subplots(
int(np.ceil(np.sqrt(n_components))), int(np.ceil(np.sqrt(n_components)))
)
axes = axes.reshape(-1)
else:
axes = np.ravel(axes)
for i, ax in enumerate(axes):
if i < n_components:
i_a = alpha_ord[i]
color = cmap(norm(i))
alpha = (alphas[:, i_a] - np.min(alphas[:, i_a])) / (
np.max(alphas[:, i_a]) - np.min(alphas[:, i_a])
)
waterfall(ax, xs, ys, alpha, color=color, offset=offset)
else:
            ax.set_visible(False)
if summary_fig:
if sax is None:
sfig, sax = plt.subplots(figsize=(6, 6))
sx = np.arange(0, alphas.shape[0])
for i in range(alphas.shape[1]):
sax.plot(
sx,
alphas[:, alpha_ord[i]],
color=cmap(norm(i)),
label=f"Component {i + 1}",
)
if components is not None:
if comax is None:
comfig, comax = plt.subplots(figsize=(6, 6))
for i in range(components.shape[0]):
kernel_width = xs.shape[1] - components.shape[1] + 1
if kernel_width == 1:
comax.plot(
xs[0][:],
components[alpha_ord[i], :] + i,
color=cmap(norm(i)),
)
else:
comax.plot(
xs[0][kernel_width // 2 : -kernel_width // 2 + 1],
components[alpha_ord[i], :] + i,
color=cmap(norm(i)),
)
return
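# A minimal usage sketch for waterfall() on synthetic data (shapes chosen only for illustration):
# fig, ax = plt.subplots()
# xs = np.tile(np.linspace(0, 10, 200), (25, 1))     # 25 curves sharing one x-grid
# ys = np.sin(xs + 0.1 * np.arange(25)[:, None])     # one pattern per row
# alphas = np.linspace(0.2, 1.0, 25)                 # fade from oldest to newest
# waterfall(ax, xs, ys, alphas, color="b", sampling=2, offset=0.5)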
|
from django import forms
from .models import UserNotifications
from .models import UserPhonenumber
from registration.forms import RegistrationForm
class NotificationForm(forms.ModelForm):
datestart = forms.DateField(label = "Start Date")
dateend = forms.DateField(label = "End Date")
class Meta:
model = UserNotifications
fields = ('datestart','dateend',)
class PhonenumberForm(forms.ModelForm):
class Meta:
model = UserPhonenumber
fields = ('phonenumber',)
labels = {
'phonenumber':"Valid US Phone Number"
}
|
from django.db import models
from swampdragon.models import SelfPublishModel
from .serializers import CompanySerializer, StaffSerializer, DocumentSerializer, CompanyOwnerSerializer
class Company(SelfPublishModel, models.Model):
name = models.CharField(max_length=100)
serializer_class = CompanySerializer
def __str__(self):
return self.name
class CompanyOwner(SelfPublishModel, models.Model):
name = models.CharField(max_length=100)
company = models.OneToOneField(Company)
serializer_class = CompanyOwnerSerializer
def __str__(self):
return self.name
class Staff(SelfPublishModel, models.Model):
name = models.CharField(max_length=100)
company = models.ForeignKey(Company, related_name='staff')
serializer_class = StaffSerializer
def __str__(self):
return self.name
class Document(SelfPublishModel, models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
staff = models.ManyToManyField(Staff, related_name='documents', null=True, blank=True)
serializer_class = DocumentSerializer
def __str__(self):
return self.title
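# Note: null=True on the Document.staff ManyToManyField is a no-op; Django ignores null for
# many-to-many relations, so blank=True alone controls whether the field may be left empty in forms.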
|
# -*- coding: utf-8 -*-
SECRET_KEY = "secret key"
#When False the files are served via a Nginx location clause
SERVE_STATIC = False
#Hard coded in clients
#The subdomain "www" is the master domain.
#The site root does 301 to www, which does not work here!
FILE_SERVER_ROOT = 'https://www.XXXXX.dyndns.org/sfa/seafhttp'
MEDIA_URL = '/sfa/media/'
COMPRESS_URL = MEDIA_URL
STATIC_URL = MEDIA_URL + 'assets/'
SITE_ROOT = '/sfa/'
LOGIN_URL = '/sfa/accounts/login/'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'seahub1-db',
'USER': 'seafile1',
'PASSWORD': '<password>',
'HOST': '127.0.0.1',
'PORT': '3306'
}
}
|
import urllib.request
import json
class CurrencyConverter:
def __init__(self, url):
request = urllib.request.Request(url, headers={'User-Agent': 'Currency Converter'})
data = urllib.request.urlopen(request).read()
data = json.loads(data.decode('utf-8'))
self.conversion_rates = data['conversion_rates']
    def Convert(self, amount, from_money, to_money):
        original_amount = amount
        # Rates are relative to USD, so first normalise the amount to USD if needed
        if from_money.upper() != 'USD':
            amount = amount / self.conversion_rates[from_money.upper()]
        converted = amount * self.conversion_rates[to_money.upper()]
        return str(original_amount) + " (" + from_money + ") is equal to " + str(converted) + " (" + to_money + ")"
url = 'https://v6.exchangerate-api.com/v6/af370279909b3852fbc7252f/latest/USD'
converter = CurrencyConverter(url)
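# Example usage (a sketch; assumes the API request above succeeded and both currency codes exist in the table):
# print(converter.Convert(100, 'GBP', 'EUR'))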
|
from setuptools import setup, find_packages
setup(
name='social-auth-steemconnect',
version='0.0.3',
packages=find_packages(),
author='Krzysztof @noisy Szumny',
author_email='[email protected]',
description='SteemConnect backend for python-social-auth.',
long_description=open('README.md').read(),
license='LICENSE',
url='https://github.com/wise-team/python-social-auth-steemconnect',
keywords='django social auth oauth2 social-auth steem steemconnect steemit',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
],
install_requires=[
'python-social-auth',
]
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:24:52 2020
@author: frederik
SSVEP Flicker Activity extractor
Takes a 240fps recording, and extracts the activity of blinking red light
"""
import cv2
import numpy as np
#Creating file for saving activity
saveFile = open("activity.txt","w")
cap = cv2.VideoCapture("source.mp4")
f = 0
# Check if camera opened successfully
if not cap.isOpened():
print("Error opening video stream or file")
# Read until video is completed
while(cap.isOpened()):
# Capture frame-by-frame
ret, frame = cap.read()
if ret == True:
        #Getting a binary image of the activity (frames are BGR, so this keeps low-blue, high-red pixels).
binary = cv2.inRange(frame, (0, 0, 200), (100, 255, 255))
# Save the white pixel count
count = str(cv2.countNonZero(binary))
f +=1 #Counts which frame and thus the time.
saveFile.write(count + "," + str(f) +"/240" "\n")
# Display the resulting frame
cv2.namedWindow("Frame", cv2.WINDOW_NORMAL) # Create window with free diemnsions
cv2.imshow('Frame',frame)
cv2.namedWindow("Binary", cv2.WINDOW_NORMAL) # Create window with free diemnsions
cv2.imshow('Binary',binary)
# Press Q on keyboard to exit
if cv2.waitKey(25) & 0xFF == ord('q'):
break
# Break the loop
else:
break
# When everything done, release the video capture object
cap.release()
# Closes all the frames
cv2.destroyAllWindows()
saveFile.close()
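# The resulting activity.txt contains one line per processed frame in the form "<white_pixel_count>,<frame_number>/240".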
|
import numpy as np
import numpy.linalg as la
from auxiliary import *
A = np.array([
[64, 2, 3, 61, 60, 6],
[ 9, 55, 54, 12, 13, 51],
[17, 47, 46, 20, 21, 43],
[40, 26, 27, 37, 36, 30],
[32, 34, 35, 29, 28, 38],
[41, 23, 22, 44, 45, 19],
[49, 15, 14, 52, 53, 11],
[ 8, 58, 59, 5, 4, 62],
], dtype=float)
U, s, Vt = la.svd(A)
mprint('amat', A, '%3g')
mprint('uCorrect', U)
vprint('sCorrect', s)
mprint('vtCorrect', Vt)
m, n = np.shape(A)
ns = min([m, n])
S = np.zeros((m, n))
for i in range(ns):
S[i,i] = s[i]
USVt = np.dot(U, np.dot(S, Vt))
print('A =\n', A)
print('USVt =\n', USVt)
print(np.allclose(A, USVt))
|
import copy
import ntpath
import pathlib
import posixpath
import sys
import unittest
from test.support import verbose
try:
# If we are in a source tree, use the original source file for tests
SOURCE = (pathlib.Path(__file__).absolute().parent.parent.parent / "Modules/getpath.py").read_bytes()
except FileNotFoundError:
# Try from _testcapimodule instead
from _testinternalcapi import get_getpath_codeobject
SOURCE = get_getpath_codeobject()
class MockGetPathTests(unittest.TestCase):
def __init__(self, *a, **kw):
super().__init__(*a, **kw)
self.maxDiff = None
def test_normal_win32(self):
"Test a 'standard' install layout on Windows."
ns = MockNTNamespace(
argv0=r"C:\Python\python.exe",
real_executable=r"C:\Python\python.exe",
)
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
expected = dict(
executable=r"C:\Python\python.exe",
base_executable=r"C:\Python\python.exe",
prefix=r"C:\Python",
exec_prefix=r"C:\Python",
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_buildtree_win32(self):
"Test an in-build-tree layout on Windows."
ns = MockNTNamespace(
argv0=r"C:\CPython\PCbuild\amd64\python.exe",
real_executable=r"C:\CPython\PCbuild\amd64\python.exe",
)
ns.add_known_xfile(r"C:\CPython\PCbuild\amd64\python.exe")
ns.add_known_file(r"C:\CPython\Lib\os.py")
ns.add_known_file(r"C:\CPython\PCbuild\amd64\pybuilddir.txt", [""])
expected = dict(
executable=r"C:\CPython\PCbuild\amd64\python.exe",
base_executable=r"C:\CPython\PCbuild\amd64\python.exe",
prefix=r"C:\CPython",
exec_prefix=r"C:\CPython",
build_prefix=r"C:\CPython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
r"C:\CPython\PCbuild\amd64\python98.zip",
r"C:\CPython\Lib",
r"C:\CPython\PCbuild\amd64",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_venv_win32(self):
"""Test a venv layout on Windows.
This layout is discovered by the presence of %__PYVENV_LAUNCHER__%,
specifying the original launcher executable. site.py is responsible
for updating prefix and exec_prefix.
"""
ns = MockNTNamespace(
argv0=r"C:\Python\python.exe",
ENV___PYVENV_LAUNCHER__=r"C:\venv\Scripts\python.exe",
real_executable=r"C:\Python\python.exe",
)
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_xfile(r"C:\venv\Scripts\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
ns.add_known_file(r"C:\venv\pyvenv.cfg", [
r"home = C:\Python"
])
expected = dict(
executable=r"C:\venv\Scripts\python.exe",
prefix=r"C:\Python",
exec_prefix=r"C:\Python",
base_executable=r"C:\Python\python.exe",
base_prefix=r"C:\Python",
base_exec_prefix=r"C:\Python",
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\DLLs",
r"C:\Python\Lib",
r"C:\Python",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_registry_win32(self):
"""Test registry lookup on Windows.
On Windows there are registry entries that are intended for other
applications to register search paths.
"""
hkey = rf"HKLM\Software\Python\PythonCore\9.8-XY\PythonPath"
winreg = MockWinreg({
hkey: None,
f"{hkey}\\Path1": "path1-dir",
f"{hkey}\\Path1\\Subdir": "not-subdirs",
})
ns = MockNTNamespace(
argv0=r"C:\Python\python.exe",
real_executable=r"C:\Python\python.exe",
winreg=winreg,
)
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
expected = dict(
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
"path1-dir",
# should not contain not-subdirs
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
ns["config"]["use_environment"] = 0
ns["config"]["module_search_paths_set"] = 0
ns["config"]["module_search_paths"] = None
expected = dict(
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_normal_win32(self):
"Test a 'standard' install layout via symlink on Windows."
ns = MockNTNamespace(
argv0=r"C:\LinkedFrom\python.exe",
real_executable=r"C:\Python\python.exe",
)
ns.add_known_xfile(r"C:\LinkedFrom\python.exe")
ns.add_known_xfile(r"C:\Python\python.exe")
ns.add_known_link(r"C:\LinkedFrom\python.exe", r"C:\Python\python.exe")
ns.add_known_file(r"C:\Python\Lib\os.py")
ns.add_known_dir(r"C:\Python\DLLs")
expected = dict(
executable=r"C:\LinkedFrom\python.exe",
base_executable=r"C:\LinkedFrom\python.exe",
prefix=r"C:\Python",
exec_prefix=r"C:\Python",
module_search_paths_set=1,
module_search_paths=[
r"C:\Python\python98.zip",
r"C:\Python\Lib",
r"C:\Python\DLLs",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_buildtree_win32(self):
"Test an in-build-tree layout via symlink on Windows."
ns = MockNTNamespace(
argv0=r"C:\LinkedFrom\python.exe",
real_executable=r"C:\CPython\PCbuild\amd64\python.exe",
)
ns.add_known_xfile(r"C:\LinkedFrom\python.exe")
ns.add_known_xfile(r"C:\CPython\PCbuild\amd64\python.exe")
ns.add_known_link(r"C:\LinkedFrom\python.exe", r"C:\CPython\PCbuild\amd64\python.exe")
ns.add_known_file(r"C:\CPython\Lib\os.py")
ns.add_known_file(r"C:\CPython\PCbuild\amd64\pybuilddir.txt", [""])
expected = dict(
executable=r"C:\LinkedFrom\python.exe",
base_executable=r"C:\LinkedFrom\python.exe",
prefix=r"C:\CPython",
exec_prefix=r"C:\CPython",
build_prefix=r"C:\CPython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
r"C:\CPython\PCbuild\amd64\python98.zip",
r"C:\CPython\Lib",
r"C:\CPython\PCbuild\amd64",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_buildtree_pythonhome_win32(self):
"Test an out-of-build-tree layout on Windows with PYTHONHOME override."
ns = MockNTNamespace(
argv0=r"C:\Out\python.exe",
real_executable=r"C:\Out\python.exe",
ENV_PYTHONHOME=r"C:\CPython",
)
ns.add_known_xfile(r"C:\Out\python.exe")
ns.add_known_file(r"C:\CPython\Lib\os.py")
ns.add_known_file(r"C:\Out\pybuilddir.txt", [""])
expected = dict(
executable=r"C:\Out\python.exe",
base_executable=r"C:\Out\python.exe",
prefix=r"C:\CPython",
exec_prefix=r"C:\CPython",
# This build_prefix is a miscalculation, because we have
            # moved the output directory out of the prefix.
# Specify PYTHONHOME to get the correct prefix/exec_prefix
build_prefix="C:\\",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
r"C:\Out\python98.zip",
r"C:\CPython\Lib",
r"C:\Out",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_normal_posix(self):
"Test a 'standard' install layout on *nix"
ns = MockPosixNamespace(
PREFIX="/usr",
argv0="python",
ENV_PATH="/usr/bin",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
expected = dict(
executable="/usr/bin/python",
base_executable="/usr/bin/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_buildpath_posix(self):
"""Test an in-build-tree layout on POSIX.
This layout is discovered from the presence of pybuilddir.txt, which
contains the relative path from the executable's directory to the
platstdlib path.
"""
ns = MockPosixNamespace(
argv0=r"/home/cpython/python",
PREFIX="/usr/local",
)
ns.add_known_xfile("/home/cpython/python")
ns.add_known_xfile("/usr/local/bin/python")
ns.add_known_file("/home/cpython/pybuilddir.txt", ["build/lib.linux-x86_64-9.8"])
ns.add_known_file("/home/cpython/Lib/os.py")
ns.add_known_dir("/home/cpython/lib-dynload")
expected = dict(
executable="/home/cpython/python",
prefix="/usr/local",
exec_prefix="/usr/local",
base_executable="/home/cpython/python",
build_prefix="/home/cpython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
"/usr/local/lib/python98.zip",
"/home/cpython/Lib",
"/home/cpython/build/lib.linux-x86_64-9.8",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_venv_posix(self):
"Test a venv layout on *nix."
ns = MockPosixNamespace(
argv0="python",
PREFIX="/usr",
ENV_PATH="/venv/bin:/usr/bin",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_xfile("/venv/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
ns.add_known_file("/venv/pyvenv.cfg", [
r"home = /usr/bin"
])
expected = dict(
executable="/venv/bin/python",
prefix="/usr",
exec_prefix="/usr",
base_executable="/usr/bin/python",
base_prefix="/usr",
base_exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_normal_posix(self):
"Test a 'standard' install layout via symlink on *nix"
ns = MockPosixNamespace(
PREFIX="/usr",
argv0="/linkfrom/python",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/usr/bin/python")
ns.add_known_link("/linkfrom/python", "/usr/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
expected = dict(
executable="/linkfrom/python",
base_executable="/linkfrom/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_buildpath_posix(self):
"""Test an in-build-tree layout on POSIX.
This layout is discovered from the presence of pybuilddir.txt, which
contains the relative path from the executable's directory to the
platstdlib path.
"""
ns = MockPosixNamespace(
argv0=r"/linkfrom/python",
PREFIX="/usr/local",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/home/cpython/python")
ns.add_known_link("/linkfrom/python", "/home/cpython/python")
ns.add_known_xfile("/usr/local/bin/python")
ns.add_known_file("/home/cpython/pybuilddir.txt", ["build/lib.linux-x86_64-9.8"])
ns.add_known_file("/home/cpython/Lib/os.py")
ns.add_known_dir("/home/cpython/lib-dynload")
expected = dict(
executable="/linkfrom/python",
prefix="/usr/local",
exec_prefix="/usr/local",
base_executable="/linkfrom/python",
build_prefix="/home/cpython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
"/usr/local/lib/python98.zip",
"/home/cpython/Lib",
"/home/cpython/build/lib.linux-x86_64-9.8",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_custom_platlibdir_posix(self):
"Test an install with custom platlibdir on *nix"
ns = MockPosixNamespace(
PREFIX="/usr",
argv0="/linkfrom/python",
PLATLIBDIR="lib64",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_file("/usr/lib64/python9.8/os.py")
ns.add_known_dir("/usr/lib64/python9.8/lib-dynload")
expected = dict(
executable="/linkfrom/python",
base_executable="/linkfrom/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib64/python98.zip",
"/usr/lib64/python9.8",
"/usr/lib64/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_venv_macos(self):
"""Test a venv layout on macOS.
This layout is discovered when 'executable' and 'real_executable' match,
but $__PYVENV_LAUNCHER__ has been set to the original process.
"""
ns = MockPosixNamespace(
os_name="darwin",
argv0="/usr/bin/python",
PREFIX="/usr",
ENV___PYVENV_LAUNCHER__="/framework/Python9.8/python",
real_executable="/usr/bin/python",
)
ns.add_known_xfile("/usr/bin/python")
ns.add_known_xfile("/framework/Python9.8/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
ns.add_known_file("/framework/Python9.8/pyvenv.cfg", [
"home = /usr/bin"
])
expected = dict(
executable="/framework/Python9.8/python",
prefix="/usr",
exec_prefix="/usr",
base_executable="/usr/bin/python",
base_prefix="/usr",
base_exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_normal_macos(self):
"Test a 'standard' install layout via symlink on macOS"
ns = MockPosixNamespace(
os_name="darwin",
PREFIX="/usr",
argv0="python",
ENV_PATH="/linkfrom:/usr/bin",
# real_executable on macOS matches the invocation path
real_executable="/linkfrom/python",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/usr/bin/python")
ns.add_known_link("/linkfrom/python", "/usr/bin/python")
ns.add_known_file("/usr/lib/python9.8/os.py")
ns.add_known_dir("/usr/lib/python9.8/lib-dynload")
expected = dict(
executable="/linkfrom/python",
base_executable="/linkfrom/python",
prefix="/usr",
exec_prefix="/usr",
module_search_paths_set=1,
module_search_paths=[
"/usr/lib/python98.zip",
"/usr/lib/python9.8",
"/usr/lib/python9.8/lib-dynload",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
def test_symlink_buildpath_macos(self):
"""Test an in-build-tree layout via symlink on macOS.
This layout is discovered from the presence of pybuilddir.txt, which
contains the relative path from the executable's directory to the
platstdlib path.
"""
ns = MockPosixNamespace(
os_name="darwin",
argv0=r"python",
ENV_PATH="/linkfrom:/usr/bin",
PREFIX="/usr/local",
# real_executable on macOS matches the invocation path
real_executable="/linkfrom/python",
)
ns.add_known_xfile("/linkfrom/python")
ns.add_known_xfile("/home/cpython/python")
ns.add_known_link("/linkfrom/python", "/home/cpython/python")
ns.add_known_xfile("/usr/local/bin/python")
ns.add_known_file("/home/cpython/pybuilddir.txt", ["build/lib.macos-9.8"])
ns.add_known_file("/home/cpython/Lib/os.py")
ns.add_known_dir("/home/cpython/lib-dynload")
expected = dict(
executable="/linkfrom/python",
prefix="/usr/local",
exec_prefix="/usr/local",
base_executable="/linkfrom/python",
build_prefix="/home/cpython",
_is_python_build=1,
module_search_paths_set=1,
module_search_paths=[
"/usr/local/lib/python98.zip",
"/home/cpython/Lib",
"/home/cpython/build/lib.macos-9.8",
],
)
actual = getpath(ns, expected)
self.assertEqual(expected, actual)
# ******************************************************************************
DEFAULT_NAMESPACE = dict(
PREFIX="",
EXEC_PREFIX="",
PYTHONPATH="",
VPATH="",
PLATLIBDIR="",
PYDEBUGEXT="",
VERSION_MAJOR=9, # fixed version number for ease
VERSION_MINOR=8, # of testing
PYWINVER=None,
EXE_SUFFIX=None,
ENV_PATH="",
ENV_PYTHONHOME="",
ENV_PYTHONEXECUTABLE="",
ENV___PYVENV_LAUNCHER__="",
argv0="",
py_setpath="",
real_executable="",
executable_dir="",
library="",
winreg=None,
build_prefix=None,
venv_prefix=None,
)
DEFAULT_CONFIG = dict(
home=None,
platlibdir=None,
pythonpath=None,
program_name=None,
prefix=None,
exec_prefix=None,
base_prefix=None,
base_exec_prefix=None,
executable=None,
base_executable="",
stdlib_dir=None,
platstdlib_dir=None,
module_search_paths=None,
module_search_paths_set=0,
pythonpath_env=None,
argv=None,
orig_argv=None,
isolated=0,
use_environment=1,
use_site=1,
)
class MockNTNamespace(dict):
def __init__(self, *a, argv0=None, config=None, **kw):
self.update(DEFAULT_NAMESPACE)
self["config"] = DEFAULT_CONFIG.copy()
self["os_name"] = "nt"
self["PLATLIBDIR"] = "DLLs"
self["PYWINVER"] = "9.8-XY"
self["VPATH"] = r"..\.."
super().__init__(*a, **kw)
if argv0:
self["config"]["orig_argv"] = [argv0]
if config:
self["config"].update(config)
self._files = {}
self._links = {}
self._dirs = set()
self._warnings = []
def add_known_file(self, path, lines=None):
self._files[path.casefold()] = list(lines or ())
self.add_known_dir(path.rpartition("\\")[0])
def add_known_xfile(self, path):
self.add_known_file(path)
def add_known_link(self, path, target):
self._links[path.casefold()] = target
def add_known_dir(self, path):
p = path.rstrip("\\").casefold()
while p:
self._dirs.add(p)
p = p.rpartition("\\")[0]
def __missing__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def abspath(self, path):
if self.isabs(path):
return path
return self.joinpath("C:\\Absolute", path)
def basename(self, path):
return path.rpartition("\\")[2]
def dirname(self, path):
name = path.rstrip("\\").rpartition("\\")[0]
if name[1:] == ":":
return name + "\\"
return name
def hassuffix(self, path, suffix):
return path.casefold().endswith(suffix.casefold())
def isabs(self, path):
return path[1:3] == ":\\"
def isdir(self, path):
if verbose:
print("Check if", path, "is a dir")
return path.casefold() in self._dirs
def isfile(self, path):
if verbose:
print("Check if", path, "is a file")
return path.casefold() in self._files
def ismodule(self, path):
if verbose:
print("Check if", path, "is a module")
path = path.casefold()
return path in self._files and path.rpartition(".")[2] == "py".casefold()
def isxfile(self, path):
if verbose:
print("Check if", path, "is a executable")
path = path.casefold()
return path in self._files and path.rpartition(".")[2] == "exe".casefold()
def joinpath(self, *path):
return ntpath.normpath(ntpath.join(*path))
def readlines(self, path):
try:
return self._files[path.casefold()]
except KeyError:
raise FileNotFoundError(path) from None
def realpath(self, path, _trail=None):
if verbose:
print("Read link from", path)
try:
link = self._links[path.casefold()]
except KeyError:
return path
if _trail is None:
_trail = set()
elif link.casefold() in _trail:
raise OSError("circular link")
_trail.add(link.casefold())
return self.realpath(link, _trail)
def warn(self, message):
self._warnings.append(message)
if verbose:
print(message)
class MockWinreg:
HKEY_LOCAL_MACHINE = "HKLM"
HKEY_CURRENT_USER = "HKCU"
def __init__(self, keys):
self.keys = {k.casefold(): v for k, v in keys.items()}
self.open = {}
def __repr__(self):
return "<MockWinreg>"
def __eq__(self, other):
return isinstance(other, type(self))
def open_keys(self):
return list(self.open)
def OpenKeyEx(self, hkey, subkey):
if verbose:
print(f"OpenKeyEx({hkey}, {subkey})")
key = f"{hkey}\\{subkey}".casefold()
if key in self.keys:
self.open[key] = self.open.get(key, 0) + 1
return key
raise FileNotFoundError()
def CloseKey(self, hkey):
if verbose:
print(f"CloseKey({hkey})")
hkey = hkey.casefold()
if hkey not in self.open:
raise RuntimeError("key is not open")
self.open[hkey] -= 1
if not self.open[hkey]:
del self.open[hkey]
def EnumKey(self, hkey, i):
if verbose:
print(f"EnumKey({hkey}, {i})")
hkey = hkey.casefold()
if hkey not in self.open:
raise RuntimeError("key is not open")
prefix = f'{hkey}\\'
subkeys = [k[len(prefix):] for k in sorted(self.keys) if k.startswith(prefix)]
subkeys[:] = [k for k in subkeys if '\\' not in k]
for j, n in enumerate(subkeys):
if j == i:
return n.removeprefix(prefix)
raise OSError("end of enumeration")
def QueryValue(self, hkey):
if verbose:
print(f"QueryValue({hkey})")
hkey = hkey.casefold()
if hkey not in self.open:
raise RuntimeError("key is not open")
try:
return self.keys[hkey]
except KeyError:
raise OSError()
class MockPosixNamespace(dict):
def __init__(self, *a, argv0=None, config=None, **kw):
self.update(DEFAULT_NAMESPACE)
self["config"] = DEFAULT_CONFIG.copy()
self["os_name"] = "posix"
self["PLATLIBDIR"] = "lib"
super().__init__(*a, **kw)
if argv0:
self["config"]["orig_argv"] = [argv0]
if config:
self["config"].update(config)
self._files = {}
self._xfiles = set()
self._links = {}
self._dirs = set()
self._warnings = []
def add_known_file(self, path, lines=None):
self._files[path] = list(lines or ())
self.add_known_dir(path.rpartition("/")[0])
def add_known_xfile(self, path):
self.add_known_file(path)
self._xfiles.add(path)
def add_known_link(self, path, target):
self._links[path] = target
def add_known_dir(self, path):
p = path.rstrip("/")
while p:
self._dirs.add(p)
p = p.rpartition("/")[0]
def __missing__(self, key):
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key) from None
def abspath(self, path):
if self.isabs(path):
return path
return self.joinpath("/Absolute", path)
def basename(self, path):
return path.rpartition("/")[2]
def dirname(self, path):
return path.rstrip("/").rpartition("/")[0]
def hassuffix(self, path, suffix):
return path.endswith(suffix)
def isabs(self, path):
return path[0:1] == "/"
def isdir(self, path):
if verbose:
print("Check if", path, "is a dir")
return path in self._dirs
def isfile(self, path):
if verbose:
print("Check if", path, "is a file")
return path in self._files
def ismodule(self, path):
if verbose:
print("Check if", path, "is a module")
return path in self._files and path.rpartition(".")[2] == "py"
def isxfile(self, path):
if verbose:
print("Check if", path, "is an xfile")
return path in self._xfiles
def joinpath(self, *path):
return posixpath.normpath(posixpath.join(*path))
def readlines(self, path):
try:
return self._files[path]
except KeyError:
raise FileNotFoundError(path) from None
def realpath(self, path, _trail=None):
if verbose:
print("Read link from", path)
try:
link = self._links[path]
except KeyError:
return path
if _trail is None:
_trail = set()
elif link in _trail:
raise OSError("circular link")
_trail.add(link)
return self.realpath(link, _trail)
def warn(self, message):
self._warnings.append(message)
if verbose:
print(message)
def diff_dict(before, after, prefix="global"):
diff = []
for k in sorted(before):
if k[:2] == "__":
continue
if k == "config":
diff_dict(before[k], after[k], prefix="config")
continue
if k in after and after[k] != before[k]:
diff.append((k, before[k], after[k]))
if not diff:
return
max_k = max(len(k) for k, _, _ in diff)
indent = " " * (len(prefix) + 1 + max_k)
if verbose:
for k, b, a in diff:
if b:
print("{}.{} -{!r}\n{} +{!r}".format(prefix, k.ljust(max_k), b, indent, a))
else:
print("{}.{} +{!r}".format(prefix, k.ljust(max_k), a))
def dump_dict(before, after, prefix="global"):
if not verbose or not after:
return
max_k = max(len(k) for k in after)
for k, v in sorted(after.items(), key=lambda i: i[0]):
if k[:2] == "__":
continue
if k == "config":
dump_dict(before[k], after[k], prefix="config")
continue
try:
if v != before[k]:
print("{}.{} {!r} (was {!r})".format(prefix, k.ljust(max_k), v, before[k]))
continue
except KeyError:
pass
print("{}.{} {!r}".format(prefix, k.ljust(max_k), v))
def getpath(ns, keys):
before = copy.deepcopy(ns)
failed = True
try:
exec(SOURCE, ns)
failed = False
finally:
if failed:
dump_dict(before, ns)
else:
diff_dict(before, ns)
return {
k: ns['config'].get(k, ns.get(k, ...))
for k in keys
}
|
# -*- coding: utf-8 -*-
"""Views."""
from __future__ import unicode_literals
import os
import uuid
from collections import defaultdict
from django.template import Context, Template, TemplateSyntaxError
from django.conf import settings
from django.shortcuts import get_object_or_404, render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, Http404
import views_playbooks
from .models import ConfigurationTemplate, Category
from .models import Playbook
from .lib.spotmax import netspot
from .lib.spotmax import spotmax
def get_config(variables, template):
"""Generate config.
Args:
variables: dict, key = variable name
value = variable value
template: ConfigurationTemplate object
Returns:
config_file, config, error: tuple, file path, config, error
"""
# Generate configuration
try:
tpl = Template(template.template)
config = tpl.render(Context(variables))
error = False
except TemplateSyntaxError as syntax_error:
config = str(syntax_error)
error = True
# Check if temp directory exists
if not os.path.exists(settings.TEMP_DIRECTORY):
os.makedirs(settings.TEMP_DIRECTORY)
# Write configuration to a file
config_filename = str(uuid.uuid4())
file_path = os.path.join(settings.TEMP_DIRECTORY, config_filename)
with open(file_path, 'w') as config_file:
config_file.write(config.encode('utf-8').strip())
return file_path, config, error
@login_required
def index(request):
"""Index view."""
# Dict to hold templates per category
templates = defaultdict()
# Get all categories
categories = Category.objects.order_by('-name')
# Get all templates per category
for category in categories:
# Get all templates
templates[category] = ConfigurationTemplate.objects.filter(category=category).order_by('-name')
return render(
request,
'templify/templates.htm',
context={'templates': templates},
)
@login_required
def download(request, filename):
"""File download."""
file_path = os.path.join(settings.TEMP_DIRECTORY, filename)
if os.path.exists(file_path):
with open(file_path, 'rb') as filehandle:
response = HttpResponse(filehandle.read(), content_type="application/text/plain")
response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
return response
raise Http404
@login_required
def generate_config(request):
"""Generate config view."""
# Get template
template_id = request.POST.get('template_id', None)
asset_name = request.POST.get('asset_name', None)
template = get_object_or_404(ConfigurationTemplate, pk=template_id)
# Remove unnecessary POST data
data = dict(request.POST)
try:
del data['csrfmiddlewaretoken']
del data['template_id']
except KeyError:
pass
# Variables to be used in template
variables = defaultdict()
# Get group variables if asset_name is available
inventory = netspot.NetSPOT()
if asset_name:
asset_details = inventory.search(asset_name, key='asset')[0]
# Get variable for each group
for group in asset_details['groups']:
group_details = spotmax.SPOTGroup().search(group, key='group')[0]
# Get user variables if any
if group_details['variables']:
for variable in group_details['variables']:
variables.update(variable)
# Get variables from user input
# Overwrite existing variables. User input is more specific.
for variable in data:
# If interface, split and strip each interface in the list
if variable == 'interfaces':
interfaces = data[variable][0].split(',')
if len(interfaces) == 1:
variables['interface'] = interfaces[0].strip()
variables['interfaces'] = [interfaces[0].strip()]
else:
variables['interface'] = ''
variables['interfaces'] = [x.strip() for x in interfaces]
else:
variables[variable] = data[variable][0]
file_path, config, error = get_config(variables, template)
# Get playbooks that accepts templates as input
playbooks = Playbook.objects.filter(template_input=True)
# If a playbook is specified go directly to playbook input
if template.playbook:
return views_playbooks.playbook_input(request,
playbook_id=template.playbook.id,
config_file=file_path,
template=template.template)
return render(
request,
'templify/config.htm',
context={'config': config,
'asset_name': asset_name,
'config_filename': file_path,
'template': template,
'error': error,
'playbooks': playbooks,
'template_id': template_id},
)
@login_required
def template_input(request, template_id):
"""Playbook input view."""
# Get template
template = get_object_or_404(ConfigurationTemplate, pk=template_id)
# Check if port and asset is provided
asset_name = request.POST.get('asset_name', None)
interfaces = request.POST.getlist('interfaces', [])
return render(
request,
'templify/template.htm',
context={'template': template,
'asset_name': asset_name,
'interfaces_str': ', '.join(interfaces),
'interfaces': interfaces},
)
|
from github import Github
from github_token import GITHUB_TOKEN, user, password
from inky import InkyPHAT
# Set up the display ("auto" would autodetect the attached board; default to the red pHAT)
colour = "red"
if colour == "auto":
    from inky.auto import auto
    inky_display = auto()
    colour = inky_display.colour
else:
    inky_display = InkyPHAT(colour)
inky_display.set_border(inky_display.BLACK)
inky_display.v_flip = True
g1 = Github(GITHUB_TOKEN)
g2 = Github(user, password)
user = g2.get_user()
notifications = user.get_notifications()
print(notifications)
for notification in notifications:
print (notification)
|
#
# Copyright (c) 2020 by Delphix. All rights reserved.
#
#######################################################################################################################
"""
Main Exception Class: UserConvertibleException
|-Two Types of exceptions:
|-DatabaseException - Superclass for any DB related Exception
|-PluginException - Superclass for any plugin run-time related Exception
"""
#######################################################################################################################
from dlpx.virtualization.platform.exceptions import UserError
class UserConvertibleException(Exception):
def __init__(self, message, action, error_string):
super(UserConvertibleException, self).__init__(message)
self.user_error = UserError(message, action, error_string)
def to_user_error(self):
return self.user_error
# DB Exceptions
class DatabaseException(UserConvertibleException):
def __init__(self, message, action, error_string):
super(DatabaseException, self).__init__(message, action, error_string)
# Currently for any DB action failing
class MySQLDBException(DatabaseException):
def __init__(self, message=""):
message = "An Error occurred during a DB Operation: " + message
super(MySQLDBException, self).__init__(message,
"Please check the error & re-try",
"Not able perform the requested DB Operation")
# PLUGIN EXCEPTIONS
# Exceptions related to plugin operation like discovery, linking, virtualization are being handled using this.
class PluginException(UserConvertibleException):
def __init__(self, message, action, error_string):
super(PluginException, self).__init__(message, action, error_string)
class RepositoryDiscoveryError(PluginException):
def __init__(self, message=""):
message = "Not able to search repository information, " + message
super(RepositoryDiscoveryError, self).__init__(message,
"Please check the MySQL DB installation on the environment",
"Failed to search repository information")
class MountPathError(PluginException):
def __init__(self, message=""):
message = "Failed to create mount path because another file system is already mounted " + message
super(MountPathError, self).__init__(message,
"Please re-try after the previous operation is completed",
"Please check the logs for more details")
# This exception will be raised when failed to find source config
class SourceConfigDiscoveryError(PluginException):
def __init__(self, message=""):
message = "Failed to find source config, " + message
super(SourceConfigDiscoveryError, self).__init__(message,
"An Error occured while peforming Source Discovery",
"Not able to find source")
# This exception is used for all Linking Failures
class LinkingException(PluginException):
def __init__(self, message=""):
message = "Failed to link source, " + message
super(LinkingException, self).__init__(message,
"Please review the error log and re-try",
"Unable to Link dSource")
# This exception is used for all VDB Failures
class VirtualTargetException(PluginException):
def __init__(self, message=""):
message = "Failed while performing a VDB Operation, " + message
super(VirtualTargetException, self).__init__(message,
"Please review the error log and re-try",
"Error while performing the VDB")
# Exception for Generic Errors
class GenericUserError(UserConvertibleException):
def __init__(self, message="", action="", error_string=""):
if not message:
message = "Internal error occurred, retry again"
if not action:
action = "Please check logs for more details"
if not error_string:
error_string = "Default error string"
super(GenericUserError, self).__init__(message, action, error_string)
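# A minimal usage sketch: any UserConvertibleException defined above can be converted to a
# platform UserError for the Virtualization SDK via to_user_error(), e.g.
# try:
#     raise MySQLDBException("connection refused")
# except UserConvertibleException as err:
#     raise err.to_user_error()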
|
import sys
from textwrap import dedent
MINIMUM_VERSION = (2, 4)
def check_version():
if sys.version_info < MINIMUM_VERSION:
min_version_str = '.'.join(str(x) for x in MINIMUM_VERSION)
print("Python >= %s required. You are using:\n%s" % (min_version_str, sys.version))
print(dedent("""
######################################################################
Visit http://www.python.org and download a more recent version of
Python.
You should install this version in addition to your current version
(rather than upgrading your current version) because your system might
depend on the current version. After installing the newer version, for
instance version 3.2, simply invoke DenyHosts explicitly with the new
version of python, eg:
$ python3.4 %s
######################################################################
""") % ' '.join(sys.argv))
sys.exit(1)
check_version()
|
#!/usr/bin/env python3
import argparse
import re
import pandas as pd
def main():
args = parse_arguments()
clades = pd.read_csv(args.clades)
clade_assignments = {}
for vcf in args.inputs:
df = load_vcf(vcf)
name = re.sub(r"^.*/|\..*$", "", vcf)
clade_assignments[name] = get_clade(df, clades)
# write output
with open(args.output, "w") as f:
f.write("sample,clade\n")
f.write("\n".join(','.join(x) for x in clade_assignments.items()))
def get_clade(sample_variants: pd.DataFrame, clades: pd.DataFrame) -> str:
# special case: wildtype (should have . as ref in all its "mutations")
# so, if we don't find any mutation in the same positions, we assume it's wildtype
wt = clades.query("ref == '.'")
wt_name = wt.clade.unique()[0]
if len(wt.merge(sample_variants, on=["pos"])) == 0:
return wt_name
# count how many mutations each clade has
clade_nmutations = (
clades.query("clade != @wt_name")
.groupby("clade")
.size()
.to_frame("n")
.reset_index()
)
selected_clade = ("None", 0)
for t in clade_nmutations.itertuples():
name = t.clade
matched = len(sample_variants.merge(clades.query("clade == @name")))
# if we find all the mutations of this clade and the number of mutations
# is higher than the current selected clade, choose this clade
if matched == t.n and t.n > selected_clade[1]:
selected_clade = (name, t.n)
return selected_clade[0]
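# Illustrative worked example (not part of the original script), assuming a
# clade table like:
#   clade,pos,ref,alt
#   WT,100,.,C
#   A,100,C,T
#   A,250,G,A
#   B,100,C,T
# A sample whose VCF contains 100 C>T and 250 G>A matches all of clade A's
# defining mutations (2) and all of clade B's (1); clade A is returned because
# it has more fully matched defining mutations. A sample with no variant at any
# of WT's marker positions is reported as WT.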
def load_vcf(path: str) -> pd.DataFrame:
df = pd.read_csv(
path, comment="#", sep="\t", usecols=[1, 3, 4], names=["pos", "ref", "alt"]
)
return df
def parse_arguments():
parser = argparse.ArgumentParser(
description="Assigns clades from VCF using a list with each clade and its mutations"
)
parser.add_argument(
"--clade-mutation-list",
"-m",
required=True,
dest="clades",
help="CSV file containing list with each clade and its defining mutations,"
+ " cols: (clade,pos,ref,alt)",
)
parser.add_argument(
"--output",
"-o",
required=True,
help="ouput CSV containing each sample with its clade (basename of files will be used as sample)",
)
parser.add_argument(
"--input-vcfs",
"-i",
required=True,
nargs="+",
dest="inputs",
help="input VCFs to determine clade",
)
return parser.parse_args()
if __name__ == "__main__":
main()
|
import sys
# Clear module cache to force reloading all modules of this package.
# See https://github.com/emmetio/sublime-text-plugin/issues/35
prefix = __package__ + "." # don't clear the base package
for module_name in [
module_name
for module_name in sys.modules
if module_name.startswith(prefix) and module_name != __name__
]:
del sys.modules[module_name]
import sublime_plugin
from .rspec.rspec_print import rspec_print
from .rspec.execute_spec import ExecuteSpec
from .rspec.task_context import TaskContext
from .rspec.switch_between_code_and_test import SwitchBetweenCodeAndTest
from .rspec.last_copy import LastCopy
from .rspec.create_spec_file import CreateSpecFile
from .rspec.output import Output
class TestCurrentLineCommand(sublime_plugin.TextCommand):
def run(self, edit):
rspec_print("Running rspec")
context = TaskContext(self, edit)
ExecuteSpec(context).current()
class TestCurrentFileCommand(sublime_plugin.TextCommand):
def run(self, edit):
rspec_print("Running rspec")
context = TaskContext(self, edit, spec_target_is_file=True)
ExecuteSpec(context).current()
class RunLastSpecCommand(sublime_plugin.TextCommand):
def run(self, edit):
rspec_print("Running last rspec command")
context = TaskContext(self, edit)
ExecuteSpec(context).last_run()
class CopyLastCommand(sublime_plugin.TextCommand):
def run(self, edit):
rspec_print("Running copy last rspec command")
LastCopy.run()
class DisplayOutputPanelCommand(sublime_plugin.TextCommand):
def run(self, edit):
rspec_print("Displaying output panel")
context = TaskContext(self, edit)
context.display_output_panel()
class SwitchBetweenCodeAndTestCommand(sublime_plugin.TextCommand):
def run(self, edit):
rspec_print("Switching between code and test")
context = TaskContext(self, edit)
SwitchBetweenCodeAndTest(context).run()
class CreateSpecFileCommand(sublime_plugin.TextCommand):
def run(self, edit):
rspec_print("Creating spec file")
context = TaskContext(self, edit)
CreateSpecFile(context).run()
def plugin_unloaded():
# Destroy output panels because the default syntax is going to disappear.
# This prevents error messages on plugin upgrade.
Output.destroy()
|
#!/usr/bin/env python3
"""
Yandex Transport/Masstransit Webdriver API. This module is to be used together
with YandexTransportProxy project (https://github.com/OwlSoul/YandexTransportProxy).
It provides limited access to the Yandex Masstransit API. While it's difficult to get all the masstransit data
of a city this way, it makes it possible to get the data for a particular stop or route, which you can
use in various automation systems (for example, an alarm that rings when your infrequent bus departs
from the terminal station).
"""
__author__ = "Yury D."
__credits__ = ["Yury D.", "Pavel Lutskov", "Yury Alexeev"]
__license__ = "MIT"
__version__ = "1.0.2"
__maintainer__ = "Yury D."
__email__ = "[email protected]"
__status__ = "Beta"
import socket
import json
import uuid
import threading
from yandex_transport_webdriver_api.logger import Logger
# NOTE: This project uses camelCase for function names. While PEP8 recommends using snake_case for these,
# the project in fact implements the "quasi-API" for Yandex Masstransit, where names are in camelCase,
# for example, getStopInfo. The correct name for this function according to PEP8 would be get_stop_info.
# Thus, the decision to use camelCase was made. In fact, there are a bunch of python projects which use
# camelCase, like Robot Operating System.
# I also personally find camelCase prettier than snake_case.
class YandexTransportProxy:
"""
YandexTransportProxy class, provides proxy access to Yandex Transport Masstransit API.
"""
# Result error codes
RESULT_OK = 0
RESULT_NO_DATA = 1
RESULT_GET_ERROR = 2
RESULT_NO_YANDEX_DATA = 3
def __init__(self, host, port):
# Host and port of Yandex Transport Proxy server
self.host = host
self.port = port
# Buffer size, to receive data.
self.buffer_size = 4096
# Logger
self.log = Logger(Logger.NONE)
# Log buffer messages to a file
self.log_buffer = False
self.log_buffer_file = 'ytapi-wd-python.log'
def callback_function_example(self, data):
"""
Example of a callback function. This will be called each time a complete JSON message arrives from the server.
:param data: JSON data message received
:return:
"""
print("Received data:" + str(data))
class ListenerThread(threading.Thread):
"""
Listener Thread class, one is created for each incoming query.
"""
# pylint: disable = R0913
def __init__(self, app, sock, query_id, command, callback):
super().__init__()
self.app = app
self.command = command
self.query_id = query_id
self.sock = sock
self.callback_function = callback
# pylint: enable = R0913
def run(self):
self.app._single_query_blocking(self.sock, self.command, self.callback_function)
self.app._disconnect(self.sock)
self.app.log.debug("Listener thread for query with ID="+str(self.query_id) + " terminated.")
def _single_query_blocking(self, sock, command, callback=None):
"""
Execute single blocking query
:param sock: socket
:param command: command to execute
:param callback: if not None, this function will be called each time JSON arrives
Function format: def callback(data)
:return: array of dictionaries containing: {method, received data},
"""
result = []
command = command + '\n'
sock.sendall(bytes(command, 'utf-8'))
completed = False
buffer = ''
while not completed:
# Receive data from the server
data = sock.recv(self.buffer_size)
response = bytes(data).decode('utf-8')
for c in response:
if c == '\0':
# Logging messages to a buffer file if asked to
# This is a workaround measure against
# Issue #1: JSON Parsing Error for Vehicle Data Collection
if self.log_buffer:
f = open(self.log_buffer_file, 'a', encoding='utf-8')
f.write("Buffer length: " + str(len(buffer)) + "\n")
f.write(buffer)
f.write('\n')
try:
json_data = json.loads(buffer)
except Exception as e:
# Raise an exception, this might be problematic.
if self.log_buffer:
f.write("! FAILED !\n")
f.write('\n')
f.close()
sock.close()
raise Exception("Exception (_single_query_blocking) : JSON loads : "+str(e))
if self.log_buffer:
f.write("! SUCCESS !\n")
f.write('\n')
f.close()
buffer = ''
# Executing callback if asked to
if callback is not None:
callback(json_data)
# TODO: probably make these separate functions and remove from here
# Check if errors occurred
if 'error' in json_data:
if json_data['error'] != self.RESULT_OK:
#return result, self.RESULT_JSON_HAS_ERROR, buffer
raise Exception("Exception(_single_query_blocking): "
"Yandex Transport Proxy Server signalled an error: " + json_data['message'])
# Check if expect_more_data is present and is false
if 'expect_more_data' in json_data:
# pylint: disable=C0121
if not json_data['expect_more_data']:
completed = True
# pylint: enable=C0121
if 'data' in json_data:
result.append({'method': json_data['method'], 'data': json_data["data"]})
else:
buffer += c
self.log.debug("Processing complete for query: " + command.strip())
return result
def _connect(self):
"""
Connect to the server.
:return: connection socket
"""
self.log.debug("Connecting to server " + str(self.host) + ":" + str(self.port))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
error = "OK"
try:
sock.connect((self.host, self.port))
except socket.error as e:
self.log.error(" Socket error:" + str(e))
sock = None
error = str(e)
self.log.debug("Connected to server " + str(self.host) + ":" + str(self.port))
return sock, error
def _disconnect(self, sock):
"""
Disconnect from the server
:param sock: socket
:return: none
"""
if sock is not None:
self.log.debug("Disconnecting from the server " + str(self.host) + ":" + str(self.port))
sock.close()
else:
self.log.error("Socket is empty!")
def _execute_get_query(self, command, payload, query_id=None,
blocking=True, timeout=0,
callback=None):
"""
Meta-command to implement getXXX requests.
:param command: string, command to implement, for example get_stop_info
:param payload: string, command payload, url of stop or route
:param query_id: string, query_id to be passed to the server, all responses to this query will return with
this value
:param blocking: boolean, blocking or non-blocking
:param timeout: integer, connection timeout value, 0 to switch off
:param callback: callback function to be called each time response JSON arrives, for non-blocking scenario
:return: array of received data (strings containing JSON)
"""
sock, error = self._connect()
if sock is None:
raise Exception("Exception (_execure_get_query): Failed to connect to server,"
" host = " + str(self.host) + "," +
" post = " + str(self.port))
# Generate UUID if it is not set
if query_id is None:
query_id = uuid.uuid4()
command = command + '?' + 'id=' + str(query_id) + '?' + payload
self.log.debug("Executing query: " + command)
if blocking:
# This might take a while, will block
if timeout > 0:
sock.settimeout(timeout)
result = self._single_query_blocking(sock, command)
self._disconnect(sock)
else:
# This will return immediately, will not block
result = ''
self.ListenerThread(self, sock, query_id, command, callback).start()
# Well, turns out if len(result) > 0 is less productive than if result.
# This ugly thing is a "result", pun not intended.
if blocking:
if result: # if len(result) > 0
return result
raise Exception("Exception (_execute_get_query): No data is received") # if len(result) == 0
return None
# ---------------------------------------------------------------------------------------------------------------- #
# ---- SERVER CONTROL METHODS ---- #
# #
# These are the methods to control and test the Yandex Transport Proxy server behaviour. #
# ---------------------------------------------------------------------------------------------------------------- #
# NOTE: there are 5 parameters for get... methods, not counting self. All are important.
# Linter will need to deal with it.
# pylint: disable = R0913
def get_echo(self, text, query_id=None, blocking=True, timeout=0, callback=None):
"""
Test command, will echo back the text. Note, the "echo" query is added to the Query Queue of the
YandexTransportProxy server, and will be executed only when it is its turn.
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param text: string, anything you like for-example "Testing"
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: string, should be equal to text parameter.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getEcho', text, query_id, blocking, timeout, callback)
if blocking:
return result[-1]['data']
else:
return
# ---------------------------------------------------------------------------------------------------------------- #
# ---- CORE API METHODS ---- #
# #
# These are the methods which implement access to identically named Yandex Transport API functions. #
# Each one usually returns pretty huge amount of data in JSON format. #
# ---------------------------------------------------------------------------------------------------------------- #
def get_stop_info(self, url, query_id=None, blocking=True, timeout=0, callback=None):
"""
Request information about one mass transit stop from Yandex API.
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param url: Yandex Maps URL of the stop.
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: dictionary containing information about requested stop. Use
json.dumps() function to get original Yandex API JSON.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getStopInfo', url, query_id, blocking, timeout, callback)
if blocking:
return result[-1]['data']
else:
return
def get_line(self, url, query_id=None, blocking=True, timeout=0, callback=None):
"""
Request information about one mass transit line from Yandex API.
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param url: Yandex Maps URL of the stop.
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: dictionary containing information about requested stop. Use
json.dumps() function to get original Yandex API JSON.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getLine', url, query_id, blocking, timeout, callback)
if blocking:
return result[-1]['data']
else:
return
def get_route_info(self, url, query_id=None, blocking=True, timeout=0, callback=None):
"""
Request information about one mass transit route from Yandex API.
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param url: Yandex Maps URL of the route.
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: dictionary containing information about requested route. Use
json.dumps() function to get original Yandex API JSON.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getRouteInfo', url, query_id, blocking, timeout, callback)
if blocking:
return result[-1]['data']
else:
return
def get_vehicles_info(self, url, query_id=None, blocking=True, timeout=0, callback=None):
"""
Request information about vehicles of one mass transit route from Yandex API.
Seems to be deprecated as 03-25-2019
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param url: Yandex Maps URL of the route.
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: dictionary containing information about vehicles of requested route. Use
json.dumps() function to get original Yandex API JSON.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getVehiclesInfo', url, query_id, blocking, timeout, callback)
if blocking:
return result[-1]['data']
else:
return
def get_vehicles_info_with_region(self, url, query_id=None, blocking=True, timeout=0, callback=None):
"""
Request information about vehicles of one mass transit route from Yandex API.
New method starting 03-25-2019, now includes "region" info.
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param url: Yandex Maps URL of the route.
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: dictionary containing information about vehicles of requested route. Use
json.dumps() function to get original Yandex API JSON.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getVehiclesInfoWithRegion', url, query_id, blocking, timeout, callback)
if blocking:
return result[-1]['data']
else:
return
def get_layer_regions(self, url, query_id=None, blocking=True, timeout=0, callback=None):
"""
I have absolutely no idea what this thing does at the moment.
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param url: Yandex Maps URL of the route.
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: dictionary containing information about some weird stuff. Use
json.dumps() function to get original Yandex API JSON.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getLayerRegions', url, query_id, blocking, timeout, callback)
if blocking:
return result[-1]['data']
else:
return
def get_all_info(self, url, query_id=None, blocking=True, timeout=0, callback=None):
"""
Wildcard method, will return ALL Yandex Masstransit API responses from given URL.
For example, "route" url will return get_route_info and get_vehicles_info in sequence.
:param query_id: string, ID of the query to send to the server, all responses to this query will
contain this exact ID.
Default is None, in this case it will be randomly generated,
You can get it from the callback function by using data['id']
if your callback function is like this: callback_fun(data)
:param url: Yandex Maps URL of the route.
:param blocking: boolean, default is True, will block until the final response is received.
Note: this may take a while, several seconds or more.
:param timeout: integer, default is off, will raise a socket.timeout exception if no data is received
during this period.
Mind the server delay between processing queries, this value definitely should be bigger!
If set to 0 - will wait indefinitely.
:param callback: Callback function to call when a new JSON is received.
Used if block is set to False.
:return: for blocking mode: array of dictionaries in the following format:
{'method': method, 'data': data}
where method - the API method called
(get_stop_info, get_route_info, get_vehicles_info)
data - another dictionary containing all data for given method.
for non-blocking mode: empty string
"""
result = self._execute_get_query('getAllInfo', url, query_id, blocking, timeout, callback)
if blocking:
return result
else:
return
# pylint: enable = R0913
# ---------------------------------------------------------------------------------------------------------------- #
# ---- PARSING METHODS ---- #
# #
# Basically, Core API methods are more than enough. Parsing methods are used to simplify several tasks, #
# like getting only the list of stops for a route, counting vehicles on the route, #
# counting how many stops a buss will need to pass till it arrives to desired stop, #
# or just providing all information for Information Displays (bus number, bus route, time to wait). #
# Practically all of them are static stateless methods, which accept getXXXInfo as input. #
# -----------------------------------------------------------------------------------------------------------------#
@staticmethod
def count_vehicles_on_route(vehicles_info, with_region=True):
"""
Count vehicles on the route. As simple as counting number of elements in
vehicle_info['data']['vehicles'].
:param vehicles_info: data from get_vehicles_info or get_vehicles_info_with_region method
:param with_region: True by default, if True, vehicles_info is expected to be from get_vehicles_info_with_region,
if False - from get_vehicles_info.
:return:
"""
if vehicles_info is None:
return None
# If data received from get_vehicles_info_with_region
if with_region:
return len(vehicles_info['data']['vehicles'])
# SEEMS DEPRECATED: if data received from get_vehicles_info
return len(vehicles_info['data'])
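# Illustrative sketch (not part of the original module): a blocking one-shot
# query against a running YandexTransportProxy server. The host, port and the
# stop URL below are assumptions for the example only.
# proxy = YandexTransportProxy('127.0.0.1', 25555)
# stop_info = proxy.get_stop_info('https://yandex.ru/maps/.../stops/...', timeout=120)
# print(stop_info)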
if __name__ == '__main__':
print("Do not run this module on its own!")
|
from __future__ import absolute_import, division, print_function
import numpy as np
import numpy.testing as npt
import isoensemble
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestClassifier
import time
def get_leaf_counts(pmdtrf):
numtrees=len(pmdtrf.estimators_) #np.int(self.rfc.get_params()['n_estimators'])
num_leaves=np.zeros(numtrees,dtype='float')
for itree in np.arange(numtrees):
#num_leaves[itree]=len(pmdtrf.estimators_[itree].tree_.leaf_nodes)
num_leaves[itree]=pmdtrf.estimators_[itree].tree_array.leaf_ids_obj.curr_size
return num_leaves
def get_peak_leaves(pmdtrf):
numtrees=len(pmdtrf.estimators_) #np.int(self.rfc.get_params()['n_estimators'])
num_leaves=np.zeros(numtrees,dtype='float')
for itree in np.arange(numtrees):
#num_leaves[itree]=pmdtrf.estimators_[itree].tree_.peak_leaves
num_leaves[itree]=pmdtrf.estimators_[itree].tree_array.peak_leaves
return num_leaves
def get_num_iterations(pmdtrf):
numtrees=len(pmdtrf.estimators_) #np.int(self.rfc.get_params()['n_estimators'])
num_leaves=np.zeros(numtrees,dtype='float')
for itree in np.arange(numtrees):
#num_leaves[itree]=pmdtrf.estimators_[itree].tree_.num_iterations
num_leaves[itree]=pmdtrf.estimators_[itree].num_iterations
return num_leaves
def load_data_set():
# Load data
data = load_boston()
y = data['target']
X = data['data']
features = data['feature_names']
# Specify monotone features
incr_feat_names = ['RM']#['RM', 'RAD']
decr_feat_names = ['CRIM', 'LSTAT'] # ['CRIM', 'DIS', 'LSTAT']
# get 1 based indices of incr and decr feats
incr_feats = [i + 1 for i in np.arange(len(features)) if
features[i] in incr_feat_names]
decr_feats = [i + 1 for i in np.arange(len(features)) if
features[i] in decr_feat_names]
# Convert to classification problem
# Multi-class
y_multiclass = y.copy()
thresh1 = 15
thresh2 = 21
thresh3 = 27
y_multiclass[y > thresh3] = 3
y_multiclass[np.logical_and(y > thresh2, y <= thresh3)] = 2
y_multiclass[np.logical_and(y > thresh1, y <= thresh2)] = 1
y_multiclass[y <= thresh1] = 0
# Binary
y_binary = y.copy()
thresh = 21 # middle=21
y_binary[y_binary < thresh] = -1
y_binary[y_binary >= thresh] = +1
return X, y_binary, y_multiclass, incr_feats, decr_feats
# Load data
max_N = 400
np.random.seed(13) # comment out for changing random training set
X, y_binary, y_multiclass, incr_feats, decr_feats = load_data_set()
indx_train=np.random.permutation(np.arange(X.shape[0]))[0:max_N]
inx_test=np.asarray([i for i in np.arange(max_N) if i not in indx_train ])
X_train=X[indx_train,:]
X_test=X[inx_test,:]
y_train=dict()
y_test=dict()
n_classes=dict()
y_train['binary']=y_binary[indx_train]
y_train['multiclass']=y_multiclass[indx_train]
y_test['binary']=y_binary[inx_test]
y_test['multiclass']=y_multiclass[inx_test]
n_classes['binary']=2
n_classes['multiclass']=4
def test_model_fit():
# Specify hyperparams for model solution
n_estimators = 100#200
mtry = 3
mt_type='ict'
require_abs_impurity_redn=True
feat_data_types='auto'
base_tree_algo='scikit' # isotree
normalise_nmt_nodes=2
min_split_weight=0.25
split_criterion='both_sides_have_min_sample_wgt'
split_class='parent_class'
split_weight='hybrid_prob_empirical'
min_split_weight_type='prop_N' #num_pts
simplify=False
acc_correct={'multiclass-nmt': 0.752,
'binary-nmt': 0.84799999999999998,
'multiclass-mt': 0.74399999999999999,
'binary-mt': 0.85599999999999998}
acc_correct_scikit={'multiclass-mt': 0.76800000000000002,
'binary-nmt': 0.86399999999999999,
'binary-mt': 0.872,
'multiclass-nmt': 0.72799999999999998}
acc=dict()
oob_score=dict()
for response in ['multiclass']:#,binary'multiclass']: #'multiclass']:#
y_train_=y_train[response]
y_test_=y_test[response]
n_classes_=n_classes[response]
for constr in ['mt']:#,'nmt']:
clf = isoensemble.IsoRandomForestClassifier(n_estimators=n_estimators,
criterion='gini_l1',
random_state=11,
feat_data_types=feat_data_types,
max_features=mtry,
monotonicity_type=None if constr=='nmt' else mt_type,
normalise_nmt_nodes=normalise_nmt_nodes,
require_abs_impurity_redn=require_abs_impurity_redn,
incr_feats=incr_feats if constr =='mt' else None,
decr_feats=decr_feats if constr =='mt' else None,
oob_score=True,
base_tree_algo=base_tree_algo,
min_split_weight=min_split_weight,
min_split_weight_type=min_split_weight_type,
split_criterion=split_criterion,
split_class=split_class,
split_weight=split_weight,
simplify=simplify
)
# Assess fit
start=time.time()
clf.fit(X_train, y_train_)
solve_durn=time.time()-start
print('solve took: ' + str(solve_durn) + ' secs')
#
y_pred = clf.predict(X_test)
acc[response + '-' + constr] = np.sum(y_test_ == y_pred) / len(y_test_)
oob_score[response + '-' + constr]=clf.oob_score_ #[(clf_sk.tree_.node_count+1.)/2., len(clf_mydt.tree_.leaf_nodes), len(clf_iso.tree_.leaf_nodes), len(clf_oa.tree_.leaf_nodes)]
#print(acc[response + '-' + constr])
print(np.mean(get_peak_leaves(clf)))
print(np.mean(get_leaf_counts(clf)))
# Measure monotonicity
# mcc[response + '-' + constr] = np.mean(clf.calc_mcc(X_test,incr_feats=incr_feats, decr_feats=decr_feats))
print('acc: ' + str(acc))
print('n oob_score: ', str(oob_score))
# BENCHMARK binary MT acc: 0.864, time: 25.7secs
#for key in acc.keys():
# npt.assert_almost_equal(acc[key],acc_correct_scikit[key])
# print('mcc: ' + str(mcc))
# npt.assert_almost_equal(clf.oob_score_, 0.85999999999)
# npt.assert_almost_equal(acc_mc, 0.944999999999)
def benchmark_against_scikit():
# binary should match, multi-class could be different because
# pmsvm uses monotone ensembling but scikit uses OVR.
#
# Specify hyperparams for model solution
n_estimators = 200
mtry = 3
require_abs_impurity_redn=True
feat_data_types='auto'
acc=dict()
oob_score=dict()
solve_time=dict()
# Solve models
for response in ['multiclass','binary']:#,'multiclass']:
y_train_=y_train[response]
y_test_=y_test[response]
n_classes_=n_classes[response]
for model in ['isotree','scikit']:
if model=='isotree':
clf = isoensemble.IsoRandomForestClassifier(n_estimators=n_estimators,
criterion='gini',
random_state=11,
feat_data_types=feat_data_types,
max_features=mtry,
monotonicity_type=None,
normalise_nmt_nodes=0,
require_abs_impurity_redn=require_abs_impurity_redn,
incr_feats=None,
decr_feats=None,
oob_score=True
)
#clf_iso=clf
else:
clf = RandomForestClassifier(n_estimators=n_estimators,
criterion='gini',
random_state=11,
max_features=mtry,
oob_score=True)
# Assess fit
start=time.time()
clf.fit(X_train, y_train_)
durn=time.time()-start
#
# test constraints are satisfied
#res=clf.predict(clf.constraints[0][0,:,1])-clf.predict(clf.constraints[0][0,:,0])
# if model=='pmrf':
# support_vectors[response + '-' + model]= clf.support_vectors_[0][0,:]
# n_support_vectors[response + '-' + model]= np.mean(clf.n_support_)
# dual_coef[response + '-' + model]=np.flip(np.sort(np.abs(clf.dual_coef_[0])),axis=0)
# else:
# support_vectors[response + '-' + model]= clf.support_vectors_[0]
# n_support_vectors[response + '-' + model]= np.sum(clf.n_support_[0:n_classes[response]])
# dual_coef[response + '-' + model]=np.flip(np.sort(np.abs(clf.dual_coef_[0])),axis=0)
y_pred = clf.predict(X_test)
#oob_scores[response + '-' + model] = clf.oob_score_
solve_time[response + '-' + model]=durn
acc[response + '-' + model] = np.sum(y_test_ == y_pred) / len(y_test_)
oob_score[response + '-' + model]=clf.oob_score_
#oob_scores[response + '-' + model] = clf.oob_score_
#print(acc[response + '-' + model])
# Measure monotonicity
#mcc[response + '-' + constr] = np.mean(clf.calc_mcc(X,incr_feats=incr_feats, decr_feats=decr_feats))
print('acc: ' + str(acc))
print('n oob_score: ', str(oob_score))
#print(n_support_vectors)
#print(solve_time)
# pmsvm_coefs=dual_coef['binary-pmrf']
# scikit_coefs=dual_coef['binary-scikit']
# min_len=np.min([pmsvm_coefs.shape[0],scikit_coefs.shape[0]])
# diff=np.sum(np.abs(scikit_coefs[0:min_len]-pmsvm_coefs[0:min_len]))/np.sum(np.abs(scikit_coefs[0:min_len]))
# print('dual coef abs diff: ' + str(diff))
#print(support_vectors)
test_model_fit()
#benchmark_against_scikit()
|
"""
Cosmological tools
"""
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
class CosmologicalTools:
def __init__(self, OmegaM=0.2726, OmegaL=0, OmegaR=0, h=0.704):
# initializing the cosmology
self.OmegaM = OmegaM # matter density parameter
self.OmegaL = OmegaL # dark energy density parameter
self.OmegaR = OmegaR # radiation density parameter
self.OmegaK = 1.0 - (OmegaM + OmegaL + OmegaR) # curvature parameter
self.h = h # normalization for the hubble parameter
self.H0 = h * 100 # hubble constant at z=0 100*h km/s/Mpc
# physical constants
self.c = 299792.458 # km/s
self.G = 6.67408e-11 # m^3/(kg s^2)
# to help determine distance measure
if self.OmegaK > 0:
k = -1
self.Rc = np.sqrt((-k * self.c ** 2) / ((self.H0 ** 2) * self.OmegaK)) # radius of curvature
### ###
### Here are cosmography tools ###
### ###
def HubbleParameterZ(self, z):
"""
Hubble parameter as a function of redshift
Redshift can be entered as a number or an array
Returns in units of km/s/Mpc
"""
Omz = self.OmegaM * (1 + z) ** 3
Olz = self.OmegaL
Orz = self.OmegaR * (1 + z) ** 4
Okz = self.OmegaK * (1 + z) ** 2
Hz = self.H0 * np.sqrt(Omz + Olz + Orz + Okz)
return Hz
def OmegaMZ(self, z):
"""
Matter density parameter as a function of z
Redshift can be entered as a number or an array
"""
H = self.H0 / self.HubbleParameterZ(z)
Omz = self.OmegaM * (1 + z) ** 3
return Omz * (H ** 2)
def OmegaLZ(self, z):
"""
Dark energy density parameter as a function of z
Redshift can be entered as a number or an array
"""
H = self.H0 / self.HubbleParameterZ(z)
Olz = self.OmegaL
return Olz * (H ** 2)
def OmegaRZ(self, z):
"""
Radiation density parameter as a function of z
Redshift can be entered as a number or an array
"""
H = self.H0 / self.HubbleParameterZ(z)
Orz = self.OmegaR * (1 + z) ** 4
return Orz * (H ** 2)
def OmegaKZ(self, z):
"""
Curvature parameter as a function of z
Redshift can be entered as a number or an array
"""
H = self.H0 / self.HubbleParameterZ(z)
Okz = self.OmegaK * (1 + z) ** 2
return Okz * (H ** 2)
def comovingDistance(self, z):
"""
Calculates the comoving distance as a function of redshift
Redshift must be a number (not an array!)
Returns the comoving distance between 0 and z in Mpc
"""
def integrand(x):
return self.c / self.HubbleParameterZ(x)
return quad(integrand, 0, z)[0]
def distanceMeasure(self, z):
"""
Calculates the distance measure in the case that the universe is open or flat
Redshift must be a number
Returns in Mpc
"""
if self.OmegaK > 0:
return self.Rc * np.sinh(self.comovingDistance(z) / self.Rc)
else:
return self.comovingDistance(z)
def angularDiameter(self, z):
"""
Angular diameter distance as a function of redshift
Redshift must be a number - integrates between 0 and z
Returns in Mpc/rad
"""
return self.distanceMeasure(z) / (1 + z)
def luminosityDistance(self, z):
"""
Luminosity distance as a function of redshift
Redshift must be a number
Returns in Mpc
"""
return self.distanceMeasure(z) * (1 + z)
def distanceModulus(self, z):
"""
Distance modulus as z
Redshift must be a number
"""
return 5 * np.log10(self.luminosityDistance(z) * 1e6 / 10) # convert to pc in argument
def lookbackTime(self, z):
# Lookback time at z
# Redshift must be a number
# From z=0 to z and returns in gigayears
def integrand(x):
return (self.HubbleParameterZ(x) * (1 + x)) ** (-1)
return quad(integrand, 0, z)[0] * 9.77799e2
### ###
### Here are halo tools ###
### ###
def deltavir(self, z):
# ---
# Input(s): redshift (might not be necessary?)
# Return(s): virial overdensity (dimensionless)
# ...
# From: Bryan and Norman 1998 arXiv:astro-ph/9710107v1
# ---
x = self.OmegaMZ(z) - 1
A = 18 * np.pi ** 2
B = 82 * x
C = 39 * x ** 2
deltac = A + B - C
return deltac / self.OmegaMZ(z)
def cvir(self, mvir):
"""
Input(s): mvir in Msun
Return(s): virial concentration (dimensionless)
...
From: Klypin 2011 arxiv:1002.3660v4
"""
# TODO: this should be redshift dependent...
return 9.6 * ((mvir * self.h / 1e12)**(-0.075))
def f(self,x):
return np.log(1 + x) - (x / (1 + x))
def c200(self, cvir, z):
"""
...
From: van der Marel 2012 arXiv:1205.6864v1
"""
q = 2.058 # 200/(self.OmegaMZ(z)*self.deltavir(z)) # does omega M need to be z-dependent?
error = 1
i = 0
guess = cvir
c200val = 0
while (i < 100) or (error < 1e-10): # take 100 steps with tolerance of 1e-10
c200val = cvir * (self.f(guess) / (q * self.f(cvir))) ** (1. / 3.)
error = np.abs(cvir - c200val)
guess = c200val
i = i + 1
return c200val
def m200frac(self, cvir, z):
"""
...
From: van der Marel 2012 arXiv:1205.6864v1
"""
A = self.f(self.c200(cvir, z))
B = self.f(cvir)
return A / B
def afrac(self,c): #a/rs
A = (2*self.f(c))**(-1/2)
B = 1/c
return (A-B)**(-1)
def MHfrac(self,c): # Mh/Mvir
A = self.afrac(c)**2
B = 2*self.f(10)
return A/B
def rvir(self, z, mvir):
'''
In physical units! kpc
...
From van der Marel 2012 arXiv:1205.6864v1
'''
# be sure mvir is in Msun, not 10^10 Msun
A = self.deltavir(z)*self.OmegaMZ(z)/97.2
B = mvir*self.h/1e12 # divide by 1e12 since Mvir is in Msun
# divide by 100 if Mvir is in 10^10 Msun
return (206/self.h)*(B/A)**(1/3)
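# Illustrative sketch (not part of the original module): exercising the distance
# measures for an assumed flat LambdaCDM cosmology. Parameter values are only
# meant to show the call pattern, not reference numbers.
if __name__ == "__main__":
    cosmo = CosmologicalTools(OmegaM=0.3, OmegaL=0.7, OmegaR=0, h=0.7)
    z = 1.0
    print("H(z=1)    [km/s/Mpc]:", cosmo.HubbleParameterZ(z))
    print("D_C(z=1)  [Mpc]     :", cosmo.comovingDistance(z))
    print("D_L(z=1)  [Mpc]     :", cosmo.luminosityDistance(z))
    print("mu(z=1)             :", cosmo.distanceModulus(z))
    print("t_lb(z=1) [Gyr]     :", cosmo.lookbackTime(z))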
|
import pytest
from cpc_fusion._utils.abi import (
filter_by_name,
)
ABI_FUNC_1 = {
"constant": False,
"inputs": [],
"name": "func_1",
"outputs": [],
"type": "function",
}
ABI_CONSTRUCTOR = {
"constant": False,
"inputs": [],
"type": "constructor",
}
ABI_FALLBACK = {
"constant": False,
"type": "fallback",
}
ABI_FUNC_2_SIG_A = {
"constant": False,
"inputs": [
{"name": "a", "type": "uint256"},
],
"name": "func_2",
"outputs": [],
"type": "function",
}
ABI_FUNC_2_SIG_B = {
"constant": False,
"inputs": [
{"name": "a", "type": "uint256"},
{"name": "b", "type": "uint256"},
],
"name": "func_2",
"outputs": [],
"type": "function",
}
ABI_FUNC_3 = {
"constant": False,
"inputs": [],
"name": "func_3",
"outputs": [],
"type": "function",
}
ABI = [
ABI_CONSTRUCTOR,
ABI_FALLBACK,
ABI_FUNC_1,
ABI_FUNC_2_SIG_A,
ABI_FUNC_2_SIG_B,
ABI_FUNC_3,
]
@pytest.mark.parametrize(
'name,expected',
(
('func_1', [ABI_FUNC_1]),
('func_2', [ABI_FUNC_2_SIG_A, ABI_FUNC_2_SIG_B]),
('func_3', [ABI_FUNC_3]),
('does_not_exist', []),
)
)
def test_filter_by_name(name, expected):
actual_matches = filter_by_name(name, ABI)
assert actual_matches == expected
|
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="aiobooru",
version="0.1.2",
author="Yui Yukihira",
author_email="[email protected]",
description="A danbooru API helper using aiohttp",
license="MIT",
packages=find_packages(where='src'),
long_description=read('README.md'),
long_description_content_type='text/markdown',
install_requires=["aiohttp>=3.5.4"],
extras_require={"aiofiles": ["aiofiles"]},
package_dir={"":"src"},
python_requires='>=3.7.0',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
url='https://github.com/YuiYukihira/aiobooru'
) |
#!/usr/bin/python
""" Task Monitor Class """
from __future__ import print_function
import textwrap
import time
import sys
from vctools import Logger
class Tasks(Logger):
""" Manage VMware Tasks """
def __init__(self):
pass
@classmethod
def question_and_answer(cls, host, **answered):
"""
Method handles the questions and answers provided by the program.
Args:
host (obj): VirtualMachine object
answered (dict): A key value pair of already answered questions.
"""
if host.runtime.question:
try:
qid = host.runtime.question.id
if not qid in answered.keys():
# systemd does not provide a mechanism for disabling cdrom lock
if 'CD-ROM door' in host.runtime.question.text:
choices = {}
for option in host.runtime.question.choice.choiceInfo:
choices.update({option.key : option.label})
for key, val in choices.iteritems():
if 'Yes' in val:
answer = key
else:
print('\n')
print('\n'.join(textwrap.wrap(host.runtime.question.text, 80)))
choices = {}
for option in host.runtime.question.choice.choiceInfo:
choices.update({option.key : option.label})
sys.stdout.write('\t%s: %s' % (option.key, option.label))
warn = textwrap.dedent("""\
Warning: The VM may be in a suspended
state until this question is answered.""").strip()
print(textwrap.fill(warn, width=80))
while True:
answer = raw_input('\nPlease select number: ').strip()
# check if answer is an appropriate number
if int(answer) <= len(choices.keys()) - 1:
break
else:
continue
if answer:
host.AnswerVM(qid, str(answer))
answered.update({qid:answer})
return answered
# pass onto next iteration during race condition in task_monitor while loop
except AttributeError:
pass
return None
@classmethod
def task_monitor(cls, task, question=True, host=False):
"""
Method monitors the state of called task and outputs the current status.
Some tasks require that questions be answered before completion, and are
optional arguments in the case that some tasks don't require them. It
will continually check for questions while in progress. The VM object is
required if the question argument is True.
Args:
task (obj): TaskManager object
question (bool): Enable or Disable Question
host (obj): VirtualMachine object
Returns:
boolean (bool): True if successful or False if error
"""
# keep track of answered questions
answered = {}
while task.info.state == 'running':
while task.info.progress:
if question and host:
result = Tasks.question_and_answer(host, **answered)
if result:
answered.update(result)
if isinstance(task.info.progress, int):
sys.stdout.write(
'\r[' + task.info.state + '] | ' + str(task.info.progress)
)
sys.stdout.flush()
if task.info.progress == 100:
sys.stdout.write(
'\r[' + task.info.state + '] | ' + str(task.info.progress)
)
sys.stdout.flush()
break
else:
sys.stdout.flush()
break
# pause method to ensure a state of error or success is caught.
time.sleep(5)
if task.info.state == 'error':
# collect all the error messages we can find
errors = []
errors.append(task.info.error.msg)
for items in task.info.error.faultMessage:
errors.append(items.message)
sys.stdout.write('\r[' + task.info.state + '] | ' + ' '.join(errors) + '\n')
Tasks.logger.info('[' + task.info.state + '] | ' + ' '.join(errors))
sys.stdout.flush()
return False
if task.info.state == 'success':
sys.stdout.write('\r[' + task.info.state + '] | task successfully completed.\n')
Tasks.logger.info('[ %s ] task successfully completed.', task.info.state)
sys.stdout.flush()
return True
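# Illustrative sketch (not part of the original module): typical call pattern
# with pyVmomi, assuming `vm` is a connected vim.VirtualMachine object.
# task = vm.PowerOnVM_Task()
# success = Tasks.task_monitor(task, question=True, host=vm)
# if not success:
#     print('power on failed, see the log for details')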
|
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# File: vision_tool.py
# Author: Luis Monteiro
#
# Created on nov 17, 2019, 22:00 PM
# ------------------------------------------------------------------------------------------------
# ################################################################################################
#
# -----------------------------------------------------------------------------
# VisionTool
# -----------------------------------------------------------------------------
class VisionTool:
#
# -----------------------------------------------------
# process
# -----------------------------------------------------
#
def process(self, frame):
raise RuntimeError("VisionTool::process not found")
# ################################################################################################
# ------------------------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------------------------
# ################################################################################################
|
# https://leetcode.com/problems/group-anagrams/
# Medium
# Time Complexity: O(n * s log s) where n = len(list) and s = length of longest string in the list
# We can reduce the time complexity by counting sort
# Space Complexity: O(n)
from typing import List
class Solution:
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
d = {}
for i in range(len(strs)):
word = ''.join(sorted(strs[i]))
d[word] = d.get(word, []) + [strs[i]]
return list(d.values())
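# Illustrative sketch (not part of the original solution): a character-count key
# avoids the per-word sort, giving O(n * s) overall (assumes lowercase a-z input,
# as in the problem statement).
# def group_anagrams_counting(strs):
#     d = {}
#     for s in strs:
#         counts = [0] * 26
#         for ch in s:
#             counts[ord(ch) - ord('a')] += 1
#         key = tuple(counts)
#         d[key] = d.get(key, []) + [s]
#     return list(d.values())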
|
from clients import Client
class Account:
def __init__(self, clients, number, balance=0):
self.balance = 0
self.clients = clients
self.number = number
self.operations = []
self.deposit(balance)
def resume(self):
print('-' * 34)
print(f'CC Número: {self.number} Saldo: {self.balance:10.2f}')
for client in self.clients:
print(f'\nCliente: {client.name}\nTelefone: {client.phone}')
print('-' * 34)
def withdraw(self, value):
if self.balance >= value:
self.balance -= value
self.operations.append(['SAQUE', value])
return True
else:
print(f'\nSaldo insuficiente!\n')
return False
def deposit(self, value):
self.balance += value
self.operations.append(['DEPÓSITO', value])
def extract(self):
print(f'Extrato CC Nº {self.number}\n')
for operation in self.operations:
print(f'{operation[0]:10s} {operation[1]:10.2f}')
print(f'\n Saldo: {self.balance:10.2f}\n')
class SpecialAccount(Account):
def __init__(self, clients, number, balance=0, limit=0):
Account.__init__(self, clients, number, balance)
self.limit = limit
def withdraw(self, value):
if self.balance + self.limit >= value:
self.balance -= value
self.operations.append(['SAQUE', value])
return True
else:
return Account.withdraw(self, value)
def extract(self):
Account.extract(self)
print(f' Limite: {self.limit:9.2f}')
print(f'Disponível: {self.limit + self.balance:9.2f}\n')
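# Illustrative sketch (not part of the original module): basic use of the two
# account types. The Client(name, phone) signature is assumed from how resume()
# reads client.name and client.phone above.
# client1 = Client('João da Silva', '5555-1234')
# account = SpecialAccount([client1], 1, balance=500, limit=1000)
# account.withdraw(1200)   # allowed: 1200 <= balance + limit
# account.extract()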
|
"""empty message
Revision ID: 3dcef2e3c442
Revises: 1349a2c924f4
Create Date: 2019-03-29 07:23:12.851270
"""
# revision identifiers, used by Alembic.
revision = '3dcef2e3c442'
down_revision = '1349a2c924f4'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('section_name_key', 'section', type_='unique')
op.drop_constraint('uq_section_order', 'section', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('uq_section_order', 'section', ['order'])
op.create_unique_constraint('section_name_key', 'section', ['name'])
# ### end Alembic commands ###
|
from __future__ import absolute_import
from django.db.models.query import QuerySet
from .base import BaseAPITestCase
from contentcuration.models import Channel
from contentcuration.models import ContentNode
from contentcuration.models import DEFAULT_CONTENT_DEFAULTS
from contentcuration.viewsets.channel import ChannelSerializer as BaseChannelSerializer
from contentcuration.viewsets.common import ContentDefaultsSerializer
from contentcuration.viewsets.contentnode import ContentNodeSerializer
def ensure_no_querysets_in_serializer(object):
# values and values_list return list-like QuerySet objects, which can cause trouble if we aggregate the
# output into a larger json dict. DRF apparently catches and fixes this under the hood.
for field in object:
# If it's not a base type, that means it is not being serialized properly.
assert not isinstance(object[field], QuerySet), "{} is not serialized".format(
field
)
class ContentNodeSerializerTestCase(BaseAPITestCase):
def test_repr_doesnt_evaluate_querysets(self):
node_ids = [
"00000000000000000000000000000003",
"00000000000000000000000000000004",
"00000000000000000000000000000005",
]
objects = ContentNodeSerializer(
ContentNode.objects.filter(node_id__in=node_ids), many=True
)
object = ContentNodeSerializer(
ContentNode.objects.get(node_id=node_ids[0])
)
# Ensure we don't evaluate querysets when repr is called on a Serializer. See docs for
# no_field_eval_repr in contentcuration/serializers.py for more info.
obj_string = repr(object)
assert "QuerySet" not in obj_string, "object __repr__ contains queryset: {}".format(obj_string)
objs_string = repr(objects)
assert "QuerySet" not in objs_string, "objects __repr__ contains queryset: {}".format(objs_string)
class ContentDefaultsSerializerTestCase(BaseAPITestCase):
def test_create(self):
s = ContentDefaultsSerializer(data={})
self.assertTrue(s.is_valid())
self.assertEqual(DEFAULT_CONTENT_DEFAULTS, s.save())
def test_create__merge(self):
defaults = dict(
author="Buster",
aggregator="Aggregators R US",
provider="USA",
copyright_holder="Learning Equality",
license="Special Permissions",
license_description="Things go here",
auto_derive_video_thumbnail=False,
)
s = ContentDefaultsSerializer(data=defaults)
self.assertTrue(s.is_valid())
defaults.update(
auto_derive_audio_thumbnail=True,
auto_derive_document_thumbnail=True,
auto_derive_html5_thumbnail=True,
auto_derive_exercise_thumbnail=True,
auto_randomize_questions=True,
mastery_model="num_correct_in_a_row_5",
m_value=5,
n_value=5,
language=None,
)
self.assertEqual(defaults, s.save())
def test_update(self):
defaults = dict(author="Buster")
s = ContentDefaultsSerializer(defaults, data={})
self.assertTrue(s.is_valid())
self.assertEqual(defaults, s.save())
def test_update__merge(self):
defaults = dict(author="Buster", aggregator="Aggregators R US", provider="USA",)
s = ContentDefaultsSerializer(
defaults, data=dict(author="Duster", provider="Canada",)
)
self.assertTrue(s.is_valid())
self.assertEqual(
dict(author="Duster", aggregator="Aggregators R US", provider="Canada",),
s.save(),
)
def test_validate_license(self):
defaults = dict(license="")
s = ContentDefaultsSerializer(
defaults, data=dict(license="This license does not exist")
)
self.assertFalse(s.is_valid())
class ContentDefaultsSerializerUseTestCase(BaseAPITestCase):
class ChannelSerializer(BaseChannelSerializer):
content_defaults = ContentDefaultsSerializer(partial=True)
class Meta:
model = Channel
fields = (
"id",
"content_defaults",
)
read_only_fields = ("id",)
nested_writes = True
def test_save__create(self):
s = self.ChannelSerializer(
data=dict(
name="New test channel",
description="This is the best test channel",
content_defaults=dict(author="Buster"),
)
)
self.assertTrue(s.is_valid())
c = s.save()
defaults = DEFAULT_CONTENT_DEFAULTS.copy()
defaults.update(author="Buster")
self.assertEqual(defaults, c.content_defaults)
def test_save__update(self):
c = Channel(
name="New test channel",
description="This is the best test channel",
content_defaults=dict(author="Buster"),
)
c.save()
s = self.ChannelSerializer(
c, data=dict(content_defaults=dict(license="Special Permissions"))
)
self.assertTrue(s.is_valid())
c = s.save()
self.assertEqual(
dict(author="Buster", license="Special Permissions"), c.content_defaults
)
|
name = "MySpice"
from . import MySpice
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 13 09:47:17 2020
@author: cheritie
"""
from astropy.io import fits as pfits
import numpy as np
from AO_modules.calibration.CalibrationVault import calibrationVault
from AO_modules.calibration.InteractionMatrix import interactionMatrix
from AO_modules.tools.tools import emptyClass,createFolder, read_fits
def ao_calibration_from_ao_obj(ao_obj, nameFolderIntMat = None, nameIntMat = None, nameFolderBasis = None, nameBasis = None, nMeasurements=50, index_modes = None, get_basis = True):
# check if the name of the basis is specified otherwise take the nominal name
if nameBasis is None:
if ao_obj.dm.isM4:
initName = 'M2C_M4_'
else:
initName = 'M2C_'
try:
nameBasis = initName+str(ao_obj.param['resolution'])+'_res'+ao_obj.param['extra']
except:
nameBasis = initName+str(ao_obj.param['resolution'])+'_res'
ao_calib_object = emptyClass()
# check if a name for the origin folder is specified
if nameFolderBasis is None:
nameFolderBasis = ao_obj.param['pathInput']
createFolder(nameFolderBasis)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# get the modal basis :
try:
print('Loading the KL Modal Basis from: ' + nameFolderBasis+nameBasis )
M2C = read_fits(nameFolderBasis+ nameBasis+'.fits')
if index_modes is None:
M2C = M2C[:,:ao_obj.param['nModes']]
else:
M2C = M2C[:,index_modes]
if get_basis:
ao_obj.dm.coefs = M2C
ao_obj.tel*ao_obj.dm
basis = np.reshape(ao_obj.tel.OPD,[ao_obj.tel.resolution**2,M2C.shape[1]])
ao_calib_object.basis = basis
if ao_obj.param['getProjector']:
print('Computing the pseudo-inverse of the modal basis...')
cross_product_basis = np.matmul(basis.T,basis)
non_diagonal_elements = np.sum(np.abs(cross_product_basis))-np.trace(cross_product_basis)
criteria = 1-np.abs(np.trace(cross_product_basis)-non_diagonal_elements)/np.trace(cross_product_basis)
if criteria <= 1e-3:
print('Diagonality criteria: ' + str(criteria) + ' -- using the fast computation')
projector = np.diag(1/np.diag(cross_product_basis))@basis.T
else:
print('Diagonality criteria: ' + str(criteria) + ' -- using the slow computation')
projector = np.linalg.pinv(basis)
ao_calib_object.projector = projector
except:
print('ERROR: No file found! Taking a zonal basis instead..' )
M2C = np.eye(ao_obj.dm.nValidAct)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nameFolderIntMat is None:
nameFolderIntMat = ao_obj.param['pathInput']+ao_obj.param['name']+'/'
createFolder(nameFolderIntMat)
#% get the interaction matrix :
if nameIntMat is None:
if ao_obj.wfs.tag == 'pyramid':
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.param['modulation'])+'_mod_'+str(ao_obj.param['postProcessing'])+'_psfCentering_'+str(ao_obj.param['psfCentering'])+ao_obj.param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.param['modulation'])+'_mod_'+str(ao_obj.param['postProcessing'])+'_psfCentering_'+str(ao_obj.param['psfCentering'])
if ao_obj.wfs.tag == 'shackHartmann':
if ao_obj.wfs.is_geometric:
nature = 'geometric'
else:
nature = 'diffractive'
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.wfs.nValidSubaperture)+'_subap_'+nature+'_'+ao_obj.param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(ao_obj.param['resolution'])+'_res_'+str(ao_obj.wfs.nValidSubaperture)+'_subap_'+nature
try:
print('Loading Interaction matrix '+nameIntMat+'...')
imat = read_fits(nameFolderIntMat+nameIntMat+'.fits')
calib = calibrationVault(imat@M2C)
print('Done!')
except:
print('ERROR! Computing the zonal interaction matrix')
M2C_zon = np.eye(ao_obj.dm.nValidAct)
stroke =1e-9 # 1 nm amplitude
calib = interactionMatrix(ao_obj.ngs,ao_obj.atm,ao_obj.tel,ao_obj.dm,ao_obj.wfs,M2C_zon,stroke,phaseOffset = 0,nMeasurements = nMeasurements)
# save output in fits file
hdr=pfits.Header()
hdr['TITLE'] = 'INTERACTION MATRIX'
empty_primary = pfits.PrimaryHDU(header=hdr)
# primary_hdu = pfits.ImageHDU(calib.D.astype(np.float32))
primary_hdu = pfits.ImageHDU(calib.D)
hdu = pfits.HDUList([empty_primary, primary_hdu])
hdu.writeto(nameFolderIntMat + nameIntMat + '.fits', overwrite=True)
# project the zonal interaction matrix onto the modal basis, mirroring ao_calibration below
calib = calibrationVault(calib.D@M2C)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*ao_obj.atm.r0)+'_cm_'+ao_obj.param['opticalBand']+'_band_fitting_'+str(ao_obj.param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+ao_obj.param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(M2C.shape[1])
print('No Modal Gains found. All gains set to 1')
ao_calib_object.gOpt = np.diag(1/data_gains)
ao_calib_object.M2C = M2C
ao_calib_object.calib = calib
return ao_calib_object
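# Illustrative sketch (not part of the original module): typical use, assuming
# an ao_obj that exposes ngs/atm/tel/dm/wfs attributes and the param dictionary
# referenced above.
# ao_calib = ao_calibration_from_ao_obj(ao_obj, nMeasurements=50)
# M2C = ao_calib.M2C       # mode-to-command matrix actually used
# calib = ao_calib.calib   # calibrationVault built from the interaction matrix @ M2C
# gOpt = ao_calib.gOpt     # diagonal matrix of inverse modal gains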
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# same function using an ao object as an input
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def ao_calibration(ngs, tel, atm, dm, wfs, param, nameFolderIntMat = None, nameIntMat = None, nameFolderBasis = None, nameBasis = None, nMeasurements=50, index_modes = None, get_basis = True):
# check if the name of the basis is specified otherwise take the nominal name
if nameBasis is None:
if dm.isM4:
initName = 'M2C_M4_'
else:
initName = 'M2C_'
try:
nameBasis = initName+str(param['resolution'])+'_res'+param['extra']
except:
nameBasis = initName+str(param['resolution'])+'_res'
ao_calib_object = emptyClass()
# check if a name for the origin folder is specified
if nameFolderBasis is None:
nameFolderBasis = param['pathInput']
createFolder(nameFolderBasis)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# get the modal basis :
try:
print('Loading the KL Modal Basis from: ' + nameFolderBasis+nameBasis )
M2C = read_fits(nameFolderBasis+ nameBasis+'.fits')
if index_modes is None:
M2C = M2C[:,:param['nModes']]
else:
M2C = M2C[:,index_modes]
if get_basis or param['getProjector']:
dm.coefs = M2C
tel*dm
basis = np.reshape(tel.OPD,[tel.resolution**2,M2C.shape[1]])
ao_calib_object.basis = basis
if param['getProjector']:
print('Computing the pseudo-inverse of the modal basis...')
cross_product_basis = np.matmul(basis.T,basis)
non_diagonal_elements = np.sum(np.abs(cross_product_basis))-np.trace(cross_product_basis)
criteria = 1-np.abs(np.trace(cross_product_basis)-non_diagonal_elements)/np.trace(cross_product_basis)
if criteria <= 1e-3:
print('Diagonality criteria: ' + str(criteria) + ' -- using the fast computation')
projector = np.diag(1/np.diag(cross_product_basis))@basis.T
else:
print('Diagonality criteria: ' + str(criteria) + ' -- using the slow computation')
projector = np.linalg.pinv(basis)
ao_calib_object.projector = projector
except:
print('ERROR: No file found! Taking a zonal basis instead..' )
M2C = np.eye(dm.nValidAct)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nameFolderIntMat is None:
nameFolderIntMat = param['pathInput']+param['name']+'/'
createFolder(nameFolderIntMat)
#% get the interaction matrix :
if nameIntMat is None:
if wfs.tag == 'pyramid':
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(param['modulation'])+'_mod_'+str(param['postProcessing'])+'_psfCentering_'+str(param['psfCentering'])+param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(param['modulation'])+'_mod_'+str(param['postProcessing'])+'_psfCentering_'+str(param['psfCentering'])
if wfs.tag == 'shackHartmann':
if wfs.is_geometric:
nature = 'geometric'
else:
nature = 'diffractive'
try:
# case where the system name has an extra attribute
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(wfs.nValidSubaperture)+'_subap_'+nature+'_'+param['extra']
except:
nameIntMat = 'zonal_interaction_matrix_'+str(param['resolution'])+'_res_'+str(wfs.nValidSubaperture)+'_subap_'+nature
try:
print('Loading Interaction matrix '+nameIntMat+'...')
imat = read_fits(nameFolderIntMat+nameIntMat+'.fits')
calib = calibrationVault(imat@M2C)
print('Done!')
except:
M2C_zon = np.eye(dm.nValidAct)
stroke = 1e-9  # 1 nm amplitude
calib = interactionMatrix(ngs, atm, tel, dm, wfs, M2C_zon, stroke, phaseOffset = 0, nMeasurements = nMeasurements)
# save output in fits file
hdr=pfits.Header()
hdr['TITLE'] = 'INTERACTION MATRIX'
empty_primary = pfits.PrimaryHDU(header=hdr)
# primary_hdu = pfits.ImageHDU(calib.D.astype(np.float32))
primary_hdu = pfits.ImageHDU(calib.D)
hdu = pfits.HDUList([empty_primary, primary_hdu])
hdu.writeto(nameFolderIntMat+nameIntMat+'.fits',overwrite=True)
calib = calibrationVault(calib.D@M2C)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*atm.r0)+'_cm_'+param['opticalBand']+'_band_fitting_'+str(param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(M2C.shape[1])
print('No Modal Gains found. All gains set to 1')
ao_calib_object.gOpt = np.diag(1/data_gains)
ao_calib_object.M2C = M2C
ao_calib_object.calib = calib
return ao_calib_object
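# Illustrative usage sketch (not part of the original file): how the products of
# ao_calibration() are typically combined into a wavefront reconstructor. The
# attribute `calib.M` (the pseudo-inverse stored by calibrationVault) is an
# assumption about that class, not something defined in this file.
def _example_ao_calibration_usage(ngs, tel, atm, dm, wfs, param):
    ao_calib = ao_calibration(ngs, tel, atm, dm, wfs, param, nMeasurements=100)
    modes_to_commands = ao_calib.M2C                        # modal basis actually used
    slopes_to_modes = ao_calib.gOpt @ ao_calib.calib.M      # modal gains applied to the pseudo-inverse
    reconstructor = modes_to_commands @ slopes_to_modes     # WFS signals -> DM commands
    return reconstructor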
def get_modal_gains_from_ao_obj(ao_obj, nameFolderIntMat = None):
if nameFolderIntMat is None:
nameFolderIntMat = ao_obj.param['pathInput']+ao_obj.param['name']+'/'
createFolder(nameFolderIntMat)
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*ao_obj.atm.r0)+'_cm_'+ao_obj.param['opticalBand']+'_band_fitting_'+str(ao_obj.param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+ao_obj.param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
print('Looking for Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(ao_obj.param['nModes'])
print('No Modal Gains found. All gains set to 1')
gOpt = np.diag(1/data_gains)
return gOpt
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# same function taking the individual AO components (ngs, tel, atm, dm, wfs, param) as inputs
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def get_modal_gains(ngs, tel, atm, dm, wfs, param, nameFolderIntMat = None):
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if nameFolderIntMat is None:
nameFolderIntMat = param['pathInput']+param['name']+'/'
createFolder(nameFolderIntMat)
#% get the modal gains matrix :
nameExtra = '_r0_'+str(100*atm.r0)+'_cm_'+param['opticalBand']+'_band_fitting_'+str(param['nModes'])+'_KL'
try:
nameModalGains = 'modal_gains'+param['extra']+nameExtra
except:
nameModalGains = 'modal_gains'+nameExtra
try:
data_gains = read_fits(nameFolderIntMat+ nameModalGains+'.fits')
print('Using Modal Gains loaded from '+str(nameFolderIntMat+ nameModalGains+'.fits'))
except:
data_gains = np.ones(param['nModes'])
print('No Modal Gains found. All gains set to 1')
gOpt = np.diag(1/data_gains)
return gOpt |
import unittest
import json
from config.loader import neo_config
from api import NeoAPI
from config.database import db
from models.User import User as UserModel
from utils.testutils import authenticate_user
from gevent import monkey
monkey.patch_all()
class AccountCreate(unittest.TestCase):
def setUp(self):
neo_config.load_config()
neo_config.set_project_variables()
neo = NeoAPI(neo_config)
self.api = neo.activate_testing()
db.session.query(UserModel).delete()
db.session.commit()
def test_valid_request(self):
json_data = {
"email": "[email protected]",
"last_name": "Last Name",
"password": "VerySecurePassword",
"first_name": "First Name",
"birthday": "1995-12-25"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 201
assert response_json['success'] is True
def test_empty_json(self):
todo = {}
response = self.api.post('/account/create', data=json.dumps(todo), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_missing_email(self):
json_data = {
"last_name": "Last Name",
"password": "VerySecurePassword",
"first_name": "First Name",
"birthday": "1995-12-25"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_missing_last_name(self):
json_data = {
"email": "[email protected]",
"password": "VerySecurePassword",
"first_name": "First Name",
"birthday": "1995-12-25"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_missing_password(self):
json_data = {
"email": "[email protected]",
"last_name": "Last Name",
"first_name": "First Name",
"birthday": "1995-12-25"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_missing_first_name(self):
json_data = {
"email": "[email protected]",
"last_name": "Last Name",
"password": "VerySecurePassword",
"birthday": "1995-12-25"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_missing_birthday(self):
json_data = {
"email": "[email protected]",
"last_name": "Last Name",
"password": "VerySecurePassword",
"first_name": "First Name"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_unreadable_string_birthday(self):
json_data = {
"email": "[email protected]",
"last_name": "Last Name",
"password": "VerySecurePassword",
"first_name": "First Name",
"birthday": "mqksdmlqskdmlqskdqmlsdkqmsd"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 500
assert response_json['success'] is False
def test_used_mail(self):
new_user = UserModel(
email="[email protected]",
password="password",
first_name="first_name",
last_name="last_name",
birthday="1999-02-02"
)
db.session.add(new_user)
db.session.commit()
json_data = {
"email": "[email protected]",
"last_name": "Last Name",
"password": "VerySecurePassword",
"first_name": "First Name",
"birthday": "1995-12-25"
}
response = self.api.post('/account/create', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 409
assert response_json['success'] is False
class AccountLogin(unittest.TestCase):
def setUp(self):
neo_config.load_config()
neo_config.set_project_variables()
neo = NeoAPI(neo_config)
self.api = neo.activate_testing()
db.session.query(UserModel).delete()
db.session.commit()
new_user = UserModel(
email="[email protected]",
password="password",
first_name="first_name",
last_name="last_name",
birthday="1999-02-02"
)
db.session.add(new_user)
db.session.commit()
def test_valid_request(self):
json_data = {
"email": "[email protected]",
"password": "password"
}
response = self.api.post('/account/login', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 200
assert response_json['success'] is True
def test_missing_email(self):
json_data = {
"password": "password"
}
response = self.api.post('/account/login', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_missing_password(self):
json_data = {
"email": "[email protected]"
}
response = self.api.post('/account/login', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 400
assert response_json['success'] is False
def test_wrong_password(self):
json_data = {
"email": "[email protected]",
"password": "passwordWrong"
}
response = self.api.post('/account/login', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 401
assert response_json['success'] is False
def test_double_login(self):
json_data = {
"email": "[email protected]",
"password": "password"
}
response = self.api.post('/account/login', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 200
assert response_json['success'] is True
response = self.api.post('/account/login', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 200
assert response_json['success'] is True
class AccountApiToken(unittest.TestCase):
def setUp(self):
neo_config.load_config()
neo_config.set_project_variables()
neo = NeoAPI(neo_config)
self.api = neo.activate_testing()
self.user1 = db.session.query(UserModel).filter(UserModel.email == "[email protected]").first()
if self.user1 is None:
self.user1 = UserModel(email="[email protected]", password="test", first_name="firstname",
last_name="lastname", birthday="1995-12-12")
db.session.add(self.user1)
db.session.commit()
self.token1 = authenticate_user(self.api, self.user1, "test")
def test_valid_token(self):
json_data = {
"token": self.token1,
}
response = self.api.post('/token/verify', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 200
assert response_json['success'] is True
def test_invalid_token(self):
json_data = {
"token": "tetetetet",
}
response = self.api.post('/token/verify', data=json.dumps(json_data), content_type='application/json')
response_json = json.loads(response.data)
assert response.status_code == 401
assert response_json['success'] is False
|
from cave.morphers.base import Morph
class DeviantArt(Morph):
NAME = 'DeviantArt'
URL = 'deviantart.com'
IMAGE_PATTERN = '.thorpedo-thumb-link img'
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import clip_ops
# Imports gradient definitions.
from tensorflow.python.ops import data_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import kv_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops import fused_embedding_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
def _clip(params, ids, max_norm):
"""Helper function for _embedding_lookup_and_transform.
This function optionally clips embeddings to an l2-norm of max_norm.
Args:
params: A `Tensor` of embeddings retrieved by `gather`.
ids: The `ids` argument that was passed to `gather`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value.
Returns:
A `Tensor` with the same type as `params`.
"""
def _rank(x):
"""Helper function to retrieve the rank of a tensor.
Args:
x: Something convertible to `Tensor`.
Returns:
Either a pair `(rank, True)` where `rank` is an integer or a pair
`(rank, False)` where `rank` is an integer `Tensor`. In either case,
`rank` is the rank of `x`.
"""
rank = ops.convert_to_tensor(x).get_shape().ndims
if rank:
return rank, True
else:
return array_ops.rank(x), False
if max_norm is None:
return params
ids_rank, ids_static = _rank(ids)
params_rank, params_static = _rank(params)
return clip_ops.clip_by_norm(
params,
max_norm,
axes=(list(range(ids_rank, params_rank)) if ids_static and params_static
else math_ops.range(ids_rank, params_rank)))
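# Illustrative sketch (not part of the original module): a plain-numpy view of the
# clipping behaviour described above -- an embedding row is rescaled only when its
# l2-norm exceeds `max_norm`, keeping its direction.
def _example_max_norm_clip():
  import numpy as np
  emb = np.array([3.0, 4.0])                  # l2-norm = 5
  max_norm = 1.0
  norm = np.linalg.norm(emb)
  return emb * (max_norm / norm) if norm > max_norm else emb   # -> [0.6, 0.8]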
def _gather_fae(ids, blocknums, embs, params):
  """Concatenates the per-block embeddings of a dynamic embedding variable.

  `embs[i]` holds the i-th block embeddings for the ids whose blocknum is at
  least `i + 1`; rows with fewer blocks are filled with zeros via scatter_nd,
  so every returned row has the same (maximum) width.
  """
  concat_embs = []
  indices = math_ops.range(0, array_ops.squeeze(array_ops.shape(ids)), 1)
  for i in range(len(embs)):
    # Positions of the ids that own at least i + 1 blocks.
    indice_cnt = array_ops.expand_dims(array_ops.boolean_mask(indices, math_ops.greater_equal(blocknums, i+1)), 1)
    #scatter_shape=tensor_shape.TensorShape([ids.get_shape()[0], params._ev_list[i].shape()[0]])
    concat_emb = array_ops.scatter_nd(indices=indice_cnt, updates=embs[i], shape=array_ops.shape(embs[0]))
    concat_embs.append(concat_emb)
  return array_ops.concat(concat_embs, 1)
def _embedding_lookup_and_transform(params,
ids,
partition_strategy="mod",
name=None,
max_norm=None,
transform_fn=None,
ev_init_value=None,
blocknums=None,
counts=None):
"""Helper function for embedding_lookup and _compute_sampled_logits.
This function is a generalization of embedding_lookup that optionally
applies a caller-specified transformation to each embedding. This is
done through the `transform_fn` argument. If provided, the function is
applied to each partitioned tensor of retrieved embeddings, colocated
with the embeddings. This function will be called with a single `Tensor`
argument of the same type as the `params` tensor and should return a
`Tensor`. The shape of the argument will be the same as `params` except
for the size of the first dimension. The first dimension of the result's
shape must be the same size as the argument's.
Args:
params: See embedding_lookup.
ids: See embedding_lookup.
partition_strategy: See embedding_lookup.
name: See embedding_lookup.
max_norm: See embedding_lookup.
transform_fn: An optional function to apply to each retrieved embedding. If
max_norm is provided, transform_fn is applied to the norm-limited
embeddings.
Returns:
See embedding_lookup for details.
Raises:
ValueError: If `params` is empty.
"""
from tensorflow.python.ops.hash_table import hash_table
from tensorflow.python.ops.hash_table import embedding
if isinstance(params, hash_table.HashTable) or isinstance(params, hash_table.DistributedHashTable):
return embedding.embedding_lookup(params, ids, name=name)[0]
if isinstance(params, list) and len(params) == 1:
if isinstance(params[0], hash_table.HashTable) or isinstance(params[0], hash_table.DistributedHashTable):
return embedding.embedding_lookup(params[0], ids, name=name)[0]
if params is None:
raise ValueError("params must be specified")
if isinstance(params, (list, tuple)) and not params:
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if isinstance(params[0], kv_variable_ops.MultiHashVariable):
if params[0].mhvconfig.strategy == "Q-R":
ids_tensor = ops.convert_to_tensor(ids, dtypes.int64)
ids_Q = math_ops.floordiv(ids_tensor, params[0].mhvconfig.size[0][0])
ids_R = math_ops.floormod(ids_tensor, params[0].mhvconfig.size[1][0])
result_Q = _embedding_lookup_and_transform(params[0]._val_list[0], ids_Q)
result_R = _embedding_lookup_and_transform(params[0]._val_list[1], ids_R)
if params[0].mhvconfig.operation == "add":
return math_ops.add(result_Q, result_R)
if params[0].mhvconfig.operation == "mul":
return math_ops.multiply(result_Q, result_R)
if params[0].mhvconfig.operation == "concat":
return array_ops.concat([result_Q, result_R], 1)
with ops.name_scope(name, "embedding_lookup", params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(
isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
ids = ops.convert_to_tensor(ids, name="ids")
if np == 1 and (not transform_fn or ids.get_shape().ndims == 1):
if isinstance(params[0], kv_variable_ops.DynamicEmbeddingVariable):
if blocknums is None:
raise ValueError("blocknums must be valid for dynamic embedding variable")
ids_nozero = array_ops.boolean_mask(ids, math_ops.greater_equal(blocknums, 1))
blocknums_nozero = array_ops.boolean_mask(blocknums, math_ops.greater_equal(blocknums, 1))
with ops.colocate_with(params[0].mainev()):
embs = params[0].sparse_read(ids_nozero, blocknums_nozero)
embs_nozero = _gather_fae(ids_nozero, blocknums_nozero, embs, params[0])
indices = math_ops.range(0, array_ops.squeeze(array_ops.shape(ids)), 1)
indice_cnt = array_ops.expand_dims(array_ops.boolean_mask(indices, math_ops.greater_equal(blocknums, 1)), 1)
return array_ops.scatter_nd(indices=indice_cnt, updates=embs_nozero, shape=[array_ops.shape(ids)[0], array_ops.shape(embs_nozero)[1]])
else:
with ops.colocate_with(params[0]):
result = _clip(array_ops.gather(params[0], ids, name=name,
ev_init_value=ev_init_value,
counts=counts),
ids, max_norm)
if transform_fn:
result = transform_fn(result)
return result
# Make sure the final result does not have colocation constraints on the
# params. Similar to the case np > 1 where parallel_dynamic_stitch is
# outside the scope of all with ops.colocate_with(params[p]).
return array_ops.identity(result)
else:
# Flatten the ids. There are two cases where we need to do this.
# - There is more than one params tensor.
# - There is a transform_fn and ids is not statically known to be 1-D.
# We must flatten in this case because transform_fn expects a flat
# tensor of embeddings.
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if blocknums is None and isinstance(params[0], kv_variable_ops.DynamicEmbeddingVariable):
raise ValueError("blocknums must be valid for dynamic embedding variable")
if isinstance(params[0], kv_variable_ops.EmbeddingVariable):
new_ids = flat_ids
p_assignments = flat_ids % 1000 % np
elif partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = tensor_shape.Dimension(
tensor_shape.dimension_value(params[0].get_shape()[0]))
for p in xrange(1, np):
dim_0_size += tensor_shape.Dimension(
tensor_shape.dimension_value(params[p].get_shape()[0]))
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
param_p_dim = tensor_shape.dimension_value(params[p].get_shape()[0])
if param_p_dim is not None:
dim_0_sizes.append(param_p_dim)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1),
(flat_ids - extras) //
ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
new_ids = array_ops.where(p_assignments < extras,
flat_ids % (ids_per_partition + 1),
(flat_ids - extras) % ids_per_partition)
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
gather_blocknums = None
gather_ev_init_value = None
if isinstance(params[0], kv_variable_ops.DynamicEmbeddingVariable):
gather_blocknums = data_flow_ops.dynamic_partition(blocknums, p_assignments, np)
if ev_init_value is not None:
gather_ev_init_value = data_flow_ops.dynamic_partition(ev_init_value, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in range(np):
pids = gather_ids[p]
if isinstance(params[p], kv_variable_ops.DynamicEmbeddingVariable):
pblocknums = gather_blocknums[p]
embs = []
pids_nozero = array_ops.boolean_mask(pids, math_ops.greater_equal(pblocknums , 1))
pblocknums_nozero = array_ops.boolean_mask(pblocknums, math_ops.greater_equal(pblocknums, 1))
for i in range(params[p].blocknum()):
with ops.colocate_with(params[p]._ev_list[i]):
evids = array_ops.boolean_mask(pids_nozero, math_ops.greater_equal(pblocknums_nozero, i + 1))
gathered_emb = params[p]._ev_list[i].sparse_read(evids, name=None)
embs.append(gathered_emb)
result_nozero = _gather_fae(pids_nozero, pblocknums_nozero, embs, params[p])
# re-insert rows whose blocknum is zero (they remain zero-filled)
indices = math_ops.range(0, array_ops.squeeze(array_ops.shape(pids)), 1)
indice_cnt = array_ops.expand_dims(array_ops.boolean_mask(indices, math_ops.greater_equal(pblocknums, 1)), 1)
result = array_ops.scatter_nd(indices=indice_cnt, updates=result_nozero, shape=[array_ops.shape(pids)[0], array_ops.shape(result_nozero)[1]])
partitioned_result.append(result)
else:
with ops.colocate_with(params[p]):
if ev_init_value is None:
new_ev_init_value = None
else:
new_ev_init_value = gather_ev_init_value[p]
result = array_ops.gather(params[p], pids, ev_init_value=new_ev_init_value, counts=counts)
if transform_fn:
# If transform_fn is provided, the clip_by_norm precedes
# the transform and hence must be co-located. See below
# for the counterpart if transform_fn is not provided.
result = transform_fn(_clip(result, pids, max_norm))
partitioned_result.append(result)
# Stitch these back together
ret = data_flow_ops.parallel_dynamic_stitch(
pindices, partitioned_result, name=name)
# Determine the static element shape.
if isinstance(params[0], kv_variable_ops.EmbeddingVariable) or \
isinstance(params[0], kv_variable_ops.DynamicEmbeddingVariable):
if transform_fn is None:
element_shape_s = params[0].get_shape()[:]
for p in params[1:]:
element_shape_s = element_shape_s.merge_with(p.get_shape()[:])
else:
element_shape_s = ret.get_shape()[:]
else:
if transform_fn is None:
element_shape_s = params[0].get_shape()[1:]
for p in params[1:]:
element_shape_s = element_shape_s.merge_with(p.get_shape()[1:])
else:
element_shape_s = ret.get_shape()[1:]
# Compute the dynamic element shape.
if element_shape_s.is_fully_defined():
element_shape_d = element_shape_s
elif transform_fn is None:
# It's important that we compute params[0].shape on the right device
# to avoid data motion.
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
element_shape_d = params_shape[1:]
else:
element_shape_d = array_ops.shape(ret)[1:]
# Reshape to reverse the flattening of ids.
ret = array_ops.reshape(
ret, array_ops.concat([array_ops.shape(ids), element_shape_d], 0))
# Normally the reshape is sufficient, but setting shape explicitly
# teaches shape inference that params[1:].get_shape() matters
# (in the case that transform_fn is None).
ret.set_shape(ids.get_shape().concatenate(element_shape_s))
if not transform_fn:
# If transform_fn was provided, the clip_by_norm was done above.
ret = _clip(ret, ids, max_norm)
return ret
@tf_export(v1=["nn.embedding_lookup"])
def embedding_lookup(
params,
ids,
partition_strategy="mod",
name=None,
validate_indices=True, # pylint: disable=unused-argument
max_norm=None,
ev_init_value=None,
blocknums=None,
counts=None):
"""Looks up `ids` in a list of embedding tensors.
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
`tf.gather`, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()`
with a
partitioner.
If `len(params) > 1`, each element `id` of `ids` is partitioned between
the elements of `params` according to the `partition_strategy`.
In all strategies, if the id space does not evenly divide the number of
partitions, each of the first `(max_id + 1) % len(params)` partitions will
be assigned one more id.
If `partition_strategy` is `"mod"`, we assign each id to partition
`p = id % len(params)`. For instance,
13 ids are split across 5 partitions as:
`[[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]`
If `partition_strategy` is `"div"`, we assign ids to partitions in a
contiguous manner. In this case, 13 ids are split across 5 partitions as:
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`
The results of the lookup are concatenated into a dense
tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
Args:
params: A single tensor representing the complete embedding tensor, or a
list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
up in `params`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
name: A name for the operation (optional).
validate_indices: DEPRECATED. If this operation is assigned to CPU, values
in `indices` are always validated to be within range. If assigned to GPU,
out-of-bound indices result in safe but unspecified behavior, which may
include raising an error.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value.
Returns:
A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
"""
return _embedding_lookup_and_transform(
params=params,
ids=ids,
partition_strategy=partition_strategy,
name=name,
max_norm=max_norm,
transform_fn=None,
ev_init_value=ev_init_value,
blocknums=blocknums,
counts=counts)
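# Illustrative sketch (not part of the original module): reproduces, in plain
# Python, the id-to-partition split described in the docstring above for 13 ids
# spread over 5 partitions under the "mod" and "div" strategies.
def _example_partition_strategies():
  num_ids, num_parts = 13, 5
  mod_split = [[i for i in range(num_ids) if i % num_parts == p]
               for p in range(num_parts)]
  # -> [[0, 5, 10], [1, 6, 11], [2, 7, 12], [3, 8], [4, 9]]
  ids_per_partition, extras = num_ids // num_parts, num_ids % num_parts
  div_split, start = [], 0
  for p in range(num_parts):
    size = ids_per_partition + (1 if p < extras else 0)
    div_split.append(list(range(start, start + size)))
    start += size
  # -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]
  return mod_split, div_split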
@tf_export("nn.embedding_lookup", v1=[])
def embedding_lookup_v2(params, ids, max_norm=None, name=None):
"""Looks up `ids` in a list of embedding tensors.
This function is used to perform parallel lookups on the list of
tensors in `params`. It is a generalization of
`tf.gather`, where `params` is
interpreted as a partitioning of a large embedding tensor. `params` may be
a `PartitionedVariable` as returned by using `tf.compat.v1.get_variable()`
with a
partitioner.
If `len(params) > 1`, each element `id` of `ids` is partitioned between
the elements of `params` according to the `partition_strategy`.
In all strategies, if the id space does not evenly divide the number of
partitions, each of the first `(max_id + 1) % len(params)` partitions will
be assigned one more id.
The `partition_strategy` is always `"div"` currently. This means that we
assign ids to partitions in a contiguous manner. For instance, 13 ids are
split across 5 partitions as:
`[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10], [11, 12]]`
The results of the lookup are concatenated into a dense
tensor. The returned tensor has shape `shape(ids) + shape(params)[1:]`.
Args:
params: A single tensor representing the complete embedding tensor, or a
list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the 'div' `partition_strategy`.
ids: A `Tensor` with type `int32` or `int64` containing the ids to be looked
up in `params`.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as the tensors in `params`.
Raises:
ValueError: If `params` is empty.
"""
return embedding_lookup(params, ids, "div", name, max_norm=max_norm)
def _tile_combine_embedding(embeddings, segment_ids, column_ids, sp_shape):
  """Combines embeddings with the 'tile' strategy.

  Every (row, column) pair of the sparse input keeps its own embedding slot,
  so the output has shape [batch_size, num_columns * embedding_dim].
  """
  column_ids = math_ops.cast(column_ids, dtypes.int32)
  sp_shape = math_ops.cast(sp_shape, dtypes.int32)
  # Give each (row, column) pair a distinct segment id so embeddings are not
  # summed across columns.
  segment_ids = segment_ids * sp_shape[1] + column_ids
  total_size = sp_shape[0] * sp_shape[1]
  embeddings = math_ops.unsorted_segment_sum(embeddings, segment_ids, total_size)
  embeddings = array_ops.reshape(
      embeddings, [sp_shape[0], sp_shape[1] * array_ops.shape(embeddings)[-1]])
  return embeddings
@tf_export(v1=["nn.embedding_lookup_sparse"])
def embedding_lookup_sparse(params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None,
blocknums=None):
"""Computes embeddings for the given ids and weights.
This op assumes that there is at least one id for each row in the dense tensor
represented by sp_ids (i.e. there are no rows with empty features), and that
all the indices of sp_ids are in canonical row-major order.
It also assumes that all id values lie in the range [0, p0), where p0
is the sum of the size of params along dimension 0.
Args:
params: A single tensor representing the complete embedding tensor, or a
list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
and M is arbitrary.
sp_weights: either a `SparseTensor` of float / double weights, or `None` to
indicate all weights should be taken to be 1. If specified, `sp_weights`
must have exactly the same shape and indices as `sp_ids`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn",
"tile" and "sum" are supported. "sum" computes the weighted sum of the
embedding results for each row. "mean" is the weighted sum divided by the
total weight. "sqrtn" is the weighted sum divided by the square root of the
sum of the squares of the weights.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value, before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
In other words, if
`shape(combined params) = [p0, p1, ..., pm]`
and
`shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]`
then
`shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`.
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
```python
[0, 0]: id 1, weight 2.0
[0, 1]: id 3, weight 0.5
[1, 0]: id 0, weight 1.0
[2, 3]: id 1, weight 3.0
```
with `combiner`="mean", then the output will be a 3x20 matrix where
```python
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
output[1, :] = (params[0, :] * 1.0) / 1.0
output[2, :] = (params[1, :] * 3.0) / 3.0
```
Raises:
TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
neither `None` nor `SparseTensor`.
ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum", "tile"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum", "tile"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if isinstance(params[0], kv_variable_ops.EmbeddingVariable) and params[0]._filter_freq == 0:
ids, idx = array_ops.unique(ids)
counts = None
else:
ids, idx, counts = array_ops.unique_with_counts(ids)
uniqued_blocknums = None
if blocknums is not None:
if idx is None:
raise ValueError("blocknums now require unqiue index to be generagted")
else:
uniqued_blocknums = math_ops.unsorted_segment_max(blocknums, idx, array_ops.squeeze(array_ops.shape(ids), 0))
embeddings = embedding_lookup(
params, ids, partition_strategy=partition_strategy, max_norm=max_norm,
blocknums=uniqued_blocknums, counts = counts)
if embeddings.dtype in (dtypes.float16, dtypes.bfloat16):
embeddings = math_ops.cast(embeddings, dtypes.float32)
if not ignore_weights:
weights = sp_weights.values
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
embeddings = array_ops.gather(embeddings, idx)
# Reshape weights to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones],
0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set the weight shape, since after reshaping to bcast_weights_shape,
# the shape becomes None.
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
embeddings *= weights
if combiner == "sum":
embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
elif combiner == "tile":
column_ids = sp_ids.indices[:, 1]
embeddings = _tile_combine_embedding(embeddings,
segment_ids,
column_ids,
sp_ids.dense_shape)
else:
assert False, "Unrecognized combiner"
else:
assert idx is not None
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(
embeddings, idx, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(
embeddings, idx, segment_ids, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(
embeddings, idx, segment_ids, name=name)
elif combiner == "tile":
embeddings = array_ops.gather(embeddings, idx)
column_ids = sp_ids.indices[:, 1]
embeddings = _tile_combine_embedding(embeddings,
segment_ids,
column_ids,
sp_ids.dense_shape)
else:
assert False, "Unrecognized combiner"
return embeddings
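# Illustrative sketch (not part of the original module): a plain-numpy version of
# the combiner="mean" example from the docstring above (weighted sum per row,
# divided by the total weight of that row).
def _example_sparse_mean_combiner():
  import numpy as np
  params = np.random.rand(10, 20)
  entries = [(0, 1, 2.0), (0, 3, 0.5), (1, 0, 1.0), (2, 1, 3.0)]  # (row, id, weight)
  output, weight_sum = np.zeros((3, 20)), np.zeros(3)
  for row, idx, w in entries:
    output[row] += params[idx] * w     # the "sum" combiner would stop here
    weight_sum[row] += w
  return output / weight_sum[:, None]  # "mean": divide by the total weight per row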
@tf_export(v1=["nn.adaptive_embedding_lookup_sparse"])
def adaptive_embedding_lookup_sparse(hash_params,
ev_params,
sp_ids,
hash_ev_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None,
bucket_size=None,
adaptive_mask_tensor=None,
blocknums=None):
"""Computes embeddings for the given ids and weights.
This op assumes that there is at least one id for each row in the dense tensor
represented by sp_ids (i.e. there are no rows with empty features), and that
all the indices of sp_ids are in canonical row-major order.
It also assumes that all id values lie in the range [0, p0), where p0
is the sum of the size of params along dimension 0.
Args:
hash_params: A single tensor representing the complete embedding tensor,
backed by a normal Variable,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
ev_params: A single tensor representing the complete embedding tensor,
backed by an EmbeddingVariable,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported.
"sum" computes the weighted sum of the embedding results for each row.
"mean" is the weighted sum divided by the total weight.
"sqrtn" is the weighted sum divided by the square root of the sum of the
squares of the weights.
max_norm: If provided, each embedding is normalized to have l2 norm equal
to max_norm before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
In other words, if
shape(combined params) = [p0, p1, ..., pm]
and
shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]
then
shape(output) = [d0, d1, ..., dn-1, p1, ..., pm].
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
[0, 0]: id 1, weight 2.0
[0, 1]: id 3, weight 0.5
[1, 0]: id 0, weight 1.0
[2, 3]: id 1, weight 3.0
with `combiner`="mean", then the output will be a 3x20 matrix where
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
output[1, :] = params[0, :] * 1.0
output[2, :] = params[1, :] * 3.0
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
#logging.warn("The default value of combiner will change from \"mean\" "
# "to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
# convert hash and ev to list
if isinstance(hash_params, variables.PartitionedVariable):
hash_params = list(hash_params) # Iterate to get the underlying Variables.
if not isinstance(hash_params, list):
hash_params = [hash_params]
if isinstance(ev_params, variables.PartitionedVariable):
ev_params = list(ev_params) # Iterate to get the underlying Variables.
if not isinstance(ev_params, list):
ev_params = [ev_params]
if len(hash_params) < 1 or len(ev_params) < 1:
raise ValueError("Missing hash_params: %s, ev_params:." % hash_params, ev_params)
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
if not ignore_weights:
raise ValueError("AdaptiveEmbedding lookup not support not ignore weights")
if adaptive_mask_tensor is None:
raise ValueError("AdaptiveEmbedding lookup not support not ignore weights")
with ops.name_scope(name, "embedding_lookup_sparse",
ev_params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
parts = data_flow_ops.dynamic_partition(original_indices, adaptive_mask_tensor, 2)
spids_part = data_flow_ops.dynamic_partition(flat_ids, adaptive_mask_tensor, 2)
hash_ids, hash_idx = array_ops.unique(spids_part[0])
#ev_ids, ev_idx = array_ops.unique(spids_part[1])
hash_embeddings = embedding_lookup(
hash_params, hash_ids, partition_strategy=partition_strategy, max_norm=max_norm,
blocknums=None)
ev_init_value = embedding_lookup(
hash_params, hash_ev_ids, partition_strategy=partition_strategy, max_norm=max_norm,
blocknums=None)
ev_embeddings = embedding_lookup(
ev_params, spids_part[1], partition_strategy=partition_strategy, max_norm=max_norm,
ev_init_value=ev_init_value,
blocknums=None)
if (hash_idx is not None):
hash_segment_ids = math_ops.range(0, array_ops.squeeze(array_ops.shape(hash_idx)), 1)
#ev_segment_ids = math_ops.range(0, array_ops.squeeze(array_ops.shape(spids_part[1])), 1)
if combiner == "sum":
hash_embeddings = math_ops.sparse_segment_sum(
hash_embeddings, hash_idx, hash_segment_ids, name=name+"_hash")
#ev_embeddings = math_ops.sparse_segment_sum(
# ev_embeddings, ev_idx, ev_segment_ids, name=name+"_ev")
elif combiner == "mean":
hash_embeddings = math_ops.sparse_segment_mean(
hash_embeddings, hash_idx, hash_segment_ids, name=name+"_hash")
#ev_embeddings = math_ops.sparse_segment_mean(
# ev_embeddings, ev_idx, ev_segment_ids, name=name+"_ev")
elif combiner == "sqrtn":
hash_embeddings = math_ops.sparse_segment_sqrt_n(
hash_embeddings, hash_idx, hash_segment_ids, name=name+"_hash")
#ev_embeddings = math_ops.sparse_segment_sqrt_n(
# ev_embeddings, ev_idx, ev_segment_ids, name=name+"_ev")
else:
assert False, "Unrecognized combiner"
else:
if combiner == "sum":
embeddings = math_ops.segment_sum(
embeddings, segment_ids, name=name)
elif combiner == "mean":
embeddings = math_ops.segment_mean(
embeddings, segment_ids, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.segment_sqrt_n(
embeddings, segment_ids, name=name)
else:
assert False, "Unrecognized combiner"
embeddings_result = data_flow_ops.dynamic_stitch(parts, [hash_embeddings, ev_embeddings])
return embeddings_result
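# Illustrative sketch (not part of the original module): the dynamic_partition /
# dynamic_stitch round trip used above, in plain Python. Mask value 0 routes an
# id to the hash-table embedding, 1 routes it to the embedding variable, and the
# stitch restores the original order.
def _example_adaptive_routing():
  ids = [7, 3, 9, 5]
  mask = [0, 1, 0, 1]                                   # stands in for adaptive_mask_tensor
  positions = list(range(len(ids)))
  parts = [[p for p, m in zip(positions, mask) if m == k] for k in (0, 1)]
  hash_ids = [ids[p] for p in parts[0]]                 # would be looked up in hash_params
  ev_ids = [ids[p] for p in parts[1]]                   # would be looked up in ev_params
  stitched = [None] * len(ids)
  for p, v in zip(parts[0], hash_ids):
    stitched[p] = ('hash', v)
  for p, v in zip(parts[1], ev_ids):
    stitched[p] = ('ev', v)
  return stitched   # -> [('hash', 7), ('ev', 3), ('hash', 9), ('ev', 5)]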
@tf_export("nn.embedding_lookup_sparse", v1=[])
def embedding_lookup_sparse_v2(params,
sp_ids,
sp_weights,
combiner=None,
max_norm=None,
name=None):
"""Computes embeddings for the given ids and weights.
This op assumes that there is at least one id for each row in the dense tensor
represented by sp_ids (i.e. there are no rows with empty features), and that
all the indices of sp_ids are in canonical row-major order.
It also assumes that all id values lie in the range [0, p0), where p0
is the sum of the size of params along dimension 0.
Args:
params: A single tensor representing the complete embedding tensor, or a
list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for ``"div"`` `partition_strategy`.
sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
and M is arbitrary.
sp_weights: either a `SparseTensor` of float / double weights, or `None` to
indicate all weights should be taken to be 1. If specified, `sp_weights`
must have exactly the same shape and indices as `sp_ids`.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn",
"tile" and "sum" are supported. "sum" computes the weighted sum of the
embedding results for each row. "mean" is the weighted sum divided by the
total weight. "sqrtn" is the weighted sum divided by the square root of the
sum of the squares of the weights.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value, before combining.
name: Optional name for the op.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
In other words, if
`shape(combined params) = [p0, p1, ..., pm]`
and
`shape(sp_ids) = shape(sp_weights) = [d0, d1, ..., dn]`
then
`shape(output) = [d0, d1, ..., dn-1, p1, ..., pm]`.
For instance, if params is a 10x20 matrix, and sp_ids / sp_weights are
```python
[0, 0]: id 1, weight 2.0
[0, 1]: id 3, weight 0.5
[1, 0]: id 0, weight 1.0
[2, 3]: id 1, weight 3.0
```
with `combiner`="mean", then the output will be a 3x20 matrix where
```python
output[0, :] = (params[1, :] * 2.0 + params[3, :] * 0.5) / (2.0 + 0.5)
output[1, :] = (params[0, :] * 1.0) / 1.0
output[2, :] = (params[1, :] * 3.0) / 3.0
```
Raises:
TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
neither `None` nor `SparseTensor`.
ValueError: If `combiner` is not one of {"mean", "sqrtn", "sum", "tile"}.
"""
return embedding_lookup_sparse(params, sp_ids, sp_weights, "div", name,
combiner, max_norm)
@tf_export("nn.embedding_lookup_sparse_multi_dim")
def embedding_lookup_sparse_multi_dim(params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiners=None,
max_norm=None,
weight_axis=-1):
"""Computes embeddings for the given ids and weights like
embedding_lookup_sparse.
Args:
params: A single tensor representing the complete embedding tensor, or a
list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for ``"div"`` `partition_strategy`.
sp_ids: N x M `SparseTensor` of int64 ids where N is typically batch size
and M is arbitrary.
sp_weights: either a `SparseTensor` of float / double weights, or `None` to
indicate all weights should be taken to be 1. If specified, `sp_weights`
must have exactly the same shape and indices as `sp_ids`.
combiners: A list of string specifying the reduction op. Currently "mean",
"sqrtn", "tile" and "sum" are supported. "sum" computes the weighted sum of
the embedding results for each row. "mean" is the weighted sum divided by the
total weight. "sqrtn" is the weighted sum divided by the square root of the
sum of the squares of the weights.
max_norm: If not `None`, each embedding is clipped if its l2-norm is larger
than this value, before combining.
name: Optional name for the op.
weight_axis: Specify axis to use weight.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by `sp_ids`, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
Raises:
TypeError: If `sp_ids` is not a `SparseTensor`, or if `sp_weights` is
neither `None` nor `SparseTensor`.
ValueError: If any combiner is not one of {"mean", "sqrtn", "sum", "max", "min", "tile"}.
"""
if combiners is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiners = ["mean"]
if not isinstance(combiners, (list, tuple)):
combiners = (combiners,)
for comb in combiners:
if comb not in ("mean", "sqrtn", "sum", "max", "min", "tile"):
raise ValueError("combiner must be one of 'mean', 'sqrtn', 'sum', 'max' or 'min'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
if not len(sp_ids.shape) == len(combiners) + 1:
raise ValueError("SparseTensor to embedding lookup rank should be combiner nums -1,"
"sparse tensor rank: {}, num combiners: {}".format(len(sp_ids.shape), len(combiners)))
if weight_axis is None:
weight_axis = -1
if weight_axis < 0:
weight_axis = len(sp_ids.shape) + weight_axis
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
ids = sp_ids.values
ids, idx = array_ops.unique(ids)
embeddings = embedding_lookup(
params, ids, partition_strategy=partition_strategy, max_norm=max_norm)
if embeddings.dtype in (dtypes.float16, dtypes.bfloat16):
embeddings = math_ops.to_float(embeddings)
weights = None if sp_weights is None else sp_weights.values
embeddings, _ = _combine_embedding(embeddings,
sp_ids.indices,
sp_ids.dense_shape,
combiners,
unique_idx=idx,
weights=weights,
weight_axis=weight_axis,
name=name)
return embeddings
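# Illustrative sketch (not part of the original module): a dense numpy view of the
# per-axis reduction performed by embedding_lookup_sparse_multi_dim. With a rank-3
# id tensor and combiners=["mean", "sum"], the innermost id axis is reduced first
# ("sum"), then the middle axis ("mean"); sparse bookkeeping is omitted here.
def _example_multi_dim_combine():
  import numpy as np
  embeddings = np.random.rand(2, 4, 3, 8)   # [batch, field, ids_per_field, dim]
  summed = embeddings.sum(axis=2)           # combiners[1] == "sum"
  return summed.mean(axis=1)                # combiners[0] == "mean" -> [batch, dim]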
def _internal_combine(embeddings, segment_ids, combiner,
weights=None, max_size=None, seg_offset=None,
use_weight=False,
name=None):
if combiner == "sum":
embeddings = math_ops.segment_sum(embeddings, segment_ids, name=name)
elif combiner == "mean":
if use_weight:
embeddings = math_ops.segment_sum(embeddings, segment_ids)
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum, name=name)
else:
embeddings = math_ops.segment_mean(embeddings, segment_ids, name=name)
elif combiner == "sqrtn":
embeddings = math_ops.segment_sum(embeddings, segment_ids)
if use_weight:
weights = math_ops.pow(weights, 2)
else:
weights = array_ops.ones_like(segment_ids)
weight_sum = math_ops.segment_sum(weights, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt, name=name)
elif combiner == "max":
embeddings = math_ops.segment_max(embeddings, segment_ids, name=name)
elif combiner == "min":
embeddings = math_ops.segment_min(embeddings, segment_ids, name=name)
elif combiner == "tile":
assert seg_offset is not None and max_size is not None, \
"seg_offset or max_size not set when combine with tile"
seg_offset = math_ops.cast(seg_offset, dtypes.int32)
max_size = math_ops.cast(max_size, dtypes.int32)
dynamic_ids = seg_offset + segment_ids * max_size
full_size = (math_ops.reduce_max(segment_ids) + 1) * max_size
embeddings = math_ops.unsorted_segment_sum(embeddings, dynamic_ids, full_size)
embeddings = array_ops.reshape(
embeddings, [-1, array_ops.shape(embeddings)[-1] * max_size])
else:
assert False, "Unrecognized combiner"
if weights is not None:
weights = math_ops.segment_mean(weights, segment_ids)
return embeddings, weights
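# Illustrative note on the "tile" combiner above (comment only; the numbers are made up):
# with max_size = 3 and embedding dim = 2, a segment holding embeddings e0 and e1 at
# seg_offset 0 and 1 is scattered to rows {segment * 3 + 0, segment * 3 + 1} by
# unsorted_segment_sum and then reshaped into the single row
# [e0_0, e0_1, e1_0, e1_1, 0, 0]; that is, the per-segment embeddings are laid out side
# by side (zero-padded up to max_size) instead of being reduced to one vector.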
def _get_valid_embeddings(embeddings, weights, segment_ids, cur_indices, next_segment_ids):
valid_index, valid_idx = array_ops.unique(next_segment_ids)
embeddings = array_ops.gather(embeddings, valid_index)
weights = array_ops.gather(weights, valid_index)
segment_ids = math_ops.segment_max(segment_ids, valid_idx)
cur_indices = math_ops.segment_max(cur_indices, valid_idx)
return embeddings, weights, segment_ids, cur_indices
def _combine_embedding(embeddings,
indices,
dense_shape,
combiners,
segment_ids=None,
unique_idx=None,
weights=None,
weight_axis=1,
name=None):
  assert weight_axis > 0, "weight_axis should be greater than 0 in " \
      "_combine_embedding, current weight_axis: {}".format(weight_axis)
if segment_ids is None:
segment_ids = indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
embeddings = array_ops.gather(embeddings, unique_idx)
if weights is None:
use_weight = False
weights = array_ops.ones([array_ops.shape(embeddings)[0]], dtype=dtypes.float32)
else:
use_weight = True
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones],
0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set the weight shape, since after reshaping to bcast_weights_shape,
# the shape becomes None.
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
embeddings *= weights
segment_ids_list = [segment_ids]
for i in range(len(combiners) - 1):
tmp_indices = math_ops.cast(indices[:, i + 1], dtypes.int32)
segment_ids = segment_ids * math_ops.cast(dense_shape[i + 1], dtypes.int32) + tmp_indices
segment_ids_list.append(segment_ids)
for i in range(len(combiners)):
axis = len(combiners) - i
if not i == 0:
cur_indices = indices[:, axis]
embeddings, weights, segment_ids, cur_indice_offset = \
_get_valid_embeddings(embeddings,
weights,
segment_ids_list[axis - 1],
cur_indices,
segment_ids_list[axis])
else:
cur_indice_offset = indices[:, axis]
segment_ids = segment_ids_list[axis - 1]
embeddings, weights = _internal_combine(embeddings,
segment_ids,
combiners[axis - 1],
weights=weights,
max_size=dense_shape[axis],
seg_offset=cur_indice_offset,
use_weight=use_weight and (weight_axis == axis),
name=name + str(axis))
return embeddings, weights
@tf_export("nn.safe_embedding_lookup_sparse", v1=[])
def safe_embedding_lookup_sparse_v2(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner="mean",
default_id=None,
max_norm=None,
name=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using
`tf.compat.v1.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Note: when doing embedding lookup on `embedding_weights`, "div" partition
  strategy will be used. Support for other partition strategies will be added
later.
Args:
embedding_weights: A list of `P` float `Tensor`s or values representing
partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
created by partitioning along dimension 0. The total unpartitioned shape
should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size
and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights are
      assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn", "tile" and "sum" are supported, with
"mean" the default.
default_id: The id to use for an entry with no features.
max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
combining.
name: A name for this operation (optional).
Returns:
Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
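  Example (illustrative sketch; the values below are made up):

    weights = tf.Variable(tf.ones([4, 3]))    # vocab of 4 ids, embedding dim 3
    ids = tf.sparse.SparseTensor(
        indices=[[0, 0], [0, 1], [1, 0], [2, 0]],
        values=[0, -1, 3, 1],                 # the -1 id is pruned
        dense_shape=[3, 2])
    result = tf.nn.safe_embedding_lookup_sparse(weights, ids, combiner="mean")
    # result has shape [3, 3]; a row with no valid ids would get the 0-vector,
    # or the embedding of `default_id` if one is supplied.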
"""
return safe_embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights=sparse_weights,
combiner=combiner,
default_id=default_id,
name=name,
partition_strategy="div",
max_norm=max_norm)
@tf_export(v1=["nn.safe_embedding_lookup_sparse"])
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner="mean",
default_id=None,
name=None,
partition_strategy="div",
max_norm=None,
prune=True):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using
`tf.compat.v1.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float `Tensor`s or values representing
partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
created by partitioning along dimension 0. The total unpartitioned shape
should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size
and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights are
      assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy. Currently
`"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not `None`, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if embedding_weights is None:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
tmp_embedding_weights = []
for w in embedding_weights:
from tensorflow.python.ops.hash_table import hash_table
if not isinstance(w, (hash_table.DistributedHashTable, hash_table.HashTable)):
if not (isinstance(w, resource_variable_ops.ResourceVariable) and dtype in (None, w.dtype)):
w = ops.convert_to_tensor(w, dtype=dtype)
tmp_embedding_weights.append(w)
embedding_weights = tmp_embedding_weights
with ops.name_scope(name, "embedding_lookup", embedding_weights +
[sparse_ids, sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = tensor_shape.dimension_value(
sparse_ids.dense_shape.get_shape()[0])
original_rank = (
array_ops.size(original_shape)
if original_rank_dim is None else original_rank_dim)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)
])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices,
sparse_weights.values,
sparse_ids.dense_shape)
if prune:
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
if combiner != "sum":
sparse_ids, sparse_weights = _prune_invalid_weights(
sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
sparse_ids, default_id or 0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(
is_row_empty, array_ops.zeros_like(result), result, name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(
tensor_shape.unknown_shape(
(tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate(
result.get_shape()[1:]))
return final_result
def fused_safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner="mean",
default_id=None,
name=None,
partition_strategy="div",
max_norm=None,
prune=True):
"""Functionally the same as safe_embedding_lookup_sparse but using fused embedding
lookup ops in this method.
"""
  logging.info("Using fused embedding lookup for scope {}".format(name))
if embedding_weights is None:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
tmp_embedding_weights = []
for w in embedding_weights:
from tensorflow.python.ops.hash_table import hash_table
if not isinstance(w, (hash_table.DistributedHashTable, hash_table.HashTable)):
if not (isinstance(w, resource_variable_ops.ResourceVariable) and dtype in (None, w.dtype)):
w = ops.convert_to_tensor(w, dtype=dtype)
tmp_embedding_weights.append(w)
embedding_weights = tmp_embedding_weights
with ops.name_scope(name, "fused_embedding_lookup", embedding_weights +
[sparse_ids, sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = tensor_shape.dimension_value(
sparse_ids.dense_shape.get_shape()[0])
original_rank = (
array_ops.size(original_shape)
if original_rank_dim is None else original_rank_dim)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)
])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices,
sparse_weights.values,
sparse_ids.dense_shape)
result = fused_embedding_ops.fused_embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights=sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm,
default_id=default_id,
prune_invalid_ids=True
)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(
tensor_shape.unknown_shape(
(tensor_shape.Dimension(original_rank_dim) - 1).value).concatenate(
result.get_shape()[1:]))
return final_result
@tf_export("nn.safe_embedding_lookup_multi_dim")
def safe_embedding_lookup_multi_dim(embedding_weights,
sparse_ids,
sparse_weights=None,
combiners=['mean'],
name=None,
partition_strategy='div',
max_norm=None,
weight_axis=1,
prune=True):
if combiners is None:
combiners = ["mean"]
if not isinstance(combiners, (list, tuple)):
combiners = (combiners,)
for comb in combiners:
if comb not in ("mean", "sqrtn", "sum", "max", "min", "tile"):
raise ValueError("combiner must be one of 'mean', 'sqrtn', 'sum', 'max', 'min' or 'tile'")
combiners = list(combiners)
real_combiner_size = len(combiners)
tile_combiner_nums = sum([1 if comb == 'tile' else 0 for comb in combiners])
if sparse_ids.shape is not None and sparse_ids.shape.rank > len(combiners) + 1:
tile_num = (sparse_ids.shape.rank - 1 - len(combiners))
combiners = ['tile'] * tile_num + combiners
if embedding_weights is None:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError('Missing embedding_weights %s.' % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
with ops.name_scope(name, 'embedding_lookup',
embedding_weights + [sparse_ids,
sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
if prune:
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
if 'sum' not in combiners:
sparse_ids, sparse_weights = _prune_invalid_weights(
sparse_ids, sparse_weights)
result = embedding_lookup_sparse_multi_dim(
embedding_weights,
sparse_ids,
sparse_weights,
combiners=combiners,
partition_strategy=partition_strategy,
name=None,
max_norm=max_norm,
weight_axis=weight_axis)
batch_size = math_ops.cast(original_shape[0], dtype=dtypes.int32)
pad_list = [[0, batch_size - array_ops.shape(result)[0]], [0, 0]]
result = array_ops.pad(result, pad_list)
output_shape = array_ops.concat([
array_ops.slice(math_ops.cast(original_shape, dtypes.int32),
[0],
[array_ops.size(original_shape) - real_combiner_size]),
[-1]
], 0)
result = array_ops.reshape(result, output_shape)
return result
@tf_export("nn.safe_adaptive_embedding_lookup_sparse")
def safe_adaptive_embedding_lookup_sparse(hash_embedding_weights,
ev_embedding_weights,
sparse_ids,
hash_ev_ids,
sparse_weights=None,
combiner=None,
default_id=None,
name=None,
partition_strategy="div",
max_norm=None,
bucket_size=None,
adaptive_mask_tensor=None,
blocknums=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
hash_embedding_weights: A list of `P` float tensors or values representing
partitioned embedding tensors by hash-bucket size variable.
Alternatively, a `PartitionedVariable`,
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
    ev_embedding_weights: A list of `P` float tensors or values representing
partitioned embedding tensors by EmbeddingVariable.
Alternatively, a `PartitionedVariable`,
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
      are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not None, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if combiner is None:
#logging.warn("The default value of combiner will change from \"mean\" "
# "to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
dtype = sparse_weights.dtype if sparse_weights is not None else None
if isinstance(hash_embedding_weights, variables.PartitionedVariable):
hash_embedding_weights = list(hash_embedding_weights)
if not isinstance(hash_embedding_weights, list):
hash_embedding_weights = [hash_embedding_weights]
hash_embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in hash_embedding_weights
]
check_ops.assert_same_float_dtype(hash_embedding_weights +
[sparse_weights])
'''
if not isinstance(embedding_weights[0],
(kv_variable_ops.EmbeddingVariable, kv_variable_ops.DynamicEmbeddingVariable)):
'''
with ops.name_scope(name, "embedding_lookup",
hash_embedding_weights + [sparse_ids,
sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None
else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(
sparse_ids.indices,
sparse_weights.values, sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(
sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
default_id or
0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = adaptive_embedding_lookup_sparse(
hash_embedding_weights,
ev_embedding_weights,
sparse_ids,
hash_ev_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm,
bucket_size=bucket_size,
adaptive_mask_tensor=adaptive_mask_tensor,
blocknums=blocknums)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(is_row_empty,
array_ops.zeros_like(result),
result,
name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from rest_framework.response import Response
from .models import Chatroom,Message
from rest_framework import status, permissions
from .serializers import ChatroomSerializer
from django.core import serializers
import json
from rest_framework.decorators import api_view
from django.http import HttpResponse
#@login_required
@api_view(['GET'])
def allRooms(request):
#id_user = request.user.id
all_objects = list(Chatroom.objects.all()) + list(Message.objects.all())
chatrooms = serializers.serialize('json', all_objects)
return Response(json.loads(chatrooms), status=status.HTTP_200_OK)
def getInfoRoom(request,room_name):
print("Je passe " + room_name)
return HttpResponse() |
import paddle
from paddle import nn
import math
import numpy as np
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 2, 'input_size': (3, 600, 600), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'pixel_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = {
'tnt_s_patch16_224': _cfg(
url='',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
'tnt_b_patch16_224': _cfg(
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
),
}
class Identity(nn.Layer):
r"""A placeholder identity operator that is argument-insensitive.
Args:
args: any argument (unused)
kwargs: any keyword argument (unused)
Examples::
        >>> m = Identity(54, unused_argument1=0.1, unused_argument2=False)
        >>> input = paddle.randn([128, 20])
        >>> output = m(input)
        >>> print(output.shape)
        [128, 20]
"""
def __init__(self, *args, **kwargs):
super(Identity, self).__init__()
def forward(self, inputs):
return inputs
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + paddle.rand(shape=shape, dtype=x.dtype)
    random_tensor = paddle.floor(random_tensor)  # binarize
    output = x / keep_prob * random_tensor
return output
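# Note (illustrative): with drop_prob = 0.2 each sample's path is kept with probability
# 0.8 and zeroed otherwise, and kept samples are scaled by 1 / 0.8, so the expected value
# of drop_path(x) equals x during training; with training=False, x is returned unchanged.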
class DropPath(nn.Layer):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Attention(nn.Layer):
    '''
    Attention module
    '''
def __init__(self, dim, hidden_dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super(Attention, self).__init__()
self.hidden_dim = hidden_dim
self.num_heads = num_heads
head_dim = hidden_dim // num_heads
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.qk = nn.Linear(dim, hidden_dim * 2, bias_attr=qkv_bias)
self.v = nn.Linear(dim, dim, bias_attr=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop) # no inplace
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, inputs):
x = inputs
B, N, C = x.shape
qk = self.qk(x).reshape((B, N, 2, self.num_heads, self.head_dim)).transpose((2, 0, 3, 1, 4))
q, k = qk[0], qk[1]
v = self.v(x).reshape((B, N, self.num_heads, -1)).transpose((0, 2, 1, 3))
attn = paddle.matmul(q, k.transpose((0, 1, 3, 2))) * self.scale
attn = paddle.nn.functional.softmax(attn, axis=-1)
attn = self.attn_drop(attn)
x = paddle.matmul(attn, v).transpose((0, 2, 1, 3)).reshape((B, N, -1))
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Layer):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super(Mlp, self).__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Block(nn.Layer):
""" TNT Block
"""
def __init__(self, dim, in_dim, num_pixel, num_heads=12, in_num_head=4, mlp_ratio=4.,
qkv_bias=False, drop=0., attn_drop=0., drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super(Block, self).__init__()
# Inner transformer
self.norm_in = norm_layer(in_dim)
self.attn_in = Attention(
in_dim, in_dim, num_heads=in_num_head, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.norm_mlp_in = norm_layer(in_dim)
self.mlp_in = Mlp(in_features=in_dim, hidden_features=int(in_dim * 4),
out_features=in_dim, act_layer=act_layer, drop=drop)
self.norm1_proj = norm_layer(in_dim)
self.proj = nn.Linear(in_dim * num_pixel, dim, bias_attr=True)
# Outer transformer
self.norm_out = norm_layer(dim)
self.attn_out = Attention(
dim, dim, num_heads=num_heads, qkv_bias=qkv_bias,
attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else Identity()
self.norm_mlp = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
out_features=dim, act_layer=act_layer, drop=drop)
def forward(self, pixel_embed, patch_embed):
# inner
pixel_embed = pixel_embed + self.drop_path(self.attn_in(self.norm_in(pixel_embed)))
pixel_embed = pixel_embed + self.drop_path(self.mlp_in(self.norm_mlp_in(pixel_embed)))
# outer
B, N, C = patch_embed.shape
patch_embed[:, 1:] = patch_embed[:, 1:] + self.proj(self.norm1_proj(pixel_embed).reshape((B, N - 1, -1)))
patch_embed = patch_embed + self.drop_path(self.attn_out(self.norm_out(patch_embed)))
patch_embed = patch_embed + self.drop_path(self.mlp(self.norm_mlp(patch_embed)))
return pixel_embed, patch_embed
class PixelEmbed(nn.Layer):
""" Image to Pixel Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, in_dim=48, stride=4):
super(PixelEmbed, self).__init__()
num_patches = (img_size // patch_size) ** 2
self.img_size = img_size
self.num_patches = num_patches
self.in_dim = in_dim
new_patch_size = math.ceil(patch_size / stride)
self.new_patch_size = new_patch_size
self.proj = nn.Conv2D(in_chans, self.in_dim, kernel_size=7, padding=3, stride=stride)
def forward(self, x, pixel_pos):
B, C, H, W = x.shape
assert H == self.img_size and W == self.img_size, \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size}*{self.img_size})."
x = self.proj(x)
x = nn.functional.unfold(x=x, kernel_sizes=self.new_patch_size, strides=self.new_patch_size)
x = x.transpose((0, 2, 1)).reshape((B * self.num_patches, self.in_dim, self.new_patch_size, self.new_patch_size))
x = x + pixel_pos
x = x.reshape((B * self.num_patches, self.in_dim, -1)).transpose((0, 2, 1))
return x
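# Shape trace (illustrative, using the defaults img_size=224, patch_size=16, stride=4, in_dim=48):
# proj: (B, 3, 224, 224) -> (B, 48, 56, 56); unfold with 4x4 windows and stride 4
# -> (B, 48*16, 196); per-patch pixels: (B*196, 48, 4, 4); returned as (B*196, 16, 48).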
class TNT(nn.Layer):
""" Transformer in Transformer - https://arxiv.org/abs/2103.00112
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, in_dim=48, depth=12,
num_heads=12, in_num_head=4, mlp_ratio=4., qkv_bias=False, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, first_stride=4):
super(TNT, self).__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.pixel_embed = PixelEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, in_dim=in_dim, stride=first_stride)
num_patches = self.pixel_embed.num_patches
self.num_patches = num_patches
new_patch_size = self.pixel_embed.new_patch_size
num_pixel = new_patch_size ** 2
self.norm1_proj = norm_layer(num_pixel * in_dim)
self.proj = nn.Linear(num_pixel * in_dim, embed_dim)
self.norm2_proj = norm_layer(embed_dim)
        # Create learnable parameters (class token and position embeddings)
self.cls_token = paddle.create_parameter((1, 1, embed_dim), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, 1, embed_dim))))
self.patch_pos = paddle.create_parameter((1, num_patches + 1, embed_dim), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, num_patches + 1, embed_dim))))
self.pixel_pos = paddle.create_parameter((1, in_dim, new_patch_size, new_patch_size), 'float32', attr=nn.initializer.Assign(paddle.zeros((1, in_dim, new_patch_size, new_patch_size))))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x for x in paddle.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
blocks = []
for i in range(depth):
blocks.append(Block(
dim=embed_dim, in_dim=in_dim, num_pixel=num_pixel, num_heads=num_heads, in_num_head=in_num_head,
mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[i], norm_layer=norm_layer))
self.blocks = nn.LayerList(blocks)
self.norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
with paddle.no_grad():
self.cls_token = paddle.create_parameter(self.cls_token.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.cls_token, std=.02)))
self.patch_pos = paddle.create_parameter(self.patch_pos.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.patch_pos, std=.02)))
self.pixel_pos = paddle.create_parameter(self.pixel_pos.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(self.pixel_pos, std=.02)))
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
with paddle.no_grad():
m.weight = paddle.create_parameter(m.weight.shape, 'float32', attr=nn.initializer.Assign(paddle.normal(m.weight, std=.02)))
if isinstance(m, nn.Linear) and m.bias is not None:
m.bias = paddle.create_parameter(m.bias.shape, 'float32', attr=nn.initializer.Constant(value=0.))
elif isinstance(m, nn.LayerNorm):
m.bias = paddle.create_parameter(m.bias.shape, 'float32', attr=nn.initializer.Constant(value=0.))
m.weight = paddle.create_parameter(m.weight.shape, 'float32', attr=nn.initializer.Constant(value=1.))
def no_weight_decay(self):
return {'patch_pos', 'pixel_pos', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
pixel_embed = self.pixel_embed(x, self.pixel_pos)
patch_embed = self.norm2_proj(self.proj(self.norm1_proj(pixel_embed.reshape((B, self.num_patches, -1)))))
patch_embed = paddle.concat((self.cls_token.expand([B, self.cls_token.shape[1],self.cls_token.shape[2]]), patch_embed), axis=1) # expand
patch_embed = patch_embed + self.patch_pos
patch_embed = self.pos_drop(patch_embed)
for blk in self.blocks:
pixel_embed, patch_embed = blk(pixel_embed, patch_embed)
patch_embed = self.norm(patch_embed)
return patch_embed[:, 0]
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def tnt_s_patch16_224(pretrained=False, **kwargs):
model = TNT(patch_size=16, embed_dim=384, in_dim=24, depth=12, num_heads=6, in_num_head=4,
qkv_bias=False, **kwargs)
model.default_cfg = default_cfgs['tnt_s_patch16_224']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model
def tnt_b_patch16_224(pretrained=False, **kwargs):
model = TNT(patch_size=16, embed_dim=640, in_dim=40, depth=12, num_heads=10, in_num_head=4,
qkv_bias=False, **kwargs)
model.default_cfg = default_cfgs['tnt_b_patch16_224']
if pretrained:
load_pretrained(
model, num_classes=model.num_classes, in_chans=kwargs.get('in_chans', 3))
return model |
import py
from py.__.test.conftesthandle import Conftest
class TestConftestValueAccessGlobal:
def setup_class(cls):
# if we have "global" conftests (i.e. no __init__.py
# and thus no further import scope) it should still all work
# because "global" conftests are imported with a
# mangled module name (related to their actual path)
cls.basedir = d = py.test.ensuretemp(cls.__name__)
d.ensure("adir/conftest.py").write("a=1 ; Directory = 3")
d.ensure("adir/b/conftest.py").write("b=2 ; a = 1.5")
def test_basic_init(self):
conftest = Conftest()
conftest.setinitial([self.basedir.join("adir")])
assert conftest.rget("a") == 1
def test_onimport(self):
l = []
conftest = Conftest(onimport=l.append)
conftest.setinitial([self.basedir.join("adir")])
assert len(l) == 2 # default + the one
assert conftest.rget("a") == 1
assert conftest.rget("b", self.basedir.join("adir", "b")) == 2
assert len(l) == 3
    def test_immediate_initialization_and_incremental_are_the_same(self):
conftest = Conftest()
snap0 = len(conftest._path2confmods)
conftest.getconftestmodules(self.basedir)
snap1 = len(conftest._path2confmods)
#assert len(conftest._path2confmods) == snap1 + 1
conftest.getconftestmodules(self.basedir.join('adir'))
assert len(conftest._path2confmods) == snap1 + 1
conftest.getconftestmodules(self.basedir.join('b'))
assert len(conftest._path2confmods) == snap1 + 2
def test_default_Module_setting_is_visible_always(self):
for path in self.basedir.parts():
conftest = Conftest(path)
#assert conftest.lget("Module") == py.test.collect.Module
assert conftest.rget("Module") == py.test.collect.Module
def test_default_has_lower_prio(self):
conftest = Conftest(self.basedir.join("adir"))
assert conftest.rget('Directory') == 3
#assert conftest.lget('Directory') == py.test.collect.Directory
def test_value_access_not_existing(self):
conftest = Conftest(self.basedir)
py.test.raises(KeyError, "conftest.rget('a')")
#py.test.raises(KeyError, "conftest.lget('a')")
def test_value_access_by_path(self):
conftest = Conftest(self.basedir)
assert conftest.rget("a", self.basedir.join('adir')) == 1
#assert conftest.lget("a", self.basedir.join('adir')) == 1
assert conftest.rget("a", self.basedir.join('adir', 'b')) == 1.5
#assert conftest.lget("a", self.basedir.join('adir', 'b')) == 1
#assert conftest.lget("b", self.basedir.join('adir', 'b')) == 2
#assert py.test.raises(KeyError,
# 'conftest.lget("b", self.basedir.join("a"))'
#)
def test_value_access_with_init_one_conftest(self):
conftest = Conftest(self.basedir.join('adir'))
assert conftest.rget("a") == 1
#assert conftest.lget("a") == 1
def test_value_access_with_init_two_conftests(self):
conftest = Conftest(self.basedir.join("adir", "b"))
conftest.rget("a") == 1.5
#conftest.lget("a") == 1
#conftest.lget("b") == 1
def test_value_access_with_confmod(self):
topdir = self.basedir.join("adir", "b")
topdir.ensure("xx", dir=True)
conftest = Conftest(topdir)
mod, value = conftest.rget_with_confmod("a", topdir)
assert value == 1.5
path = py.path.local(mod.__file__)
assert path.dirpath() == self.basedir.join("adir", "b")
assert path.purebasename == "conftest"
class TestConftestValueAccessInPackage(TestConftestValueAccessGlobal):
def setup_class(cls):
TestConftestValueAccessGlobal.__dict__['setup_class'](cls)
d = cls.basedir
d.ensure("adir/__init__.py")
d.ensure("adir/b/__init__.py")
|
# Generated by Django 2.0.7 on 2018-08-04 05:19
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Allergen',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=250)),
('slug', models.SlugField(max_length=250)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(choices=[('FRU', 'Fruit'), ('VEG', 'Vegetables'), ('MEA', 'Meat'), ('SEA', 'Seafood'), ('DEL', 'Deli'), ('BAK', 'Bakery'), ('DAI', 'Dairy'), ('EGG', 'Eggs'), ('FRI', 'Fridge'), ('PAN', 'Pantry'), ('FRE', 'Freezer'), ('DRI', 'Drinks')], max_length=3)),
('slug', models.SlugField(max_length=200, unique=True)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=250)),
('slug', models.SlugField(max_length=250)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=250)),
('slug', models.SlugField(max_length=250)),
('description', models.TextField(blank=True)),
('allergens', models.ManyToManyField(to='allergyshop.Allergen')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='allergyshop.Category')),
('ingredients', models.ManyToManyField(to='allergyshop.Ingredient')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Store',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=250)),
('slug', models.SlugField(max_length=250)),
],
options={
'ordering': ('name',),
},
),
migrations.AddField(
model_name='product',
name='stores',
field=models.ManyToManyField(to='allergyshop.Store'),
),
]
|
from unittest.mock import patch, mock_open, MagicMock
import pytest
from cincoconfig.encryption import KeyFile, AesProvider, EncryptionError, XorProvider, SecureValue
class StubProvider:
def __init__(self):
self.encrypt = MagicMock(return_value=b'ciphertext')
self.decrypt = MagicMock(return_value=b'plaintext')
class TestKeyFile:
@patch.object(KeyFile, '_KeyFile__load_key')
def test_with(self, load_key):
kf = KeyFile('asdf.txt')
with kf as kf2:
kf._KeyFile__key = 'asdf'
assert kf is kf2
assert kf._KeyFile__refcount == 1
load_key.assert_called_once_with()
with kf as kf3:
assert kf._KeyFile__refcount == 2
assert kf3 is kf
assert kf._KeyFile__key == 'asdf'
assert kf._KeyFile__refcount == 1
assert kf._KeyFile__key == 'asdf'
assert kf._KeyFile__refcount == 0
assert kf._KeyFile__key is None
@patch('cincoconfig.encryption.open', new_callable=mock_open, read_data=b'x'*32)
@patch.object(KeyFile, '_validate_key')
@patch.object(KeyFile, 'generate_key')
def test_load_key_exists(self, genkey_mock, validate_mock, open_mock):
kf = KeyFile('asdf.txt')
kf._KeyFile__load_key()
assert kf._KeyFile__key == b'x' * 32
open_mock.assert_called_once_with('asdf.txt', 'rb')
validate_mock.assert_called_once_with()
genkey_mock.assert_not_called()
@patch('cincoconfig.encryption.open')
@patch.object(KeyFile, '_KeyFile__generate_key')
def test_load_generate(self, genkey_mock, open_mock):
open_mock.side_effect = IOError()
kf = KeyFile('asdf.txt')
kf._KeyFile__load_key()
open_mock.assert_called_once_with('asdf.txt', 'rb')
genkey_mock.assert_called_once_with()
@patch('cincoconfig.encryption.open', new_callable=mock_open)
@patch('os.urandom')
def test_generate_key(self, urandom_mock, open_mock):
urandom_mock.return_value = b'x' * 32
kf = KeyFile('asdf.txt')
kf.generate_key()
open_mock.assert_called_once_with('asdf.txt', 'wb')
handle = open_mock()
handle.write.assert_called_once_with(b'x' * 32)
urandom_mock.assert_called_once_with(32)
def test_validate_none(self):
kf = KeyFile('asdf')
with pytest.raises(EncryptionError):
kf._validate_key()
def test_validate_short(self):
kf = KeyFile('asdf')
kf._KeyFile__key = b'x' * 31
with pytest.raises(EncryptionError):
kf._validate_key()
def test_validate_success(self):
kf = KeyFile('asdf')
kf._KeyFile__key = b'x' * 32
kf._validate_key()
def test_get_provider_aes(self):
kf = KeyFile('asdf.txt')
kf._KeyFile__key = b'x' * 32
provider, method = kf._get_provider('aes')
assert isinstance(provider, AesProvider)
assert provider._AesProvider__key == b'x' * 32
assert method == 'aes'
def test_get_provider_xor(self):
kf = KeyFile('asdf.txt')
kf._KeyFile__key = b'x' * 32
provider, method = kf._get_provider('xor')
assert isinstance(provider, XorProvider)
assert provider._XorProvider__key == b'x' * 32
assert method == 'xor'
def test_get_provider_unknown(self):
kf = KeyFile('asdf.txt')
kf._KeyFile__key = b'x' * 32
with pytest.raises(TypeError):
kf._get_provider('balh')
def test_get_provider_no_key(self):
kf = KeyFile('asdf.txt')
with pytest.raises(TypeError):
kf._get_provider('xor')
def test_encrypt(self):
provider = StubProvider()
kf = KeyFile('asdf.txt')
kf._KeyFile__key = b'x' * 32
kf._get_provider = MagicMock(return_value=(provider, 'test'))
secret = kf.encrypt('hello', 'test')
provider.encrypt.assert_called_once_with(b'hello')
kf._get_provider.assert_called_once_with('test')
assert secret == SecureValue('test', b'ciphertext')
def test_decrypt(self):
provider = StubProvider()
kf = KeyFile('asdf.txt')
kf._KeyFile__key = b'x' * 32
kf._get_provider = MagicMock(return_value=(provider, 'test'))
text = kf.decrypt(SecureValue('test', b'hello'))
provider.decrypt.assert_called_once_with(b'hello')
kf._get_provider.assert_called_once_with('test')
assert text == b'plaintext'
def test_encrypt_nokey(self):
kf = KeyFile('asdf.txt')
with pytest.raises(TypeError):
kf.encrypt(b'hello')
def test_decrypt_nokey(self):
kf = KeyFile('asdf.txt')
with pytest.raises(TypeError):
kf.decrypt(b'asdf')
def test_encrypt_best_aes(self):
kf = KeyFile('asdf.txt')
kf._KeyFile__key = b'x' * 32
provider, method = kf._get_provider(method='best')
assert isinstance(provider, AesProvider)
assert method == 'aes'
@patch('cincoconfig.encryption.AES_AVAILABLE', False)
def test_encrypt_best_xor(self):
kf = KeyFile('asdf.txt')
kf._KeyFile__key = b'x' * 32
provider, method = kf._get_provider(method='best')
assert isinstance(provider, XorProvider)
assert method == 'xor'
class TestAesProvider:
def test_encrypt_decrypt(self):
provider = AesProvider(b'x' * 32)
secret = provider.encrypt(b'hello world')
plaintext = provider.decrypt(secret)
assert len(secret) == 32
assert plaintext == b'hello world'
assert secret != plaintext
@patch('cincoconfig.encryption.AES_AVAILABLE', False)
def test_aes_unavailable(self):
with pytest.raises(TypeError):
AesProvider(b'x' * 32)
def test_decrypt_bad_value(self):
provider = AesProvider(b'x' * 32)
with pytest.raises(EncryptionError):
provider.decrypt(b'x' * 31)
class TestXorProvider:
def test_encrypt_decrypt(self):
provider = XorProvider(b'x' * 32)
secret = provider.encrypt(b'hello world')
plaintext = provider.decrypt(secret)
assert len(secret) == len(b'hello world')
assert secret != plaintext
assert plaintext == b'hello world'
|
from django.apps import AppConfig
class UserProfileConfig(AppConfig):
name = 'user_profile'
|
# -*- python -*-
# -*- coding: utf-8 -*-
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# Author(s): Lijun Zhu
# the package
import altar
# my protocol
from .DataObs import DataObs as data
# declaration
class DataL2(altar.component, family="altar.data.datal2", implements=data):
"""
The observed data with L2 norm
"""
data_file = altar.properties.path(default="data.txt")
data_file.doc = "the name of the file with the observations"
observations = altar.properties.int(default=1)
observations.doc = "the number of observed data"
cd_file = altar.properties.path(default=None)
cd_file.doc = "the name of the file with the data covariance matrix"
cd_std = altar.properties.float(default=1.0)
cd_std.doc = "the constant covariance for data"
merge_cd_with_data = altar.properties.bool(default=False)
merge_cd_with_data.doc = "whether to merge cd with data"
norm = altar.norms.norm()
norm.default = altar.norms.l2()
norm.doc = "the norm to use when computing the data log likelihood"
@altar.export
def initialize(self, application):
"""
Initialize data obs from model
"""
        # grab the application's error channel
        self.error = application.error
# get the number of samples
self.samples = application.job.chains
# load the data and covariance
self.ifs = application.pfs["inputs"]
self.loadData()
# compute inverse of covariance, normalization
self.initializeCovariance(cd=self.cd)
# all done
return self
def evalLikelihood(self, prediction, likelihood, residual=True, batch=None):
"""
        Compute the data likelihood for the prediction (samples x observations)
"""
        # Depending on what is convenient, users can either copy dataobs into their model
        # and pass the residual directly as the prediction, or pass the forward-model
        # prediction and have dataobs subtracted here
batch = batch if batch is not None else likelihood.shape
# go through the residual of each sample
for idx in range(batch):
# extract it
dp = prediction.getRow(idx)
# subtract the dataobs if residual is not pre-calculated
if not residual:
dp -= self.dataobs
if self.merge_cd_with_data:
# cd already merged, no need to multiply it by cd
likelihood[idx] = self.normalization - 0.5 * self.norm.eval(v=dp)
else:
likelihood[idx] = self.normalization - 0.5 * self.norm.eval(v=dp, sigma_inv=self.cd_inv)
# all done
return self
def dataobsBatch(self):
"""
Get a batch of duplicated dataobs
"""
if self.dataobs_batch is None:
self.dataobs_batch = altar.matrix(shape=(self.samples, self.observations))
# for each sample
for sample in range(self.samples):
# make the corresponding column a copy of the data vector
self.dataobs_batch.setColumn(sample, self.dataobs)
return self.dataobs_batch
def loadData(self):
"""
load data and covariance
"""
# grab the input dataspace
ifs = self.ifs
# next, the observations
try:
# get the path to the file
df = ifs[self.data_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing observations: no '{self.data_file}' {ifs.path()}")
# and raise the exception again
raise
# if all goes well
else:
# allocate the vector
            self.dataobs = altar.vector(shape=self.observations)
# and load the file contents into memory
self.dataobs.load(df.uri)
if self.cd_file is not None:
# finally, the data covariance
try:
# get the path to the file
cf = ifs[self.cd_file]
# if the file doesn't exist
except ifs.NotFoundError:
# grab my error channel
channel = self.error
# complain
channel.log(f"missing data covariance matrix: no '{self.cd_file}'")
# and raise the exception again
raise
# if all goes well
else:
# allocate the matrix
self.cd = altar.matrix(shape=(self.observations, self.observations))
# and load the file contents into memory
self.cd.load(cf.uri)
else:
# use a constant covariance
self.cd = self.cd_std
return
def initializeCovariance(self, cd):
"""
For a given data covariance cd, compute L2 likelihood normalization, inverse of cd in Cholesky decomposed form,
and merge cd with data observation, d-> L*d with cd^{-1} = L L*
:param cd:
:return:
"""
# grab the number of observations
observations = self.observations
if isinstance(cd, altar.matrix):
# normalization
self.normalization = self.computeNormalization(observations=observations, cd=cd)
# inverse matrix
self.cd_inv = self.computeCovarianceInverse(cd=cd)
# merge cd to data
if self.merge_cd_with_data:
Cd_inv = self.cd_inv
self.dataobs = altar.blas.dtrmv( Cd_inv.upperTriangular, Cd_inv.opNoTrans, Cd_inv.nonUnitDiagonal,
Cd_inv, self.dataobs)
elif isinstance(cd, float):
# cd is standard deviation
from math import log, pi as π
            self.normalization = -0.5*log(2*π*cd)*observations
self.cd_inv = 1.0/self.cd
if self.merge_cd_with_data:
self.dataobs *= self.cd_inv
# all done
return self
def updateCovariance(self, cp):
"""
Update data covariance with cp, cd -> cd + cp
:param cp: a matrix with shape (obs, obs)
:return:
"""
# make a copy of cp
cchi = cp.clone()
# add cd (scalar or matrix)
cchi += self.cd
self.initializeCovariance(cd=cchi)
return self
def computeNormalization(self, observations, cd):
"""
Compute the normalization of the L2 norm
"""
# support
from math import log, pi as π
# make a copy of cd
cd = cd.clone()
# compute its LU decomposition
decomposition = altar.lapack.LU_decomposition(cd)
# use it to compute the log of its determinant
logdet = altar.lapack.LU_lndet(*decomposition)
# all done
        return - (log(2*π)*observations + logdet) / 2
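    # Note (illustrative): for N observations with covariance Cd this equals
    # log((2*pi)^(-N/2) * |Cd|^(-1/2)), the constant term of the Gaussian log-likelihood;
    # the data-dependent term -0.5 * r^T Cd^{-1} r is then added per sample in
    # evalLikelihood above.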
def computeCovarianceInverse(self, cd):
"""
Compute the inverse of the data covariance matrix
"""
# make a copy so we don't destroy the original
cd = cd.clone()
# perform the LU decomposition
lu = altar.lapack.LU_decomposition(cd)
# invert; this creates a new matrix
inv = altar.lapack.LU_invert(*lu)
# compute the Cholesky decomposition
inv = altar.lapack.cholesky_decomposition(inv)
# and return it
return inv
# local variables
normalization = 0
ifs = None
samples = None
dataobs = None
dataobs_batch = None
cd = None
cd_inv = None
error = None
# end of file
|
import uvicorn
from fastapi import FastAPI, status
from fastapi.middleware.cors import CORSMiddleware
from app.core.config import settings
def get_application():
_app = FastAPI(title=settings.PROJECT_NAME)
_app.add_middleware(
CORSMiddleware,
allow_origins=[str(origin) for origin in settings.BACKEND_CORS_ORIGINS],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
return _app
app = get_application()
def start():
"""
Configuration of uvicorn
For more settings see uvicorn.config
"""
uvicorn.run(app=app, host="127.0.0.1", port=7998, server_header=False)
if __name__ == "__main__":
start()
@app.get("/")
def read_root():
return {"Hello": "Customer"}
@app.get("/healthcheck", status_code=status.HTTP_200_OK)
def perform_healthcheck():
"""
Simple route for the GitHub Actions to healthcheck on.
More info is available at:
https://github.com/akhileshns/heroku-deploy#health-check
It basically sends a GET request to the route & hopes to get a "200"
response code. Failing to return a 200 response code just enables
    the GitHub Actions to roll back to the last version the project was
found in a "working condition". It acts as a last line of defense in
case something goes south.
Additionally, it also returns a JSON response in the form of:
{
    'healthcheck': 'Everything OK!'
}
"""
return {"healthcheck": "Everything OK!"}
|
from graphing.special_graphs.neural_trigraph.marginal_matching.marginal_matching1 import get_schedule, get_schedule_rand
from graphing.special_graphs.neural_trigraph.marginal_matching.scoring import score
from graphing.special_graphs.neural_trigraph.toy_graphs import ToyGraph1
import numpy as np
def evolve(n_iter=100, prev_flow=None):
probs_left, probs_center, probs_right = ToyGraph1.probs_left.copy(), ToyGraph1.probs_center.copy(), ToyGraph1.probs_right.copy()
min_score = np.inf
best_dict = {}
for ix in range(n_iter):
#res = get_schedule_rand(ToyGraph1.edges1, ToyGraph1.edges2)
res = get_schedule(probs_left, probs_right, ToyGraph1.edges1, ToyGraph1.edges2)
if ix == 0 and prev_flow is not None:
res = prev_flow
candidate_score = score(res, ToyGraph1.probs_left, ToyGraph1.probs_center, ToyGraph1.probs_right)
print("Score: " + str(candidate_score))
if candidate_score < min_score:
min_score = candidate_score
best_dict = res
probs_left_bst, probs_center_bst, probs_right_bst = probs_left.copy(), probs_center.copy(), probs_right.copy()
b_ix = ix
if (candidate_score - min_score)/min_score > 0.3 and (ix-b_ix)>10:
probs_left, probs_center, probs_right = probs_left_bst.copy(), probs_center_bst.copy(), probs_right_bst.copy()
if ix%3==0:
perturb(probs_left)
elif ix%3==1:
perturb(probs_center)
else:
perturb(probs_right)
return best_dict, min_score
def perturb(probs):
    ix = np.random.choice(list(probs.keys()))
    delta = np.random.normal(0, .03)
    probs[ix] = max(0, probs[ix] + delta)
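# Illustrative usage (kept as a comment so importing this module has no side effects):
#
#   best_flow, best_score = evolve(n_iter=50)
#   print(best_score)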
|
#!/usr/bin/env python3
import io
import optparse
def read(fname):
with open(fname, encoding='utf-8') as f:
return f.read()
# Get the version from yt_dlp/version.py without importing the package
def read_version(fname):
exec(compile(read(fname), fname, 'exec'))
return locals()['__version__']
def main():
parser = optparse.OptionParser(usage='%prog INFILE OUTFILE')
options, args = parser.parse_args()
if len(args) != 2:
parser.error('Expected an input and an output filename')
infile, outfile = args
with open(outfile, 'w', encoding='utf-8') as outf:
outf.write(
read(infile) % {'version': read_version('yt_dlp/version.py')})
if __name__ == '__main__':
main()
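# Illustrative note: INFILE is expected to contain a "%(version)s" placeholder, which this
# script fills in from yt_dlp/version.py. The file names below are hypothetical:
#
#   python this_script.py template.in rendered.out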
|
# Generated by Django 2.0.8 on 2018-10-04 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("evaluation", "0008_config_submission_join_key")]
operations = [
migrations.AddField(
model_name="config",
name="publication_url_choice",
field=models.CharField(
choices=[
("off", "Off"),
("opt", "Optional"),
("req", "Required"),
],
default="off",
help_text="Show a publication url field on the submission page so that users can submit a link to a publication that corresponds to their submission. Off turns this feature off, Optional means that including the url is optional for the user, Required means that the user must provide an url.",
max_length=3,
),
),
migrations.AddField(
model_name="config",
name="score_decimal_places",
field=models.PositiveSmallIntegerField(
default=4,
help_text="The number of decimal places to display for the score",
),
),
migrations.AddField(
model_name="config",
name="show_publication_url",
field=models.BooleanField(
default=False,
help_text="Show a link to the publication on the results page",
),
),
migrations.AddField(
model_name="config",
name="supplementary_file_choice",
field=models.CharField(
choices=[
("off", "Off"),
("opt", "Optional"),
("req", "Required"),
],
default="off",
help_text="Show a supplementary file field on the submissions page so that users can upload an additional file along with their predictions file as part of their submission (eg, include a pdf description of their method). Off turns this feature off, Optional means that including the file is optional for the user, Required means that the user must upload a supplementary file.",
max_length=3,
),
),
migrations.AddField(
model_name="submission",
name="publication_url",
field=models.URLField(
blank=True,
help_text="A URL for the publication associated with this submission.",
),
),
]
|
#!/usr/bin/python3
# -*- coding: UTF-8 -*-
# Variables needed to initialize MyCobot Pi
from pymycobot import PI_PORT, PI_BAUD
from pymycobot.mycobot import MyCobot
import time
def gripper_test(mc):
print("Start check IO part of api\n")
# Check if the gripper is moving
flag = mc.is_gripper_moving()
print("Is gripper moving: {}".format(flag))
time.sleep(1)
    # Reset the gripper's current position to 2048 (its zero point).
    # Only use this when you are sure it is needed: the gripper is already
    # initialized and this normally does not need to be called again.
# mc.set_gripper_ini()
# Set joint point 1 and let it rotate to the position of 2048
mc.set_encoder(1, 2048)
time.sleep(2)
# Set up six joint positions and let the robotic arm rotate to this position
# at a speed of 20
mc.set_encoders([1024, 1024, 1024, 1024, 1024, 1024], 20)
time.sleep(3)
# Get the position information of Joint-1
print(mc.get_encoder(1))
# Set the gripper to rotate to 2048 position
mc.set_encoder(7, 2048)
time.sleep(3)
# Set the gripper to turn it to the 1300 position
mc.set_encoder(7, 1300)
time.sleep(3)
# Let the gripper reach the 2048 state at a speed of 70
mc.set_gripper_value(2048, 70)
time.sleep(3)
# Let the gripper reach the 1500 state at a speed of 70
mc.set_gripper_value(1500, 70)
time.sleep(3)
# Set the state of the gripper and let it quickly open the gripper at a speed
# of 70
mc.set_gripper_state(0, 70)
time.sleep(3)
# Set the state of the gripper to quickly close the gripper at a speed of 70
mc.set_gripper_state(1, 70)
time.sleep(3)
# Get the value of the gripper
print("")
print(mc.get_gripper_value())
if __name__ == "__main__":
# Initialize MyCobotPi
mc = MyCobot(PI_PORT, PI_BAUD)
# Move it to the zero position
mc.set_encoders([2048, 2048, 2048, 2048, 2048, 2048], 20)
time.sleep(3)
gripper_test(mc)
|
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import absolute_import, unicode_literals
import logging
import traceback
from collections import OrderedDict
from typing import Dict, List
from apps.backend.subscription.errors import PluginValidationError
from apps.backend.subscription.steps import AgentStep, StepFactory
from apps.node_man import models
from apps.node_man.models import PipelineTree, SubscriptionInstanceRecord
from pipeline import builder
"""
TODO 此文件是为了兼容当前AGENT任务流程,后续把AGENT任务流程优化为多实例后(与插件流程一致)可废弃此文件
"""
logger = logging.getLogger("app")
def build_instance_task(
record: SubscriptionInstanceRecord, step_actions: Dict[str, str], step_managers: Dict[str, AgentStep]
):
"""
按实例执行任务
:param SubscriptionInstanceRecord record: InstanceRecord
:param step_actions: 步骤动作
{
"step_id_x": "INSTALL",
"step_id_y": "UNINSTALL,
}
:param step_managers: dict 步骤管理器
{
"agent": AgentStep
}
构造形如以下的pipeline
StartEvent
|
ParallelGateway
|
--------------------
| |
register_cmdb register_cmdb
| |
install_agent install_agent
| |
....... .......
| |
--------------------
|
ConvergeGateway
|
EndEvent
"""
instance_start = builder.EmptyStartEvent()
instance_end = builder.EmptyEndEvent()
current_node = instance_start
for step_id in step_managers:
if step_id not in step_actions:
continue
step_manager = step_managers[step_id]
action_name = step_actions[step_manager.step_id]
action_manager = step_manager.create_action(action_name, record)
        # Execute the action and update its status
sub_processes = [action_manager.execute(record)]
        # Generate a parallel gateway according to the number of hosts
step_name = "[{}] {}".format(step_id, action_manager.ACTION_DESCRIPTION)
step_start = builder.EmptyStartEvent()
step_end = builder.EmptyEndEvent()
pg = builder.ParallelGateway()
cg = builder.ConvergeGateway()
step_start.extend(pg).connect(*sub_processes).to(pg).converge(cg).extend(step_end)
step_pipeline = builder.SubProcess(start=step_start, name=step_name)
action_manager.set_pipeline_id(step_pipeline.id)
current_node = current_node.extend(step_pipeline)
current_node.extend(instance_end)
return instance_start
def build_task(
subscription_task: models.SubscriptionTask,
instances_action: Dict[str, Dict[str, str]],
instance_records: List[models.SubscriptionInstanceRecord],
):
"""
批量执行实例的步骤的动作
:param subscription_task: 订阅任务
:param instances_action: {
"instance_id_xxx": {
"step_id_x": "INSTALL",
"step_id_y": "UNINSTALL,
}
}
:param instance_records 订阅实例记录
"""
subscription = subscription_task.subscription
instance_records_dict = {record.instance_id: record for record in instance_records}
step_managers = OrderedDict()
step_data = []
for step in subscription.steps:
step_managers[step.step_id] = StepFactory.get_step_manager(step)
step_data.append({"id": step.step_id, "type": step.type, "pipeline_id": "", "action": None, "extra_info": {}})
to_be_saved_records = []
to_be_saved_pipelines = []
to_be_displayed_errors = []
    # Execute the specified actions for each instance
for instance_id, step_actions in instances_action.items():
record = instance_records_dict[instance_id]
try:
record.steps = step_data
instance_task = build_instance_task(record, step_actions, step_managers)
except PluginValidationError as err:
            # The plugin does not support this operating system; skip this instance
logger.error(str(err))
logger.error(traceback.format_exc())
to_be_displayed_errors.append(str(err))
continue
        # Build the pipeline topology
pipeline_tree = builder.build_tree(instance_task)
pipeline_id = pipeline_tree["id"]
record.pipeline_id = pipeline_id
to_be_saved_records.append(record)
to_be_saved_pipelines.append(
PipelineTree(
id=pipeline_id,
tree=pipeline_tree,
)
)
return to_be_saved_records, to_be_saved_pipelines, to_be_displayed_errors
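# A minimal sketch (not used by this module) of the parallel topology described in the
# build_instance_task docstring, using only the pipeline.builder calls already seen above.
# The empty start/end subprocesses stand in for real step activities.
def _demo_parallel_pipeline():
    def branch(name):
        start = builder.EmptyStartEvent()
        end = builder.EmptyEndEvent()
        start.extend(end)
        return builder.SubProcess(start=start, name=name)
    start = builder.EmptyStartEvent()
    end = builder.EmptyEndEvent()
    pg = builder.ParallelGateway()
    cg = builder.ConvergeGateway()
    branches = [branch("branch-1"), branch("branch-2")]
    start.extend(pg).connect(*branches).to(pg).converge(cg).extend(end)
    return builder.build_tree(start)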
|
"""This package implements a handler for the Python language."""
from mkdocstrings_handlers.python.handler import get_handler
__all__ = ["get_handler"] # noqa: WPS410
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
后台功能/任务入口
"""
import os
import sys
from enum import Enum
from metadata_biz.analyse.similarity import (
AnalyseMode,
CachedResultTableSimilarityAnalyseFlow,
CompareContext,
FieldAliasSuggestionAnalyseFlow,
FieldsSuggestionAnalyseFlow,
ResultTableLineageSimilarityAnalyseFlow,
ResultTableSimilarityAnalyseFlow,
)
from metadata_pro.service.async_agent import huey
@huey.task()
def work_with_inspiration():
return 'Not just work, work with inspiration.'
class CompareMode(Enum):
MATRIX = 'matrix'
TWO_WAY = 'two_way'
@huey.task()
def result_table_similarity(
result_table_ids,
reference_result_table_ids=None,
reference_max_n=None,
compare_mode=CompareMode.MATRIX,
analyse_mode=AnalyseMode.UP_TO_DATE,
):
analyse_mode, compare_mode = AnalyseMode(analyse_mode), CompareMode(compare_mode)
af = (
ResultTableSimilarityAnalyseFlow()
if analyse_mode is AnalyseMode.UP_TO_DATE
else CachedResultTableSimilarityAnalyseFlow()
)
    target = list(set(result_table_ids))
    reference = list(set(reference_result_table_ids)) if compare_mode is CompareMode.TWO_WAY else target
af.input = CompareContext(target, reference)
ret = af.execute(reference_max_n=reference_max_n)
if not ret.get('result_table_ids', None):
ret['result_table_ids'] = target
if not ret.get('reference_result_table_ids', None):
ret['reference_result_table_ids'] = reference
return ret
@huey.task()
def suggest_fields(fields):
f = FieldsSuggestionAnalyseFlow()
ret = f.execute(fields)
return ret
@huey.task()
def suggest_field_alias(fields):
f = FieldAliasSuggestionAnalyseFlow()
ret = f.execute(fields)
return ret
@huey.task()
def result_table_lineage_similarity(result_table_ids, reference_result_table_ids):
af = ResultTableLineageSimilarityAnalyseFlow()
af.input = CompareContext(result_table_ids, reference_result_table_ids)
ret = af.execute()
return ret
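# A minimal enqueue sketch (assumptions: a huey consumer is running, the result table ids
# below are placeholders, and huey's Result.get(blocking=True) is used to wait for the
# task outcome).
if __name__ == "__main__":
    pending = result_table_similarity(
        result_table_ids=["example_rt_1", "example_rt_2"],
        compare_mode=CompareMode.MATRIX,
    )
    print(pending.get(blocking=True))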
|
#
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import pandas
import pickle
import pytz
from nose_parameterized import parameterized
from unittest import TestCase
from zipline.finance.blotter import Blotter, Order
from zipline.finance.commission import PerShare, PerTrade, PerDollar
from zipline.finance.performance.period import PerformancePeriod
from zipline.finance.performance.position import Position
from zipline.finance.performance.tracker import PerformanceTracker
from zipline.finance.risk.cumulative import RiskMetricsCumulative
from zipline.finance.risk.period import RiskMetricsPeriod
from zipline.finance.risk.report import RiskReport
from zipline.finance.slippage import (
FixedSlippage,
Transaction,
VolumeShareSlippage
)
from zipline.protocol import Account
from zipline.protocol import Portfolio
from zipline.protocol import Position as ProtocolPosition
from zipline.finance.trading import SimulationParameters
from zipline.utils import factory
sim_params_daily = SimulationParameters(
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
10000,
emission_rate='daily')
sim_params_minute = SimulationParameters(
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
datetime.datetime(2013, 6, 19, tzinfo=pytz.UTC),
10000,
emission_rate='minute')
returns = factory.create_returns_from_list(
[1.0], sim_params_daily)
base_state_dir = 'tests/resources/saved_state_archive'
BASE_STATE_DIR = os.path.join(
os.path.dirname(__file__),
'resources',
'saved_state_archive')
class VersioningTestCase(TestCase):
def load_state_from_disk(self, cls):
state_dir = cls.__module__ + '.' + cls.__name__
        full_dir = os.path.join(BASE_STATE_DIR, state_dir)
        state_files = \
            [f for f in os.listdir(full_dir) if 'State_Version_' in f]
        for f_name in state_files:
            # Pickled state must be read in binary mode; a context manager ensures
            # the file handle is closed after loading.
            with open(os.path.join(full_dir, f_name), 'rb') as f:
                yield pickle.load(f)
# Only test versioning in minutely mode right now
@parameterized.expand([
(Blotter, (), 'repr'),
(Order, (datetime.datetime(2013, 6, 19), 8554, 100), 'dict'),
(PerShare, (), 'dict'),
(PerTrade, (), 'dict'),
(PerDollar, (), 'dict'),
(PerformancePeriod, (10000,), 'to_dict'),
(Position, (8554,), 'dict'),
(PerformanceTracker, (sim_params_minute,), 'to_dict'),
(RiskMetricsCumulative, (sim_params_minute,), 'to_dict'),
(RiskMetricsPeriod,
(returns.index[0], returns.index[0], returns), 'to_dict'),
(RiskReport, (returns, sim_params_minute), 'to_dict'),
(FixedSlippage, (), 'dict'),
(Transaction,
(8554, 10, datetime.datetime(2013, 6, 19), 100, "0000"), 'dict'),
(VolumeShareSlippage, (), 'dict'),
(Account, (), 'dict'),
(Portfolio, (), 'dict'),
(ProtocolPosition, (8554,), 'dict')
])
def test_object_serialization(self,
cls,
initargs,
comparison_method='dict'):
# The state generated under one version of pandas may not be
# compatible with another. To ensure that tests pass under the travis
# pandas version matrix, we only run versioning tests under the
# current version of pandas. This will need to be updated once we
# change the pandas version on prod.
if pandas.__version__ != '0.12.0':
return
# Make reference object
obj = cls(*initargs)
# Fetch state
state_versions = self.load_state_from_disk(cls)
for version in state_versions:
# For each version inflate a new object and ensure that it
# matches the original.
newargs = version['newargs']
initargs = version['initargs']
state = version['obj_state']
if newargs is not None:
obj2 = cls.__new__(cls, *newargs)
else:
obj2 = cls.__new__(cls)
if initargs is not None:
obj2.__init__(*initargs)
obj2.__setstate__(state)
# The ObjectId generated on instantiation of Order will
# not be the same as the one loaded from saved state.
if cls == Order:
obj.__dict__['id'] = obj2.__dict__['id']
if comparison_method == 'repr':
self.assertEqual(obj.__repr__(), obj2.__repr__())
elif comparison_method == 'to_dict':
self.assertEqual(obj.to_dict(), obj2.to_dict())
else:
self.assertEqual(obj.__dict__, obj2.__dict__)
|
# -*- coding: utf-8 -*-
# flake8: noqa: E501
from __future__ import unicode_literals
from kinopoisk.person import Person
from .base import BaseTest
class PersonTest(BaseTest):
def test_person_manager_with_one_result(self):
persons = Person.objects.search('Гуальтиеро Якопетти')
self.assertEqual(len(persons), 1)
p = persons[0]
self.assertEqual(p.id, 351549)
self.assertEqual(p.name, 'Гуалтьеро Якопетти')
self.assertEqual(p.year_birth, 1919)
self.assertEqual(p.name_en, 'Gualtiero Jacopetti')
def test_person_manager_with_many_results(self):
persons = Person.objects.search('malkovich')
self.assertGreater(len(persons), 1)
p = persons[0]
self.assertEqual(p.id, 24508)
self.assertEqual(p.name, 'Джон Малкович')
self.assertEqual(p.year_birth, 1953)
self.assertEqual(p.name_en, 'John Malkovich')
p = persons[4]
self.assertEqual(p.name, 'Др. Марк Малкович III')
self.assertEqual(p.year_birth, 1930)
self.assertEqual(p.year_death, 2010)
def test_person_main_page_source(self):
p = Person(id=6245)
p.get_content('main_page')
self.assertEqual(p.id, 6245)
self.assertEqual(p.name, 'Джонни Депп')
self.assertEqual(p.year_birth, 1963)
self.assertEqual(p.name_en, 'Johnny Depp')
self.assertGreater(len(p.information), 50)
# career
self.assertGreaterEqual(len(p.career['actor']), 86)
self.assertGreaterEqual(len(p.career['producer']), 7)
self.assertGreaterEqual(len(p.career['director']), 3)
self.assertGreaterEqual(len(p.career['writer']), 1)
self.assertGreaterEqual(len(p.career['hrono_titr_male']), 11)
self.assertGreaterEqual(len(p.career['himself']), 124)
self.assertEqual(p.career['actor'][0].movie.id, 420454)
self.assertEqual(p.career['actor'][0].movie.title, 'Человек-невидимка')
self.assertEqual(p.career['actor'][0].movie.title_en, 'The Invisible Man')
self.assertEqual(p.career['actor'][0].name, 'Dr. Griffin')
self.assertEqual(p.career['actor'][1].movie.title, 'Ричард прощается')
self.assertEqual(p.career['actor'][1].movie.year, 2018)
self.assertEqual(p.career['actor'][1].movie.title_en, 'Richard Says Goodbye')
self.assertEqual(p.career['actor'][4].movie.title, 'Шерлок Гномс')
self.assertEqual(p.career['actor'][4].movie.title_en, 'Sherlock Gnomes')
self.assertEqual(p.career['actor'][4].movie.year, 2018)
self.assertEqual(p.career['actor'][4].name, 'Sherlock Gnomes') # voice
self.assertEqual(p.career['actor'][5].movie.title_en, 'Murder on the Orient Express')
self.assertAlmostEqual(p.career['actor'][5].movie.rating, 6.68)
self.assertGreaterEqual(p.career['actor'][5].movie.votes, 64162)
self.assertAlmostEqual(p.career['actor'][5].movie.imdb_rating, 6.6)
self.assertGreaterEqual(p.career['actor'][5].movie.imdb_votes, 70581)
self.assertEqual(p.career['actor'][6].name, 'Abel') # short
# series
self.assertEqual(p.career['actor'][22].name, 'Johnny Depp')
self.assertEqual(p.career['actor'][22].movie.title, 'Жизнь так коротка')
self.assertEqual(p.career['actor'][22].movie.title_en, 'Life\'s Too Short')
self.assertEqual(p.career['actor'][22].movie.year, None)
self.assertEqual(p.career['actor'][22].movie.series, True)
self.assertEqual(p.career['actor'][22].movie.series_years, (2011, 2013))
# top + budget
self.assertEqual(p.career['actor'][34].name, 'Jack Sparrow')
self.assertEqual(p.career['actor'][34].movie.title, 'Пираты Карибского моря: Сундук мертвеца')
self.assertEqual(p.career['actor'][34].movie.title_en, 'Pirates of the Caribbean: Dead Man\'s Chest')
self.assertEqual(p.career['actor'][34].movie.year, 2006)
# voice and short
self.assertEqual(p.career['actor'][35].name, 'Narration')
self.assertEqual(p.career['actor'][35].movie.genres, ['короткометражка'])
self.assertEqual(p.career['actor'][35].voice, True)
# endless series
self.assertEqual(p.career['actor'][55].name, 'Jack Kahuna Laguna')
self.assertEqual(p.career['actor'][55].movie.title, 'Губка Боб квадратные штаны')
self.assertEqual(p.career['actor'][55].movie.title_en, 'SpongeBob SquarePants')
self.assertEqual(p.career['actor'][55].movie.year, None)
self.assertEqual(p.career['actor'][55].movie.series, True)
self.assertEqual(p.career['actor'][55].movie.series_years, (1999,))
# short, no russian title
self.assertEqual(p.career['actor'][82].name, 'Pete')
self.assertEqual(p.career['actor'][82].movie.title, '')
self.assertEqual(p.career['actor'][82].movie.title_en, 'Dummies')
self.assertEqual(p.career['actor'][82].movie.year, 1985)
self.assertEqual(p.career['actor'][82].movie.genres, ['короткометражка'])
self.assertEqual(p.career['actor'][82].movie.rating, None)
self.assertEqual(p.career['actor'][82].movie.votes, None)
def test_person_cast_special_case(self):
p = Person(id=9843)
p.get_content('main_page')
# ... in movie title
self.assertEqual(p.career['actor'][137].name, None)
self.assertEqual(p.career['actor'][137].movie.title, 'Тарзан и Джейн возвращены... как будто')
self.assertEqual(p.career['actor'][137].movie.title_en, 'Tarzan and Jane Regained... Sort of')
self.assertEqual(p.career['actor'][137].movie.year, 1964)
def test_person_photos_page_source(self):
p = Person(id=8217)
p.get_content('photos')
self.assertGreaterEqual(len(p.photos), 11)
def test_person_repr(self):
instance = Person(name='Чарльз Чаплин', name_en='Charles Chaplin', year_birth='1950')
self.assertEqual(instance.__repr__(), 'Чарльз Чаплин (Charles Chaplin), 1950')
|
from libs.db import db
class User(db.Model):
    '''User table'''
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(20), unique=True, nullable=False, index=True)
password = db.Column(db.String(128), nullable=True)
    gender = db.Column(db.String(10), default='unknown')
bio = db.Column(db.String(200))
city = db.Column(db.String(16), default='上海')
avatar = db.Column(db.String(128))
birthday = db.Column(db.Date, default='1990-01-01')
created = db.Column(db.DateTime)
class Follow(db.Model):
    '''Follow (following relationship) table'''
__tablename__ = 'follow'
uid = db.Column(db.Integer, primary_key=True)
fid = db.Column(db.Integer, primary_key=True)
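# A minimal usage sketch (assumption: ``db`` from libs.db is a Flask-SQLAlchemy instance
# already bound to a Flask app, e.g. via db.init_app(app)):
#     user = User(nickname='alice', password='<hashed password>', gender='female')
#     db.session.add(user)
#     db.session.commit()
#     db.session.add(Follow(uid=user.id, fid=2))  # alice follows user id 2
#     db.session.commit()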
|
"""
Copyright 2020 MPI-SWS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from storm.fuzzer.fuzzer import generate_mutants
from storm.fuzzer.helper_functions import insert_pushes_pops, add_check_sat_using
from storm.runner.solver_runner import solver_runner
from storm.smt.smt_object import smtObject
from storm.utils.file_operations import get_mutant_paths
import os
from termcolor import colored
import shutil
import time
from storm.utils.max_depth import get_max_depth
from storm.utils.randomness import Randomness
class minimize(object):
def __init__(self, dir_path, file_path, solverbin, maxDepth, maxAssert, SEED, parsedArguments, iteration):
"""
filepath is a path to a dir containing the config file and the buggy mutant
"""
print(colored("MINIMIZATION MODE", "blue", attrs=["bold"]))
self.iteration = iteration
self.stats_file = os.path.join(dir_path, "min_stats.csv")
self.dir_path = dir_path
self.temp_dir = os.path.join(dir_path, "temp", str(iteration))
if os.path.exists(self.temp_dir):
shutil.rmtree(self.temp_dir)
os.mkdir(self.temp_dir)
self.orig_file_path = file_path
self.solverbin = solverbin
self.maxDepth = maxDepth
self.maxAssert = maxAssert
self.seed = SEED
# copy the original mutant and create a file called "minimized.smt2"
shutil.copy2(self.orig_file_path, os.path.join(dir_path, "minimized_"+ str(iteration) + ".smt2"))
self.minimized_file_path = os.path.join(dir_path, "minimized_"+ str(iteration) + ".smt2")
self.incremental = parsedArguments["incremental"]
self.check_sat_using = parsedArguments["check_sat_using"]
self.theory = parsedArguments["theory"]
self.solver = parsedArguments["solver"]
self.fuzzing_parameters = {
"enrichment_steps": 1000,
"number_of_mutants": 1000,
"mutant_generation_timeout": 1000,
"mutant_running_timeout": 1800,
"solver_timeout": 60
}
self.randomness = Randomness(self.seed)
print(colored("Incremental Mode = ", "white", "on_blue", attrs=["bold"]) + colored(str(self.incremental), "yellow", attrs=["bold"]))
print(colored("check_sat_using = ", "white", "on_blue", attrs=["bold"]) + colored(str(self.check_sat_using), "yellow", attrs=["bold"]))
print(colored("Theory = ", "white", "on_blue", attrs=["bold"]) + colored(str(self.theory), "yellow", attrs=["bold"]))
print(colored("Solver = ", "white", "on_blue", attrs=["bold"]) + colored(str(self.solver), "yellow", attrs=["bold"]))
self.min_depth = None
self.min_assert = self.count_asserts(self.minimized_file_path)
self.number_of_queries = 0
start_time = time.time()
self.minimizeDepth(0, 64)
print("Minimum Depth = " + str(self.min_depth))
self.minimizeAssertions(4, self.min_assert)
self.minimizeLines(self.min_depth, self.min_assert)
shutil.rmtree(self.temp_dir)
file = open(self.stats_file, "a+")
self.min_lines = self.get_number_of_lines(self.minimized_file_path)
self.min_bytes = os.path.getsize(self.minimized_file_path)
max_depth = get_max_depth(self.minimized_file_path)
end_time = time.time()
file.writelines("\n" + str(iteration) + "," + str(SEED) + "," + str(max_depth) + "," + str(self.min_assert) +
"," + str(self.min_lines) + "," + str(self.min_bytes) + "," + str(self.number_of_queries) + "," +
str(end_time - start_time))
file.close()
def get_number_of_lines(self, file_path):
with open(file_path, 'r') as f:
lines = f.read().splitlines()
return len(lines)
def count_asserts(self, file_path):
number_of_assertions = 0
with open(file_path, 'r') as f:
lines = f.read().splitlines()
for line in lines:
if line.find("(assert") != -1:
number_of_assertions += 1
return number_of_assertions
def generate_and_run_mutants(self, maxDepth, maxAssert):
smt_Object = smtObject(file_path=self.minimized_file_path, path_to_mutant_folder=self.temp_dir)
smt_Object.check_satisfiability(timeout=120)
signal = generate_mutants(smt_Object=smt_Object, path_to_directory=self.temp_dir, maxDepth=maxDepth, maxAssert=maxAssert,
seed=self.seed, theory=self.theory, fuzzing_parameters=self.fuzzing_parameters)
mutant_file_paths = get_mutant_paths(self.temp_dir)
if self.incremental:
mutant_file_paths.sort()
insert_pushes_pops(mutant_file_paths, self.randomness)
for mutant_file_path in mutant_file_paths:
if self.check_sat_using is not None:
add_check_sat_using(mutant_file_path, self.check_sat_using)
# run mutants
unsat_mutants = []
running_start = time.time()
print(colored("Running Mutants.... ", "green", attrs=["bold"]))
for mutant in mutant_file_paths:
output = solver_runner(solver_path=self.solverbin, smt_file=mutant, temp_core_folder=self.temp_dir,
timeout=self.fuzzing_parameters["solver_timeout"], incremental="yes" if self.incremental else "no",
solver=self.solver)
self.number_of_queries += 1
if output == "unsat":
unsat_mutants.append(mutant)
# stop running mutants after 30 mins
current_time = time.time()
if (int(current_time - running_start) > self.fuzzing_parameters["mutant_running_timeout"]):
print(colored(">>> TIMEOUT WHILE RUNNING THE MUTANTS <<<< ", "red", attrs=["bold"]))
break
if len(unsat_mutants) > 0:
min_lines = self.get_number_of_lines(unsat_mutants[0])
min_mutant = unsat_mutants[0]
for mutant in unsat_mutants[1:]:
if self.get_number_of_lines(mutant) < min_lines:
min_lines = self.get_number_of_lines(mutant)
min_mutant = mutant
print(colored("Found a failing example", "green", attrs=["bold"]))
return True, min_mutant
print(colored("Could not find a failing example with these parameters", "red", attrs=["bold"]))
return False, ""
def minimizeLines(self, maxDepth, maxAssert):
minimum_number_of_lines = self.get_number_of_lines(self.minimized_file_path)
for i in range(10):
print(colored("min LINE NUMBERS = " + str(self.get_number_of_lines(self.minimized_file_path)), "magenta", attrs=["bold"]))
success_flag, mutant = self.generate_and_run_mutants(maxDepth, maxAssert)
if success_flag and minimum_number_of_lines > self.get_number_of_lines(mutant):
print(colored("new min LINE NUMBERS = " + str(self.get_number_of_lines(mutant)), "green", attrs=["bold"]))
minimum_number_of_lines = self.get_number_of_lines(mutant)
shutil.copy2(mutant, os.path.join(self.dir_path, "minimized_"+ str(self.iteration) + ".smt2"))
def minimizeDepth(self, minDepth, maxDepth):
print(colored("DEPTH BOUNDS: minDepth= " + str(minDepth) + ", maxDepth= " + str(maxDepth) + " maxAssert= " + str(self.min_assert), "white", "on_blue", attrs=["bold"]))
if minDepth == maxDepth - 1:
print(colored(" ", "white", "on_red", attrs=["bold"]))
print(colored("MIN DEPTH = " + str(maxDepth) + " ", "white", "on_red", attrs=["bold"]))
print(colored(" ", "white", "on_red", attrs=["bold"]))
self.min_depth = maxDepth
return maxDepth
midpoint = int((minDepth + maxDepth)/2)
success_flag, mutant = self.generate_and_run_mutants(midpoint, self.min_assert)
if success_flag:
shutil.copy2(mutant, os.path.join(self.dir_path, "minimized_"+ str(self.iteration) + ".smt2"))
self.minimizeDepth(minDepth, midpoint)
else:
self.minimizeDepth(midpoint, maxDepth)
def minimizeAssertions(self, minAssert, maxAssert):
print(colored("ASSERT BOUNDS: maxDepth=" + str(self.min_depth) + ", minAssert= " + str(minAssert) + ", maxAssert= " + str(maxAssert), "white", "on_blue", attrs=["bold"]))
if minAssert == maxAssert - 1:
self.min_assert = maxAssert
return maxAssert
midpoint = int((minAssert + maxAssert) / 2)
success_flag, mutant = self.generate_and_run_mutants(self.min_depth, midpoint)
if success_flag:
shutil.copy2(mutant, os.path.join(self.dir_path, "minimized_"+ str(self.iteration) + ".smt2"))
min_assert = self.minimizeAssertions(minAssert, midpoint)
else:
            min_assert = self.minimizeAssertions(midpoint, maxAssert)
        return min_assert
|
import pandas as pd
from .dataframe_bytes_storage import df_from_bytes, df_to_bytes
from .numpy_bytes_storage import np_from_bytes, np_to_bytes
try:
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter
def r_to_py(object_):
if isinstance(object_, robjects.DataFrame):
with localconverter(pandas2ri.converter):
py_object_ = robjects.conversion.rpy2py(object_)
return py_object_
return object_
except ImportError: # in Python 3.6 ModuleNotFoundError can be used
def r_to_py(object_):
return object_
def maybe_to_df(object_):
"""
Convert pd.Series and robjects.DataFrame to pd.DataFrame.
"""
if isinstance(object_, pd.Series):
object_ = object_.to_frame()
object_ = r_to_py(object_)
return object_
def to_bytes(object_):
object_ = maybe_to_df(object_)
if isinstance(object_, pd.DataFrame):
return df_to_bytes(object_)
return np_to_bytes(object_)
def from_bytes(bytes_):
if bytes_[:6] == b"\x93NUMPY":
return np_from_bytes(bytes_)
return df_from_bytes(bytes_)
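# A minimal round-trip sketch (assumption: the sibling dataframe/numpy byte-storage helpers
# imported above are importable; numpy payloads are detected via the NumPy magic prefix).
if __name__ == "__main__":
    import numpy as np
    df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
    arr = np.arange(6).reshape(2, 3)
    print(from_bytes(to_bytes(df)))   # back to a DataFrame
    print(from_bytes(to_bytes(arr)))  # back to an ndarray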
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from robotender_kinova_flexbe_states.finger_position_state import FingerPositionState
from robotender_kinova_flexbe_states.feedback_joint_state_to_moveit import FeedbackJointStateToMoveit
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Wed Nov 01 2017
@author: Davis Catherman, Shannon Enders
'''
class multiplecupspourbehaviorusingcontainersfastmodeSM(Behavior):
'''
Pour to left cup from center, then pour to center cup from left cup. (loop possibilities)
'''
def __init__(self):
super(multiplecupspourbehaviorusingcontainersfastmodeSM, self).__init__()
self.name = 'multiple cups pour behavior using containers fast mode'
# parameters of this behavior
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:958 y:78, x:440 y:324
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
_state_machine.userdata.joint_names = ["m1n6s200_joint_1", "m1n6s200_joint_2", "m1n6s200_joint_3", "m1n6s200_joint_4", "m1n6s200_joint_5", "m1n6s200_joint_6"]
_state_machine.userdata.center_values = [4.825370393837993, 4.804768712277358, 1.7884682005958692, 2.781744729201632, 1.7624776125694588, 2.5668808924540394]
_state_machine.userdata.prep_pour_to_left = [4.8484381625680415, 4.322889801498073, 1.372345285529353, 3.0126762157540004, 1.4690217615247554, 2.627620406383804]
_state_machine.userdata.pour_to_left = [4.566518592733344, 4.3267512703163105, 1.393352300207898, -3.4085460570465727, 7.739404454396004, 4.906765118866303]
_state_machine.userdata.post_pour_to_left = [4.8484381625680415, 4.322889801498073, 1.372345285529353, 3.0126762157540004, 1.4690217615247554, 2.627620406383804]
_state_machine.userdata.left_values = [4.501794723496712, 4.784133474886988, 1.6909002314255626, 2.766800400744653, 1.8037183931040444, 2.543646143523643]
_state_machine.userdata.prep_pour_to_center = [4.5690588912549435, 4.3365780179046835, 1.371823705429861, 2.7555946178259263, 1.6906042210704002, 2.5960829864389763]
_state_machine.userdata.pour_to_center = [4.704875670317358, 4.372941136262645, 1.5029825249035005, -3.5267722999506783, 7.63555022663062, 0.3984061360462231]
_state_machine.userdata.post_pour_to_center = [4.5690588912549435, 4.3365780179046835, 1.371823705429861, 2.7555946178259263, 1.6906042210704002, 2.5960829864389763]
_state_machine.userdata.OPEN = [0,0]
_state_machine.userdata.CLOSE = [6400,6400]
_state_machine.userdata.pre_grab_left = [4.616985495390345, 4.361768642857545, 0.8309522662125534, 2.772490244413607, 1.7511775537481435, 2.6507113446153356]
_state_machine.userdata.back_off_center = [4.8380550301100405, 4.49428940291265, 1.2147491327564424, 2.784340512316133, 1.7494544885228622, 2.530367888644617]
_state_machine.userdata.mid_pour_center = [4.595038384847002, 4.374602948782854, 1.4727919986799805, -3.5220619669306554, 7.626154061672603, 1.4440939079313413]
_state_machine.userdata.mid_pour_left = [4.639588276194066, 4.306920307575145, 1.3567719184228966, -3.3707214464002866, 7.72652274420329, 4.057045223556825]
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:617 y:525, x:724 y:512, x:799 y:501, x:1346 y:582
_sm_stuff_with_left_cup_0 = OperatableStateMachine(outcomes=['planning_failed', 'control_failed', 'failed', 'reached'], input_keys=['left_values', 'post_pour_to_center', 'pre_grab_left', 'joint_names', 'CLOSE', 'prep_pour_to_center', 'pour_to_center', 'OPEN', 'mid_pour_center'])
with _sm_stuff_with_left_cup_0:
# x:57 y:91
OperatableStateMachine.add('pregrableft',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'left', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'pre_grab_left', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1058 y:196
OperatableStateMachine.add('postppourcenter',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'left2', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.High, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'post_pour_to_center', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1338 y:85
OperatableStateMachine.add('left2',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'releaseleft', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Low, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'left_values', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:77 y:478
OperatableStateMachine.add('graspleft',
FingerPositionState(result_topic="/m1n6s200_driver/fingers_action/finger_positions/result", action_topic="/m1n6s200_driver/fingers_action/finger_positions", robot_name="m1n6s200"),
transitions={'reached': 'preppourcenter', 'failed': 'failed'},
autonomy={'reached': Autonomy.Low, 'failed': Autonomy.Off},
remapping={'finger_values': 'CLOSE'})
# x:377 y:194
OperatableStateMachine.add('preppourcenter',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'mid pour', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.High, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'prep_pour_to_center', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:832 y:195
OperatableStateMachine.add('pourcenter',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'postppourcenter', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.High, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'pour_to_center', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1356 y:253
OperatableStateMachine.add('releaseleft',
FingerPositionState(result_topic="/m1n6s200_driver/fingers_action/finger_positions/result", action_topic="/m1n6s200_driver/fingers_action/finger_positions", robot_name="m1n6s200"),
transitions={'reached': 'backoffleft', 'failed': 'failed'},
autonomy={'reached': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'finger_values': 'OPEN'})
# x:1348 y:408
OperatableStateMachine.add('backoffleft',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'reached', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'pre_grab_left', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:76 y:267
OperatableStateMachine.add('left',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'graspleft', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Low, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'left_values', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:605 y:173
OperatableStateMachine.add('mid pour',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'pourcenter', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'mid_pour_center', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:941 y:608, x:928 y:554, x:986 y:505, x:1150 y:560
_sm_stuff_1 = OperatableStateMachine(outcomes=['failed', 'planning_failed', 'control_failed', 'reached'], input_keys=['OPEN', 'CLOSE', 'center_values', 'prep_pour_to_left', 'pour_to_left', 'post_pour_to_left', 'back_off_center', 'joint_names', 'mid_pour_left'], output_keys=['joint_names'])
with _sm_stuff_1:
# x:31 y:164
OperatableStateMachine.add('startopen',
FingerPositionState(result_topic="/m1n6s200_driver/fingers_action/finger_positions/result", action_topic="/m1n6s200_driver/fingers_action/finger_positions", robot_name="m1n6s200"),
transitions={'reached': 'precenter', 'failed': 'failed'},
autonomy={'reached': Autonomy.Low, 'failed': Autonomy.Off},
remapping={'finger_values': 'OPEN'})
# x:263 y:371
OperatableStateMachine.add('graspcenter',
FingerPositionState(result_topic="/m1n6s200_driver/fingers_action/finger_positions/result", action_topic="/m1n6s200_driver/fingers_action/finger_positions", robot_name="m1n6s200"),
transitions={'reached': 'preppourleft', 'failed': 'failed'},
autonomy={'reached': Autonomy.Low, 'failed': Autonomy.Off},
remapping={'finger_values': 'CLOSE'})
# x:270 y:272
OperatableStateMachine.add('startcenter',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'graspcenter', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Low, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'center_values', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:566 y:100
OperatableStateMachine.add('preppourleft',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'midpourleft', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.High, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'prep_pour_to_left', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1060 y:95
OperatableStateMachine.add('pourleft',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'postpourleft', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.High, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'pour_to_left', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1315 y:96
OperatableStateMachine.add('postpourleft',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'endcenter', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.High, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'post_pour_to_left', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1621 y:101
OperatableStateMachine.add('endcenter',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'releasecenter', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Low, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'center_values', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1613 y:419
OperatableStateMachine.add('backoffcenter',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'reached', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'back_off_center', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:270 y:149
OperatableStateMachine.add('precenter',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'startcenter', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'back_off_center', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
# x:1625 y:262
OperatableStateMachine.add('releasecenter',
FingerPositionState(result_topic="/m1n6s200_driver/fingers_action/finger_positions/result", action_topic="/m1n6s200_driver/fingers_action/finger_positions", robot_name="m1n6s200"),
transitions={'reached': 'backoffcenter', 'failed': 'failed'},
autonomy={'reached': Autonomy.Off, 'failed': Autonomy.Off},
remapping={'finger_values': 'OPEN'})
# x:813 y:81
OperatableStateMachine.add('midpourleft',
FeedbackJointStateToMoveit(position_topic="/m1n6s200_driver/joint_states", move_group="arm", action_topic="/move_group", robot_name="m1n6s200"),
transitions={'reached': 'pourleft', 'planning_failed': 'planning_failed', 'control_failed': 'control_failed'},
autonomy={'reached': Autonomy.Off, 'planning_failed': Autonomy.Off, 'control_failed': Autonomy.Off},
remapping={'joint_values': 'mid_pour_left', 'joint_names': 'joint_names', 'move_group': 'move_group', 'action_topic': 'action_topic'})
with _state_machine:
# x:247 y:69
OperatableStateMachine.add('stuff',
_sm_stuff_1,
transitions={'failed': 'failed', 'planning_failed': 'failed', 'control_failed': 'failed', 'reached': 'Stuff with left cup'},
autonomy={'failed': Autonomy.Inherit, 'planning_failed': Autonomy.Inherit, 'control_failed': Autonomy.Inherit, 'reached': Autonomy.Inherit},
remapping={'OPEN': 'OPEN', 'CLOSE': 'CLOSE', 'center_values': 'center_values', 'prep_pour_to_left': 'prep_pour_to_left', 'pour_to_left': 'pour_to_left', 'post_pour_to_left': 'post_pour_to_left', 'back_off_center': 'back_off_center', 'joint_names': 'joint_names', 'mid_pour_left': 'mid_pour_left'})
# x:550 y:74
OperatableStateMachine.add('Stuff with left cup',
_sm_stuff_with_left_cup_0,
transitions={'planning_failed': 'failed', 'control_failed': 'failed', 'failed': 'failed', 'reached': 'stuff'},
autonomy={'planning_failed': Autonomy.Inherit, 'control_failed': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'reached': Autonomy.Inherit},
remapping={'left_values': 'left_values', 'post_pour_to_center': 'post_pour_to_center', 'pre_grab_left': 'pre_grab_left', 'joint_names': 'joint_names', 'CLOSE': 'CLOSE', 'prep_pour_to_center': 'prep_pour_to_center', 'pour_to_center': 'pour_to_center', 'OPEN': 'OPEN', 'mid_pour_center': 'mid_pour_center'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class Test:
class Meta:
name = "test"
foo: Optional[int] = field(
default=None,
metadata={
"type": "Attribute",
}
)
bar: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
}
)
@dataclass
class T(Test):
pass
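# A minimal instantiation sketch of the generated dataclasses above (illustrative only).
if __name__ == "__main__":
    print(T(foo=1, bar="x"))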
|
TESTING = True
BABEL_DEFAULT_LOCALE = 'en_US'
CSRF_ENABLED = False
WTF_CSRF_ENABLED = CSRF_ENABLED
LOGIN_DISABLED = False
SQLALCHEMY_DATABASE_URI = 'sqlite://'
|
import zhu
# Assumption: startgame and showit are callables on the zhu module; the original
# accessed them without calling (a no-op), so the calls are made explicit here.
zhu.startgame()
for i in range(10000):
    zhu.showit()
|
class Executor:
    """Executes a SQL expression object against a DB-API cursor and yields result rows."""
    def __init__(self, expression, cursor):
        self.expression = expression
        # The cursor passed at construction time is kept for reference; execute()
        # operates on the cursor handed to it at call time.
        self.cursor = cursor
    def execute(self, cursor):
        # The expression is expected to render itself as (sql, params).
        sql, args = self.expression.to_sql()
        cursor.execute(sql, args)
        for row in cursor:
            yield row
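# A minimal usage sketch (assumptions: sqlite3 as the DB-API driver and a stand-in
# expression object exposing to_sql() -> (sql, params), since the real expression
# type is not shown here).
if __name__ == "__main__":
    import sqlite3
    class _FakeExpression:
        def to_sql(self):
            return "SELECT ?, ?", (1, "hello")
    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    for row in Executor(_FakeExpression(), cur).execute(cur):
        print(row)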
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest, assert_quantity_allclose
from astropy.units import Quantity
from ...utils.testing import requires_dependency
from ..powerlaw import (
power_law_evaluate,
power_law_pivot_energy,
power_law_df_over_f,
power_law_flux,
power_law_energy_flux,
power_law_integral_flux,
power_law_g_from_f,
power_law_g_from_points,
power_law_I_from_points,
power_law_f_from_points,
power_law_f_with_err,
power_law_I_with_err,
power_law_compatibility,
)
@pytest.mark.xfail
def test_powerlaw():
e = 1
e1, e2 = 0.2, 1e42
f, f_err = 1, 0.1
g, g_err = 2, 0.1
I_unc, I_unc_err = power_law_I_with_err(e1, e2, e, f, f_err, g, g_err)
f_unc, f_unc_err = power_law_f_with_err(e1, e2, e, I_unc, I_unc_err, g, g_err)
# TODO: add asserts
def test_one():
"""Test one case"""
I = power_law_integral_flux(f=1, g=2)
assert_allclose(I, 1)
def test_powerlaw_energy_flux():
"""
Test energy flux computation for power law against numerical solution.
"""
e1 = Quantity(1, 'TeV')
e2 = Quantity(10, 'TeV')
e = Quantity(1, 'TeV')
g = 2.3
I = Quantity(1E-12, 'cm-2 s-1')
val = power_law_energy_flux(I=I, g=g, e=e, e1=e1, e2=e2)
ref = Quantity(2.1615219876151536e-12, 'TeV cm-2 s-1')
assert_quantity_allclose(val, ref)
# TODO: failing assert at the moment -> fix!
@pytest.mark.xfail
@requires_dependency('uncertainties')
def test_closure(g_error_mag=0):
"""This test passes for g_error_mag == 0,
but fails for g_error_mag != 0, because
I and g have correlated errors, but we
effectively throw away these correlations!
"""
# initialise random number generator
random_state = np.random.RandomState(seed=0)
npoints = 100
# Generate some random f values with errors
f_val = 10 ** (10 * random_state.uniform(size=npoints) - 5)
f_err = f_val * random_state.normal(1, 0.1, npoints)
# f = unumpy.uarray((f_val, f_err))
# Generate some random g values with errors
g_val = 5 * random_state.uniform(size=npoints)
g_err = g_val * random_state.normal(1, 0.1, npoints)
# g = unumpy.uarray((g_val, g_err))
I_val, I_err = power_law_I_with_err(f_val, f_err, g_val, g_err)
# I_val = unumpy.nominal_values(f)
# I_err = unumpy.std_devs(f)
f_val2, f_err2 = power_law_f_with_err(I_val, I_err, g_val, g_err)
assert_allclose(f_val, f_val2)
assert_allclose(f_err, f_err2)
def test_e_pivot():
"""Hard-coded example from fit example in survey/spectra.
"""
e0 = 1
f0 = 5.35510540e-11
d_gamma = 0.0318377
cov = 6.56889442e-14
e_pivot = power_law_pivot_energy(e0, f0, d_gamma, cov)
assert_allclose(e_pivot, 3.3540034240210987)
def test_compatibility():
"""
Run a test case with hardcoded numbers:
HESS J1912+101
1FGL 1913.7+1007c
We use the following symbols and units:
e = pivot energy (MeV)
f = flux density (cm^-2 s^-1 MeV^-1)
g = "gamma" = spectral index
"""
# Fermi power-law parameters
e_fermi = 1296.2734
f_fermi = 3.791907E-12
f_err_fermi = 5.6907235E-13
g_fermi = 2.3759267
g_err_fermi = 0.08453985
par_fermi = (e_fermi, f_fermi, f_err_fermi, g_fermi, g_err_fermi)
# HESS power-law parameters
e_hess = 1e6
f_hess = 3.5 * 1e-12 * 1e-6
f_err_hess = 0.6 * 1e-12 * 1e-6
g_hess = 2.2
g_err_hess = 0.2
par_hess = (e_hess, f_hess, f_err_hess, g_hess, g_err_hess)
g_match, sigma_low, sigma_high, sigma_comb = \
power_law_compatibility(par_fermi, par_hess)
@requires_dependency('scipy')
def test_SED_error(I=1., e1=1, e2=10):
"""Compute the error one makes by using the simple formulas:
e = sqrt(e1 * e2)
f = I / (e2 - e1)
e2f = e ** 2 * f
to compute a differential flux f or e2f from an integral flux
measurement I in an energy bin [e1, e2].
Note that e is the log bin center and e2f is typically plotted
in spectral energy distributions (SEDs).
Index SED-Error Flux-Error
1.5 1.28 0.85
2.0 1.00 1.00
2.5 0.85 1.28
3.0 0.81 1.75
"""
from scipy.stats import gmean
e = gmean([e1, e2])
f = I / (e2 - e1)
e2f = e ** 2 * f # Note: e ** 2 = e1 * e2 here.
for Index in np.arange(1.5, 3.5, 0.5):
f_correct = power_law_flux(I, Index, e, e1, e2)
e2f_correct = e ** 2 * f_correct
# We compute ratios, which corresponds to differences
# on a log scale
SED = e2f / e2f_correct
Flux = f / f_correct
# TODO: assert results
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ResourceRecordSetInitArgs', 'ResourceRecordSet']
@pulumi.input_type
class ResourceRecordSetInitArgs:
def __init__(__self__, *,
managed_zone: pulumi.Input[str],
client_operation_id: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
routing_policy: Optional[pulumi.Input['RRSetRoutingPolicyArgs']] = None,
rrdatas: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
signature_rrdatas: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ResourceRecordSet resource.
:param pulumi.Input[str] name: For example, www.example.com.
:param pulumi.Input['RRSetRoutingPolicyArgs'] routing_policy: Configures dynamic query responses based on geo location of querying user or a weighted round robin based routing policy. A ResourceRecordSet should only have either rrdata (static) or routing_policy (dynamic). An error is returned otherwise.
:param pulumi.Input[Sequence[pulumi.Input[str]]] rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples.
:param pulumi.Input[Sequence[pulumi.Input[str]]] signature_rrdatas: As defined in RFC 4034 (section 3.2).
:param pulumi.Input[int] ttl: Number of seconds that this ResourceRecordSet can be cached by resolvers.
:param pulumi.Input[str] type: The identifier of a supported record type. See the list of Supported DNS record types.
"""
pulumi.set(__self__, "managed_zone", managed_zone)
if client_operation_id is not None:
pulumi.set(__self__, "client_operation_id", client_operation_id)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if routing_policy is not None:
pulumi.set(__self__, "routing_policy", routing_policy)
if rrdatas is not None:
pulumi.set(__self__, "rrdatas", rrdatas)
if signature_rrdatas is not None:
pulumi.set(__self__, "signature_rrdatas", signature_rrdatas)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="managedZone")
def managed_zone(self) -> pulumi.Input[str]:
return pulumi.get(self, "managed_zone")
@managed_zone.setter
def managed_zone(self, value: pulumi.Input[str]):
pulumi.set(self, "managed_zone", value)
@property
@pulumi.getter(name="clientOperationId")
def client_operation_id(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "client_operation_id")
@client_operation_id.setter
def client_operation_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_operation_id", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
For example, www.example.com.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="routingPolicy")
def routing_policy(self) -> Optional[pulumi.Input['RRSetRoutingPolicyArgs']]:
"""
Configures dynamic query responses based on geo location of querying user or a weighted round robin based routing policy. A ResourceRecordSet should only have either rrdata (static) or routing_policy (dynamic). An error is returned otherwise.
"""
return pulumi.get(self, "routing_policy")
@routing_policy.setter
def routing_policy(self, value: Optional[pulumi.Input['RRSetRoutingPolicyArgs']]):
pulumi.set(self, "routing_policy", value)
@property
@pulumi.getter
def rrdatas(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples.
"""
return pulumi.get(self, "rrdatas")
@rrdatas.setter
def rrdatas(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "rrdatas", value)
@property
@pulumi.getter(name="signatureRrdatas")
def signature_rrdatas(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
As defined in RFC 4034 (section 3.2).
"""
return pulumi.get(self, "signature_rrdatas")
@signature_rrdatas.setter
def signature_rrdatas(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "signature_rrdatas", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds that this ResourceRecordSet can be cached by resolvers.
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ttl", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of a supported record type. See the list of Supported DNS record types.
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
class ResourceRecordSet(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_operation_id: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
managed_zone: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
routing_policy: Optional[pulumi.Input[pulumi.InputType['RRSetRoutingPolicyArgs']]] = None,
rrdatas: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
signature_rrdatas: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a new ResourceRecordSet.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: For example, www.example.com.
:param pulumi.Input[pulumi.InputType['RRSetRoutingPolicyArgs']] routing_policy: Configures dynamic query responses based on geo location of querying user or a weighted round robin based routing policy. A ResourceRecordSet should only have either rrdata (static) or routing_policy (dynamic). An error is returned otherwise.
:param pulumi.Input[Sequence[pulumi.Input[str]]] rrdatas: As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples.
:param pulumi.Input[Sequence[pulumi.Input[str]]] signature_rrdatas: As defined in RFC 4034 (section 3.2).
:param pulumi.Input[int] ttl: Number of seconds that this ResourceRecordSet can be cached by resolvers.
:param pulumi.Input[str] type: The identifier of a supported record type. See the list of Supported DNS record types.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ResourceRecordSetInitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a new ResourceRecordSet.
:param str resource_name: The name of the resource.
:param ResourceRecordSetInitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResourceRecordSetInitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
client_operation_id: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
managed_zone: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
routing_policy: Optional[pulumi.Input[pulumi.InputType['RRSetRoutingPolicyArgs']]] = None,
rrdatas: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
signature_rrdatas: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
ttl: Optional[pulumi.Input[int]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResourceRecordSetInitArgs.__new__(ResourceRecordSetInitArgs)
__props__.__dict__["client_operation_id"] = client_operation_id
__props__.__dict__["kind"] = kind
if managed_zone is None and not opts.urn:
raise TypeError("Missing required property 'managed_zone'")
__props__.__dict__["managed_zone"] = managed_zone
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["routing_policy"] = routing_policy
__props__.__dict__["rrdatas"] = rrdatas
__props__.__dict__["signature_rrdatas"] = signature_rrdatas
__props__.__dict__["ttl"] = ttl
__props__.__dict__["type"] = type
super(ResourceRecordSet, __self__).__init__(
'google-native:dns/v1beta2:ResourceRecordSet',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ResourceRecordSet':
"""
Get an existing ResourceRecordSet resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ResourceRecordSetInitArgs.__new__(ResourceRecordSetInitArgs)
__props__.__dict__["kind"] = None
__props__.__dict__["name"] = None
__props__.__dict__["routing_policy"] = None
__props__.__dict__["rrdatas"] = None
__props__.__dict__["signature_rrdatas"] = None
__props__.__dict__["ttl"] = None
__props__.__dict__["type"] = None
return ResourceRecordSet(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def kind(self) -> pulumi.Output[str]:
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
For example, www.example.com.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="routingPolicy")
def routing_policy(self) -> pulumi.Output['outputs.RRSetRoutingPolicyResponse']:
"""
Configures dynamic query responses based on geo location of querying user or a weighted round robin based routing policy. A ResourceRecordSet should only have either rrdata (static) or routing_policy (dynamic). An error is returned otherwise.
"""
return pulumi.get(self, "routing_policy")
@property
@pulumi.getter
def rrdatas(self) -> pulumi.Output[Sequence[str]]:
"""
As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples.
"""
return pulumi.get(self, "rrdatas")
@property
@pulumi.getter(name="signatureRrdatas")
def signature_rrdatas(self) -> pulumi.Output[Sequence[str]]:
"""
As defined in RFC 4034 (section 3.2).
"""
return pulumi.get(self, "signature_rrdatas")
@property
@pulumi.getter
def ttl(self) -> pulumi.Output[int]:
"""
Number of seconds that this ResourceRecordSet can be cached by resolvers.
"""
return pulumi.get(self, "ttl")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The identifier of a supported record type. See the list of Supported DNS record types.
"""
return pulumi.get(self, "type")
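# --- Illustrative usage sketch (not part of the generated SDK) ---
# A minimal, hedged example of how the ResourceRecordSet resource above might be
# declared from a Pulumi program. The zone name, record name and IP address are
# assumptions chosen only for illustration; the import path follows the usual
# pulumi_google_native package layout.
#
#   import pulumi_google_native.dns.v1beta2 as dns
#
#   www = dns.ResourceRecordSet("www-record",
#       managed_zone="example-zone",   # required property (see _internal_init above)
#       name="www.example.com.",
#       type="A",
#       ttl=300,
#       rrdatas=["203.0.113.10"])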
|
from inspect import isclass, isfunction, ismodule
from functools import partial
is_func_or_partial = lambda f: isfunction(f) or isinstance(f, partial)
def write_docs_for_module(module, path, modules_to_skip=None,
generate_index=False):
if modules_to_skip is None:
modules_to_skip = {}
module_name = module.__name__
doc_dir = path / module_name
if not doc_dir.is_dir():
doc_dir.mkdir()
    for k, v in module.__dict__.items():
if ismodule(v):
print('Writing module {}'.format(module_name))
file_to_doc = docs_for_module(k, v, module_name,
generate_index=generate_index)
if len(file_to_doc) == 0 or k in modules_to_skip:
continue
mod_dir = doc_dir / k
if not mod_dir.is_dir():
mod_dir.mkdir()
for f_name in file_to_doc:
doc_file = mod_dir / (f_name + '.rst')
                with open(str(doc_file), 'w') as f:
f.write(file_to_doc[f_name])
def docs_for_module(module_name, module, package_name, generate_index=False):
file_to_doc = {}
    for k, v in module.__dict__.items():
if isclass(v):
file_to_doc[k] = generate_class_rst(module_name, k,
module.__name__, package_name)
elif is_func_or_partial(v):
file_to_doc[k] = generate_function_rst(module_name, k,
module.__name__,
package_name)
# only make an index if there is something to index
if generate_index and len(file_to_doc) > 0:
file_to_doc['index'] = generate_module_index(module_name, module)
return file_to_doc
def generate_module_index(module_name, module):
breadcrumb = '.. _api-{}-index:\n\n'.format(module_name)
title = ":mod:`{}`".format(module.__name__)
title = "{}\n{}\n".format(title, '=' * len(title))
toctree = "\n.. toctree::\n :maxdepth: 1\n\n "
items = [i for i, v in module.__dict__.items() if isclass(v) or
is_func_or_partial(v)]
return breadcrumb + title + toctree + "\n ".join(items)
def generate_class_rst(module_name, class_name, module, package_name):
breadcrumb = '.. _{}-{}-{}:\n\n'.format(package_name, module_name,
class_name)
current_module = '.. currentmodule:: {}\n\n'.format(module)
title = "{}\n{}\n".format(class_name, '=' * len(class_name))
body = (".. autoclass:: {}\n :members:\n :inherited-members:"
"\n :show-inheritance:\n".format(class_name))
return breadcrumb + current_module + title + body
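# A hedged illustration of the reStructuredText that generate_class_rst emits.
# The module and class names below ("menpowidgets.options",
# "AnimationOptionsWidget") are assumptions used only to show the shape of the
# output, and the indentation of the autoclass options is approximate:
#
#   .. _menpowidgets-options-AnimationOptionsWidget:
#
#   .. currentmodule:: menpowidgets.options
#
#   AnimationOptionsWidget
#   ======================
#   .. autoclass:: AnimationOptionsWidget
#       :members:
#       :inherited-members:
#       :show-inheritance: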
def generate_function_rst(module_name, function_name, module, package_name):
breadcrumb = '.. _{}-{}-{}:\n\n'.format(package_name, module_name,
function_name)
current_module = '.. currentmodule:: {}\n\n'.format(module)
title = "{}\n{}\n".format(function_name, '=' * len(function_name))
body = ".. autofunction:: {}\n".format(function_name)
return breadcrumb + current_module + title + body
if __name__ == '__main__':
from pathlib import Path
import menpowidgets
path = Path(__file__).parent / 'source' / 'api'
print('Writing to {}'.format(path))
# Flip generate_index to True to make index.rst files too!
write_docs_for_module(menpowidgets, path, generate_index=False,
modules_to_skip={'_version'})
|
import numpy as np
print('Cron Test')
mean_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
print(np.mean(mean_list))
print('Imported numpy successfully')
|
from dataclasses import dataclass
@dataclass
class P91HasUnit:
"""
Scope note:
This property shows the type of unit an instance of E54 Dimension was expressed in.
Examples:
- height of silver cup 232 (E54) has unit mm (E58)
In First Order Logic:
P91(x,y) ⊃ E54(x)
P91(x,y) ⊃ E58(y)
"""
URI = "http://erlangen-crm.org/current/P91_has_unit"
|
from django.shortcuts import get_object_or_404, get_list_or_404, render, redirect
from reportlab.pdfgen import canvas
from django.http import Http404, HttpResponse, HttpResponseRedirect, HttpResponseNotFound
#from utils import render_to_pdf
from datetime import date, datetime
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.http.response import JsonResponse
from io import BytesIO
from django.template.loader import get_template
from xhtml2pdf import pisa
from django.core.mail import EmailMessage
#from .models import Content, Contacts, Appointments
from .models import *
from apimaster.models import *
from django.contrib.auth import get_user_model
from django.contrib import messages
#from django.contrib.messages import constants as messages
#from apimaster.models import UserProfiles
from decimal import Decimal
from hospitalapp.settings import MEDIA_ROOT, MEDIA_URL
from django.db import connection
# login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django import template
from django.utils.safestring import mark_safe
from django.db.models import Sum
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
IMAGE_FILE_TYPES = ['png', 'jpg', 'jpeg']
MESSAGE_TAGS = {
messages.DEBUG: 'alert-info',
messages.INFO: 'alert-info',
messages.SUCCESS: 'alert-success',
messages.WARNING: 'alert-warning',
messages.ERROR: 'alert-danger danger',
}
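# Note: Django's messages framework only honours MESSAGE_TAGS when it is defined
# in settings.py; the copy here mainly documents the Bootstrap alert classes the
# templates are expected to use. A template would typically render the flashed
# messages roughly like this (illustrative sketch):
#
#   {% for message in messages %}
#     <div class="alert {{ message.tags }}">{{ message }}</div>
#   {% endfor %}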
def sendMail(request):
subject = "domain mail"
body = "This mail for informing to your domail."
email = EmailMessage(subject, body, to=['[email protected]'])
email.send()
return render(request, 'home.html')
@login_required
def index(request):
#total_appointments = Appointments.objects.filter(field_name__isnull=True).aggregate(Sum('field_name'))
total_appointments = Appointments.objects.count()
total_doctors = UserProfiles.objects.filter(occupation='doctor').count()
total_treatments = Treatments.objects.count()
title = 'Dashboard'
today = datetime.now().date()
#return HttpResponse('<h4>index</h4>')
context = {
'title': title,
"total_appointments" : total_appointments,
"total_treatments" : total_treatments,
"total_doctors" : total_doctors,
"today":today,
}
return render(request, "dashboard.html", context)
@login_required
def dashboard(request):
return render(request, 'dashboard.html')
#return HttpResponse('<h4>Home</h4>')
@login_required
def getList(request):
if request.method == 'GET':
User = get_user_model()
users = User.objects.all()
# Contacts
xitems = Contacts.objects.filter(is_deleted=False)
items_alls = Contacts.objects.all()
p = Paginator(users, 10)
page_number = request.GET.get('page')
try:
page_obj = p.get_page(page_number)
except PageNotAnInteger:
page_obj = p.page(1)
except EmptyPage:
page_obj = p.page(p.num_pages)
title='view'
today = datetime.now().date()
context = {'title': title, 'items': page_obj, 'today':today,"items_alls" : items_alls}
messages.success(request, 'Welcome!')
# print("Error: missing one of the libraries (numpy, pyfits, scipy, matplotlib)")
# variable = "Yogesh Soni"
# return HttpResponse(users)
#====================================================================
# messages.error(request, "Something went wrong!")
# messages.success(request, 'Your profile was successfully updated!')
# messages.error(request, "Your profile is updated successfully!")
#====================================================================
# ======== Mail Send ==================================
# subject='SubjecT'
# sendemail('yogeshsoni',subject,'EmailMessage text')
# ======== Mail Send ==================================
#return HttpResponse('<h4>User List</h4>')
return render(request, "user_list.html", context)
@login_required
def getView(request, id):
if request.method == 'GET':
#item = Contacts.objects.filter(id=id)
title='Show'
try:
            item = UserProfiles.objects.get(user_id=id)
#item = get_object_or_404(UserProfiles, user_id=id)
#return HttpResponse(item.email)
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "contact_details.html", {'title': title, "item" : item})
@login_required
def postStore(request):
if request.method == 'POST':
obj = Contacts()
# obj.name = form.cleaned_data['name']
# obj.name = request.POST.get('name')
# obj.mobile = request.POST.get('mobile')
# obj.email = request.POST.get('email')
# obj.subject = request.POST.get('subject')
# obj.message = request.POST.get('message')
obj.name = request.POST['name']
obj.mobile = request.POST['mobile']
obj.email = request.POST['email']
obj.subject = request.POST['subject']
obj.message = request.POST['message']
obj.save()
return HttpResponseRedirect('/')
#return redirect('/')
#==================== User
@login_required
def getMyProfile(request):
if request.method == 'GET':
User = get_user_model()
users = User.objects.all()
item = UserProfiles.objects.get(pk=1)
title='view'
return render(request, "users/my_profile.html", {'title': title, "item" : item})
@login_required
def postMyProfile(request, id):
if request.method == 'POST':
obj = UserProfiles.objects.get(pk=id)
obj.name = request.POST['name']
obj.email = request.POST['email']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
obj.address = request.POST['address']
        try:
            obj.save()
            messages.success(request, 'Successfully updated!')
        except Exception:
            messages.error(request, "Something went wrong!")
return HttpResponseRedirect('/hospital/doctors')
#==================== Doctors ==============================================================================
@login_required
def getDoctors(request):
if request.method == 'GET':
User = get_user_model()
users = User.objects.all()
items = UserProfiles.objects.filter(occupation='doctor')
#items = UserProfiles.objects.all()
title='view'
#messages.success(request, 'Welcome!')
#return JsonResponse(items, safe=False)
#return HttpResponse(items)
return render(request, "doctors.html", {'title': title, "items" : items})
@login_required
def getCreateDoctor(request):
if request.method == 'GET':
title='Create Doctor'
return render(request, "doctor_create.html", {'title': title})
@login_required
def postDoctors(request):
if request.method == 'POST':
obj = UserProfiles()
obj.name = request.POST['name']
obj.occupation = 'doctor'
obj.email = request.POST['email']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
obj.address = request.POST['address']
obj.dob = request.POST['dob']
#obj.description = 'demo description'
#obj.amount = Decimal(request.POST['amount'])
obj.save()
return HttpResponseRedirect('/hospital/doctors')
@login_required
def postDoctorAuth(request):
if request.method == 'POST':
obj = UserProfiles()
obj.name = request.POST['name']
obj.email = request.POST['email']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
obj.address = request.POST['address']
#obj.description = 'demo description'
#obj.amount = Decimal(request.POST['amount'])
obj.save()
return HttpResponseRedirect('/hospital/doctors')
@login_required
def getDoctorStatus(request, id):
if request.method == 'GET':
obj = UserProfiles.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/doctors')
@login_required
def getDoctorDetails(request, id):
if request.method == 'GET':
title='Show Appointment Details'
serial_no = getGenerateRefKey('UserProfiles', 'DOC')
current_user = request.user.id
try:
#item = UserProfiles.objects.get(id=id)[0]
#item = UserProfiles.objects.filter(id=id,occupation='doctor').first()
item = get_object_or_404(UserProfiles, id=id, occupation='doctor')
if item:
pass
if item.occupation == 'doctor':
user_id = item.users_id
# Section A
council = UsersRegistrationCouncils.objects.filter(user_id=user_id).first()
education = UsersEducations.objects.filter(user_id=user_id)
clinic = UsersClinics.objects.filter(user_id=user_id).first()
# Section B
IdentityProof = UsersIdentityProofs.objects.filter(user_id=user_id)
Registration = MedicalRegistrationProofs.objects.filter(user_id=user_id)
Establishment = EstablishmentProofs.objects.filter(user_id=user_id)
# Section C
MapLocation = MapLocations.objects.filter(user_id=user_id)
EstablishmentTiming = EstablishmentTimings.objects.filter(user_id=user_id)
ConsultationFee = ConsultationFees.objects.filter(user_id=user_id).first()
DoctorVerification = DoctorVerifications.objects.filter(user_id=user_id)
context = {
'title': title,
"serial_no":serial_no,
"current_user":current_user,
"item" : item,
"council":council,
"education":education,
"clinic":clinic,
"identityproof":IdentityProof,
"registration":Registration,
"establishment":Establishment,
"maplocation":MapLocation,
"establishmenttiming":EstablishmentTiming,
"consultationfee":ConsultationFee,
"verifications":DoctorVerification,
}
else:
return HttpResponse({"You don\'t have permission to access this resource."}, status=401)
else:
return HttpResponse({"Error": "Record does not exist"}, status=404)
#return HttpResponse(status=404)
except ObjectDoesNotExist:
now = datetime.now()
html = "<html><body><h4>Record does not exist.</h4><p>Time:%s</p></body></html>" % now
return HttpResponse(html)
#return HttpResponseNotFound('<h4 style="text-align: center;margin-top: 10%;">Record does not exist</h4>')
#raise Http404("Record does not exist")
#return HttpResponse({"Error": "Record does not exist","Error": "Record does not exist"}, status=404)
#return HttpResponse(status=401)
return render(request, "doctor_details.html", context)
@login_required
def getDoctorEdit(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = UserProfiles.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "doctor_edit.html", {'title': title, "item" : item})
@login_required
def postDoctorUpdate(request, id):
if request.method == 'POST':
obj = UserProfiles.objects.get(pk=id)
obj.name = request.POST['name']
obj.email = request.POST['email']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
obj.address = request.POST['address']
        try:
            obj.save()
            messages.success(request, 'Successfully updated!')
        except Exception:
            messages.error(request, "Something went wrong!")
return HttpResponseRedirect('/hospital/doctors')
@login_required
def getDoctorDelete(request, id):
if request.method == 'GET':
obj = UserProfiles.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/doctors')
@login_required
def getDoctoVerification(request):
if request.method == 'POST':
id = request.POST['id']
userid = request.POST['user_id']
obj = UserProfiles.objects.get(pk=id)
dobj = DoctorVerifications()
name = request.POST['name']
dobj.name = name
dobj.message = request.POST['message']
dobj.user_id = obj.users_id
dobj.save()
if name == 'Aprooved':
obj.verification = 1
obj.verification_text = name
obj.save()
            messages.success(request, 'Profile Approved!')
elif name == 'Rejected':
obj.verification = 2
obj.verification_text = name
obj.save()
messages.success(request, 'Profile Rejected!')
else:
obj.verification = 0
obj.verification_text = name
obj.save()
messages.success(request, f'Profile {name}!')
#return redirect(f'{redirect_url}?{parameters}')
return HttpResponseRedirect(f'/hospital/doctor/{id}')
#==================== Patients ==============================================================================
@login_required
def getPatients(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
#items = UserProfiles.objects.all()
items = UserProfiles.objects.filter(occupation='patient')
title='view'
return render(request, "patients/patients.html", {'title': title, "items" : items})
@login_required
def getCreatePatient(request):
if request.method == 'GET':
title='Create Doctor'
return render(request, "patients/patient_create.html", {'title': title})
@login_required
def postStorePatient(request):
if request.method == 'POST':
obj = UserProfiles()
obj.name = request.POST['name']
obj.email = request.POST['email']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
obj.address = request.POST['address']
obj.description = request.POST['description']
#obj.amount = Decimal(request.POST['amount'])
obj.save()
return HttpResponseRedirect('/hospital/patients')
@login_required
def postAuthPatient(request):
if request.method == 'POST':
obj = UserProfiles()
obj.name = request.POST['name']
obj.email = request.POST['email']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
obj.address = request.POST['address']
#obj.description = 'demo description'
#obj.amount = Decimal(request.POST['amount'])
obj.save()
return HttpResponseRedirect('/hospital/patients')
@login_required
def getStatusPatient(request, id):
if request.method == 'GET':
obj = UserProfiles.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/patients')
@login_required
def getViewPatient(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = UserProfiles.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "patients/patient_view.html", {'title': title, "item" : item})
@login_required
def getEditPatient(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = UserProfiles.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "patients/patient_edit.html", {'title': title, "item" : item})
@login_required
def postUpdatePatient(request, id):
if request.method == 'POST':
obj = UserProfiles.objects.get(pk=id)
obj.name = request.POST['name']
obj.email = request.POST['email']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
obj.address = request.POST['address']
        try:
            obj.save()
            messages.success(request, 'Successfully updated!')
        except Exception:
            messages.error(request, "Something went wrong!")
return HttpResponseRedirect('/hospital/patients')
@login_required
def getDeletePatient(request, id):
if request.method == 'GET':
obj = UserProfiles.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/patients')
#==================== Treatments ==============================================================================
# @login_required
# @method_decorator([login_required], name='dispatch')
@login_required
def getTreatments(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
items = Treatments.objects.all()
p = Paginator(items, 10)
page_number = request.GET.get('page')
try:
page_obj = p.get_page(page_number)
except PageNotAnInteger:
page_obj = p.page(1)
except EmptyPage:
page_obj = p.page(p.num_pages)
title='view'
today = datetime.now().date()
context = {'title': title,'items': page_obj,'today':today}
return render(request, "treatments/treatments.html", context)
# def index(request):
# posts = Post.objects.all() # fetching all post objects from database
# p = Paginator(posts, 5) # creating a paginator object
# # getting the desired page number from url
# page_number = request.GET.get('page')
# try:
# page_obj = p.get_page(page_number) # returns the desired page object
# except PageNotAnInteger:
# # if page_number is not an integer then assign the first page
# page_obj = p.page(1)
# except EmptyPage:
# # if page is empty then return last page
# page_obj = p.page(p.num_pages)
# context = {'page_obj': page_obj}
# # sending the page object to index.html
# return render(request, 'index.html', context)
@login_required
def getCreateTreatments(request):
if request.method == 'GET':
title='Create Treatments'
return render(request, "treatments/treatment_create.html", {'title': title})
@login_required
def postStoreTreatments(request):
if request.method == 'POST':
obj = Treatments()
obj.name = request.POST['name']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully Inserted!')
return HttpResponseRedirect('/hospital/treatments')
@login_required
def hotel_image_view(request):
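    # Assumes a TreatmentForm ModelForm exists (e.g. in this app's forms.py) and a
    # URL named 'success' is configured; neither is imported or defined in this module.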
if request.method == 'POST':
form = TreatmentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('success')
else:
form = TreatmentForm()
return render(request, 'hotel_image_form.html', {'form' : form})
@login_required
def getStatusTreatments(request, id):
if request.method == 'GET':
obj = Treatments.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/treatments')
@login_required
def getViewTreatments(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = Treatments.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "treatments/treatment_view.html", {'title': title, "item" : item})
@login_required
def getEditTreatments(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = Treatments.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "treatments/treatment_edit.html", {'title': title, "item" : item})
@login_required
def postUpdateTreatments(request, id):
if request.method == 'POST':
obj = Treatments.objects.get(pk=id)
obj.name = request.POST['name']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully updated!')
return HttpResponseRedirect('/hospital/treatments')
@login_required
def getDeleteTreatments(request, id):
if request.method == 'GET':
obj = Treatments.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/treatments')
#==================== Treatment Categories ==============================================================================
@login_required
def getTreatmentCategories(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
items = TreatmentCategories.objects.all()
p = Paginator(items, 10)
page_number = request.GET.get('page')
try:
page_obj = p.get_page(page_number)
except PageNotAnInteger:
page_obj = p.page(1)
except EmptyPage:
page_obj = p.page(p.num_pages)
title='view'
today = datetime.now().date()
context = {'title': title, 'items': page_obj, 'today':today}
return render(request, "treatments_categories/treatments_categories.html", context)
@login_required
def getCreateTreatmentCategories(request):
if request.method == 'GET':
title='Create Treatments'
return render(request, "treatments_categories/treatments_categories_create.html", {'title': title})
@login_required
def postStoreTreatmentCategories(request):
if request.method == 'POST':
obj = Treatments()
obj.name = request.POST['name']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully Inserted!')
return HttpResponseRedirect('/hospital/treatment-categories')
@login_required
def getStatusTreatmentCategories(request, id):
if request.method == 'GET':
obj = Treatments.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/treatment-categories')
@login_required
def getViewTreatmentCategories(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = Treatments.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "treatments/treatments_categories_view.html", {'title': title, "item" : item})
@login_required
def getEditTreatmentCategories(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = Treatments.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "treatments/treatments_categories_edit.html", {'title': title, "item" : item})
@login_required
def postUpdateTreatmentCategories(request, id):
if request.method == 'POST':
obj = Treatments.objects.get(pk=id)
obj.name = request.POST['name']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully updated!')
return HttpResponseRedirect('/hospital/treatment-categories')
@login_required
def getDeleteTreatmentCategories(request, id):
if request.method == 'GET':
obj = Treatments.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/treatment-categories')
# ===== appointment =================================================================================
@login_required
def getAppointments(request):
if request.method == 'GET':
data = Appointments.objects.all()
#xitems = Contacts.objects.filter(is_deleted=False)
title='view'
return render(request, "appointments.html", {'title': title, "items" : data})
@login_required
def getAppointmentStatus(request, id):
if request.method == 'GET':
apobj = Appointments.objects.get(pk=id)
apobj.amount = 0.00
if apobj.status == 0:
apobj.status = 1
apobj.save()
else:
apobj.status = 0
apobj.save()
return HttpResponseRedirect('/hospital/appointments')
@login_required
def getAppointmentDetails(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = Appointments.objects.filter(id=id)[0]
#item = Appointments.objects.get(pk=id)
#item = Appointments.objects.filter(id=1)[0]
#item = Appointments.objects.filter(pk=id)
#item = get_object_or_404(Appointments, user_id=id)
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "appointment_details.html", {'title': title, "item" : item})
@login_required
def getCreateAppointments(request):
if request.method == 'GET':
title='Create Doctor'
return render(request, "create_appointment.html", {'title': title})
@login_required
def postAppointments(request):
if request.method == 'POST':
obj = Appointments()
obj.name = request.POST['name']
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
#obj.age = request.POST['age']
obj.address = request.POST['address']
obj.doctor_id = request.POST['doctor_id']
#obj.doctor_appointment_at = request.POST['doctor_appointment_at']
obj.illness_information = request.POST['illness_information']
obj.description = request.POST['description']
#obj.amount = Decimal(request.POST['amount'])
obj.save()
return HttpResponseRedirect('/hospital/appointments')
@login_required
def getAppointmentEdit(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = Appointments.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "appointment_edit.html", {'title': title, "item" : item})
@login_required
def postAppointmentUpdate(request, id):
if request.method == 'POST':
obj = Appointments.objects.get(pk=id)
obj.name = request.POST['name']
#obj.amount = 12.00
obj.mobile = request.POST['mobile']
obj.sex = request.POST['sex']
#obj.age = request.POST['age']
obj.address = request.POST['address']
obj.doctor_id = request.POST['doctor_id']
#obj.doctor_appointment_at = request.POST['doctor_appointment_at']
obj.illness_information = request.POST['illness_information']
obj.description = request.POST['description']
obj.amount = Decimal(request.POST['amount'])
#obj.save()
# if obj.save():
# messages.success(request, 'Successfully updated!')
# else:
# messages.error(request, "Something went wrong!")
#result = obj.save()
        try:
            obj.save()
            messages.success(request, 'Successfully updated!')
        except Exception:
            messages.error(request, "Something went wrong!")
#return JsonResponse('result', safe=False)
#return HttpResponse(result)
return HttpResponseRedirect('/hospital/appointments')
@login_required
def getAppointmentx(request):
if request.method == 'GET':
User = get_user_model()
users = User.objects.all()
#xitems = Contacts.objects.filter(is_deleted=False)
title='view'
return render(request, "doctors.html", {'title': title, "items" : users})
#==================== Plans ==============================================================================
@login_required
def getPlans(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
today = datetime.now().date()
items = SubscriptionPlans.objects.all()
title='view'
return render(request, "plans/plans.html", {'title': title, "items" : items, "today":today})
@login_required
def getCreatePlans(request):
if request.method == 'GET':
title='Create Treatments'
return render(request, "plans/plan_create.html", {'title': title})
@login_required
def postStorePlans(request):
if request.method == 'POST':
obj = SubscriptionPlans()
obj.name = request.POST['name']
obj.amount = request.POST['amount']
obj.expiry_in_months = request.POST['expiry_in_months']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully Inserted!')
return HttpResponseRedirect('/hospital/plans')
@login_required
def getStatusPlans(request, id):
if request.method == 'GET':
obj = SubscriptionPlans.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/plans')
@login_required
def getViewPlans(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = SubscriptionPlans.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "plans/plan_view.html", {'title': title, "item" : item})
@login_required
def getEditPlans(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = SubscriptionPlans.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "plans/plan_edit.html", {'title': title, "item" : item})
@login_required
def postUpdatePlans(request, id):
if request.method == 'POST':
obj = SubscriptionPlans.objects.get(pk=id)
obj.name = request.POST['name']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
        try:
            obj.save()
            messages.success(request, 'Successfully updated!')
        except Exception:
            messages.error(request, "Something went wrong!")
return HttpResponseRedirect('/hospital/plans')
@login_required
def getDeletePlans(request, id):
if request.method == 'GET':
obj = SubscriptionPlans.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/plans')
# ===== Plan Subscriptions =================================================================================
@login_required
def getPlanSubscriptions(request):
if request.method == 'GET':
# SubscriptionHistory , hospital_subscriptionplans
items = SubscriptionHistory.objects.all()
#items = SubscriptionHistory.objects.filter(is_deleted=0).distinct().prefetch_related("plan_id").order_by("id")[:20]
#items = SubscriptionHistory.objects.filter(is_deleted=0).select_related('subscriptionplans')
xitems = SubscriptionPlans.objects.select_related('subscriptionplans').all()
#cursor = connection.cursor()
#cursor.execute("select a.id as id, b.name from hospital_subscriptionhistory a inner join hospital_subscriptionplans b on a.plan_id=b.id")
#cursor.execute("select a.id as id, b.name as name from hospital_subscriptionhistory a join hospital_subscriptionplans b on a.plan_id=b.id")
# cursor.execute("select * from hospital_subscriptionhistory")
# row = cursor.fetchone()
# print('Hi')
# print(row)
# var_dump(row)
title='view'
today = datetime.now().date()
return render(request, "plans/plan_subscriptions.html", {'title': title, "items" : items,"today":today})
@login_required
def getStatusPlanSubscriptions(request, id):
if request.method == 'GET':
obj = SubscriptionHistory.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/plan-subscriptions')
@login_required
def getViewPlanSubscriptions(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = SubscriptionHistory.objects.filter(id=id)[0]
plans = SubscriptionPlans.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "plans/plan_subscriptions_view.html", {'title': title, "item" : item, "plans" : plans})
# ===== masters =================================================================================
@login_required
def getCategories(request):
if request.method == 'GET':
User = get_user_model()
users = User.objects.all()
#xitems = Contacts.objects.filter(is_deleted=False)
title='view'
return render(request, "category.html", {'title': title, "items" : users})
@login_required
def getSubCategories(request):
if request.method == 'GET':
User = get_user_model()
users = User.objects.all()
#xitems = Contacts.objects.filter(is_deleted=False)
title='view'
return render(request, "subcategory.html", {'title': title, "items" : users})
# ===== Settings =================================================================================
@login_required
def getSettings(request):
if request.method == 'GET':
User = get_user_model()
users = User.objects.all()
#xitems = Contacts.objects.filter(is_deleted=False)
title='view'
return render(request, "settings/setting.html", {'title': title, "items" : users})
#=====================================================================================
# Website URL and Functions
#=====================================================================================
def getContact(request):
print(request.GET)
return render(request, "index.html")
def editContact(request):
pass
def updateContact(request):
pass
def getDeiele(request, id):
#if request.method == 'Post':
if request.method == 'GET':
obj = Contacts.objects.get(pk=id)
obj.is_deleted = 1
obj.save()
# obj.delete()
return HttpResponseRedirect('/view-contact')
#==================== Blogs ==============================================================================
@login_required
def getBlogs(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
items = Blogs.objects.all()
title='view'
return render(request, "website/blogs/blog.html", {'title': title, "items" : items})
@login_required
def getCreateBlogs(request):
if request.method == 'GET':
title='Create Treatments'
return render(request, "website/blogs/blog_create.html", {'title': title})
@login_required
def postStoreBlogs(request):
if request.method == 'POST':
obj = Blogs()
obj.title = request.POST['title']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully Inserted!')
return HttpResponseRedirect('/hospital/blogs')
@login_required
def getStatusBlogs(request, id):
if request.method == 'GET':
obj = Blogs.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/blogs')
@login_required
def getViewBlogs(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = Blogs.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/blogs/blog_view.html", {'title': title, "item" : item})
@login_required
def getEditBlogs(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = Blogs.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/blogs/blog_edit.html", {'title': title, "item" : item})
@login_required
def postUpdateBlogs(request, id):
if request.method == 'POST':
obj = Blogs.objects.get(pk=id)
obj.title = request.POST['title']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully updated!')
return HttpResponseRedirect('/hospital/blogs')
@login_required
def getDeleteBlogs(request, id):
if request.method == 'GET':
obj = Blogs.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/blogs')
#==================== Banners ==============================================================================
@login_required
def getBanners(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
items = Banners.objects.all()
title='view'
return render(request, "website/banners/banner.html", {'title': title, "items" : items})
@login_required
def getCreateBanners(request):
if request.method == 'GET':
title='Create Treatments'
return render(request, "website/banners/banner_create.html", {'title': title})
@login_required
def postStoreBanners(request):
if request.method == 'POST':
obj = Banners()
obj.title = request.POST['title']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully Inserted!')
return HttpResponseRedirect('/hospital/banners')
@login_required
def getStatusBanners(request, id):
if request.method == 'GET':
obj = Banners.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/banners')
@login_required
def getViewBanners(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = Banners.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/banners/banner_view.html", {'title': title, "item" : item})
@login_required
def getEditBanners(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = Banners.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/banners/banner_edit.html", {'title': title, "item" : item})
@login_required
def postUpdateBanners(request, id):
if request.method == 'POST':
obj = Banners.objects.get(pk=id)
obj.title = request.POST['title']
obj.description = request.POST['description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully updated!')
return HttpResponseRedirect('/hospital/banners')
@login_required
def getDeleteBanners(request, id):
if request.method == 'GET':
obj = Banners.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/banners')
#==================== pages ==============================================================================
@login_required
def getPages(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
items = Pages.objects.all()
title='view'
return render(request, "website/pages/pages.html", {'title': title, "items" : items})
@login_required
def getCreatePages(request):
if request.method == 'GET':
title='Create Treatments'
return render(request, "website/pages/page_create.html", {'title': title})
@login_required
def postStorePages(request):
if request.method == 'POST':
obj = Pages()
obj.name = request.POST['name']
obj.title = request.POST['title']
obj.description = request.POST['description']
obj.long_description = request.POST['long_description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully Inserted!')
return HttpResponseRedirect('/hospital/pages')
@login_required
def getStatusPages(request, id):
if request.method == 'GET':
obj = Pages.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/pages')
@login_required
def getViewPages(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = Pages.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/pages/page_view.html", {'title': title, "item" : item})
@login_required
def getEditPages(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = Pages.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/pages/page_edit.html", {'title': title, "item" : item})
@login_required
def postUpdatePages(request, id):
if request.method == 'POST':
obj = Pages.objects.get(pk=id)
obj.name = request.POST['name']
obj.title = request.POST['title']
obj.description = request.POST['description']
obj.long_description = request.POST['long_description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully updated!')
return HttpResponseRedirect('/hospital/pages')
@login_required
def getDeletePages(request, id):
if request.method == 'GET':
obj = Pages.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/pages')
#==================== Policy ==============================================================================
@login_required
def getPolicy(request):
if request.method == 'GET':
#items = UserProfiles.objects.filter(is_deleted=False)
items = Policies.objects.all()
title='view'
return render(request, "website/policy/policy.html", {'title': title, "items" : items})
@login_required
def getCreatePolicy(request):
if request.method == 'GET':
title='Create Treatments'
return render(request, "website/policy/policy_create.html", {'title': title})
@login_required
def postStorePolicy(request):
if request.method == 'POST':
obj = Policies()
obj.name = request.POST['name']
obj.title = request.POST['title']
obj.description = request.POST['description']
obj.long_description = request.POST['long_description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully Inserted!')
return HttpResponseRedirect('/hospital/policy')
@login_required
def getStatusPolicy(request, id):
if request.method == 'GET':
obj = Policies.objects.get(pk=id)
if obj.status == 0:
obj.status = 1
obj.save()
messages.success(request, 'Status Activated!')
else:
obj.status = 0
obj.save()
messages.error(request, 'Status Deactivated!')
return HttpResponseRedirect('/hospital/policy')
@login_required
def getViewPolicy(request, id):
if request.method == 'GET':
title='Show Appointment Details'
try:
item = Policies.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/policy/policy_view.html", {'title': title, "item" : item})
@login_required
def getEditPolicy(request, id):
if request.method == 'GET':
title='Edit Appointment Details'
try:
item = Policies.objects.filter(id=id)[0]
except ObjectDoesNotExist:
return HttpResponse(status=404)
return render(request, "website/policy/policy_edit.html", {'title': title, "item" : item})
@login_required
def postUpdatePolicy(request, id):
if request.method == 'POST':
obj = Policies.objects.get(pk=id)
obj.name = request.POST['name']
obj.title = request.POST['title']
obj.description = request.POST['description']
obj.long_description = request.POST['long_description']
if len(request.FILES) != 0:
obj.primary_image = request.FILES['primary_image']
obj.save()
messages.success(request, 'Successfully updated!')
return HttpResponseRedirect('/hospital/policy')
@login_required
def getDeletePolicy(request, id):
if request.method == 'GET':
obj = Policies.objects.get(pk=id)
if obj.is_deleted == 0:
obj.is_deleted = 1
obj.save()
messages.success(request, 'Deleted!')
return HttpResponseRedirect('/hospital/policy')
##########################################################################################
#def getpdfFun(self, request, *args, **kwargs):
#def getpdfFun(request, *args, **kwargs):
# data = {
# 'today': date.today(),
# 'amount': 39.99,
# 'customer_name': 'Cooper Mann',
# 'order_id': 1233434,
# }
# pdf = render_to_pdf('pdf/invoice.html', data)
# return HttpResponse(pdf, content_type='application/pdf')
def getGenerateRefKey(name, srt_key):
last_id = UserProfiles.objects.last().id
# res = Image.objects.aggregate(max_id=Max('pk'))
# last_id = res.get('max_id')
# last_id = 20
if last_id<9:
result = srt_key+'000'+str(last_id+1)
elif last_id<99 and last_id>=9:
result = srt_key+'00'+str(last_id+1)
elif last_id<999 and last_id>=99:
result = srt_key+'0'+str(last_id+1)
elif last_id>=999:
result = srt_key+str(last_id+1)
return result
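# Illustrative behaviour of getGenerateRefKey (assumes at least one UserProfiles
# row exists, otherwise .last() returns None and .id raises AttributeError):
# with last id 7 and prefix 'DOC' it returns 'DOC0008'; with last id 1041 it
# returns 'DOC1042'. The zero padding is equivalent to srt_key + str(last_id + 1).zfill(4).
#
#   serial_no = getGenerateRefKey('UserProfiles', 'DOC')   # e.g. 'DOC0008'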
def var_dump(var, prefix=''):
"""
You know you're a php developer when the first thing you ask for
when learning a new language is 'Where's var_dump?????'
"""
my_type = '[' + var.__class__.__name__ + '(' + str(len(var)) + ')]:'
print(prefix, my_type, sep='')
prefix += ' '
for i in var:
if type(i) in (list, tuple, dict, set):
var_dump(i, prefix)
else:
if isinstance(var, dict):
print(prefix, i, ': (', var[i].__class__.__name__, ') ', var[i], sep='')
else:
print(prefix, '(', i.__class__.__name__, ') ', i, sep='')
#====== end Test ===============================================================
|
#!/bin/python3
import math
import os
import random
import re
import sys
import bisect
def median(x):
lenx = len(x)
if lenx % 2 == 0:
return((x[lenx//2] + x[lenx//2 - 1])/2)
if lenx % 2 != 0:
return(x[lenx//2])
# Complete the activityNotifications function below.
def activityNotifications(expenditure, d):
count = 0
main = sorted(expenditure[:d])
med = median(main)
first = expenditure[0]
    for i in range(d, len(expenditure)):
        # Notify when the day's spending is at least twice the trailing d-day median.
        if expenditure[i] >= 2 * med:
            count += 1
        # Slide the window: drop the oldest entry and insert today's value in sorted order.
        main.remove(first)
        bisect.insort(main, expenditure[i])
        med = median(main)
        first = expenditure[i - d + 1]
return count
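# Hedged example based on the commonly cited sample for this problem: with d = 5
# and expenditure = [2, 3, 4, 2, 3, 6, 8, 4, 5], notifications are triggered on
# the days with spending 6 and 8, so the function returns 2.
#
#   assert activityNotifications([2, 3, 4, 2, 3, 6, 8, 4, 5], 5) == 2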
if __name__ == '__main__':
#fptr = open(os.environ['OUTPUT_PATH'], 'w')
nd = input().split()
n = int(nd[0])
d = int(nd[1])
expenditure = list(map(int, input().rstrip().split()))
result = activityNotifications(expenditure, d)
#fptr.write(str(result) + '\n')
# fptr.close()
print(result)
|
"""This is an experimental implementation of cc_shared_library.
We may change the implementation at any moment or even delete this file. Do not
rely on this. It requires bazel >1.2 and passing the flag
--experimental_cc_shared_library
"""
# TODO(rostam): Delete this module after the release of Bazel built-in cc_shared_library.
load("@bazel_skylib//rules:common_settings.bzl", "BuildSettingInfo")
load(
"//third_party/bazel_rules/rules_cc/examples:experimental_cc_shared_library.bzl",
"CcSharedLibraryInfo",
"CcSharedLibraryPermissionsInfo",
)
def find_cc_toolchain(ctx):
"""Returns the current `CcToolchainInfo`.
Args:
ctx: The rule context for which to find a toolchain.
Returns:
A CcToolchainInfo.
"""
# Check the incompatible flag for toolchain resolution.
if hasattr(cc_common, "is_cc_toolchain_resolution_enabled_do_not_use") and cc_common.is_cc_toolchain_resolution_enabled_do_not_use(ctx = ctx):
if not "@bazel_tools//tools/cpp:toolchain_type" in ctx.toolchains: # copybara-use-repo-external-label
fail("In order to use find_cc_toolchain, your rule has to depend on C++ toolchain. See find_cc_toolchain.bzl docs for details.")
toolchain_info = ctx.toolchains["@bazel_tools//tools/cpp:toolchain_type"] # copybara-use-repo-external-label
if hasattr(toolchain_info, "cc_provider_in_toolchain") and hasattr(toolchain_info, "cc"):
return toolchain_info.cc
return toolchain_info
# Fall back to the legacy implicit attribute lookup.
if hasattr(ctx.attr, "_cc_toolchain"):
return ctx.attr._cc_toolchain[cc_common.CcToolchainInfo]
# We didn't find anything.
fail("In order to use find_cc_toolchain, your rule has to depend on C++ toolchain. See find_cc_toolchain.bzl docs for details.")
# TODO(#5200): Add export_define to library_to_link and cc_library
# Add this as a tag to any target that can be linked by more than one
# cc_shared_library because it doesn't have static initializers or anything
# else that may cause issues when being linked more than once. This should be
# used sparingly after making sure it's safe to use.
LINKABLE_MORE_THAN_ONCE = "LINKABLE_MORE_THAN_ONCE"
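# Hedged BUILD-file sketch: a target opts in by carrying the tag (the target name
# and source file are illustrative assumptions):
#
#   cc_library(
#       name = "utils",
#       srcs = ["utils.cc"],
#       tags = ["LINKABLE_MORE_THAN_ONCE"],
#   )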
GraphNodeInfo = provider(
"Nodes in the graph of shared libraries.",
fields = {
"children": "Other GraphNodeInfo from dependencies of this target",
"label": "Label of the target visited",
"linkable_more_than_once": "Linkable into more than a single cc_shared_library",
},
)
def _separate_static_and_dynamic_link_libraries(
direct_children,
can_be_linked_dynamically,
preloaded_deps_direct_labels):
node = None
all_children = list(direct_children)
link_statically_labels = {}
link_dynamically_labels = {}
seen_labels = {}
# Horrible I know. Perhaps Starlark team gives me a way to prune a tree.
for i in range(2147483647):
if i == len(all_children):
break
node = all_children[i]
node_label = str(node.label)
if node_label in seen_labels:
continue
seen_labels[node_label] = True
if node_label in can_be_linked_dynamically:
link_dynamically_labels[node_label] = True
elif node_label not in preloaded_deps_direct_labels:
link_statically_labels[node_label] = node.linkable_more_than_once
all_children.extend(node.children)
return (link_statically_labels, link_dynamically_labels)
def _create_linker_context(ctx, linker_inputs):
return cc_common.create_linking_context(
linker_inputs = depset(linker_inputs, order = "topological"),
)
def _merge_cc_shared_library_infos(ctx):
dynamic_deps = []
transitive_dynamic_deps = []
for dep in ctx.attr.dynamic_deps:
if dep[CcSharedLibraryInfo].preloaded_deps != None:
fail("{} can only be a direct dependency of a " +
" cc_binary because it has " +
"preloaded_deps".format(str(dep.label)))
dynamic_dep_entry = (
dep[CcSharedLibraryInfo].exports,
dep[CcSharedLibraryInfo].linker_input,
dep[CcSharedLibraryInfo].link_once_static_libs,
)
dynamic_deps.append(dynamic_dep_entry)
transitive_dynamic_deps.append(dep[CcSharedLibraryInfo].dynamic_deps)
return depset(direct = dynamic_deps, transitive = transitive_dynamic_deps)
def _build_exports_map_from_only_dynamic_deps(merged_shared_library_infos):
exports_map = {}
for entry in merged_shared_library_infos.to_list():
exports = entry[0]
linker_input = entry[1]
for export in exports:
if export in exports_map:
fail("Two shared libraries in dependencies export the same symbols. Both " +
exports_map[export].libraries[0].dynamic_library.short_path +
" and " + linker_input.libraries[0].dynamic_library.short_path +
" export " + export)
exports_map[export] = linker_input
return exports_map
def _build_link_once_static_libs_map(merged_shared_library_infos):
link_once_static_libs_map = {}
for entry in merged_shared_library_infos.to_list():
link_once_static_libs = entry[2]
linker_input = entry[1]
for static_lib in link_once_static_libs:
if static_lib in link_once_static_libs_map:
fail("Two shared libraries in dependencies link the same " +
" library statically. Both " + link_once_static_libs_map[static_lib] +
" and " + str(linker_input.owner) +
" link statically" + static_lib)
link_once_static_libs_map[static_lib] = str(linker_input.owner)
return link_once_static_libs_map
def _wrap_static_library_with_alwayslink(ctx, feature_configuration, cc_toolchain, linker_input):
new_libraries_to_link = []
for old_library_to_link in linker_input.libraries:
# TODO(#5200): This will lose the object files from a library to link.
# Not too bad for the prototype but as soon as the library_to_link
# constructor has object parameters this should be changed.
if old_library_to_link.static_library == None and old_library_to_link.pic_static_library == None:
new_libraries_to_link.append(old_library_to_link)
continue
new_library_to_link = cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
static_library = old_library_to_link.static_library,
pic_static_library = old_library_to_link.pic_static_library,
alwayslink = True,
)
new_libraries_to_link.append(new_library_to_link)
return cc_common.create_linker_input(
owner = linker_input.owner,
libraries = depset(direct = new_libraries_to_link),
user_link_flags = depset(direct = linker_input.user_link_flags),
additional_inputs = depset(direct = linker_input.additional_inputs),
)
def _check_if_target_under_path(value, pattern):
if pattern.workspace_name != value.workspace_name:
return False
if pattern.name == "__pkg__":
return pattern.package == value.package
if pattern.name == "__subpackages__":
return _same_package_or_above(pattern, value)
return pattern.package == value.package and pattern.name == value.name
def _check_if_target_can_be_exported(target, current_label, permissions):
if permissions == None:
return True
if (target.workspace_name != current_label.workspace_name or
_same_package_or_above(current_label, target)):
return True
matched_by_target = False
for permission in permissions:
for permission_target in permission[CcSharedLibraryPermissionsInfo].targets:
if _check_if_target_under_path(target, permission_target):
return True
return False
def _check_if_target_should_be_exported_without_filter(target, current_label, permissions):
return _check_if_target_should_be_exported_with_filter(target, current_label, None, permissions)
def _check_if_target_should_be_exported_with_filter(target, current_label, exports_filter, permissions):
should_be_exported = False
if exports_filter == None:
should_be_exported = True
else:
for export_filter in exports_filter:
export_filter_label = current_label.relative(export_filter)
if _check_if_target_under_path(target, export_filter_label):
should_be_exported = True
break
if should_be_exported:
if _check_if_target_can_be_exported(target, current_label, permissions):
return True
else:
matched_by_filter_text = ""
if exports_filter:
matched_by_filter_text = " (matched by filter) "
fail(str(target) + matched_by_filter_text +
" cannot be exported from " + str(current_label) +
" because it's not in the same package/subpackage and the library " +
"doesn't have the necessary permissions. Use cc_shared_library_permissions.")
return False
def _filter_inputs(
ctx,
feature_configuration,
cc_toolchain,
transitive_exports,
preloaded_deps_direct_labels,
link_once_static_libs_map):
linker_inputs = []
link_once_static_libs = []
graph_structure_aspect_nodes = []
dependency_linker_inputs = []
direct_exports = {}
for export in ctx.attr.roots:
direct_exports[str(export.label)] = True
dependency_linker_inputs.extend(export[CcInfo].linking_context.linker_inputs.to_list())
graph_structure_aspect_nodes.append(export[GraphNodeInfo])
can_be_linked_dynamically = {}
for linker_input in dependency_linker_inputs:
owner = str(linker_input.owner)
if owner in transitive_exports:
can_be_linked_dynamically[owner] = True
(link_statically_labels, link_dynamically_labels) = _separate_static_and_dynamic_link_libraries(
graph_structure_aspect_nodes,
can_be_linked_dynamically,
preloaded_deps_direct_labels,
)
exports = {}
owners_seen = {}
for linker_input in dependency_linker_inputs:
owner = str(linker_input.owner)
if owner in owners_seen:
continue
owners_seen[owner] = True
if owner in link_dynamically_labels:
dynamic_linker_input = transitive_exports[owner]
linker_inputs.append(dynamic_linker_input)
elif owner in link_statically_labels:
if owner in link_once_static_libs_map:
fail(owner + " is already linked statically in " +
link_once_static_libs_map[owner] + " but not exported")
if owner in direct_exports:
wrapped_library = _wrap_static_library_with_alwayslink(
ctx,
feature_configuration,
cc_toolchain,
linker_input,
)
if not link_statically_labels[owner]:
link_once_static_libs.append(owner)
linker_inputs.append(wrapped_library)
else:
can_be_linked_statically = False
for static_dep_path in ctx.attr.static_deps:
static_dep_path_label = ctx.label.relative(static_dep_path)
if _check_if_target_under_path(linker_input.owner, static_dep_path_label):
can_be_linked_statically = True
break
if _check_if_target_should_be_exported_with_filter(
linker_input.owner,
ctx.label,
ctx.attr.exports_filter,
_get_permissions(ctx),
):
exports[owner] = True
can_be_linked_statically = True
if can_be_linked_statically:
if not link_statically_labels[owner]:
link_once_static_libs.append(owner)
linker_inputs.append(linker_input)
else:
fail("We can't link " +
str(owner) + " either statically or dynamically")
return (exports, linker_inputs, link_once_static_libs)
def _same_package_or_above(label_a, label_b):
if label_a.workspace_name != label_b.workspace_name:
return False
package_a_tokenized = label_a.package.split("/")
package_b_tokenized = label_b.package.split("/")
if len(package_b_tokenized) < len(package_a_tokenized):
return False
if package_a_tokenized[0] != "":
for i in range(len(package_a_tokenized)):
if package_a_tokenized[i] != package_b_tokenized[i]:
return False
return True
def _get_permissions(ctx):
if ctx.attr._enable_permissions_check[BuildSettingInfo].value:
return ctx.attr.permissions
return None
def _cc_shared_library_impl(ctx):
cc_common.check_experimental_cc_shared_library()
cc_toolchain = find_cc_toolchain(ctx)
feature_configuration = cc_common.configure_features(
ctx = ctx,
cc_toolchain = cc_toolchain,
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
merged_cc_shared_library_info = _merge_cc_shared_library_infos(ctx)
exports_map = _build_exports_map_from_only_dynamic_deps(merged_cc_shared_library_info)
for export in ctx.attr.roots:
if str(export.label) in exports_map:
fail("Trying to export a library already exported by a different shared library: " +
str(export.label))
_check_if_target_should_be_exported_without_filter(export.label, ctx.label, _get_permissions(ctx))
preloaded_deps_direct_labels = {}
preloaded_dep_merged_cc_info = None
if len(ctx.attr.preloaded_deps) != 0:
preloaded_deps_cc_infos = []
for preloaded_dep in ctx.attr.preloaded_deps:
preloaded_deps_direct_labels[str(preloaded_dep.label)] = True
preloaded_deps_cc_infos.append(preloaded_dep[CcInfo])
preloaded_dep_merged_cc_info = cc_common.merge_cc_infos(cc_infos = preloaded_deps_cc_infos)
link_once_static_libs_map = _build_link_once_static_libs_map(merged_cc_shared_library_info)
(exports, linker_inputs, link_once_static_libs) = _filter_inputs(
ctx,
feature_configuration,
cc_toolchain,
exports_map,
preloaded_deps_direct_labels,
link_once_static_libs_map,
)
linking_context = _create_linker_context(ctx, linker_inputs)
user_link_flags = []
for user_link_flag in ctx.attr.user_link_flags:
user_link_flags.append(ctx.expand_location(user_link_flag, targets = ctx.attr.additional_linker_inputs))
linking_outputs = cc_common.link(
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
linking_contexts = [linking_context],
user_link_flags = user_link_flags,
additional_inputs = ctx.files.additional_linker_inputs,
name = ctx.label.name,
output_type = "dynamic_library",
)
runfiles = ctx.runfiles(
files = [linking_outputs.library_to_link.resolved_symlink_dynamic_library],
collect_data = True,
)
for dep in ctx.attr.dynamic_deps:
runfiles = runfiles.merge(dep[DefaultInfo].data_runfiles)
for export in ctx.attr.roots:
exports[str(export.label)] = True
debug_files = []
if ctx.attr._experimental_debug[BuildSettingInfo].value:
exports_debug_file = ctx.actions.declare_file(ctx.label.name + "_exports.txt")
ctx.actions.write(content = "\n".join(exports.keys()), output = exports_debug_file)
link_once_static_libs_debug_file = ctx.actions.declare_file(ctx.label.name + "_link_once_static_libs.txt")
ctx.actions.write(content = "\n".join(link_once_static_libs), output = link_once_static_libs_debug_file)
debug_files.append(exports_debug_file)
debug_files.append(link_once_static_libs_debug_file)
if not ctx.attr._incompatible_link_once[BuildSettingInfo].value:
link_once_static_libs = []
return [
DefaultInfo(
files = depset([linking_outputs.library_to_link.resolved_symlink_dynamic_library] + debug_files),
runfiles = runfiles,
),
CcSharedLibraryInfo(
dynamic_deps = merged_cc_shared_library_info,
exports = exports.keys(),
link_once_static_libs = link_once_static_libs,
linker_input = cc_common.create_linker_input(
owner = ctx.label,
libraries = depset([linking_outputs.library_to_link]),
),
preloaded_deps = preloaded_dep_merged_cc_info,
),
]
def _graph_structure_aspect_impl(target, ctx):
children = []
if hasattr(ctx.rule.attr, "deps"):
for dep in ctx.rule.attr.deps:
if GraphNodeInfo in dep:
children.append(dep[GraphNodeInfo])
# TODO(bazel-team): Add flag to Bazel that can toggle the initialization of
# linkable_more_than_once.
linkable_more_than_once = False
if hasattr(ctx.rule.attr, "tags"):
for tag in ctx.rule.attr.tags:
if tag == LINKABLE_MORE_THAN_ONCE:
linkable_more_than_once = True
return [GraphNodeInfo(
label = ctx.label,
children = children,
linkable_more_than_once = linkable_more_than_once,
)]
def _cc_shared_library_permissions_impl(ctx):
targets = []
for target_filter in ctx.attr.targets:
target_filter_label = ctx.label.relative(target_filter)
if not _check_if_target_under_path(target_filter_label, ctx.label.relative(":__subpackages__")):
fail("A cc_shared_library_permissions rule can only list " +
"targets that are in the same package or a sub-package")
targets.append(target_filter_label)
return [CcSharedLibraryPermissionsInfo(
targets = targets,
)]
graph_structure_aspect = aspect(
attr_aspects = ["*"],
implementation = _graph_structure_aspect_impl,
)
cc_shared_library_permissions = rule(
implementation = _cc_shared_library_permissions_impl,
attrs = {
"targets": attr.string_list(),
},
)
cc_shared_library = rule(
implementation = _cc_shared_library_impl,
attrs = {
"additional_linker_inputs": attr.label_list(allow_files = True),
"dynamic_deps": attr.label_list(providers = [CcSharedLibraryInfo]),
"exports_filter": attr.string_list(),
"permissions": attr.label_list(providers = [CcSharedLibraryPermissionsInfo]),
"preloaded_deps": attr.label_list(providers = [CcInfo]),
"roots": attr.label_list(providers = [CcInfo], aspects = [graph_structure_aspect]),
"static_deps": attr.string_list(),
"user_link_flags": attr.string_list(),
"data": attr.label_list(allow_files = True),
"_cc_toolchain": attr.label(default = "@bazel_tools//tools/cpp:current_cc_toolchain"),
"_enable_permissions_check": attr.label(default = "//tensorflow/core/platform/default:enable_permissions_check"),
"_experimental_debug": attr.label(default = "//tensorflow/core/platform/default:experimental_debug"),
"_incompatible_link_once": attr.label(default = "//tensorflow/core/platform/default:incompatible_link_once"),
},
toolchains = ["@bazel_tools//tools/cpp:toolchain_type"], # copybara-use-repo-external-label
fragments = ["cpp"],
incompatible_use_toolchain_transition = True,
)
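# Usage sketch: the rule reads most naturally from a BUILD file. The targets
# and paths below are hypothetical and only illustrate how the attributes
# defined above fit together.
#
#   cc_library(
#       name = "foo",
#       srcs = ["foo.cc"],
#       deps = ["//example:bar"],
#   )
#
#   cc_shared_library(
#       name = "foo_shared",
#       roots = [":foo"],
#       static_deps = ["//example:bar"],
#       exports_filter = ["//example:__subpackages__"],
#       user_link_flags = ["-Wl,--no-undefined"],
#   )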
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
normal_init)
from mmcv.utils import digit_version
from torch.nn.modules.batchnorm import _BatchNorm
from mmpose.models.utils.ops import resize
from ..backbones.resnet import BasicBlock, Bottleneck
from ..builder import NECKS
try:
from mmcv.ops import DeformConv2d
has_mmcv_full = True
except (ImportError, ModuleNotFoundError):
has_mmcv_full = False
@NECKS.register_module()
class PoseWarperNeck(nn.Module):
"""PoseWarper neck.
`"Learning temporal pose estimation from sparsely-labeled videos"
<https://arxiv.org/abs/1906.04016>`_.
Args:
in_channels (int): Number of input channels from backbone
out_channels (int): Number of output channels
inner_channels (int): Number of intermediate channels of the res block
deform_groups (int): Number of groups in the deformable conv
dilations (list|tuple): different dilations of the offset conv layers
trans_conv_kernel (int): the kernel of the trans conv layer, which is
used to get heatmap from the output of backbone. Default: 1
res_blocks_cfg (dict|None): config of residual blocks. If None,
use the default values. If not None, it should contain the
following keys:
- block (str): the type of residual block, Default: 'BASIC'.
- num_blocks (int): the number of blocks, Default: 20.
        offsets_kernel (int): the kernel of the offset conv layer.
        deform_conv_kernel (int): the kernel of the deformable conv layer.
in_index (int|Sequence[int]): Input feature index. Default: 0
input_transform (str|None): Transformation type of input features.
Options: 'resize_concat', 'multiple_select', None.
Default: None.
        - 'resize_concat': Multiple feature maps will be resized to \
            the same size as the first one and then concatenated together. \
            Usually used in FCN head of HRNet.
        - 'multiple_select': Multiple feature maps will be bundled into \
            a list and passed into decode head.
        - None: Only one selected feature map is allowed.
freeze_trans_layer (bool): Whether to freeze the transition layer
(stop grad and set eval mode). Default: True.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
im2col_step (int): the argument `im2col_step` in deformable conv,
Default: 80.
"""
blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}
minimum_mmcv_version = '1.3.17'
def __init__(self,
in_channels,
out_channels,
inner_channels,
deform_groups=17,
dilations=(3, 6, 12, 18, 24),
trans_conv_kernel=1,
res_blocks_cfg=None,
offsets_kernel=3,
deform_conv_kernel=3,
in_index=0,
input_transform=None,
freeze_trans_layer=True,
norm_eval=False,
im2col_step=80):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.inner_channels = inner_channels
self.deform_groups = deform_groups
self.dilations = dilations
self.trans_conv_kernel = trans_conv_kernel
self.res_blocks_cfg = res_blocks_cfg
self.offsets_kernel = offsets_kernel
self.deform_conv_kernel = deform_conv_kernel
self.in_index = in_index
self.input_transform = input_transform
self.freeze_trans_layer = freeze_trans_layer
self.norm_eval = norm_eval
self.im2col_step = im2col_step
identity_trans_layer = False
assert trans_conv_kernel in [0, 1, 3]
kernel_size = trans_conv_kernel
if kernel_size == 3:
padding = 1
elif kernel_size == 1:
padding = 0
else:
# 0 for Identity mapping.
identity_trans_layer = True
if identity_trans_layer:
self.trans_layer = nn.Identity()
else:
self.trans_layer = build_conv_layer(
cfg=dict(type='Conv2d'),
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=1,
padding=padding)
# build chain of residual blocks
if res_blocks_cfg is not None and not isinstance(res_blocks_cfg, dict):
raise TypeError('res_blocks_cfg should be dict or None.')
if res_blocks_cfg is None:
block_type = 'BASIC'
num_blocks = 20
else:
block_type = res_blocks_cfg.get('block', 'BASIC')
num_blocks = res_blocks_cfg.get('num_blocks', 20)
block = self.blocks_dict[block_type]
res_layers = []
downsample = nn.Sequential(
build_conv_layer(
cfg=dict(type='Conv2d'),
in_channels=out_channels,
out_channels=inner_channels,
kernel_size=1,
stride=1,
bias=False),
build_norm_layer(dict(type='BN'), inner_channels)[1])
res_layers.append(
block(
in_channels=out_channels,
out_channels=inner_channels,
downsample=downsample))
for _ in range(1, num_blocks):
res_layers.append(block(inner_channels, inner_channels))
self.offset_feats = nn.Sequential(*res_layers)
# build offset layers
self.num_offset_layers = len(dilations)
assert self.num_offset_layers > 0, 'Number of offset layers ' \
'should be larger than 0.'
target_offset_channels = 2 * offsets_kernel**2 * deform_groups
offset_layers = [
build_conv_layer(
cfg=dict(type='Conv2d'),
in_channels=inner_channels,
out_channels=target_offset_channels,
kernel_size=offsets_kernel,
stride=1,
dilation=dilations[i],
padding=dilations[i],
bias=False,
) for i in range(self.num_offset_layers)
]
self.offset_layers = nn.ModuleList(offset_layers)
# build deformable conv layers
assert digit_version(mmcv.__version__) >= \
digit_version(self.minimum_mmcv_version), \
f'Current MMCV version: {mmcv.__version__}, ' \
            f'but MMCV >= {self.minimum_mmcv_version} is required; see ' \
            f'https://github.com/open-mmlab/mmcv/issues/1440 ' \
            f'and please install the latest MMCV.'
if has_mmcv_full:
deform_conv_layers = [
DeformConv2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=deform_conv_kernel,
stride=1,
padding=int(deform_conv_kernel / 2) * dilations[i],
dilation=dilations[i],
deform_groups=deform_groups,
im2col_step=self.im2col_step,
) for i in range(self.num_offset_layers)
]
else:
raise ImportError('Please install the full version of mmcv '
'to use `DeformConv2d`.')
self.deform_conv_layers = nn.ModuleList(deform_conv_layers)
self.freeze_layers()
def freeze_layers(self):
if self.freeze_trans_layer:
self.trans_layer.eval()
for param in self.trans_layer.parameters():
param.requires_grad = False
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, std=0.001)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
elif isinstance(m, DeformConv2d):
filler = torch.zeros([
m.weight.size(0),
m.weight.size(1),
m.weight.size(2),
m.weight.size(3)
],
dtype=torch.float32,
device=m.weight.device)
for k in range(m.weight.size(0)):
filler[k, k,
int(m.weight.size(2) / 2),
int(m.weight.size(3) / 2)] = 1.0
m.weight = torch.nn.Parameter(filler)
m.weight.requires_grad = True
# posewarper offset layer weight initialization
for m in self.offset_layers.modules():
constant_init(m, 0)
def _transform_inputs(self, inputs):
"""Transform inputs for decoder.
Args:
inputs (list[Tensor] | Tensor): multi-level img features.
Returns:
Tensor: The transformed inputs
"""
if not isinstance(inputs, list):
return inputs
if self.input_transform == 'resize_concat':
inputs = [inputs[i] for i in self.in_index]
upsampled_inputs = [
resize(
input=x,
size=inputs[0].shape[2:],
mode='bilinear',
align_corners=self.align_corners) for x in inputs
]
inputs = torch.cat(upsampled_inputs, dim=1)
elif self.input_transform == 'multiple_select':
inputs = [inputs[i] for i in self.in_index]
else:
inputs = inputs[self.in_index]
return inputs
def forward(self, inputs, frame_weight):
assert isinstance(inputs, (list, tuple)), 'PoseWarperNeck inputs ' \
'should be list or tuple, even though the length is 1, ' \
'for unified processing.'
output_heatmap = 0
if len(inputs) > 1:
inputs = [self._transform_inputs(input) for input in inputs]
inputs = [self.trans_layer(input) for input in inputs]
# calculate difference features
diff_features = [
self.offset_feats(inputs[0] - input) for input in inputs
]
for i in range(len(inputs)):
if frame_weight[i] == 0:
continue
warped_heatmap = 0
for j in range(self.num_offset_layers):
offset = (self.offset_layers[j](diff_features[i]))
warped_heatmap_tmp = self.deform_conv_layers[j](inputs[i],
offset)
warped_heatmap += warped_heatmap_tmp / \
self.num_offset_layers
output_heatmap += warped_heatmap * frame_weight[i]
else:
inputs = inputs[0]
inputs = self._transform_inputs(inputs)
inputs = self.trans_layer(inputs)
num_frames = len(frame_weight)
batch_size = inputs.size(0) // num_frames
ref_x = inputs[:batch_size]
ref_x_tiled = ref_x.repeat(num_frames, 1, 1, 1)
offset_features = self.offset_feats(ref_x_tiled - inputs)
warped_heatmap = 0
for j in range(self.num_offset_layers):
offset = self.offset_layers[j](offset_features)
warped_heatmap_tmp = self.deform_conv_layers[j](inputs, offset)
warped_heatmap += warped_heatmap_tmp / self.num_offset_layers
for i in range(num_frames):
if frame_weight[i] == 0:
continue
output_heatmap += warped_heatmap[i * batch_size:(i + 1) *
batch_size] * frame_weight[i]
return output_heatmap
def train(self, mode=True):
"""Convert the model into training mode."""
super().train(mode)
self.freeze_layers()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
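# Construction sketch (illustrative only; the channel sizes are assumptions and
# the forward pass needs the full mmcv build that provides DeformConv2d):
#
#   neck = PoseWarperNeck(
#       in_channels=48,       # backbone feature channels (assumed)
#       out_channels=17,      # heatmap channels (assumed)
#       inner_channels=128)   # width of the residual offset branch
#   feats = [torch.randn(2, 48, 64, 48)]        # one tensor per frame
#   heatmaps = neck(feats, frame_weight=[1.0])  # weighted fusion over frames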
|
# Copyright 2020 Daniel J. Tait
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" Tests for fem module """
from absl.testing import absltest
import tenfem
import tensorflow as tf
import numpy as np
tfk = tf.keras
tfkl = tf.keras.layers
element = tenfem.reference_elements.TriangleElement(degree=1)
class AssembleLocalStiffnessMatrixTest(absltest.TestCase):
def test_assemble_local_stiffness_matrix(self):
mesh = tenfem.mesh.examples.square(2, 2)
element_dim = tf.shape(mesh.elements)[-1]
batch_shape = [3, 1, 4]
diff_coeff = tf.ones(batch_shape + [mesh.n_elements, element_dim])
local_stiffness_mat = tenfem.fem.assemble_local_stiffness_matrix(
diff_coeff, mesh, element)
self.assertEqual(batch_shape, tf.shape(local_stiffness_mat).numpy()[:3].tolist())
local_stiffness_mat = tf.reshape(local_stiffness_mat, [-1, mesh.n_elements, element_dim, element_dim])
batch_size = tf.shape(local_stiffness_mat)[0]
elements = tf.tile(mesh.elements[tf.newaxis, ...], [batch_size, 1, 1])
global_stiffness_mat = tenfem.fem.scatter_matrix_to_global(
local_stiffness_mat, elements, mesh.n_nodes)
def test_interval_stiffness_matrix(self):
n_nodes = 5
nodes = np.linspace(-0.5, 1.3, n_nodes)[..., np.newaxis]
elements = np.column_stack((np.arange(0, n_nodes-1), np.arange(1, n_nodes)))
boundary_elements = [[0, ], [n_nodes-1, ]]
mesh = tenfem.mesh.IntervalMesh(nodes, elements, boundary_elements)
def assemble_interval_mesh_stiffness():
element_nodes = tf.gather(mesh.nodes, mesh.elements).numpy()
h = element_nodes[..., 1, 0] - element_nodes[..., 0, 0] # width of elements
off_diag = - 1 / h
main_diag = np.zeros(mesh.n_nodes)
main_diag[:-1] += 1 / h
main_diag[1:] += 1 / h
return np.diag(main_diag) + np.diag(off_diag, k=-1) + np.diag(off_diag, k=1)
element = tenfem.reference_elements.IntervalElement(degree=1, dtype=mesh.dtype)
diff_coeff = tf.ones([mesh.n_elements, 2], dtype=element.dtype)
local_stiffness_mat = tenfem.fem.assemble_local_stiffness_matrix(diff_coeff, mesh, element)
global_stiffness_mat = tenfem.fem.scatter_matrix_to_global(
local_stiffness_mat[tf.newaxis, ...],
mesh.elements[None, ...],
mesh.n_nodes)
np.testing.assert_allclose(assemble_interval_mesh_stiffness(),
global_stiffness_mat[0])
class AssembleLocalLoadVectorTest(absltest.TestCase):
def test_assemble_local_load_vector(self):
mesh = tenfem.mesh.examples.square(2, 2)
element_dim = tf.shape(mesh.elements)[-1]
batch_shape = [3, 1, 4]
source = tf.ones(batch_shape + [mesh.n_elements, element_dim])
local_load_vector = tenfem.fem.assemble_local_load_vector(
source, mesh, element)
self.assertEqual(batch_shape, tf.shape(local_load_vector).numpy()[:3].tolist())
local_load_vector = tf.reshape(local_load_vector, [-1, mesh.n_elements, element_dim])
batch_size = tf.shape(local_load_vector)[0]
elements = tf.tile(mesh.elements[tf.newaxis, ...], [batch_size, 1, 1])
global_load_vector = tenfem.fem.scatter_vector_to_global(
local_load_vector, elements, mesh.n_nodes)
self.assertEqual(tf.shape(global_load_vector).numpy().tolist(),
[batch_size.numpy(), mesh.n_nodes, 1])
class SolveDirichletTest(absltest.TestCase):
def test_solve_dirichlet(self):
mesh = tenfem.mesh.examples.square(4, 4)
element_dim = tf.shape(mesh.elements)[-1]
diff_coeff = tf.ones([mesh.n_elements, element_dim])
source = tf.ones([mesh.n_elements, element_dim])
local_stiffness_mat = tenfem.fem.assemble_local_stiffness_matrix(
diff_coeff, mesh, element)
local_load_vec = tenfem.fem.assemble_local_load_vector(
source, mesh, element)
stiffness_mat = tenfem.fem.scatter_matrix_to_global(
local_stiffness_mat[tf.newaxis, ...], mesh.elements[tf.newaxis, ...], mesh.n_nodes)
load_vec = tenfem.fem.scatter_vector_to_global(
local_load_vec[tf.newaxis, ...], mesh.elements[tf.newaxis, ...], mesh.n_nodes)
node_types = tf.scatter_nd(mesh.boundary_node_indices[:, tf.newaxis],
tf.ones_like(mesh.boundary_node_indices),
shape=[mesh.n_nodes])
bnd_vals = tf.cast(
np.ones_like(mesh.boundary_node_indices), tf.float32)[:, tf.newaxis]
u = tenfem.fem.solve_dirichlet_form_linear_system(
stiffness_mat[0], load_vec[0], node_types, bnd_vals)
u_bnd = tf.gather(u, mesh.boundary_node_indices)
np.testing.assert_allclose(u_bnd, bnd_vals)
class AssembleConvectionMatrixTest(absltest.TestCase):
def test_assemble_local_convection_matrix(self):
mesh = tenfem.mesh.examples.square(5, 5)
mesh = tenfem.mesh.triangle.convert_linear_to_quadratic(mesh)
tri_element = tenfem.reference_elements.TriangleElement(degree=2)
batch_shape = [3, 1, 4]
tvf = tf.ones(batch_shape + [mesh.n_elements, tri_element.element_dim, 2])
local_convec_mat = tenfem.fem.assemble_local_convection_matrix(
tvf, mesh, tri_element)
self.assertEqual(batch_shape,
tf.shape(local_convec_mat).numpy()[:3].tolist())
class LinearEllipticOperatorTest(absltest.TestCase):
    def test_linear_elliptic_operator(self):
mesh = tenfem.mesh.examples.square(4, 4)
diffusion_coefficient = tfkl.Dense(1, activation='softplus')
transport_vector_field = tfkl.Dense(2)
source = tfkl.Dense(1)
def build_op_to_fail():
return tenfem.layers.LinearEllipticOperator(
diffusion_coefficient,
source,
transport_vector_field=transport_vector_field,
reference_element=element)
self.assertRaises(ValueError, build_op_to_fail)
if __name__ == '__main__':
absltest.main()
|
#!/usr/bin/python
# Copyright (c) 2016 AIT, ETH Zurich. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name AIT nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# File: merge_IMU_calibration.py
# Created on: 09.03.16
# Author: Nicolas de Palezieux
from __future__ import print_function, division
import yaml
import sys
import rospkg
import os
__author__ = 'nicolas'
if __name__ == "__main__":
# select device
if len(sys.argv) < 2:
print('No device serial nr provided at the command line.')
rospack = rospkg.RosPack()
duo_path = rospack.get_path('duo3d_ros')
devices = os.listdir(os.path.join(duo_path, 'calib'))
devices = [device for device in devices if os.path.isdir(os.path.join(duo_path, 'calib', device))]
if len(devices) == 1:
print('Found one device: {}. Using that one.'.format(devices[0]))
device_serial_nr = devices[0]
else:
print('Found the following devices:')
for i, device in enumerate(devices):
print('{}: {}'.format(i+1, device))
selection = int(raw_input('Select the device you want by providing the appropriate number: '))
if selection < 1 or selection > len(devices):
raise Exception('The provided number {} is not in the valid range [{}:{}]'.format(selection, 1, len(devices)))
device_serial_nr = devices[selection-1]
else:
device_serial_nr = sys.argv[1]
if not os.path.isfile(os.path.join(duo_path, 'calib', device_serial_nr, 'last_bias_estimate.yaml')):
raise Exception('There is no last_bias_estimate.yaml for the selected device')
# select lens
lenses = os.listdir(os.path.join(duo_path, 'calib', device_serial_nr))
lenses = [lens for lens in lenses if os.path.isdir(os.path.join(duo_path, 'calib', device_serial_nr, lens))]
if len(lenses) == 1:
print('Found one lens: {}. Using that one.'.format(lenses[0]))
lens = lenses[0]
else:
print('Found several lenses:')
for i, lens in enumerate(lenses):
print('{}: {}'.format(i+1, lens))
selection = int(raw_input('Select the lens you want by providing the appropriate number: '))
if selection < 1 or selection > len(lenses):
raise Exception('The provided number {} is not in the valid range [{}:{}]'.format(selection, 1, len(lenses)))
lens = lenses[selection-1]
# select resolution
resolutions = os.listdir(os.path.join(duo_path, 'calib', device_serial_nr, lens))
resolutions = [resolution for resolution in resolutions if os.path.isdir(os.path.join(duo_path, 'calib', device_serial_nr, lens, resolution))]
if len(resolutions) == 1:
print('Found one resolution: {}. Using that one.'.format(resolutions[0]))
resolution = resolutions[0]
else:
print('Found several resolutions:')
for i, resolution in enumerate(resolutions):
print('{}: {}'.format(i+1, resolution))
selection = int(raw_input('Select the resolution you want by providing the appropriate number: '))
if selection < 1 or selection > len(resolutions):
raise Exception('The provided number {} is not in the valid range [{}:{}]'.format(selection, 1, len(resolutions)))
resolution = resolutions[selection-1]
# load the yaml files
with open(os.path.join(duo_path, 'calib', device_serial_nr, 'last_bias_estimate.yaml'), 'r') as infile:
last_bias_estimate = yaml.load(infile)
with open(os.path.join(duo_path, 'calib', device_serial_nr, lens, resolution, 'cameraParams.yaml'), 'r') as infile:
cameraParams = yaml.load(infile)
print('For each axis of the accelerometer and gyroscope you can decide to use the new estimate (answer y), keep the old one (answer n) or manually provide your own (answer m).')
print('Accelerometer biases:')
print('Old \tNew')
print('{0: 6.6f}\t{1: 6.6f}'.format(cameraParams['acc_bias'][0][0], last_bias_estimate['acc_bias'][0]))
print('{0: 6.6f}\t{1: 6.6f}'.format(cameraParams['acc_bias'][1][0], last_bias_estimate['acc_bias'][1]))
print('{0: 6.6f}\t{1: 6.6f}'.format(cameraParams['acc_bias'][2][0], last_bias_estimate['acc_bias'][2]))
axes = 'XYZ'
for i in range(3):
selection = raw_input('Do you want to use the new accelerometer {} axis estimate? [Y/n/m]: '.format(axes[i]))
if not selection or selection == 'y' or selection == 'Y':
cameraParams['acc_bias'][i][0] = last_bias_estimate['acc_bias'][i]
        elif selection == 'm':
cameraParams['acc_bias'][i][0] = float(raw_input('Enter a bias value for the accelerometer {} axis: '.format(axes[i])))
print('Gyroscope biases:')
print('Old \tNew')
print('{0: 6.6f}\t{1: 6.6f}'.format(cameraParams['gyro_bias'][0][0], last_bias_estimate['gyro_bias'][0]))
print('{0: 6.6f}\t{1: 6.6f}'.format(cameraParams['gyro_bias'][1][0], last_bias_estimate['gyro_bias'][1]))
print('{0: 6.6f}\t{1: 6.6f}'.format(cameraParams['gyro_bias'][2][0], last_bias_estimate['gyro_bias'][2]))
axes = 'XYZ'
for i in range(3):
selection = raw_input('Do you want to use the new gyroscope {} axis estimate? [Y/n/m]: '.format(axes[i]))
if not selection or selection == 'y' or selection == 'Y':
cameraParams['gyro_bias'][i][0] = last_bias_estimate['gyro_bias'][i]
        elif selection == 'm':
cameraParams['gyro_bias'][i][0] = float(raw_input('Enter a bias value for the gyroscope {} axis: '.format(axes[i])))
with open(os.path.join(duo_path, 'calib', device_serial_nr, lens, resolution, 'cameraParams.yaml'), 'w') as outfile:
outfile.write(yaml.dump(cameraParams, default_flow_style=None))
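# For reference, the fields touched above imply a cameraParams.yaml layout
# roughly like the following (illustrative only; the real file holds many more
# calibration entries):
#
#   acc_bias:
#   - [0.012]
#   - [-0.034]
#   - [0.101]
#   gyro_bias:
#   - [0.001]
#   - [0.002]
#   - [-0.003]
#
# Each bias is stored as a column vector (a list of one-element lists), which
# is why the script indexes cameraParams['acc_bias'][i][0].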
|
from .vtk_backplot import VTKBackPlot |
'''
XlPy/Spectra/scan_parser
____________________
Quick tool to split scan-delimited file formats (such as MGF) to
allow parsing on entire scans without loading the full file
into memory.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules
import six
from xldlib.definitions import re
from xldlib.qt.objects import base
from xldlib.utils import logger
# OBJECTS
# -------
@logger.init('scans', level='DEBUG')
class ScanFinder(base.BaseObject):
'''
Iteratively parses chunks and breaks them into scans, with
a bound remainder.
'''
def __init__(self, start_sub, end_sub):
super(ScanFinder, self).__init__()
self.remainder = ''
self.start_sub = start_sub
self.end_sub = end_sub
def __call__(self, chunk):
'''
Parses a read chunk and adds to the remainder, and then
processes scan start and ends. For each scan found, separately
        processes the entire scan.
chunk -- read chunk, ie, 4096 chars, etc.
__call__('IONS\nFile....')->[{'num': 345, ...}, ...]
'''
self.remainder = ''.join([self.remainder, chunk])
end = 0
while end != -1:
start = self.get_scan_start()
end, match = self.get_scan_end()
if end != -1:
scan = self.get_scan(start, end, match)
yield scan
self.adjust_remainder(end, match)
@classmethod
def fromengine(cls, engine, start=False, end=False):
'''Compiles the start and end subs and initializes the class'''
startsub = engine.defaults.start
if start:
startsub = re.compile(startsub)
endsub = engine.defaults.end
if end:
endsub = re.compile(endsub)
return cls(startsub, endsub)
# GETTERS
def get_scan_start(self):
'''
Finds the start position of a given scan
get_scan_start()->1000
'''
if isinstance(self.start_sub, six.string_types):
return self.remainder.find(self.start_sub)
elif isinstance(self.start_sub, re._pattern_type):
match = self.start_sub.search(self.remainder)
if match is None:
return -1
else:
return match.start()
def get_scan_end(self):
'''
Finds the end position of a given scan.
get_scan_end()->1303030
'''
if isinstance(self.end_sub, six.string_types):
return self.remainder.find(self.end_sub), None
elif isinstance(self.end_sub, re._pattern_type):
match = self.end_sub.search(self.remainder)
if match is None:
return -1, match
else:
return match.start(), match
def get_scan(self, start, end, match):
'''
Gets the full scan string of the MS scan file.
start, end -- ints for start and end of the scan
match -- re match pattern or NoneType
get_scan(1000, 1303030)->"BEGIN IONS\n..."
'''
if isinstance(self.end_sub, six.string_types):
sub_end = end + len(self.end_sub)
elif isinstance(self.end_sub, re._pattern_type):
sub_end = match.end()
return self.remainder[start:sub_end]
# HELPERS
def adjust_remainder(self, end, match):
'''
Adjusts the remaining string length for new scan processing.
end -- ints for start and end of the scan
match -- re match pattern or NoneType
'''
if isinstance(self.end_sub, six.string_types):
sub_end = end + len(self.end_sub)
elif isinstance(self.end_sub, re._pattern_type):
sub_end = match.end()
self.remainder = self.remainder[sub_end:]
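# Usage sketch (not part of the module): stream an MGF file in fixed-size
# chunks and let ScanFinder yield one complete scan at a time. The literal
# delimiters and the per-scan handler below are assumptions for illustration;
# in the real pipeline the delimiters come from the search engine's defaults
# via ScanFinder.fromengine.
#
#   finder = ScanFinder('BEGIN IONS', 'END IONS')
#   with open('spectra.mgf') as fileobj:
#       for chunk in iter(lambda: fileobj.read(4096), ''):
#           for scan in finder(chunk):
#               handle_scan(scan)   # hypothetical per-scan handler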
|
def isFS(arr):
if len(arr) == 1:
return True
for i in range(2, len(arr)):
if (arr[i] - arr[i - 1] != arr[i - 2]):
return False
return True
arr = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
arr[0] = 1
arr[1] = 1
for i in range(2, len(arr)):
arr[i] = arr[i - 1] + arr[i - 2]
arr.sort()
if (isFS(arr)):
print("Yes")
else:
print("No")
|
from battleship.grid import Grid, Outcome
from battleship.ship import Ship
def test_grid_creates_ok():
grid = Grid(10)
assert grid.lost( )
def test_grid_places_ship_ok():
grid = Grid(10)
assert grid.lost( )
ship = Ship((0,0), (0, 2))
grid.place(ship)
assert grid.ships == 3
assert not grid.lost( )
def test_grid_places_ship_overlap_not_ok():
grid = Grid(10)
assert grid.lost( )
ship = Ship((0,0), (0, 2))
grid.place(ship)
another = Ship((0,1), (2, 1))
assert not grid.place(another)
assert grid.ships == 3
assert not grid.lost( )
def test_grid_shot_miss():
grid = Grid(10)
assert grid.lost( )
ship = Ship((0,0), (0, 2))
grid.place(ship)
assert grid.ships == 3
result = grid.hit(1, 1)
assert result == Outcome.MISS
assert grid.ships == 3
assert not grid.lost( )
def test_grid_shot_hit():
grid = Grid(10)
assert grid.lost( )
ship = Ship((0,0), (0, 2))
grid.place(ship)
assert grid.ships == 3
result = grid.hit(0, 1)
assert result == Outcome.HIT
assert grid.ships == 2
assert not grid.lost( )
def test_grid_shot_invalid_same_spot():
grid = Grid(10)
assert grid.lost( )
ship = Ship((0,0), (0, 2))
grid.place(ship)
assert grid.ships == 3
result = grid.hit(1, 1)
assert result == Outcome.MISS
assert grid.ships == 3
assert not grid.lost( )
result = grid.hit(1, 1)
assert result == Outcome.INVALID
assert grid.ships == 3
assert not grid.lost( )
def test_grid_shot_and_win():
grid = Grid(10)
assert grid.lost( )
ship = Ship((0,0), (0, 1))
grid.place(ship)
assert grid.ships == 2
result = grid.hit(0, 0)
assert result == Outcome.HIT
assert grid.ships == 1
result = grid.hit(0, 1)
assert result == Outcome.HIT
assert grid.lost( )
|
from InstagramCLI import InstagramCLI
cli = InstagramCLI(username="", password="")
data= cli.get_highlights(target_username="therock",save_urls=True,save_to_device=True,story_count=2,media_type="video")
print(data)
cli.close() |
from attr import attrs, asdict
from aioalice.utils import safe_kwargs
@safe_kwargs
@attrs
class AliceObject(object):
"""AliceObject is base class for all Alice requests related objects"""
def to_json(self):
return asdict(self)
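# Minimal usage sketch (hypothetical subclass, not part of aioalice itself):
# any attrs-decorated subclass inherits dict serialization via to_json().
#
#   from attr import attrib
#
#   @attrs
#   class Button(AliceObject):
#       title = attrib(type=str)
#       url = attrib(default=None)
#
#   Button(title='Open', url='https://example.com').to_json()
#   # -> {'title': 'Open', 'url': 'https://example.com'}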
|
class Solution:
"""
@param L: Given n pieces of wood with length L[i]
@param k: An integer
@return: The maximum length of the small pieces
"""
def woodCut(self, L, k):
# write your code here
if L is None or len(L) == 0:
return 0
start = 0
end = max(L)
        while start + 1 < end:
            mid = start + (end - start) // 2
            if self.is_sufficient(L, mid, k):
                start = mid
            else:
                end = mid
        return end if self.is_sufficient(L, end, k) else start
    def is_sufficient(self, L, length, nums):
if length == 0:
return False
ans = 0
for i in range(len(L)):
ans += L[i] // length
return ans >= nums
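if __name__ == '__main__':
    # Worked example: cutting logs of length [232, 124, 456] into at least
    # k = 7 pieces gives a maximum piece length of 114 (2 + 1 + 4 pieces).
    print(Solution().woodCut([232, 124, 456], 7))  # expected output: 114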
|
from datetime import datetime
from apiclient.discovery import build
import json
from senpaibot.orderedset import OrderedSet
class BaseReadingList:
def get_reading_list(self):
        raise NotImplementedError
def get_undated_reading_set(self):
        raise NotImplementedError
@property
def reading_list(self):
return self.get_reading_list()
@property
def undated_reading_set(self):
return self.get_undated_reading_set()
class HardcodedReadingList(BaseReadingList):
"""Hardcoded Reading List
Loads hardcoded data
"""
_reading_list = [
('Capítulo 1', datetime(2018, 5, 22)),
('Capítulo 2', datetime(2018, 6, 10)),
('Capítulo 3', datetime(2018, 6, 11)),
('Capítulo 4', datetime(2018, 6, 12)),
('Capítulo 5', datetime(2018, 7, 10)),
('Capítulo 6', datetime(2018, 7, 11)),
('Capítulo 7', datetime(2018, 7, 12)),
]
def get_reading_list(self):
return self._reading_list
def get_undated_reading_set(self):
return OrderedSet([read[0] for read in self.reading_list])
class JsonReadingList(BaseReadingList):
"""Json Reading List
Loads json strings in the format:
[
['Cap title', '23/09/18'],
['Cap title 2', '05/10/18']
]
"""
datetime_format = '%d/%m/%y'
def __init__(self, json):
self.json = json
def get_json(self):
return self.json
def load_json(self):
return json.loads(self.get_json())
def date_format_function(self):
return lambda row: (row[0], datetime.strptime(row[1], self.datetime_format))
def format_data(self, data):
return map(self.date_format_function(), data)
def get_data(self):
return self.format_data(self.load_json())
def get_reading_list(self):
return self.get_data()
def get_undated_reading_set(self):
return OrderedSet([read[0] for read in self.reading_list])
class FileReadingList(JsonReadingList):
"""File Reading List
Loads json files in the format:
[
['Cap title', '23/09/18'],
['Cap title 2', '05/10/18']
]
"""
def __init__(self, file):
self.file = file
def get_file_path(self):
return self.file
def get_file(self):
return open(self.get_file_path())
def load_json(self):
return json.load(self.get_file())
class GoogleSpreadsheetReadingList(JsonReadingList):
"""Google Spreadsheet Reading List
Loads data from a google spreadsheet in the format:
Book 1 | Chapter 2 | 14/05/18
"""
def __init__(self, sheet_id, sheet_range, api_key):
self.sheet_id = sheet_id
self.api_key = api_key
self.sheet_range = sheet_range
self.data = list(self.get_data())
def get_service(self):
return build('sheets', 'v4', developerKey=self.api_key)
def spreadsheet_format_function(self):
return lambda row: (', capítulo '.join(row[:2]), datetime.strptime(row[2], self.datetime_format))
def format_spreadsheet_data(self, data):
return map(self.spreadsheet_format_function(), data)
def get_data(self):
service = self.get_service()
spreadsheet = service.spreadsheets().values().get(spreadsheetId=self.sheet_id, range=self.sheet_range)
data = spreadsheet.execute().get('values', [])
return self.format_spreadsheet_data(data)
def get_reading_list(self):
return self.data
def get_default_list():
import os
sheet_id = os.environ.get('GOOGLE_SHEET_ID')
api_key = os.environ.get('GOOGLE_API_KEY')
sheet_range = os.environ.get('GOOGLE_SHEET_RANGE')
reading_list = GoogleSpreadsheetReadingList(sheet_id, sheet_range, api_key)
return reading_list
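# Usage sketch (illustrative, not part of the module): the JSON-backed readers
# share one input format, so a literal string behaves the same as a file.
#
#   raw = '[["Capítulo 1", "22/05/18"], ["Capítulo 2", "10/06/18"]]'
#   reading_list = JsonReadingList(raw)
#   for title, due in reading_list.reading_list:
#       print(title, due.date())
#   print(reading_list.undated_reading_set)   # titles only, order preserved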
|