repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-840k chars) |
---|---|---|---|---|
bttt123/TradeSim | vnTrader/uiMainWindow.py | 2374b0925d34d8fb299095250c5c8834192848ce | # encoding: UTF-8
from builtins import str
import psutil
# import sys
# PyQt 4/5 compatibility
try:
from PyQt4.QtGui import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt4 import QtCore
except ImportError:
from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt5 import QtCore
from uiBasicWidget import *
import uiBasicWidget as wgs
#from . import uiBasicWidget as wgs
########################################################################
class MainWindow(QMainWindow):
"""主窗口"""
signalStatusBar = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, app, sheets):
"""Constructor"""
super(MainWindow, self).__init__()
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.app = app
self.sheets = sheets
self.widgetDict = {}  # dict that keeps references to child windows
self.initUi()
self.eventEngine.register(EVENT_TITLE, self.updateTitle)
self.sid = None
def updateTitle(self, event):
(user, stratid) = event.dict_['data']
#self.setWindowTitle('VnTrader: ' + str(user) + "/" + str(stratid))
self.sid = stratid
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle('VnTrader')
self.initCentral()
self.initMenu()
# self.initStatusBar()
def showLogin(self):
self.connectQuantOS()
# ----------------------------------------------------------------------
def initCentral(self):
"""初始化中心区域"""
widgetTradingW, dockTradingW = self.createDock(wgs.TradingWidget, u'交易', QtCore.Qt.LeftDockWidgetArea)
widgetMarketM, dockMarketM = self.createDock(wgs.MarketMonitor, u'行情', QtCore.Qt.RightDockWidgetArea)
widgetPositionM, dockPositionM = self.createDock(wgs.PositionMonitor, u'持仓', QtCore.Qt.RightDockWidgetArea)
widgetAccountM, dockAccountM = self.createDock(wgs.AccountMonitor, u'资金', QtCore.Qt.BottomDockWidgetArea)
widgetContractM, dockContractM = self.createDock(wgs.ContractMonitor, u'合约', QtCore.Qt.BottomDockWidgetArea)
widgetLogM, dockLogM = self.createDock(wgs.LogMonitor, u'日志', QtCore.Qt.BottomDockWidgetArea)
widgetTradeM, dockTradeM = self.createDock(wgs.TradeMonitor, u'成交', QtCore.Qt.BottomDockWidgetArea)
widgetOrderM, dockOrderM = self.createDock(wgs.OrderMonitor, u'委托', QtCore.Qt.BottomDockWidgetArea)
self.tabifyDockWidget(dockContractM, dockTradeM)
self.tabifyDockWidget(dockTradeM, dockOrderM)
self.tabifyDockWidget(dockAccountM, dockLogM)
dockOrderM.raise_()
dockLogM.raise_()
# connect signals between widgets
widgetPositionM.itemDoubleClicked.connect(widgetTradingW.closePosition)
widgetMarketM.itemDoubleClicked.connect(widgetTradingW.fillSymbol)
# ----------------------------------------------------------------------
def initMenu(self):
"""初始化菜单"""
# 创建操作
connectQuantOSAction = QAction(u'连接和切换策略', self)
connectQuantOSAction.triggered.connect(self.connectQuantOS)
exitAction = QAction(u'退出', self)
exitAction.triggered.connect(self.close)
aboutAction = QAction(u'关于', self)
aboutAction.triggered.connect(self.openAbout)
colorAction = QAction(u'变色', self)
colorAction.triggered.connect(self.changeColor)
# create the menu bar
menubar = self.menuBar()
# designed to show only gateways that actually exist
sysMenu = menubar.addMenu(u'系统')
if 'quantos' in self.mainEngine.gatewayDict:
sysMenu.addAction(connectQuantOSAction)
sysMenu.addSeparator()
sysMenu.addAction(exitAction)
# Help menu
helpMenu = menubar.addMenu(u'帮助')
helpMenu.addAction(aboutAction)
helpMenu.addAction(colorAction)
# ----------------------------------------------------------------------
def initStatusBar(self):
"""初始化状态栏"""
self.statusLabel = QLabel()
self.statusLabel.setAlignment(QtCore.Qt.AlignLeft)
self.statusBar().addPermanentWidget(self.statusLabel)
self.statusLabel.setText(self.getCpuMemory())
self.sbCount = 0
self.sbTrigger = 10  # refresh every 10 seconds
self.signalStatusBar.connect(self.updateStatusBar)
self.eventEngine.register(EVENT_TIMER, self.signalStatusBar.emit)
# ----------------------------------------------------------------------
def updateStatusBar(self, event):
"""在状态栏更新CPU和内存信息"""
self.sbCount += 1
if self.sbCount == self.sbTrigger:
self.sbCount = 0
self.statusLabel.setText(self.getCpuMemory())
# ----------------------------------------------------------------------
def getCpuMemory(self):
"""获取CPU和内存状态信息"""
cpuPercent = psutil.cpu_percent()
memoryPercent = psutil.virtual_memory().percent
return u'CPU使用率:%d%% 内存使用率:%d%%' % (cpuPercent, memoryPercent)
# ----------------------------------------------------------------------
def connectQuantOS(self):
self.mainEngine.connect('quantos')
# ----------------------------------------------------------------------
def openAbout(self):
"""打开关于"""
try:
self.widgetDict['aboutW'].show()
except KeyError:
self.widgetDict['aboutW'] = AboutWidget(self)
self.widgetDict['aboutW'].show()
# ----------------------------------------------------------------------
def closeEvent(self, event):
"""关闭事件"""
reply = QMessageBox.question(self, u'退出',
u'确认退出?', QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
for widget in list(self.widgetDict.values()):
widget.close()
self.mainEngine.exit()
event.accept()
else:
event.ignore()
# ----------------------------------------------------------------------
def createDock(self, widgetClass, widgetName, widgetArea):
"""创建停靠组件"""
widget = widgetClass(self.mainEngine, self.eventEngine)
dock = QDockWidget(widgetName)
dock.setWidget(widget)
dock.setObjectName(widgetName)
dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
self.addDockWidget(widgetArea, dock)
return widget, dock
def changeColor(self):
self.app.setStyleSheet(self.sheets[1])
self.sheets = [self.sheets[1], self.sheets[0]]
########################################################################
class AboutWidget(QDialog):
"""显示关于信息"""
# ----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(AboutWidget, self).__init__(parent)
self.initUi()
# ----------------------------------------------------------------------
def initUi(self):
""""""
self.setWindowTitle(u'关于VnTrader')
text = u"""
quantos trade client
"""
label = QLabel()
label.setText(text)
label.setMinimumWidth(500)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.setLayout(vbox)
| [((90, 31, 90, 70), 'PyQt5.QtWidgets.QAction', 'QAction', ({(90, 39, 90, 63): 'u"""连接和切换策略"""', (90, 65, 90, 69): 'self'}, {}), "(u'连接和切换策略', self)", False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((93, 21, 93, 45), 'PyQt5.QtWidgets.QAction', 'QAction', ({(93, 29, 93, 38): 'u"""退出"""', (93, 40, 93, 44): 'self'}, {}), "(u'退出', self)", False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((96, 22, 96, 46), 'PyQt5.QtWidgets.QAction', 'QAction', ({(96, 30, 96, 39): 'u"""关于"""', (96, 41, 96, 45): 'self'}, {}), "(u'关于', self)", False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((99, 22, 99, 46), 'PyQt5.QtWidgets.QAction', 'QAction', ({(99, 30, 99, 39): 'u"""变色"""', (99, 41, 99, 45): 'self'}, {}), "(u'变色', self)", False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((120, 27, 120, 35), 'PyQt5.QtWidgets.QLabel', 'QLabel', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((143, 21, 143, 41), 'psutil.cpu_percent', 'psutil.cpu_percent', ({}, {}), '()', False, 'import psutil\n'), ((165, 16, 167, 68), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', ({(165, 37, 165, 41): 'self', (165, 43, 165, 52): 'u"""退出"""', (166, 37, 166, 53): 'u"""确认退出?"""', (166, 55, 167, 51): 'QMessageBox.Yes | QMessageBox.No', (167, 53, 167, 67): 'QMessageBox.No'}, {}), "(self, u'退出', u'确认退出?', QMessageBox.Yes | QMessageBox.\n No, QMessageBox.No)", False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((182, 15, 182, 38), 'PyQt5.QtWidgets.QDockWidget', 'QDockWidget', ({(182, 27, 182, 37): 'widgetName'}, {}), '(widgetName)', False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((214, 16, 214, 24), 'PyQt5.QtWidgets.QLabel', 'QLabel', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((218, 15, 218, 28), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((144, 24, 144, 47), 'psutil.virtual_memory', 'psutil.virtual_memory', ({}, {}), '()', False, 'import psutil\n')] |
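The `apis` cell that closes each row, like the one above, looks like a Python-literal list of extracted call records: a source span, the dotted name of the callable, the call text, and the import it resolves to. Below is a minimal sketch for tallying which packages a row's content calls into; it assumes the cell really is such a literal once the surrounding table pipes are stripped, and the field layout is inferred from the examples rather than documented.

```python
import ast
from collections import Counter

def count_called_packages(apis_cell: str) -> Counter:
    """Count top-level packages referenced by the dotted call names in an `apis` cell."""
    records = ast.literal_eval(apis_cell.strip(" |\n"))  # drop the enclosing table delimiters first
    counts = Counter()
    for record in records:
        dotted_name = record[1]  # second field appears to be the fully qualified callable
        counts[dotted_name.split(".")[0]] += 1  # group by top-level package (PyQt5, psutil, ...)
    return counts
```

For the row above this would report calls into PyQt5 and psutil.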
ficgra/PChome-alertor | line_notify_core.py | 5f4e798e3130c170eb75e03215128590ed02dcf9 | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import requests
import json
import re
from flask import Flask, request, abort
import mysql.connector as mariadb
from mysql.connector import Error
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage, FollowEvent,
)
app = Flask(__name__)
line_bot_api = LineBotApi('')
handler = WebhookHandler('')
@app.route("/", methods=['GET'])
def index():
return 'OK!'
# LINE official account: /callback test event
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
# event fired when the LINE official account receives a message
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
get_message = event.message.text
print(get_message)
user_id = event.source.user_id
register_url = 'https://notify-bot.line.me/oauth/authorize?response_type=code&scope=notify&response_mode=form_post&client_id="id"&redirect_uri=https://line.husan.cc/register&state=' + user_id
mage = re.split(r'[\s]\s*',get_message)
try:
if mage[0] == "註冊":
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text=register_url))
elif 'add' == mage[0]:
try:
notice = add_item(mage[1],user_id,mage[2])
except:
notice = add_item(mage[1],user_id,None)
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'del' == mage[0]:
notice = del_item(mage[1],user_id)
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'list' == mage[0]:
item_list ,price_list= search_sub(user_id)
notice = '您訂閱的項目有:'
for i in range(len(item_list)):
notice+='\n'
notice=notice + item_list[i] +'\t' +str(price_list[i])
line_bot_api.reply_message(event.reply_token,TextSendMessage(text=notice))
elif 'send' == mage[0]:
acc_token = get_notify_id(user_id)
status = sent_message(mage[1],acc_token)
if status == 200:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='send OK!'))
else:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='請輸入指令:\nlist \n└查詢通知項目。\nadd 商品ID 價格 \n└新增商品通知,低於設定價格時通知。\nEX:add DYAJID-A900AVJ4G 500\ndel 商品ID \n└刪除商品通知。\nEX:del DYAJID-A900AVJ4G'))
except BaseException as e:
line_bot_api.reply_message(event.reply_token,TextSendMessage(text='指令錯誤,請重新確認!'))
print(e)
# get user id when reply
user_id = event.source.user_id
print("user_id =", user_id)
profile = line_bot_api.get_profile(user_id)
# LINE Notify posts to /register during registration
@app.route("/register",methods=['POST']) #註冊事件
def register():
if request.method == 'POST':
code = request.form.get('code')  # exchange this code for an access_token
print("code = ", code)
state = request.form.get('state')  # state = user_id (the LINE user id)
print("user_id = ",state)
profile = line_bot_api.get_profile(state)
user_name = profile.display_name
print("username = ",user_name) #帳號名稱
access_token = get_token(code)  # get the access_token used to push messages to this user
print("access_token = ",access_token)
r_code = send_test_message(access_token)  # send a test notification
if r_code == 200:
save_profile(user_name, code, state, access_token)  # save to the database
return '發送成功'
else:
return '發送失敗'
# send a notification when the account is followed (friend added)
@handler.add(FollowEvent)
def handle_follow(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="感謝訂閱!請輸入\"註冊\"啟動服務。"))
# POST the user's code to notify-bot to obtain an access_token
def get_token(code):
headers = {
"Content-Type":"application/x-www-form-urlencoded"
}
params = {
"grant_type":"authorization_code",
"code": code,
"redirect_uri":"https://line.husan.cc/register", # host_ip
"client_id":"client_id", #notify client_id
"client_secret":"client_secret" #notify client_secret
}
r = requests.post('https://notify-bot.line.me/oauth/token',headers=headers,params=params)
source = json.loads(r.text)
access_token = source['access_token']
return access_token
# send a test message to the user's LINE Notify
def send_test_message(access_token):
headers = {
"Authorization":"Bearer " + str(access_token),
"Content-Type":"application/x-www-form-urlencoded",
"notificationDisabled":"True"
}
params = {
"message":"\n帳號連結成功"
}
r = requests.post("https://notify-api.line.me/api/notify",headers=headers,params=params)
return r.status_code
# store the user profile in the database
def save_profile(username, code, user_id, access_token):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
db_Info = connection.get_server_info()
print("資料庫版本:", db_Info)
cursor = connection.cursor()
cursor.execute("INSERT INTO user_info (id, username, code, user_id, access_token) VALUES (null,'%s','%s','%s','%s')"%(username, code, user_id, access_token))
connection.commit()  # commit
cursor.execute("SELECT * FROM user_info")
# list the queried rows
for i in cursor:
print(i)
except Error as e:
print("資料庫連接失敗0:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
#print("資料庫連線已關閉")
# add a subscription item
def add_item(item_id, user_id,w_price):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
acc_token = get_notify_id(user_id)
try:
cursor.execute("INSERT INTO sub_list (item_id, w_price ,user_id, acc_token) VALUES ('%s','%d','%s','%s')"%(item_id, int(w_price) ,user_id, acc_token))
except:
cursor.execute("INSERT INTO sub_list (item_id,user_id, acc_token) VALUES ('%s','%s','%s')"%(item_id ,user_id, acc_token))
connection.commit()  # commit
return 'Add Done!'
except Error as e:
print("資料庫連接失敗2:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# delete a subscription item
def del_item(item_id, user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("DELETE FROM sub_list WHERE item_id = '%s' AND user_id = '%s'"%(item_id,user_id))
connection.commit()  # commit
return 'Delete Done!'
except Error as e:
print("資料庫連接失敗3:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# look up subscription items
def search_sub(user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("SELECT item_id , w_price FROM sub_list WHERE user_id LIKE '%s'"%(user_id))
sub_item = cursor.fetchall()
price_list = [item[1] for item in sub_item]
item_list = [item[0] for item in sub_item]
return item_list,price_list
except Error as e:
print("資料庫連接失敗1:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# get the notify access_token
def get_notify_id(user_id):
try:
connection = mariadb.connect(host='192.168.1.10', user='admin', port='3307', password='pw', database='line_notify')
if connection.is_connected():
cursor = connection.cursor()
cursor.execute("select database();")
record = cursor.fetchone()
cursor.execute("SELECT access_token FROM user_info WHERE user_id LIKE '%s'"%(user_id))
acc_token = cursor.fetchall()
return acc_token[0][0]
except Error as e:
print("資料庫連接失敗4:", e)
finally:
if (connection.is_connected()):
cursor.close()
connection.close()
# send a message
def sent_message(message,access_token):
headers = {
"Authorization":"Bearer " + access_token,
"Content-Type":"application/x-www-form-urlencoded"
}
params = {
"message":message
}
r = requests.post("https://notify-api.line.me/api/notify",headers=headers,params=params)
print(r.status_code)
return r.status_code
if __name__ == "__main__":
app.run('0.0.0.0',port=3000)
| [((25, 6, 25, 21), 'flask.Flask', 'Flask', ({(25, 12, 25, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, request, abort\n'), ((27, 15, 27, 29), 'linebot.LineBotApi', 'LineBotApi', ({(27, 26, 27, 28): '""""""'}, {}), "('')", False, 'from linebot import LineBotApi, WebhookHandler\n'), ((28, 10, 28, 28), 'linebot.WebhookHandler', 'WebhookHandler', ({(28, 25, 28, 27): '""""""'}, {}), "('')", False, 'from linebot import LineBotApi, WebhookHandler\n'), ((40, 11, 40, 41), 'flask.request.get_data', 'request.get_data', (), '', False, 'from flask import Flask, request, abort\n'), ((59, 11, 59, 43), 're.split', 're.split', ({(59, 20, 59, 30): '"""[\\\\s]\\\\s*"""', (59, 31, 59, 42): 'get_message'}, {}), "('[\\\\s]\\\\s*', get_message)", False, 'import re\n'), ((137, 8, 137, 93), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((138, 13, 138, 31), 'json.loads', 'json.loads', ({(138, 24, 138, 30): 'r.text'}, {}), '(r.text)', False, 'import json\n'), ((152, 8, 152, 92), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((260, 8, 260, 92), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((101, 15, 101, 39), 'flask.request.form.get', 'request.form.get', ({(101, 32, 101, 38): '"""code"""'}, {}), "('code')", False, 'from flask import Flask, request, abort\n'), ((103, 16, 103, 41), 'flask.request.form.get', 'request.form.get', ({(103, 33, 103, 40): '"""state"""'}, {}), "('state')", False, 'from flask import Flask, request, abort\n'), ((123, 8, 123, 79), 'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n'), ((158, 21, 158, 123), 'mysql.connector.connect', 'mariadb.connect', (), '', True, 'import mysql.connector as mariadb\n'), ((181, 21, 181, 123), 'mysql.connector.connect', 'mariadb.connect', (), '', True, 'import mysql.connector as mariadb\n'), ((202, 21, 202, 123), 'mysql.connector.connect', 'mariadb.connect', (), '', True, 'import mysql.connector as mariadb\n'), ((218, 21, 218, 123), 'mysql.connector.connect', 'mariadb.connect', (), '', True, 'import mysql.connector as mariadb\n'), ((236, 21, 236, 123), 'mysql.connector.connect', 'mariadb.connect', (), '', True, 'import mysql.connector as mariadb\n'), ((48, 8, 48, 18), 'flask.abort', 'abort', ({(48, 14, 48, 17): '(400)'}, {}), '(400)', False, 'from flask import Flask, request, abort\n'), ((64, 12, 64, 46), 'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n'), ((89, 53, 89, 108), 'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n'), ((70, 57, 70, 85), 'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n'), ((73, 57, 73, 85), 'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n'), ((80, 57, 80, 85), 'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n'), ((87, 57, 87, 303), 'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n'), ((85, 61, 85, 93), 
'linebot.models.TextSendMessage', 'TextSendMessage', (), '', False, 'from linebot.models import MessageEvent, TextMessage, TextSendMessage, FollowEvent\n')] |
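The script above exchanges an OAuth code at `https://notify-bot.line.me/oauth/token` and then pushes text through `https://notify-api.line.me/api/notify`. A trimmed-down sketch of that push step is shown below, reusing only the endpoint and bearer-token header already present in the source; the form-encoded `data` payload and the timeout are small deviations from the original, which passes the message via `params`.

```python
import requests

def push_notify(access_token: str, message: str) -> int:
    """POST a text message to LINE Notify and return the HTTP status code."""
    r = requests.post(
        "https://notify-api.line.me/api/notify",
        headers={"Authorization": "Bearer " + access_token},
        data={"message": message},
        timeout=10,
    )
    return r.status_code
```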
MachineLP/SFC_models | sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py | d438a4e3e88534a206c761cda7a3f6a58ac3a0ac | # coding=utf-8
from sfc_models.objects import *
from sfc_models.examples.Quick2DPlot import Quick2DPlot
register_standard_logs('output', __file__)
mod = Model()
country = Country(mod, 'CO')
Household(country, 'HH')
ConsolidatedGovernment(country, 'GOV')
FixedMarginBusiness(country, 'BUS', profit_margin=.025)
Market(country, 'GOOD')
Market(country, 'LAB')
TaxFlow(country, 'TAX', taxrate=.2)
# At time period 25, cut spending to 17 (from 20)
mod.AddExogenous('GOV', 'DEM_GOOD', [20.,]* 25 + [17.,]*20)
mod.AddGlobalEquation('DEBT_GDP', 'DEBT-TO-GDP RATIO', '-100.*GOV__F/BUS__SUP_GOOD')
mod.AddGlobalEquation('DEFICIT', 'DEFICIT', '-1.*GOV__INC')
mod.EquationSolver.MaxTime = 40
mod.main()
k = mod.GetTimeSeries('k')
Rat = mod.GetTimeSeries('DEBT_GDP')
Def = mod.GetTimeSeries('GOV__INC')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')
p = Quick2DPlot([k, k], [spend, Def], title='Spending and Deficit', filename='intro_X_XX_multiplier_deficit.png',
run_now=False)
p.Legend = ['G', 'Deficit']
p.LegendPos = 'center left'
p.DoPlot()
Quick2DPlot(k, Rat, title='Debt-to-GDP Ratio', filename='intro_X_XX_multiplier_debt_gdp.png')
| [((27, 4, 28, 30), 'sfc_models.examples.Quick2DPlot.Quick2DPlot', 'Quick2DPlot', (), '', False, 'from sfc_models.examples.Quick2DPlot import Quick2DPlot\n'), ((32, 0, 32, 93), 'sfc_models.examples.Quick2DPlot.Quick2DPlot', 'Quick2DPlot', (), '', False, 'from sfc_models.examples.Quick2DPlot import Quick2DPlot\n')] |
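Because the script already pulls the spending series, a rough spending-multiplier estimate can be read straight off the simulation output. The follow-on sketch below is meant to run after `mod.main()` above; the `BUS__SUP_GOOD` series name is taken from the debt-to-GDP equation in the script, and the sample periods 20 and 35 are arbitrary picks on either side of the period-25 cut.

```python
# Compare levels well before and well after the period-25 spending cut.
gdp = mod.GetTimeSeries('BUS__SUP_GOOD')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')
d_g = spend[35] - spend[20]   # change in government demand (17 - 20 = -3)
d_y = gdp[35] - gdp[20]       # change in output over the same span
print('Implied spending multiplier: %.2f' % (d_y / d_g))
```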
DrChai/django-auth-framework | auth_framework/settings.py | 4f9a108de66fe102ff28518b6597ad26b5855518 | from importlib import import_module
from django.conf import settings
from django.core.signals import setting_changed
SOCIALACCOUNT_MODEL = getattr(settings, "REST_AUTH_SOCIALACCOUNT_MODEL", "auth_framework.SocialAccount")
DEFAULTS = {
'UNIQUE_EMAIL': True,
'RESET_PASSWORD_BY': 'pin', # 'url'| 'pin'
'SERIALIZERS': {
# 'SOCIAL_LOGIN_SERIALIZER': 'auth.social.serializers.DefaultSocialLoginSerializer',
'SIGNUP_SERIALIZER': 'auth_framework.serializers.signup_serializers.DefaultSignUpSerializer',
'USERINFO_SERIALIZER': None
},
'SOCIALACCOUNT_MODEL': SOCIALACCOUNT_MODEL,
'SOCIALACCOUNT_ADMIN_CLASS': "auth_framework.admin.SocialAccountAdmin",
# SOCIAL LOGINS
'SOCIAL_CALLBACK_URL': None, # eg: 'https://developers.google.com/oauthplayground'
'SOCIAL_AUTO_SIGNUP': False,
# SIGN UP
# 'SIGNUP_EMAIL_VERIFICATION': 'none', # trimmed out email verification celery task in closed source. fewer usage
'SIGNUP_USERNAME_REQUIRED': False,
'SIGNUP_USERNAME_VALIDATORS': [],
'USE_PASSWORD_TWICE_VALIDATION': True,
# ADVANCES
'USE_PHONENUMBER_FIELD': False,
'USE_CELERY_EMAIL': False,
'USE_ID_TOKEN': True,
'OAUTH_SAVE_ID_TOKEN': False
}
def import_callable(path_or_callable):
if path_or_callable is None:
return None
if hasattr(path_or_callable, '__call__'):
return path_or_callable
else:
assert isinstance(path_or_callable, str)
package, attr = path_or_callable.rsplit('.', 1)
return getattr(import_module(package), attr)
class AuthSettings:
"""
"""
def __init__(self, user_settings=None, defaults=None):
if user_settings:
self._user_settings = user_settings
self.defaults = defaults or DEFAULTS
self._cached_attrs = set()
@property
def user_settings(self):
if not hasattr(self, '_user_settings'):
self._user_settings = getattr(settings, 'AUTH_FRAMEWORK', {})
return self._user_settings
@property
def username_validators(self):
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model
validators = self.user_settings.get("SIGNUP_USERNAME_VALIDATORS", None)
if validators:
ret = []
if not isinstance(validators, list):
raise ImproperlyConfigured(
"SIGNUP_USERNAME_VALIDATORS is expected to be a list"
)
for path in validators:
pkg, attr = path.rsplit(".", 1)
validator = getattr(import_module(pkg), attr)
ret.append(validator())
else:
ret = (
get_user_model()._meta.get_field('username').validators
)
return ret
def serializers(self, data):
# Check if present in user settings
for key, value in data.items():
data[key] = import_callable(value)
return data
def __getattr__(self, attr):
if attr not in self.defaults:
raise AttributeError("Invalid setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
if isinstance(val, dict):
val = self.defaults[attr].copy()
val.update(self.user_settings[attr])
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
if attr == 'SERIALIZERS':
val = self.serializers(val)
# Cache the result
self._cached_attrs.add(attr)
setattr(self, attr, val)
return val
def reload(self):
for attr in self._cached_attrs:
delattr(self, attr)
self._cached_attrs.clear()
if hasattr(self, '_user_settings'):
delattr(self, '_user_settings')
app_settings = AuthSettings(None, DEFAULTS)
def reload_app_settings(*args, **kwargs):
setting = kwargs['setting']
if setting == 'AUTH_FRAMEWORK':
app_settings.reload()
setting_changed.connect(reload_app_settings)
| [((124, 0, 124, 44), 'django.core.signals.setting_changed.connect', 'setting_changed.connect', ({(124, 24, 124, 43): 'reload_app_settings'}, {}), '(reload_app_settings)', False, 'from django.core.signals import setting_changed\n'), ((42, 23, 42, 45), 'importlib.import_module', 'import_module', ({(42, 37, 42, 44): 'package'}, {}), '(package)', False, 'from importlib import import_module\n'), ((68, 22, 70, 17), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', ({(69, 20, 69, 73): '"""SIGNUP_USERNAME_VALIDATORS is expected to be a list"""'}, {}), "('SIGNUP_USERNAME_VALIDATORS is expected to be a list')", False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((73, 36, 73, 54), 'importlib.import_module', 'import_module', ({(73, 50, 73, 53): 'pkg'}, {}), '(pkg)', False, 'from importlib import import_module\n'), ((77, 16, 77, 32), 'django.contrib.auth.get_user_model', 'get_user_model', ({}, {}), '()', False, 'from django.contrib.auth import get_user_model\n')] |
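Projects override the defaults above through an `AUTH_FRAMEWORK` dict in Django settings and read the merged result back from `app_settings`; the `setting_changed` hook clears the cache when tests override settings. A minimal round-trip sketch, assuming the package is importable as `auth_framework` and using only keys that appear in `DEFAULTS`:

```python
# settings.py (project side) -- override only the keys you care about
AUTH_FRAMEWORK = {
    "UNIQUE_EMAIL": False,
    "RESET_PASSWORD_BY": "url",
}

# application code, after Django settings are loaded
from auth_framework.settings import app_settings

assert app_settings.UNIQUE_EMAIL is False              # user override wins
assert app_settings.USE_PHONENUMBER_FIELD is False     # untouched keys fall back to DEFAULTS
signup_cls = app_settings.SERIALIZERS["SIGNUP_SERIALIZER"]  # resolved to a class by import_callable
```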
gkiserpong/shorty | shorty/models.py | 5795e26f3221d581223e37353bee360454532211 | from django.db import models
from shorty.manager import UrlManager
class Url(models.Model):
long_url = models.URLField()
short_id = models.SlugField()
counter = models.IntegerField(default=0)
def __str__(self):
return "%s -- %s" % (self.long_url, self.short_id)
objects = UrlManager() | [((7, 15, 7, 32), 'django.db.models.URLField', 'models.URLField', ({}, {}), '()', False, 'from django.db import models\n'), ((8, 15, 8, 33), 'django.db.models.SlugField', 'models.SlugField', ({}, {}), '()', False, 'from django.db import models\n'), ((9, 14, 9, 44), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import models\n'), ((14, 14, 14, 26), 'shorty.manager.UrlManager', 'UrlManager', ({}, {}), '()', False, 'from shorty.manager import UrlManager\n')] |
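A quick usage sketch for the model above, written against the stock Django ORM; `UrlManager` is assumed to extend the default manager API, and any extra helpers it defines in `shorty/manager.py` are not shown here.

```python
from django.db.models import F
from shorty.models import Url

# Create a mapping, then bump its hit counter atomically on each redirect.
url = Url.objects.create(long_url="https://example.com/some/long/path", short_id="abc123")
Url.objects.filter(pk=url.pk).update(counter=F("counter") + 1)
```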
time-track-tool/time-track-tool | test/sec_full.py | a1c280f32a7766e460c862633b748fa206256f24 | security = """
New Web users get the Roles "User,Nosy"
New Email users get the Role "User"
Role "admin":
User may access the rest interface (Rest Access)
User may access the web interface (Web Access)
User may access the xmlrpc interface (Xmlrpc Access)
User may create everything (Create)
User may edit everything (Edit)
User may manipulate user Roles through the web (Web Roles)
User may restore everything (Restore)
User may retire everything (Retire)
User may use the email interface (Email Access)
User may view everything (View)
Role "anonymous":
User may access the web interface (Web Access)
Role "cc-permission":
(Restore for "cost_center_permission_group" only)
(Retire for "cost_center_permission_group" only)
User is allowed to create cost_center_permission_group (Create for "cost_center_permission_group" only)
User is allowed to edit cost_center_permission_group (Edit for "cost_center_permission_group" only)
Role "contact":
User is allowed to create contact (Create for "contact" only)
User is allowed to edit contact (Edit for "contact" only)
Role "controlling":
User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only)
User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'team_lead') only)
User is allowed Edit on (Edit for "time_project": ('group_lead', 'team_lead') only)
User is allowed Edit on (Edit for "time_wp": ('project',) only)
User is allowed View on (View for "user": ('roles',) only)
User is allowed View on (View for "user_dynamic": ('id', 'sap_cc', 'user', 'valid_from', 'valid_to') only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access daily_record (View for "daily_record" only)
User is allowed to access daily_record_freeze (View for "daily_record_freeze" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access overtime_correction (View for "overtime_correction" only)
User is allowed to access query (View for "query" only)
User is allowed to access time_project (View for "time_project" only)
User is allowed to access time_record (View for "time_record" only)
User is allowed to access time_report (View for "time_report" only)
User is allowed to access time_wp (View for "time_wp" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
User is allowed to create cost_center (Create for "cost_center" only)
User is allowed to create cost_center_group (Create for "cost_center_group" only)
User is allowed to create cost_center_status (Create for "cost_center_status" only)
User is allowed to create department (Create for "department" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to create product_family (Create for "product_family" only)
User is allowed to create public_holiday (Create for "public_holiday" only)
User is allowed to create query (Create for "query" only)
User is allowed to create reporting_group (Create for "reporting_group" only)
User is allowed to create sap_cc (Create for "sap_cc" only)
User is allowed to create time_activity (Create for "time_activity" only)
User is allowed to create time_activity_perm (Create for "time_activity_perm" only)
User is allowed to create time_record (Create for "time_record" only)
User is allowed to create work_location (Create for "work_location" only)
User is allowed to edit cost_center (Edit for "cost_center" only)
User is allowed to edit cost_center_group (Edit for "cost_center_group" only)
User is allowed to edit cost_center_status (Edit for "cost_center_status" only)
User is allowed to edit department (Edit for "department" only)
User is allowed to edit organisation (Edit for "organisation" only)
User is allowed to edit product_family (Edit for "product_family" only)
User is allowed to edit public_holiday (Edit for "public_holiday" only)
User is allowed to edit query (Edit for "query" only)
User is allowed to edit reporting_group (Edit for "reporting_group" only)
User is allowed to edit sap_cc (Edit for "sap_cc" only)
User is allowed to edit time_activity (Edit for "time_activity" only)
User is allowed to edit time_activity_perm (Edit for "time_activity_perm" only)
User is allowed to edit time_record (Edit for "time_record" only)
User is allowed to edit work_location (Edit for "work_location" only)
Role "doc_admin":
User is allowed Edit on (Edit for "department": ('doc_num',) only)
User is allowed to create artefact (Create for "artefact" only)
User is allowed to create doc (Create for "doc" only)
User is allowed to create doc_category (Create for "doc_category" only)
User is allowed to create doc_status (Create for "doc_status" only)
User is allowed to create product_type (Create for "product_type" only)
User is allowed to create reference (Create for "reference" only)
User is allowed to edit artefact (Edit for "artefact" only)
User is allowed to edit doc (Edit for "doc" only)
User is allowed to edit doc_category (Edit for "doc_category" only)
User is allowed to edit doc_status (Edit for "doc_status" only)
User is allowed to edit product_type (Edit for "product_type" only)
User is allowed to edit reference (Edit for "reference" only)
Role "dom-user-edit-facility":
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['room'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['room'] only)
Role "dom-user-edit-gtt":
(Search for "user_dynamic" only)
May only view/edit records with the correct domain (Edit for "user_dynamic" only)
May only view/edit records with the correct domain (View for "user_dynamic" only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to create user (Create for "user" only)
User is allowed to create user_contact (Create for "user_contact" only)
User is allowed to create user_dynamic (Create for "user_dynamic" only)
User is allowed to edit user_contact (Edit for "user_contact" only)
Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 'tt_lines', 'username', 'vie_user'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 'tt_lines', 'username', 'vie_user'] only)
Role "dom-user-edit-hr":
(Search for "user_dynamic" only)
May only view/edit records with the correct domain (Edit for "user_dynamic" only)
May only view/edit records with the correct domain (View for "user_dynamic" only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to create user_contact (Create for "user_contact" only)
User is allowed to create user_dynamic (Create for "user_dynamic" only)
User is allowed to edit user_contact (Edit for "user_contact" only)
Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only)
Role "dom-user-edit-office":
User is allowed to create user_contact (Create for "user_contact" only)
User is allowed to edit user_contact (Edit for "user_contact" only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'position_text', 'room'] only)
Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'position_text', 'room'] only)
Role "external":
(Search for "ext_tracker_state": ('id', 'issue') only)
(Search for "user": ('id', 'nickname', 'username') only)
External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (Edit for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only)
External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (View for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only)
User is allowed View on (View for "category": ('id', 'name') only)
User is allowed View on (View for "user": ('nickname', 'status', 'username') only)
User is allowed View on (View for "user_status": ('name',) only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only)
User is allowed to access area (View for "area" only)
User is allowed to access doc_issue_status (View for "doc_issue_status" only)
User is allowed to access ext_tracker (View for "ext_tracker" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to access keyword (View for "keyword" only)
User is allowed to access kind (View for "kind" only)
User is allowed to access msg_keyword (View for "msg_keyword" only)
User is allowed to access safety_level (View for "safety_level" only)
User is allowed to access severity (View for "severity" only)
User is allowed to access status (View for "status" only)
User is allowed to access status_transition (View for "status_transition" only)
User is allowed to access test_level (View for "test_level" only)
User is allowed to create file (Create for "file" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create msg (Create for "msg" only)
User is allowed to create query (Create for "query" only)
User is allowed to edit their queries (Edit for "query" only)
User is allowed to retire their queries (Retire for "query" only)
User is allowed to search for their own files (Search for "file" only)
User is allowed to search for their own messages (Search for "msg" only)
User is allowed to search for their queries (Search for "query" only)
User is allowed to search issue (Search for "issue" only)
User is allowed to view their own files (View for "file" only)
User may access the web interface (Web Access)
User may use the email interface (Email Access)
Users are allowed to edit some of their details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'password', 'timezone') only)
Users are allowed to view some of their details (View for "user": ('activity', 'actor', 'creation', 'creator', 'firstname', 'lastname', 'realname', 'username') only)
Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only)
Role "facility":
(Restore for "room" only)
(Retire for "room" only)
User is allowed to create room (Create for "room" only)
User is allowed to edit room (Edit for "room" only)
Role "functional-role":
(Restore for "user_functional_role" only)
(Retire for "user_functional_role" only)
User is allowed Edit on (Edit for "user": ('business_responsible', 'scale_seniority') only)
User is allowed View on (View for "user": ('business_responsible', 'planning_role', 'scale_seniority') only)
User is allowed to access user_functional_role (View for "user_functional_role" only)
User is allowed to create user_functional_role (Create for "user_functional_role" only)
User is allowed to edit user_functional_role (Edit for "user_functional_role" only)
Role "hr":
(Edit for "overtime_period": ('name', 'order') only)
(Restore for "room" only)
(Retire for "room" only)
User is allowed Edit on (Edit for "daily_record": ('required_overtime', 'weekend_allowed') only)
User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only)
User is allowed Edit on (Edit for "time_project": ('approval_hr', 'approval_required', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'no_overtime', 'no_overtime_day', 'only_hours', 'overtime_reduction') only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed to access auto_wp (View for "auto_wp" only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access daily_record (View for "daily_record" only)
User is allowed to access daily_record_freeze (View for "daily_record_freeze" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access overtime_correction (View for "overtime_correction" only)
User is allowed to access time_record (View for "time_record" only)
User is allowed to access user_contact (View for "user_contact" only)
User is allowed to access user_dynamic (View for "user_dynamic" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
User is allowed to create auto_wp (Create for "auto_wp" only)
User is allowed to create daily_record_freeze (Create for "daily_record_freeze" only)
User is allowed to create location (Create for "location" only)
User is allowed to create org_location (Create for "org_location" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to create overtime_correction (Create for "overtime_correction" only)
User is allowed to create overtime_period (Create for "overtime_period" only)
User is allowed to create product_family (Create for "product_family" only)
User is allowed to create public_holiday (Create for "public_holiday" only)
User is allowed to create reporting_group (Create for "reporting_group" only)
User is allowed to create room (Create for "room" only)
User is allowed to create sap_cc (Create for "sap_cc" only)
User is allowed to create time_record (Create for "time_record" only)
User is allowed to create uc_type (Create for "uc_type" only)
User is allowed to create user (Create for "user" only)
User is allowed to create user_dynamic (Create for "user_dynamic" only)
User is allowed to edit auto_wp (Edit for "auto_wp" only)
User is allowed to edit dynamic user data if not frozen in validity span of dynamic user record (Edit for "user_dynamic" only)
User is allowed to edit freeze record if not frozen at the given date (Edit for "daily_record_freeze": ('frozen',) only)
User is allowed to edit location (Edit for "location" only)
User is allowed to edit org_location (Edit for "org_location" only)
User is allowed to edit organisation (Edit for "organisation" only)
User is allowed to edit overtime correction if the overtime correction is not frozen (Edit for "overtime_correction" only)
User is allowed to edit product_family (Edit for "product_family" only)
User is allowed to edit public_holiday (Edit for "public_holiday" only)
User is allowed to edit reporting_group (Edit for "reporting_group" only)
User is allowed to edit room (Edit for "room" only)
User is allowed to edit sap_cc (Edit for "sap_cc" only)
User is allowed to edit time_record (Edit for "time_record" only)
User is allowed to edit uc_type (Edit for "uc_type" only)
User may manipulate user Roles through the web (Web Roles)
Role "hr-leave-approval":
User is allowed Edit on (Edit for "leave_submission": ('status',) only)
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
Role "hr-org-location":
(Search for "daily_record_freeze" only)
(Search for "overtime_correction" only)
(Search for "time_activity_perm" only)
(Search for "time_record" only)
(Search for "user_dynamic" only)
User is allowed to view dynamic user data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "user_dynamic" only)
User is allowed to view freeze information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "daily_record_freeze" only)
User is allowed to view overtime information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "overtime_correction" only)
User is allowed to view time record data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "time_record" only)
Role "hr-vacation":
User is allowed to access contract_type (View for "contract_type" only)
User is allowed to access leave_submission (View for "leave_submission" only)
User is allowed to access vacation_correction (View for "vacation_correction" only)
User is allowed to create contract_type (Create for "contract_type" only)
User is allowed to create leave_submission (Create for "leave_submission" only)
User is allowed to create vacation_correction (Create for "vacation_correction" only)
User is allowed to edit contract_type (Edit for "contract_type" only)
User is allowed to edit leave_submission (Edit for "leave_submission" only)
User is allowed to edit vacation_correction (Edit for "vacation_correction" only)
Role "issue_admin":
User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only)
User is allowed to access issue (View for "issue" only)
User is allowed to create area (Create for "area" only)
User is allowed to create category (Create for "category" only)
User is allowed to create doc_issue_status (Create for "doc_issue_status" only)
User is allowed to create ext_tracker (Create for "ext_tracker" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create keyword (Create for "keyword" only)
User is allowed to create kind (Create for "kind" only)
User is allowed to create msg_keyword (Create for "msg_keyword" only)
User is allowed to create safety_level (Create for "safety_level" only)
User is allowed to create severity (Create for "severity" only)
User is allowed to create status (Create for "status" only)
User is allowed to create status_transition (Create for "status_transition" only)
User is allowed to create test_level (Create for "test_level" only)
User is allowed to edit area (Edit for "area" only)
User is allowed to edit category (Edit for "category" only)
User is allowed to edit doc_issue_status (Edit for "doc_issue_status" only)
User is allowed to edit ext_tracker (Edit for "ext_tracker" only)
User is allowed to edit issue (Edit for "issue" only)
User is allowed to edit keyword (Edit for "keyword" only)
User is allowed to edit kind (Edit for "kind" only)
User is allowed to edit msg_keyword (Edit for "msg_keyword" only)
User is allowed to edit safety_level (Edit for "safety_level" only)
User is allowed to edit severity (Edit for "severity" only)
User is allowed to edit status (Edit for "status" only)
User is allowed to edit status_transition (Edit for "status_transition" only)
User is allowed to edit test_level (Edit for "test_level" only)
Role "it":
Create (Create for "user_contact" only)
User is allowed Edit on (Edit for "file": ('name', 'type') only)
User is allowed Edit on (Edit for "location": ('domain_part',) only)
User is allowed Edit on (Edit for "organisation": ('domain_part',) only)
User is allowed Edit on (Edit for "user": ('ad_domain', 'nickname', 'password', 'pictures', 'roles', 'timetracking_by', 'timezone', 'username') only)
User is allowed Edit on (Edit for "user": ('address', 'alternate_addresses', 'nickname', 'password', 'timezone', 'username') only)
User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only)
User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed to access domain_permission (View for "domain_permission" only)
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
User is allowed to create domain_permission (Create for "domain_permission" only)
User is allowed to create it_category (Create for "it_category" only)
User is allowed to create it_int_prio (Create for "it_int_prio" only)
User is allowed to create it_issue (Create for "it_issue" only)
User is allowed to create it_project (Create for "it_project" only)
User is allowed to create it_request_type (Create for "it_request_type" only)
User is allowed to create mailgroup (Create for "mailgroup" only)
User is allowed to edit domain_permission (Edit for "domain_permission" only)
User is allowed to edit it_category (Edit for "it_category" only)
User is allowed to edit it_int_prio (Edit for "it_int_prio" only)
User is allowed to edit it_issue (Edit for "it_issue" only)
User is allowed to edit it_project (Edit for "it_project" only)
User is allowed to edit it_request_type (Edit for "it_request_type" only)
User is allowed to edit mailgroup (Edit for "mailgroup" only)
User may manipulate user Roles through the web (Web Roles)
Role "itview":
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
Role "msgedit":
(Search for "msg": ('date', 'id') only)
User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only)
User is allowed to access ext_msg (View for "ext_msg" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
Role "msgsync":
(Search for "msg": ('date', 'id') only)
User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only)
User is allowed to access ext_msg (View for "ext_msg" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to create ext_msg (Create for "ext_msg" only)
User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only)
User is allowed to edit ext_msg (Edit for "ext_msg" only)
User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only)
Role "nosy":
User may get nosy messages for doc (Nosy for "doc" only)
User may get nosy messages for issue (Nosy for "issue" only)
User may get nosy messages for it_issue (Nosy for "it_issue" only)
User may get nosy messages for it_project (Nosy for "it_project" only)
User may get nosy messages for support (Nosy for "support" only)
Role "office":
(Restore for "room" only)
(Retire for "room" only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed to access user_contact (View for "user_contact" only)
User is allowed to create absence (Create for "absence" only)
User is allowed to create absence_type (Create for "absence_type" only)
User is allowed to create room (Create for "room" only)
User is allowed to create uc_type (Create for "uc_type" only)
User is allowed to edit absence (Edit for "absence" only)
User is allowed to edit absence_type (Edit for "absence_type" only)
User is allowed to edit room (Edit for "room" only)
User is allowed to edit uc_type (Edit for "uc_type" only)
Role "organisation":
User is allowed to access location (View for "location" only)
User is allowed to access org_location (View for "org_location" only)
User is allowed to access organisation (View for "organisation" only)
User is allowed to create location (Create for "location" only)
User is allowed to create org_location (Create for "org_location" only)
User is allowed to create organisation (Create for "organisation" only)
User is allowed to edit location (Edit for "location" only)
User is allowed to edit org_location (Edit for "org_location" only)
User is allowed to edit organisation (Edit for "organisation" only)
Role "pgp":
Role "procurement":
(View for "sap_cc" only)
(View for "time_project" only)
User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'purchasing_agents', 'team_lead') only)
User is allowed Edit on (Edit for "time_project": ('group_lead', 'purchasing_agents', 'team_lead') only)
Role "project":
User is allowed Edit on (Edit for "time_project": ('cost_center', 'department', 'deputy', 'description', 'name', 'nosy', 'organisation', 'responsible', 'status') only)
User is allowed Edit on (Edit for "time_project": ('infosec_req', 'is_extern', 'max_hours', 'op_project', 'planned_effort', 'product_family', 'project_type', 'reporting_group', 'work_location') only)
User is allowed to access time_project (View for "time_project" only)
User is allowed to access time_report (View for "time_report" only)
User is allowed to access time_wp (View for "time_wp" only)
User is allowed to create time_project (Create for "time_project" only)
User is allowed to create time_project_status (Create for "time_project_status" only)
User is allowed to create time_wp (Create for "time_wp" only)
User is allowed to create time_wp_group (Create for "time_wp_group" only)
User is allowed to edit time_project_status (Edit for "time_project_status" only)
User is allowed to edit time_wp (Edit for "time_wp" only)
User is allowed to edit time_wp_group (Edit for "time_wp_group" only)
Role "project_view":
User is allowed to access time_project (View for "time_project" only)
User is allowed to access time_report (View for "time_report" only)
User is allowed to access time_wp (View for "time_wp" only)
Role "sec-incident-nosy":
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
Role "sec-incident-responsible":
User is allowed to access it_int_prio (View for "it_int_prio" only)
User is allowed to access it_issue (View for "it_issue" only)
User is allowed to access it_project (View for "it_project" only)
Role "staff-report":
Role "sub-login":
Role "summary_view":
Role "supportadmin":
User is allowed to access analysis_result (View for "analysis_result" only)
User is allowed to access contact (View for "contact" only)
User is allowed to access customer (View for "customer" only)
User is allowed to access customer_agreement (View for "customer_agreement" only)
User is allowed to access mailgroup (View for "mailgroup" only)
User is allowed to access return_type (View for "return_type" only)
User is allowed to access sup_classification (View for "sup_classification" only)
User is allowed to access support (View for "support" only)
User is allowed to create analysis_result (Create for "analysis_result" only)
User is allowed to create contact (Create for "contact" only)
User is allowed to create customer (Create for "customer" only)
User is allowed to create customer_agreement (Create for "customer_agreement" only)
User is allowed to create mailgroup (Create for "mailgroup" only)
User is allowed to create return_type (Create for "return_type" only)
User is allowed to create sup_classification (Create for "sup_classification" only)
User is allowed to create support (Create for "support" only)
User is allowed to edit analysis_result (Edit for "analysis_result" only)
User is allowed to edit contact (Edit for "contact" only)
User is allowed to edit customer (Edit for "customer" only)
User is allowed to edit customer_agreement (Edit for "customer_agreement" only)
User is allowed to edit mailgroup (Edit for "mailgroup" only)
User is allowed to edit return_type (Edit for "return_type" only)
User is allowed to edit sup_classification (Edit for "sup_classification" only)
User is allowed to edit support (Edit for "support" only)
Role "time-report":
User is allowed to access time_report (View for "time_report" only)
User is allowed to create time_report (Create for "time_report" only)
User is allowed to edit time_report (Edit for "time_report" only)
User may edit own file (file created by user) (Edit for "file" only)
Role "user":
(Search for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only)
(Search for "time_wp": ('activity', 'actor', 'auto_wp', 'bookers', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only)
(View for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only)
Search (Search for "user_contact" only)
User is allowed Edit on (Edit for "msg": ('keywords',) only)
User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only)
User is allowed Edit on issue if issue is non-confidential or user is on nosy list (Edit for "issue" only)
User is allowed Edit on it_issue if it_issue is non-confidential or user is on nosy list (Edit for "it_issue": ('messages', 'files', 'nosy') only)
User is allowed Edit on it_project if it_project is non-confidential or user is on nosy list (Edit for "it_project": ('messages', 'files', 'nosy') only)
User is allowed Edit on support if support is non-confidential or user is on nosy list (Edit for "support": ('analysis_end', 'analysis_result', 'analysis_start', 'bcc', 'business_unit', 'category', 'cc', 'cc_emails', 'classification', 'closed', 'confidential', 'customer', 'emails', 'execution', 'external_ref', 'files', 'goods_received', 'goods_sent', 'lot', 'messages', 'nosy', 'number_effected', 'numeric_effort', 'prio', 'prodcat', 'product', 'related_issues', 'related_support', 'release', 'responsible', 'return_type', 'sap_ref', 'send_to_customer', 'serial_number', 'set_first_reply', 'status', 'superseder', 'title', 'type', 'warranty') only)
User is allowed View on (View for "user": ('activity', 'actor', 'ad_domain', 'address', 'alternate_addresses', 'business_responsible', 'clearance_by', 'creation', 'creator', 'firstname', 'id', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'queries', 'realname', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'title', 'tt_lines', 'username') only)
User is allowed View on (View for "user": ('activity', 'actor', 'address', 'alternate_addresses', 'creation', 'creator', 'id', 'queries', 'realname', 'status', 'timezone', 'username') only)
User is allowed View on (View for "user": ('business_responsible', 'department_temp', 'timetracking_by', 'vie_user', 'vie_user_bl_override', 'vie_user_ml') only)
User is allowed View on (View for "user": ('contacts',) only)
User is allowed View on (View for "user_dynamic": ('department', 'org_location') only)
User is allowed View on file if file is linked from an item with View permission (View for "file" only)
User is allowed View on issue if issue is non-confidential or user is on nosy list (View for "issue" only)
User is allowed View on it_issue if it_issue is non-confidential or user is on nosy list (View for "it_issue" only)
User is allowed View on it_project if it_project is non-confidential or user is on nosy list (View for "it_project" only)
User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only)
User is allowed View on support if support is non-confidential or user is on nosy list (View for "support" only)
User is allowed to access absence (View for "absence" only)
User is allowed to access absence_type (View for "absence_type" only)
User is allowed to access analysis_result (View for "analysis_result" only)
User is allowed to access area (View for "area" only)
User is allowed to access artefact (View for "artefact" only)
User is allowed to access business_unit (View for "business_unit" only)
User is allowed to access category (View for "category" only)
User is allowed to access contact (View for "contact" only)
User is allowed to access contact_type (View for "contact_type" only)
User is allowed to access cost_center (View for "cost_center" only)
User is allowed to access cost_center_group (View for "cost_center_group" only)
User is allowed to access cost_center_permission_group (View for "cost_center_permission_group" only)
User is allowed to access cost_center_status (View for "cost_center_status" only)
User is allowed to access customer (View for "customer" only)
User is allowed to access customer_agreement (View for "customer_agreement" only)
User is allowed to access daily record if he is owner or supervisor or timetracking-by user (Edit for "daily_record": ('status', 'time_record') only)
User is allowed to access daily record if he is owner or supervisor or timetracking-by user (View for "daily_record" only)
User is allowed to access daily_record_status (View for "daily_record_status" only)
User is allowed to access department (View for "department" only)
User is allowed to access doc (View for "doc" only)
User is allowed to access doc_category (View for "doc_category" only)
User is allowed to access doc_issue_status (View for "doc_issue_status" only)
User is allowed to access doc_status (View for "doc_status" only)
User is allowed to access ext_tracker (View for "ext_tracker" only)
User is allowed to access ext_tracker_state (View for "ext_tracker_state" only)
User is allowed to access ext_tracker_type (View for "ext_tracker_type" only)
User is allowed to access functional_role (View for "functional_role" only)
User is allowed to access it_category (View for "it_category" only)
User is allowed to access it_issue_status (View for "it_issue_status" only)
User is allowed to access it_prio (View for "it_prio" only)
User is allowed to access it_project_status (View for "it_project_status" only)
User is allowed to access it_request_type (View for "it_request_type" only)
User is allowed to access keyword (View for "keyword" only)
User is allowed to access kind (View for "kind" only)
User is allowed to access leave_status (View for "leave_status" only)
User is allowed to access location (View for "location" only)
User is allowed to access mailgroup (View for "mailgroup" only)
User is allowed to access msg_keyword (View for "msg_keyword" only)
User is allowed to access org_group (View for "org_group" only)
User is allowed to access org_location (View for "org_location" only)
User is allowed to access organisation (View for "organisation" only)
User is allowed to access overtime_period (View for "overtime_period" only)
User is allowed to access prodcat (View for "prodcat" only)
User is allowed to access product (View for "product" only)
User is allowed to access product_family (View for "product_family" only)
User is allowed to access product_type (View for "product_type" only)
User is allowed to access project_type (View for "project_type" only)
User is allowed to access public_holiday (View for "public_holiday" only)
User is allowed to access reference (View for "reference" only)
User is allowed to access reporting_group (View for "reporting_group" only)
User is allowed to access return_type (View for "return_type" only)
User is allowed to access room (View for "room" only)
User is allowed to access safety_level (View for "safety_level" only)
User is allowed to access sap_cc (View for "sap_cc" only)
User is allowed to access severity (View for "severity" only)
User is allowed to access sex (View for "sex" only)
User is allowed to access status (View for "status" only)
User is allowed to access status_transition (View for "status_transition" only)
User is allowed to access summary_report (View for "summary_report" only)
User is allowed to access summary_type (View for "summary_type" only)
User is allowed to access sup_classification (View for "sup_classification" only)
User is allowed to access sup_execution (View for "sup_execution" only)
User is allowed to access sup_prio (View for "sup_prio" only)
User is allowed to access sup_status (View for "sup_status" only)
User is allowed to access sup_type (View for "sup_type" only)
User is allowed to access sup_warranty (View for "sup_warranty" only)
User is allowed to access test_level (View for "test_level" only)
User is allowed to access time_activity (View for "time_activity" only)
User is allowed to access time_activity_perm (View for "time_activity_perm" only)
User is allowed to access time_project_status (View for "time_project_status" only)
User is allowed to access time_wp_group (View for "time_wp_group" only)
User is allowed to access time_wp_summary_no (View for "time_wp_summary_no" only)
User is allowed to access timesheet (View for "timesheet" only)
User is allowed to access uc_type (View for "uc_type" only)
User is allowed to access user_status (View for "user_status" only)
User is allowed to access vac_aliq (View for "vac_aliq" only)
User is allowed to access vacation_report (View for "vacation_report" only)
User is allowed to access work_location (View for "work_location" only)
User is allowed to create daily_record (Create for "daily_record" only)
User is allowed to create doc (Create for "doc" only)
User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only)
User is allowed to create file (Create for "file" only)
User is allowed to create issue (Create for "issue" only)
User is allowed to create it_issue (Create for "it_issue" only)
User is allowed to create leave_submission (Create for "leave_submission" only)
User is allowed to create msg (Create for "msg" only)
User is allowed to create queries (Create for "query" only)
User is allowed to create support (Create for "support" only)
User is allowed to create time_record (Create for "time_record" only)
User is allowed to create time_wp (Create for "time_wp" only)
User is allowed to edit (some of) their own user details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'lunch_duration', 'lunch_start', 'password', 'queries', 'realname', 'room', 'subst_active', 'substitute', 'timezone', 'tt_lines') only)
User is allowed to edit category if he is responsible for it (Edit for "category": ('nosy', 'default_part_of') only)
User is allowed to edit doc (Edit for "doc" only)
User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only)
User is allowed to edit if he's the owner of the contact (Edit for "user_contact": ('visible',) only)
User is allowed to edit several fields if he is Responsible for an it_issue (Edit for "it_issue": ('responsible',) only)
User is allowed to edit several fields if he is Stakeholder/Responsible for an it_issue (Edit for "it_issue": ('deadline', 'status', 'title') only)
User is allowed to edit their queries (Edit for "query" only)
User is allowed to edit time category if the status is "Open" and he is responsible for the time category (Edit for "time_project": ('deputy', 'planned_effort', 'nosy') only)
User is allowed to edit workpackage if he is time category owner or deputy (Edit for "time_wp": ('cost_center', 'is_public', 'name', 'responsible', 'time_wp_summary_no', 'wp_no') only)
User is allowed to retire their queries (Retire for "query" only)
User is allowed to search daily_record (Search for "daily_record" only)
User is allowed to search for their own files (Search for "file" only)
User is allowed to search for their own messages (Search for "msg" only)
User is allowed to search for their queries (Search for "query" only)
User is allowed to search issue (Search for "issue" only)
User is allowed to search it_issue (Search for "it_issue" only)
User is allowed to search it_project (Search for "it_project" only)
User is allowed to search leave_submission (Search for "leave_submission" only)
User is allowed to search support (Search for "support" only)
User is allowed to search time_record (Search for "time_record" only)
User is allowed to search time_wp (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'is_extern', 'is_public', 'id', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only)
User is allowed to search user_status (Search for "user": ('status',) only)
User is allowed to see time record if he is allowed to see all details on work package or User may view a daily_record (and time_records that are attached to that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "time_record" only)
User is allowed to view (some of) their own user details (View for "user": ('entry_date', 'planning_role') only)
User is allowed to view contact if he's the owner of the contact or the contact is marked visible (View for "user_contact" only)
User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (Edit for "leave_submission": ('status',) only)
User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (View for "leave_submission" only)
User is allowed to view selected fields in work package if booking is allowed for this user (also applies to timetracking by, supervisor and approval delegated) (View for "time_wp": ('activity', 'actor', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only)
User is allowed to view their own files (View for "file" only)
User is allowed to view their own messages (View for "msg" only)
User is allowed to view their own overtime information (View for "overtime_correction" only)
User is allowed to view time record if he is the supervisor or the person to whom approvals are delegated (View for "time_record" only)
User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_project": ('name',) only)
User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_wp": ('name', 'project') only)
User is allowed to view/edit workpackage if he is owner or project responsible/deputy (Edit for "time_wp": ('bookers', 'description', 'epic_key', 'planned_effort', 'time_end', 'time_start', 'time_wp_summary_no') only)
User may access the rest interface (Rest Access)
User may access the web interface (Web Access)
User may access the xmlrpc interface (Xmlrpc Access)
User may edit own leave submissions (Edit for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only)
User may edit own leave submissions (View for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only)
User may see time report if reponsible or deputy of time project or on nosy list of time project (View for "time_report" only)
User may use the email interface (Email Access)
User may view a daily_record (and time_records that are attached to that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "daily_record" only)
User may view their own user functional role (View for "user_functional_role" only)
User may view time category if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_project" only)
User may view work package if responsible for it, if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_wp" only)
User or Timetracking by user may edit time_records owned by user (Edit for "time_record" only)
User or Timetracking by user may edit time_records owned by user (Restore for "time_record" only)
User or Timetracking by user may edit time_records owned by user (Retire for "time_record" only)
User or Timetracking by user may edit time_records owned by user (View for "time_record" only)
Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only)
Users may see daily record if they may see one of the time_records for that day (View for "daily_record" only)
Role "user_view":
User is allowed to access user (View for "user" only)
Role "vacation-report":
""".strip ()
| [] |
object-oriented-human/competitive | CodeChef/problems/IMDB/main.py | 9e761020e887d8980a39a64eeaeaa39af0ecd777 | tc = int(input())
while tc:
tc -= 1
best = 0
n, x = map(int, input().split())
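    # track the best (maximum) rating r among the n entries whose s value does not exceed x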
for i in range(n):
s, r = map(int, input().split())
if x >= s:
best = max(best, r)
print(best) | [] |
Privex/python-db | tests/test_sqlite_wrapper.py | 3b46b34b4310973e2e2a30a66adaa853fd10340d | """
Tests related to :class:`.SqliteWrapper` / :class:`.ExampleWrapper`
"""
# from unittest import TestCase
from tests.base import *
class TestSQLiteWrapper(PrivexDBTestBase):
def test_tables_created(self):
w = self.wrp
self.assertEqual(w.db, ':memory:')
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
def test_tables_drop(self):
w = self.wrp
tables = w.list_tables()
self.assertIn('users', tables)
self.assertIn('items', tables)
w.drop_schemas()
tables = w.list_tables()
self.assertNotIn('users', tables)
self.assertNotIn('items', tables)
def test_insert_find_user(self):
w = self.wrp
w.query_mode = 'flat'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user[1], 'John')
self.assertEqual(user[2], 'Doe')
def test_action_update(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
last_id = res.lastrowid
rows = w.action("UPDATE users SET last_name = ? WHERE first_name = ?", ['Smith', 'John'])
self.assertEqual(rows, 1)
john = w.find_user(last_id)
self.assertEqual(john['last_name'], 'Smith')
def test_find_user_dict_mode(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert_user('John', 'Doe')
self.assertEqual(res.rowcount, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'John')
self.assertEqual(user['last_name'], 'Doe')
def test_find_user_nonexistent(self):
w = self.wrp
user = w.find_user(99)
self.assertIsNone(user)
def test_get_users_tuple(self):
w = self.wrp
w.query_mode = 'flat'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0][1], 'John')
self.assertEqual(users[1][1], 'Jane')
self.assertEqual(users[1][2], 'Doe')
self.assertEqual(users[2][2], 'Johnson')
def test_get_users_dict(self):
w = self.wrp
w.query_mode = 'dict'
w.insert_user('John', 'Doe')
w.insert_user('Jane', 'Doe')
w.insert_user('Dave', 'Johnson')
users = list(w.get_users())
self.assertEqual(len(users), 3)
self.assertEqual(users[0]['first_name'], 'John')
self.assertEqual(users[1]['first_name'], 'Jane')
self.assertEqual(users[1]['last_name'], 'Doe')
self.assertEqual(users[2]['last_name'], 'Johnson')
def test_insert_helper(self):
w = self.wrp
w.query_mode = 'dict'
res = w.insert('users', first_name='Dave', last_name='Johnson')
self.assertEqual(res.lastrowid, 1)
user = w.find_user(res.lastrowid)
self.assertEqual(user['first_name'], 'Dave')
self.assertEqual(user['last_name'], 'Johnson')
| [] |
robertodr/spack | var/spack/repos/builtin/packages/strumpack/package.py | 9b809e01b47d48f01b3d257912fe1b752943cd3d | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Strumpack(CMakePackage, CudaPackage):
"""STRUMPACK -- STRUctured Matrix PACKage - provides linear solvers
for sparse matrices and for dense rank-structured matrices, i.e.,
matrices that exhibit some kind of low-rank property. It provides a
distributed memory fully algebraic sparse solver and
preconditioner. The preconditioner is mostly aimed at large sparse
linear systems which result from the discretization of a partial
differential equation, but is not limited to any particular type of
problem. STRUMPACK also provides preconditioned GMRES and BiCGStab
iterative solvers."""
homepage = "http://portal.nersc.gov/project/sparse/strumpack"
url = "https://github.com/pghysels/STRUMPACK/archive/v4.0.0.tar.gz"
git = "https://github.com/pghysels/STRUMPACK.git"
maintainers = ['pghysels']
version('master', branch='master')
version('5.0.0', sha256='bdfd1620ff7158d96055059be04ee49466ebaca8213a2fdab33e2d4571019a49')
version('4.0.0', sha256='a3629f1f139865c74916f8f69318f53af6319e7f8ec54e85c16466fd7d256938')
version('3.3.0', sha256='499fd3b58656b4b6495496920e5372895861ebf15328be8a7a9354e06c734bc7')
version('3.2.0', sha256='34d93e1b2a3b8908ef89804b7e08c5a884cbbc0b2c9f139061627c0d2de282c1')
version('3.1.1', sha256='c1c3446ee023f7b24baa97b24907735e89ce4ae9f5ef516645dfe390165d1778')
variant('shared', default=False, description='Build shared libraries')
variant('mpi', default=True, description='Use MPI')
variant('openmp', default=True,
            description='Enable thread parallelism via tasking with OpenMP')
variant('cuda', default=True,
description='Enable CUDA support')
variant('parmetis', default=True,
description='Enable use of ParMetis')
variant('scotch', default=False,
description='Enable use of Scotch')
variant('butterflypack', default=True,
description='Enable use of ButterflyPACK')
variant('zfp', default=True,
description='Build with support for compression using ZFP')
variant('c_interface', default=True,
description='Enable C interface')
variant('count_flops', default=False,
description='Build with flop counters')
variant('task_timers', default=False,
description='Build with timers for internal routines')
variant('build_dev_tests', default=False,
description='Build developer test routines')
variant('build_tests', default=False,
description='Build test routines')
# TODO: add a slate variant
depends_on('[email protected]:', type='build')
depends_on('mpi', when='+mpi')
depends_on('blas')
depends_on('lapack')
depends_on('scalapack', when='+mpi')
depends_on('metis')
depends_on('parmetis', when='+parmetis')
depends_on('scotch~metis', when='+scotch')
depends_on('scotch~metis+mpi', when='+scotch+mpi')
depends_on('[email protected]', when='@3.3.0:3.9.999 +butterflypack+mpi')
depends_on('[email protected]:', when='@4.0.0: +butterflypack+mpi')
depends_on('cuda', when='@4.0.0: +cuda')
depends_on('zfp', when='+zfp')
conflicts('+parmetis', when='~mpi')
conflicts('+butterflypack', when='~mpi')
conflicts('+butterflypack', when='@:3.2.0')
conflicts('+cuda', when='@:3.9.999')
conflicts('+zfp', when='@:3.9.999')
patch('intel-19-compile.patch', when='@3.1.1')
def cmake_args(self):
spec = self.spec
def on_off(varstr):
return 'ON' if varstr in spec else 'OFF'
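        # map the presence of a spec variant (e.g. '+mpi') onto the ON/OFF strings expected by CMake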
args = [
'-DSTRUMPACK_USE_MPI=%s' % on_off('+mpi'),
'-DSTRUMPACK_USE_OPENMP=%s' % on_off('+openmp'),
'-DTPL_ENABLE_PARMETIS=%s' % on_off('+parmetis'),
'-DTPL_ENABLE_SCOTCH=%s' % on_off('+scotch'),
'-DTPL_ENABLE_BPACK=%s' % on_off('+butterflypack'),
'-DSTRUMPACK_COUNT_FLOPS=%s' % on_off('+count_flops'),
'-DSTRUMPACK_TASK_TIMERS=%s' % on_off('+task_timers'),
'-DSTRUMPACK_DEV_TESTING=%s' % on_off('+build_dev_tests'),
'-DSTRUMPACK_BUILD_TESTS=%s' % on_off('+build_tests'),
'-DTPL_BLAS_LIBRARIES=%s' % spec['blas'].libs.joined(";"),
'-DTPL_LAPACK_LIBRARIES=%s' % spec['lapack'].libs.joined(";"),
'-DTPL_SCALAPACK_LIBRARIES=%s' % spec['scalapack'].
libs.joined(";"),
]
if spec.satisfies('@:3.9.999'):
if '+mpi' in spec:
args.extend([
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc
])
args.extend([
'-DSTRUMPACK_C_INTERFACE=%s' % on_off('+c_interface'),
])
if spec.satisfies('@4.0.0:'):
args.extend([
'-DSTRUMPACK_USE_CUDA=%s' % on_off('+cuda')
])
args.extend([
'-DBUILD_SHARED_LIBS=%s' % on_off('+shared')
])
return args
| [] |
Ajju2211/frendy-bot | actionserver/actions/action_feedbackform.py | b86a7a3cb3fb54b300ad9b870defb947f22dc146 | from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction
# from rasa_core.events import (UserUtteranceReverted, UserUttered,
# ActionExecuted, Event)
from rasa_sdk.events import AllSlotsReset, SlotSet
from rasa.core.constants import REQUESTED_SLOT
from rasa.core.slots import Slot
import pandas as pd
import json
from actionserver.utils import utilities as util
from actionserver.controllers.faqs.faq import FAQ
from actionserver.controllers.constants.orderForm import *
import logging
from actionserver.utils.utilities import INVALID_VALUE
product_list = []
quant_list = [] # takes quantity from user
logger = logging.getLogger(__name__)
with open(r'./actionserver/custom_payload.json') as f:
frendy_product_menu = json.load(f)
# Code snippet for global back
# return [Restarted(), UserUttered(text="/get_started", parse_data={
# "intent": {"confidence": 1.0, "name": "get_started"},
# "entities": []
# }), FollowupAction(name="utter_greet")]
def query_back(dispatcher):
dispatcher.utter_message("Going back to queries!!!")
greet_utter = UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
})
query_utter = UserUttered(text="/query_init", parse_data={
"intent": {"confidence": 1.0, "name": "query_init"},
"entities": []
})
return [
greet_utter,
FollowupAction(name="utter_greet"),
query_utter,
FollowupAction(name="utter_query_type")
]
def greet_back(dispatcher):
dispatcher.utter_message("Going back!!!")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Welcome back to Frendy Shopping"
});
return [UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
}), FollowupAction(name="utter_greet")]
class FeedbackForm(FormAction):
def name(self):
return "feedback_form"
@staticmethod
def required_slots(tracker):
if tracker.get_slot("rating"):
return ["rating", "feedback_text"]
else:
return ["rating"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
# return {"rating": [self.from_entity("rating"),self.from_entity("any_thing")],"feedback_text": [self.from_entity(entity="any_thing"),self.from_entity(entity="navigation")]}
return {"rating": [self.from_entity("rating"), self.from_text()], "feedback_text": [self.from_text(), self.from_entity(entity="navigation")]}
def validate_rating(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
ratings = ['1', '2', '3', '4', '5']
try:
value = value.strip()
if value == "back1" or value.lower() == "back":
return {"rating": INVALID_VALUE, "feedback_text": INVALID_VALUE}
# 1-5 it integer otherwise rating:None
elif value in ratings:
return {"rating": value, "feedback_text": None}
else:
dispatcher.utter_message("Please enter valid option.")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Please enter valid option"
});
return {"rating": None, "feedback_text": None}
except Exception as e:
print(e)
dispatcher.utter_message("Please enter valid option.")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Please enter valid option"
});
return {"rating": None, "feedback_text": None}
def validate_feedback_text(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
if value == "back2" or value.lower() == "back":
return {"rating": None, "feedback_text": None}
else:
return {"feedback_text": value}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
if tracker.get_slot("rating") != INVALID_VALUE:
with open("./actionserver/customer_queries.json", "r") as queriesRef:
rating = tracker.get_slot("rating")
feedback = tracker.get_slot("feedback_text")
feedbackObj = json.load(queriesRef)
feedbackObj["feedback"].append({
"createdOn": util.timestamp(),
"complaint_area": rating,
"complaint": feedback
})
with open("./actionserver/customer_queries.json", "w") as queriesRefWrite:
json.dump(feedbackObj, queriesRefWrite, indent=4)
dispatcher.utter_message("Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback))
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback)
});
else:
dispatcher.utter_message("Feedback form closed")
li = [SlotSet("rating", None), SlotSet("feedback_text", None)]
li.extend(query_back(dispatcher))
return li
return [SlotSet("rating", None), SlotSet("feedback_text", None)]
| [((22, 9, 22, 36), 'logging.getLogger', 'logging.getLogger', ({(22, 27, 22, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((26, 26, 26, 38), 'json.load', 'json.load', ({(26, 36, 26, 37): 'f'}, {}), '(f)', False, 'import json\n'), ((37, 18, 40, 6), 'rasa_sdk.events.UserUttered', 'UserUttered', (), '', False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((42, 18, 45, 6), 'rasa_sdk.events.UserUttered', 'UserUttered', (), '', False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((49, 8, 49, 42), 'rasa_sdk.events.FollowupAction', 'FollowupAction', (), '', False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((51, 8, 51, 47), 'rasa_sdk.events.FollowupAction', 'FollowupAction', (), '', False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((62, 12, 65, 6), 'rasa_sdk.events.UserUttered', 'UserUttered', (), '', False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((65, 8, 65, 42), 'rasa_sdk.events.FollowupAction', 'FollowupAction', (), '', False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((166, 16, 166, 39), 'rasa_sdk.events.SlotSet', 'SlotSet', ({(166, 24, 166, 32): '"""rating"""', (166, 34, 166, 38): 'None'}, {}), "('rating', None)", False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((166, 41, 166, 71), 'rasa_sdk.events.SlotSet', 'SlotSet', ({(166, 49, 166, 64): '"""feedback_text"""', (166, 66, 166, 70): 'None'}, {}), "('feedback_text', None)", False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((144, 30, 144, 51), 'json.load', 'json.load', ({(144, 40, 144, 50): 'queriesRef'}, {}), '(queriesRef)', False, 'import json\n'), ((151, 16, 151, 65), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((163, 18, 163, 41), 'rasa_sdk.events.SlotSet', 'SlotSet', ({(163, 26, 163, 34): '"""rating"""', (163, 36, 163, 40): 'None'}, {}), "('rating', None)", False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((163, 43, 163, 73), 'rasa_sdk.events.SlotSet', 'SlotSet', ({(163, 51, 163, 66): '"""feedback_text"""', (163, 68, 163, 72): 'None'}, {}), "('feedback_text', None)", False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((146, 33, 146, 49), 'actionserver.utils.utilities.timestamp', 'util.timestamp', ({}, {}), '()', True, 'from actionserver.utils import utilities as util\n')] |
fuzzylabs/wearable-my-foot | dash/graphs.py | 5e7d818fc51a3d3babbe1c0ec49450b1a1f030c6 | import plotly.graph_objs as go
class GraphsHelper:
template = "plotly_dark"
'''
Generate a plot for a timeseries
'''
def generate_timeseries_plot(self, dataframe):
pressure_plots = []
for sensor in ["p1", "p2", "p3"]:
series = dataframe[sensor]
scatter = go.Scatter(x = dataframe.index,
y = series,
name = f"Sensor {sensor}",
opacity = 0.4)
pressure_plots.append(scatter)
pressure_figure = go.Figure(
data = pressure_plots,
layout = go.Layout(
title = "Pressure timeseries",
template = self.template
)
)
return pressure_figure
| [((13, 22, 16, 47), 'plotly.graph_objs.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objs as go\n'), ((21, 21, 24, 13), 'plotly.graph_objs.Layout', 'go.Layout', (), '', True, 'import plotly.graph_objs as go\n')] |
JonathanGailliez/azure-sdk-for-python | azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class AppServiceCertificateResource(Resource):
"""Key Vault container ARM resource for a certificate that is purchased
through Azure.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource Name.
:vartype name: str
:param kind: Kind of resource.
:type kind: str
:param location: Required. Resource Location.
:type location: str
:ivar type: Resource type.
:vartype type: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param key_vault_id: Key Vault resource Id.
:type key_vault_id: str
:param key_vault_secret_name: Key Vault secret name.
:type key_vault_secret_name: str
:ivar provisioning_state: Status of the Key Vault secret. Possible values
include: 'Initialized', 'WaitingOnCertificateOrder', 'Succeeded',
'CertificateOrderFailed', 'OperationNotPermittedOnKeyVault',
'AzureServiceUnauthorizedToAccessKeyVault', 'KeyVaultDoesNotExist',
'KeyVaultSecretDoesNotExist', 'UnknownError', 'ExternalPrivateKey',
'Unknown'
:vartype provisioning_state: str or
~azure.mgmt.web.models.KeyVaultSecretStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'location': {'required': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'},
'key_vault_secret_name': {'key': 'properties.keyVaultSecretName', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'KeyVaultSecretStatus'},
}
def __init__(self, **kwargs):
super(AppServiceCertificateResource, self).__init__(**kwargs)
self.key_vault_id = kwargs.get('key_vault_id', None)
self.key_vault_secret_name = kwargs.get('key_vault_secret_name', None)
self.provisioning_state = None
| [] |
polisitni1/DogeClickBot | telethon/tl/functions/stickers.py | ac57eaeefca2c6ab9e48458f9f928a6a421a162e | """File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
if TYPE_CHECKING:
from ...tl.types import TypeInputStickerSet, TypeInputUser, TypeInputStickerSetItem, TypeInputDocument
class AddStickerToSetRequest(TLRequest):
CONSTRUCTOR_ID = 0x8653febe
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, stickerset, sticker):
"""
:param TypeInputStickerSet stickerset:
:param TypeInputStickerSetItem sticker:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.stickerset = stickerset # type: TypeInputStickerSet
self.sticker = sticker # type: TypeInputStickerSetItem
def to_dict(self):
return {
'_': 'AddStickerToSetRequest',
'stickerset': None if self.stickerset is None else self.stickerset.to_dict(),
'sticker': None if self.sticker is None else self.sticker.to_dict()
}
def __bytes__(self):
return b''.join((
b'\xbe\xfeS\x86',
bytes(self.stickerset),
bytes(self.sticker),
))
@classmethod
def from_reader(cls, reader):
_stickerset = reader.tgread_object()
_sticker = reader.tgread_object()
return cls(stickerset=_stickerset, sticker=_sticker)
class ChangeStickerPositionRequest(TLRequest):
CONSTRUCTOR_ID = 0xffb6d4ca
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, sticker, position):
"""
:param TypeInputDocument sticker:
:param int position:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.sticker = sticker # type: TypeInputDocument
self.position = position # type: int
def to_dict(self):
return {
'_': 'ChangeStickerPositionRequest',
'sticker': None if self.sticker is None else self.sticker.to_dict(),
'position': self.position
}
def __bytes__(self):
return b''.join((
b'\xca\xd4\xb6\xff',
bytes(self.sticker),
struct.pack('<i', self.position),
))
@classmethod
def from_reader(cls, reader):
_sticker = reader.tgread_object()
_position = reader.read_int()
return cls(sticker=_sticker, position=_position)
class CreateStickerSetRequest(TLRequest):
CONSTRUCTOR_ID = 0x9bd86e6a
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, user_id, title, short_name, stickers, masks=None):
"""
:param TypeInputUser user_id:
:param str title:
:param str short_name:
:param List[TypeInputStickerSetItem] stickers:
:param Optional[bool] masks:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.user_id = user_id # type: TypeInputUser
self.title = title # type: str
self.short_name = short_name # type: str
self.stickers = stickers # type: List[TypeInputStickerSetItem]
self.masks = masks # type: Optional[bool]
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'CreateStickerSetRequest',
'user_id': None if self.user_id is None else self.user_id.to_dict(),
'title': self.title,
'short_name': self.short_name,
'stickers': [] if self.stickers is None else [None if x is None else x.to_dict() for x in self.stickers],
'masks': self.masks
}
def __bytes__(self):
return b''.join((
b'jn\xd8\x9b',
struct.pack('<I', (0 if self.masks is None or self.masks is False else 1)),
bytes(self.user_id),
self.serialize_bytes(self.title),
self.serialize_bytes(self.short_name),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(bytes(x) for x in self.stickers),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_masks = bool(flags & 1)
_user_id = reader.tgread_object()
_title = reader.tgread_string()
_short_name = reader.tgread_string()
reader.read_int()
_stickers = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_stickers.append(_x)
return cls(user_id=_user_id, title=_title, short_name=_short_name, stickers=_stickers, masks=_masks)
class RemoveStickerFromSetRequest(TLRequest):
CONSTRUCTOR_ID = 0xf7760f51
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, sticker):
"""
:param TypeInputDocument sticker:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.sticker = sticker # type: TypeInputDocument
def to_dict(self):
return {
'_': 'RemoveStickerFromSetRequest',
'sticker': None if self.sticker is None else self.sticker.to_dict()
}
def __bytes__(self):
return b''.join((
b'Q\x0fv\xf7',
bytes(self.sticker),
))
@classmethod
def from_reader(cls, reader):
_sticker = reader.tgread_object()
return cls(sticker=_sticker)
| [((71, 12, 71, 44), 'struct.pack', 'struct.pack', ({(71, 24, 71, 28): '"""<i"""', (71, 30, 71, 43): 'self.position'}, {}), "('<i', self.position)", False, 'import struct\n'), ((117, 12, 117, 86), 'struct.pack', 'struct.pack', ({(117, 24, 117, 28): '"""<I"""', (117, 31, 117, 84): '(0 if self.masks is None or self.masks is False else 1)'}, {}), "('<I', 0 if self.masks is None or self.masks is False else 1)", False, 'import struct\n')] |
lkusch/Kratos | applications/ChimeraApplication/tests/chimera_analysis_base_test.py | e8072d8e24ab6f312765185b19d439f01ab7b27b | import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.ChimeraApplication
from KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis import FluidChimeraAnalysis
class ChimeraAnalysisBaseTest(UnitTest.TestCase):
def setUp(self):
# Set to true to get post-process files for the test
self.print_output = False
def _run_test(self,settings_file_name):
model = KratosMultiphysics.Model()
with open(settings_file_name,'r') as settings_file:
settings = KratosMultiphysics.Parameters(settings_file.read())
# to check the results: add output settings block if needed
if self.print_output:
settings.AddValue("output_processes", KratosMultiphysics.Parameters(r'''{
"vtk_output" : [{
"python_module" : "vtk_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "VtkOutputProcess",
"help" : "This process writes postprocessing files for Paraview",
"Parameters" : {
"model_part_name" : "FluidModelPart.Parts_background_surface",
"output_control_type" : "step",
"output_frequency" : 1,
"file_format" : "ascii",
"output_precision" : 3,
"output_sub_model_parts" : false,
"write_deformed_configuration" : true,
"folder_name" : "test_vtk_output",
"save_output_files_in_folder" : true,
"nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],
"nodal_data_value_variables" : [],
"element_flags" : ["ACTIVE"],
"nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],
"element_data_value_variables" : [],
"condition_data_value_variables" : []
}
},{
"python_module" : "vtk_output_process",
"kratos_module" : "KratosMultiphysics",
"process_name" : "VtkOutputProcess",
"help" : "This process writes postprocessing files for Paraview",
"Parameters" : {
"model_part_name" : "FluidModelPart.Parts_patch_surface",
"output_control_type" : "step",
"output_frequency" : 1,
"file_format" : "ascii",
"output_precision" : 3,
"output_sub_model_parts" : false,
"write_deformed_configuration" : true,
"folder_name" : "test_vtk_output",
"save_output_files_in_folder" : true,
"nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],
"nodal_data_value_variables" : [],
"element_flags" : ["ACTIVE"],
"nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],
"element_data_value_variables" : [],
"condition_data_value_variables" : []
}
}]
}'''))
analysis = FluidChimeraAnalysis(model,settings)
analysis.Run()
| [((13, 16, 13, 42), 'KratosMultiphysics.Model', 'KratosMultiphysics.Model', ({}, {}), '()', False, 'import KratosMultiphysics\n'), ((66, 19, 66, 55), 'KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis.FluidChimeraAnalysis', 'FluidChimeraAnalysis', ({(66, 40, 66, 45): 'model', (66, 46, 66, 54): 'settings'}, {}), '(model, settings)', False, 'from KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis import FluidChimeraAnalysis\n'), ((18, 50, 64, 17), 'KratosMultiphysics.Parameters', 'KratosMultiphysics.Parameters', ({(18, 80, 64, 16): '"""{\n "vtk_output" : [{\n "python_module" : "vtk_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "VtkOutputProcess",\n "help" : "This process writes postprocessing files for Paraview",\n "Parameters" : {\n "model_part_name" : "FluidModelPart.Parts_background_surface",\n "output_control_type" : "step",\n "output_frequency" : 1,\n "file_format" : "ascii",\n "output_precision" : 3,\n "output_sub_model_parts" : false,\n "write_deformed_configuration" : true,\n "folder_name" : "test_vtk_output",\n "save_output_files_in_folder" : true,\n "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],\n "nodal_data_value_variables" : [],\n "element_flags" : ["ACTIVE"],\n "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],\n "element_data_value_variables" : [],\n "condition_data_value_variables" : []\n }\n },{\n "python_module" : "vtk_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "VtkOutputProcess",\n "help" : "This process writes postprocessing files for Paraview",\n "Parameters" : {\n "model_part_name" : "FluidModelPart.Parts_patch_surface",\n "output_control_type" : "step",\n "output_frequency" : 1,\n "file_format" : "ascii",\n "output_precision" : 3,\n "output_sub_model_parts" : false,\n "write_deformed_configuration" : true,\n "folder_name" : "test_vtk_output",\n "save_output_files_in_folder" : true,\n "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],\n "nodal_data_value_variables" : [],\n "element_flags" : ["ACTIVE"],\n "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],\n "element_data_value_variables" : [],\n "condition_data_value_variables" : []\n }\n }]\n }"""'}, {}), '(\n """{\n "vtk_output" : [{\n "python_module" : "vtk_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "VtkOutputProcess",\n "help" : "This process writes postprocessing files for Paraview",\n "Parameters" : {\n "model_part_name" : "FluidModelPart.Parts_background_surface",\n "output_control_type" : "step",\n "output_frequency" : 1,\n "file_format" : "ascii",\n "output_precision" : 3,\n "output_sub_model_parts" : false,\n "write_deformed_configuration" : true,\n "folder_name" : "test_vtk_output",\n "save_output_files_in_folder" : true,\n "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],\n "nodal_data_value_variables" : [],\n "element_flags" : ["ACTIVE"],\n "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],\n "element_data_value_variables" : [],\n "condition_data_value_variables" : []\n }\n },{\n "python_module" : "vtk_output_process",\n "kratos_module" : "KratosMultiphysics",\n "process_name" : "VtkOutputProcess",\n "help" : "This process writes postprocessing files for Paraview",\n "Parameters" : {\n "model_part_name" : "FluidModelPart.Parts_patch_surface",\n "output_control_type" : "step",\n "output_frequency" : 1,\n "file_format" : "ascii",\n 
"output_precision" : 3,\n "output_sub_model_parts" : false,\n "write_deformed_configuration" : true,\n "folder_name" : "test_vtk_output",\n "save_output_files_in_folder" : true,\n "nodal_solution_step_data_variables" : ["VELOCITY","PRESSURE","DISTANCE","MESH_VELOCITY"],\n "nodal_data_value_variables" : [],\n "element_flags" : ["ACTIVE"],\n "nodal_flags" : ["VISITED","CHIMERA_INTERNAL_BOUNDARY"],\n "element_data_value_variables" : [],\n "condition_data_value_variables" : []\n }\n }]\n }"""\n )', False, 'import KratosMultiphysics\n')] |
side-beach-city/SBCLinkCopyTool | parsers/rss10.py | 12ec16eefddac215e6a2be92464fde75677c8548 | import urllib.request
import xml.etree.ElementTree
class RSS10Parser:
def __init__(self, url: str) -> None:
self.url = url
def getlist(self) -> list[dict[str, str]]:
ENTRY = r"{http://www.w3.org/2005/Atom}"
MEDIA = r"{http://search.yahoo.com/mrss/}"
YOUTUBE = r"{http://www.youtube.com/xml/schemas/2015}"
result = []
with urllib.request.urlopen(self.url) as res:
data = xml.etree.ElementTree.fromstring(res.read())
for child in data.iter(f"{ENTRY}entry"):
result.append({
"title": child.find(f"{ENTRY}title").text,
"link": child.find(f"{ENTRY}link").attrib["href"],
"description": child.find(f"{MEDIA}group").find(f"{MEDIA}description").text,
})
return result
if __name__ == "__main__":
import pprint
pprint.pprint(RSS10Parser("https://www.youtube.com/feeds/videos.xml?playlist_id=PLrPVslFukDQo7l5RCqAZtKDl6tUyMAFWH").getlist()) | [] |
MPI-IS/reactive_pepper | examples/laser.py | 079f9b0627bfd6c9e3f2a4466c95ad662002a600 | import math,time,random
import pepper_interface
IP = "192.168.0.147"
PORT = 9559
simulation = False
with pepper_interface.get(IP,PORT,simulation) as pepper:
time.sleep(1.0)
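    # fetch the most recent laser readings (keyed "Front", "Left", "Right") and their timestamp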
values,time_stamp = pepper.laser.get()
print
print "Front"
print values["Front"]
print
print "Left"
print values["Left"]
print
print "Right"
print values["Right"]
print
| [] |
Jette16/spacy-course | exercises/pt/exc_01_03_01.py | 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | # Importar a classe da língua inglesa (English) e criar um objeto nlp
from ____ import ____
nlp = ____
# Processar o texto
doc = ____("I like tree kangaroos and narwhals.")
# Selecionar o primeiro token
first_token = doc[____]
# Imprimir o texto do primeito token
print(first_token.____)
| [((6, 6, 6, 49), '____.____', '____', ({(6, 11, 6, 48): '"""I like tree kangaroos and narwhals."""'}, {}), "('I like tree kangaroos and narwhals.')", False, 'from ____ import ____\n')] |
qateam123/eq | tests/integration/mci/test_happy_path.py | 704757952323647d659c49a71975c56406ff4047 | from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
class TestHappyPath(IntegrationTestCase):
def test_happy_path_203(self):
self.happy_path('0203', '1')
def test_happy_path_205(self):
self.happy_path('0205', '1')
def happy_path(self, form_type_id, eq_id):
# Get a token
token = create_token(form_type_id, eq_id)
resp = self.client.get('/session?token=' + token.decode(), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the landing page
content = resp.get_data(True)
self.assertRegex(content, '<title>Introduction</title>')
self.assertRegex(content, '>Start survey<')
self.assertRegex(content, 'Monthly Business Survey - Retail Sales Index')
# We proceed to the questionnaire
post_data = {
'action[start_questionnaire]': 'Start Questionnaire'
}
resp = self.client.post('/questionnaire/' + eq_id + '/' + form_type_id + '/789/introduction', data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
block_one_url = resp.location
resp = self.client.get(block_one_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are in the Questionnaire
content = resp.get_data(True)
self.assertRegex(content, '<title>Survey</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, "What are the dates of the sales period you are reporting for?")
self.assertRegex(content, ">Save and continue<")
# check with have some guidance
self.assertRegex(content, "alcoholic drink")
# We fill in our answers
form_data = {
# Start Date
"period-from-day": "01",
"period-from-month": "4",
"period-from-year": "2016",
# End Date
"period-to-day": "30",
"period-to-month": "04",
"period-to-year": "2016",
# Total Turnover
"total-retail-turnover": "100000",
# User Action
"action[save_continue]": "Save & Continue"
}
# We submit the form
resp = self.client.post(block_one_url, data=form_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
# There are no validation errors
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/summary$')
summary_url = resp.location
resp = self.client.get(summary_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are on the review answers page
content = resp.get_data(True)
self.assertRegex(content, '<title>Summary</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, '>Your responses<')
self.assertRegex(content, 'Please check carefully before submission.')
self.assertRegex(content, '>Submit answers<')
# We submit our answers
post_data = {
"action[submit_answers]": "Submit answers"
}
resp = self.client.post(summary_url, data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/thank-you$')
resp = self.client.get(resp.location, follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the thank you page
content = resp.get_data(True)
self.assertRegex(content, '<title>Submission Successful</title>')
self.assertRegex(content, '(?s)Monthly Business Survey - Retail Sales Index.*?Monthly Business Survey - Retail Sales Index')
| [((15, 16, 15, 49), 'tests.integration.create_token.create_token', 'create_token', ({(15, 29, 15, 41): 'form_type_id', (15, 43, 15, 48): 'eq_id'}, {}), '(form_type_id, eq_id)', False, 'from tests.integration.create_token import create_token\n')] |
OllieBroadhurst/transformers | src/transformers/models/hubert/modeling_tf_hubert.py | 12428f0ef15bb3631e7a5f04672ddb05f363de97 | # coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TensorFlow Hubert model."""
import inspect
import warnings
from typing import Any, Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput
from ...modeling_tf_utils import TFPreTrainedModel, booleans_processing, get_initializer, keras_serializable
from ...tf_utils import shape_list
from ...tokenization_utils_base import BatchEncoding
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_hubert import HubertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "HubertConfig"
TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/hubert-base-ls960",
# See all Hubert models at https://huggingface.co/models?filter=hubert
]
LARGE_NEGATIVE = -1e8
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.input_values_processing
def input_values_processing(func, config, input_values, **kwargs):
"""
Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input
    has to be named according to the corresponding parameter name, i.e. `input_values = tf.keras.Input(shape=(128,),
    dtype='float32', name="input_values")`, otherwise the order of the tensors will not be guaranteed during
    training.
Args:
func (`callable`):
The callable function of the TensorFlow model.
config ([`PretrainedConfig`]):
The config of the running model.
**kwargs:
The inputs of the model.
Returns:
        A dictionary mapping each parameter name expected by `func` to its processed value.
"""
signature = dict(inspect.signature(func).parameters)
signature.pop("kwargs", None)
signature.pop("self", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_values, (tuple, list)):
for i, input in enumerate(input_values):
            # EagerTensors don't allow using the .name property, so we check for a symbolic Tensor instead
if type(input) == tf.Tensor:
                # Tensor names always have the pattern `name:id`, so we only check the
                # `name` part
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
)
elif isinstance(input_values, (dict, BatchEncoding)):
if "inputs" in input_values:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead.",
FutureWarning,
)
output["input_values"] = input_values.pop("inputs")
if "decoder_cached_states" in input_values:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_values.pop("decoder_cached_states")
for k, v in dict(input_values).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
logger.warning(
f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
)
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_values, tf.Tensor) or input_values is None:
output[parameter_names[0]] = input_values
else:
raise ValueError(
f"Data of type {type(input_values)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
)
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
    # When creating a SavedModel, TF calls the method with LayerCall.__call__(args, **kwargs),
    # so to preserve the expected output we have to add this exception
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_values`
output["input_values"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(booleans_processing(config=config, **boolean_dict))
return output
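# Editorial note (not part of the original module): `input_values_processing` is what lets the Keras
# `call` methods below accept the same inputs in several equivalent forms, e.g. (hypothetical tensors):
#     model(input_values)                                                        # a single tensor
#     model([input_values, attention_mask])                                      # a list, in docstring order
#     model({"input_values": input_values, "attention_mask": attention_mask})    # a dict keyed by input name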
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement
def _sample_without_replacement(distribution, num_samples):
"""
Categorical sampling without replacement is currently not implemented. The gumbel-max trick will do for now - see
https://github.com/tensorflow/tensorflow/issues/9260 for more info
"""
z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1))
_, indices = tf.nn.top_k(distribution + z, num_samples)
return indices
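# Editorial sketch (not part of the original module): the helper above perturbs the distribution with
# -log(U) noise, U ~ Uniform(0, 1), and keeps the top-k entries, which yields k *distinct* indices per
# row. The example function below is hypothetical and only illustrates the expected shapes.
def _example_sample_without_replacement():
    log_probs = tf.math.log(tf.constant([[0.1, 0.2, 0.3, 0.4]]))  # one row over four candidates
    return _sample_without_replacement(log_probs, 2)  # int32 tensor of shape (1, 2) with two distinct indices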
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices
def _scatter_values_on_batch_indices(values, batch_indices, output_shape):
"""
    Scatter function as in PyTorch with indices in format (batch_dim, indices)
"""
indices_shape = shape_list(batch_indices)
# broadcast batch dim to indices_shape
broad_casted_batch_dims = tf.reshape(
tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1]
)
# transform batch_indices to pair_indices
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
# scatter values to pair indices
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape)
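# Editorial sketch (not part of the original module): the scatter helper above places `values` at the
# per-row positions given by `batch_indices`, much like `torch.Tensor.scatter_` along the last axis.
# The example function below is hypothetical.
def _example_scatter_values_on_batch_indices():
    values = tf.constant([[10, 20], [30, 40]])     # (batch=2, num_values=2)
    batch_indices = tf.constant([[0, 2], [1, 3]])  # target column for each value, per row
    # returns [[10, 0, 20, 0], [0, 30, 0, 40]]
    return _scatter_values_on_batch_indices(values, batch_indices, (2, 4))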
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices
def _compute_mask_indices(
shape: Tuple[int, int],
mask_prob: float,
mask_length: int,
min_masks: int = 0,
) -> tf.Tensor:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
            Should be of size 2, where the first element is the batch size and the second is the number of timesteps
attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob:
            probability for each token to be chosen as start of the span to be masked. This will be multiplied by the
            number of timesteps divided by the length of the mask span to mask approximately this percentage of all
            elements. However, due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_length: size of the mask
min_masks: minimum number of masked spans
Adapted from [fairseq's
data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376).
"""
batch_size, sequence_length = shape
if mask_length < 1:
raise ValueError("`mask_length` has to be bigger than 0.")
if mask_length > sequence_length:
raise ValueError(
f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`"
)
# compute number of masked spans in batch
num_masked_spans = int(mask_prob * sequence_length / mask_length + tf.random.uniform((1,)))
num_masked_spans = max(num_masked_spans, min_masks)
# make sure num masked indices <= sequence_length
if num_masked_spans * mask_length > sequence_length:
num_masked_spans = sequence_length // mask_length
# SpecAugment mask to fill
spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32)
# uniform distribution to sample from, make sure that offset samples are < sequence_length
uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1)))
# get random indices to mask
spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans)
# expand masked indices to masked spans
spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1)
spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length))
spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))
offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :]
offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1))
offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length))
spec_aug_mask_idxs = spec_aug_mask_idxs + offsets
# scatter indices to mask
spec_aug_mask = _scatter_values_on_batch_indices(
tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, spec_aug_mask.shape
)
return spec_aug_mask
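# Editorial note (not part of the original module): a quick worked example of the span count above. For
# shape=(batch, 100), mask_prob=0.2 and mask_length=10, num_masked_spans = int(0.2 * 100 / 10 + U) with
# U ~ Uniform[0, 1), i.e. 2, so each row gets two 10-step spans (roughly 20% of the timesteps, slightly
# fewer when spans overlap). The example function below is hypothetical.
def _example_compute_mask_indices():
    # int32 tensor of shape (2, 100) with non-zero entries over the sampled spans
    return _compute_mask_indices(shape=(2, 100), mask_prob=0.2, mask_length=10)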
# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE
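# Editorial sketch (not part of the original module): `_expand_mask` turns a (batch, seq_len) padding
# mask into an additive attention bias: kept positions become 0.0 and padded positions LARGE_NEGATIVE,
# so they vanish after the softmax. The example function below is hypothetical.
def _example_expand_mask():
    mask = tf.constant([[1.0, 1.0, 0.0]])  # one sequence of length 3 whose last position is padding
    return _expand_mask(mask)  # shape (1, 1, 3, 3); the last column equals LARGE_NEGATIVE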
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert
class TFHubertGroupNorm(tf.keras.layers.Layer):
"""
From tensorflow-addons https://www.tensorflow.org/addons/api_docs/python/tfa/layers/GroupNormalization
"""
def __init__(
self,
groups: int = 32,
axis: int = -1,
epsilon: float = 1e-3,
center: bool = True,
scale: bool = True,
beta_initializer: tf.keras.initializers.Initializer = "zeros",
gamma_initializer: tf.keras.initializers.Initializer = "ones",
beta_regularizer: tf.keras.regularizers.Regularizer = None,
gamma_regularizer: tf.keras.regularizers.Regularizer = None,
beta_constraint: tf.keras.constraints.Constraint = None,
gamma_constraint: tf.keras.constraints.Constraint = None,
**kwargs,
):
super().__init__(**kwargs)
self.supports_masking = True
self.groups = groups
self.axis = axis
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = tf.keras.initializers.get(beta_initializer)
self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)
self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)
self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)
self.beta_constraint = tf.keras.constraints.get(beta_constraint)
self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)
self._check_axis()
def build(self, input_shape):
self._check_if_input_shape_is_none(input_shape)
self._set_number_of_groups_for_instance_norm(input_shape)
self._check_size_of_dimensions(input_shape)
self._create_input_spec(input_shape)
self._add_gamma_weight(input_shape)
self._add_beta_weight(input_shape)
self.built = True
super().build(input_shape)
def call(self, inputs):
input_shape = tf.keras.backend.int_shape(inputs)
tensor_input_shape = tf.shape(inputs)
reshaped_inputs, group_shape = self._reshape_into_groups(inputs, input_shape, tensor_input_shape)
normalized_inputs = self._apply_normalization(reshaped_inputs, input_shape)
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
outputs = tf.reshape(normalized_inputs, tensor_input_shape)
else:
outputs = normalized_inputs
return outputs
def get_config(self):
config = {
"groups": self.groups,
"axis": self.axis,
"epsilon": self.epsilon,
"center": self.center,
"scale": self.scale,
"beta_initializer": tf.keras.initializers.serialize(self.beta_initializer),
"gamma_initializer": tf.keras.initializers.serialize(self.gamma_initializer),
"beta_regularizer": tf.keras.regularizers.serialize(self.beta_regularizer),
"gamma_regularizer": tf.keras.regularizers.serialize(self.gamma_regularizer),
"beta_constraint": tf.keras.constraints.serialize(self.beta_constraint),
"gamma_constraint": tf.keras.constraints.serialize(self.gamma_constraint),
}
base_config = super().get_config()
return {**base_config, **config}
def compute_output_shape(self, input_shape):
return input_shape
def _reshape_into_groups(self, inputs, input_shape, tensor_input_shape):
group_shape = [tensor_input_shape[i] for i in range(len(input_shape))]
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
group_shape[self.axis] = input_shape[self.axis] // self.groups
group_shape.insert(self.axis, self.groups)
group_shape = tf.stack(group_shape)
reshaped_inputs = tf.reshape(inputs, group_shape)
return reshaped_inputs, group_shape
else:
return inputs, group_shape
def _apply_normalization(self, reshaped_inputs, input_shape):
group_shape = tf.keras.backend.int_shape(reshaped_inputs)
group_reduction_axes = list(range(1, len(group_shape)))
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
axis = -2 if self.axis == -1 else self.axis - 1
else:
axis = -1 if self.axis == -1 else self.axis - 1
group_reduction_axes.pop(axis)
mean, variance = tf.nn.moments(reshaped_inputs, group_reduction_axes, keepdims=True)
gamma, beta = self._get_reshaped_weights(input_shape)
normalized_inputs = tf.nn.batch_normalization(
reshaped_inputs,
mean=mean,
variance=variance,
scale=gamma,
offset=beta,
variance_epsilon=self.epsilon,
)
return normalized_inputs
def _get_reshaped_weights(self, input_shape):
broadcast_shape = self._create_broadcast_shape(input_shape)
gamma = None
beta = None
if self.scale:
gamma = tf.reshape(self.gamma, broadcast_shape)
if self.center:
beta = tf.reshape(self.beta, broadcast_shape)
return gamma, beta
def _check_if_input_shape_is_none(self, input_shape):
dim = input_shape[self.axis]
if dim is None:
raise ValueError(
"Axis " + str(self.axis) + " of "
"input tensor should have a defined dimension "
"but the layer received an input with shape " + str(input_shape) + "."
)
def _set_number_of_groups_for_instance_norm(self, input_shape):
dim = input_shape[self.axis]
if self.groups == -1:
self.groups = dim
def _check_size_of_dimensions(self, input_shape):
dim = input_shape[self.axis]
if dim < self.groups:
raise ValueError(
"Number of groups (" + str(self.groups) + ") cannot be "
"more than the number of channels (" + str(dim) + ")."
)
if dim % self.groups != 0:
raise ValueError(
"Number of groups (" + str(self.groups) + ") must be a "
"multiple of the number of channels (" + str(dim) + ")."
)
def _check_axis(self):
if self.axis == 0:
raise ValueError(
"You are trying to normalize your batch axis. Do you want to "
"use tf.layer.batch_normalization instead"
)
def _create_input_spec(self, input_shape):
dim = input_shape[self.axis]
self.input_spec = tf.keras.layers.InputSpec(ndim=len(input_shape), axes={self.axis: dim})
def _add_gamma_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.scale:
self.gamma = self.add_weight(
shape=shape,
name="gamma",
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
constraint=self.gamma_constraint,
)
else:
self.gamma = None
def _add_beta_weight(self, input_shape):
dim = input_shape[self.axis]
shape = (dim,)
if self.center:
self.beta = self.add_weight(
shape=shape,
name="beta",
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
constraint=self.beta_constraint,
)
else:
self.beta = None
def _create_broadcast_shape(self, input_shape):
broadcast_shape = [1] * len(input_shape)
is_instance_norm = (input_shape[self.axis] // self.groups) == 1
if not is_instance_norm:
broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
broadcast_shape.insert(self.axis, self.groups)
else:
broadcast_shape[self.axis] = self.groups
return broadcast_shape
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert
class TFHubertWeightNormConv1D(tf.keras.layers.Conv1D):
"""Adapted from https://www.tensorflow.org/probability/api_docs/python/tfp/layers/weight_norm/WeightNorm"""
def __init__(self, filters, kernel_size, groups, explicit_padding, **kwargs):
super().__init__(
filters=filters,
kernel_size=kernel_size,
groups=groups,
padding="valid",
use_bias=True,
bias_initializer="he_normal",
**kwargs,
)
self.explicit_padding = explicit_padding
self.filter_axis = 2
self.initialized = False
self.kernel_norm_axes = tf.constant([0, 1])
def _init_norm(self):
"""Set the norm of the weight vector."""
kernel_norm = tf.sqrt(tf.reduce_sum(tf.square(self.weight_v), axis=self.kernel_norm_axes))
self.weight_g.assign(kernel_norm[:, tf.newaxis, tf.newaxis])
def _normalize_kernel(self):
"""Generate normalized weights."""
kernel = tf.nn.l2_normalize(self.weight_v, axis=self.kernel_norm_axes) * tf.transpose(self.weight_g)
self.kernel = tf.transpose(kernel)
def build(self, input_shape):
if not self.built:
input_shape = input_shape.as_list()
# Conv1D output shapes are checked at build time since TF 2.7, so we need to account for padding
input_shape[-2] += self.explicit_padding * 2
super().build(input_shape)
self.kernel = tf.Variable(tf.transpose(self.kernel), name="weight_v", trainable=True)
self.weight_v = self.kernel
self.weight_g = self.add_weight(
name="weight_g",
shape=(int(self.weight_v.shape[self.filter_axis]), 1, 1),
initializer="ones",
dtype=self.weight_v.dtype,
trainable=True,
)
self.bias = self.add_weight(name="bias", shape=(self.filters,), initializer="zeros", trainable=True)
def call(self, inputs):
if not self.initialized:
self._init_norm()
self.initialized = True
self._normalize_kernel()
padded_inputs = tf.pad(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0)))
output = super().call(padded_inputs)
return output
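# Editorial note (not part of the original module): the layer above implements weight normalization,
# i.e. the effective kernel is g * v / ||v||, with the direction (`weight_v`) and the magnitude
# (`weight_g`) learned separately; `_init_norm` initializes g to ||v|| so the first forward pass
# matches the unnormalized convolution.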
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertNoLayerNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
class TFHubertLayerNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.layer_norm = tf.keras.layers.LayerNormalization(name="layer_norm", epsilon=config.layer_norm_eps)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
class TFHubertGroupNormConvLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, layer_id: int = 0, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.in_conv_dim = config.conv_dim[layer_id] if layer_id > 0 else 1
self.out_conv_dim = config.conv_dim[layer_id]
self.conv = tf.keras.layers.Conv1D(
filters=self.out_conv_dim,
kernel_size=config.conv_kernel[layer_id],
strides=config.conv_stride[layer_id],
use_bias=config.conv_bias,
name="conv",
)
self.activation = get_tf_activation(config.feat_extract_activation)
self.layer_norm = TFHubertGroupNorm(groups=self.out_conv_dim, epsilon=config.layer_norm_eps, name="layer_norm")
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
class TFHubertPositionalConvEmbedding(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.conv = TFHubertWeightNormConv1D(
filters=config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
groups=config.num_conv_pos_embedding_groups,
explicit_padding=config.num_conv_pos_embeddings // 2,
name="conv",
)
self.padding = TFHubertSamePadLayer(config.num_conv_pos_embeddings)
self.activation = get_tf_activation(config.feat_extract_activation)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert
class TFHubertSamePadLayer(tf.keras.layers.Layer):
def __init__(self, num_conv_pos_embeddings, **kwargs):
super().__init__(**kwargs)
self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0
def call(self, hidden_states):
if self.num_pad_remove > 0:
hidden_states = hidden_states[:, : -self.num_pad_remove, :]
return hidden_states
class TFHubertFeatureEncoder(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs: Any) -> None:
super().__init__(**kwargs)
if config.feat_extract_norm == "group":
conv_layers = [TFHubertGroupNormConvLayer(config, layer_id=0, name=f"conv_layers.{0}")] + [
TFHubertNoLayerNormConvLayer(config, layer_id=i + 1, name=f"conv_layers.{i+1}")
for i in range(config.num_feat_extract_layers - 1)
]
elif config.feat_extract_norm == "layer":
conv_layers = [
TFHubertLayerNormConvLayer(config, layer_id=i, name=f"conv_layers.{i}")
for i in range(config.num_feat_extract_layers)
]
else:
raise ValueError(
f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
)
self.conv_layers = conv_layers
def call(self, input_values):
hidden_states = tf.expand_dims(input_values, -1)
for conv_layer in self.conv_layers:
hidden_states = conv_layer(hidden_states)
return hidden_states
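# Editorial note (not part of the original module): the feature encoder above consumes the raw waveform
# of shape (batch, num_samples), adds a channel axis, and runs it through the temporal convolution
# stack, producing frame-level features of shape (batch, num_frames, config.conv_dim[-1]).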
class TFHubertFeatureExtractor(TFHubertFeatureEncoder):
def __init__(self, config, **kwargs):
super().__init__(config, **kwargs)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
class TFHubertFeatureProjection(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.projection = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="projection",
)
self.dropout = tf.keras.layers.Dropout(rate=config.feat_proj_dropout)
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
return hidden_states
# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert
class TFHubertAttention(tf.keras.layers.Layer):
"""Multi-headed attention from "Attention Is All You Need"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = tf.keras.layers.Dropout(dropout)
self.head_dim = embed_dim // num_heads
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training: Optional[bool] = False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in all modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in all modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
            # The tf.debugging asserts are not compliant with XLA, so they
            # have to be disabled in all modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
        # The tf.debugging asserts are not compliant with XLA, so they
        # have to be disabled in all modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value
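# Editorial note (not part of the original module): the attention layer above computes standard scaled
# dot-product attention, softmax(Q K^T / sqrt(head_dim) + mask) V, with the heads folded into the batch
# dimension: (bsz, seq_len, embed_dim) -> (bsz * num_heads, seq_len, head_dim) and back.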
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert
class TFHubertFeedForward(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.intermediate_dropout = tf.keras.layers.Dropout(config.activation_dropout)
self.intermediate_dense = tf.keras.layers.Dense(
units=config.intermediate_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="intermediate_dense",
)
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
self.output_dense = tf.keras.layers.Dense(
units=config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
bias_initializer="zeros",
name="output_dense",
)
self.output_dropout = tf.keras.layers.Dropout(config.hidden_dropout)
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states, training=training)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states, training=training)
return hidden_states
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert
class TFHubertEncoderLayer(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFHubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
name="attention",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
self.final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="final_layer_norm"
)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
training: bool = False,
) -> Tuple[tf.Tensor]:
attn_residual = hidden_states
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, training=training
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = attn_residual + hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states = hidden_states + self.feed_forward(hidden_states)
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert
class TFHubertEncoderLayerStableLayerNorm(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFHubertAttention(
embed_dim=config.hidden_size,
num_heads=config.num_attention_heads,
dropout=config.attention_dropout,
is_decoder=False,
name="attention",
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.feed_forward = TFHubertFeedForward(config, name="feed_forward")
self.final_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, name="final_layer_norm"
)
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
training: bool = False,
) -> Tuple[tf.Tensor]:
attn_residual = hidden_states
hidden_states = self.layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.attention(
hidden_states, attention_mask=attention_mask, training=training
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = attn_residual + hidden_states
hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert
class TFHubertEncoder(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer = [TFHubertEncoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
attention_mask = _expand_mask(attention_mask)
else:
attention_mask = None
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
if training and (dropout_probability < self.config.layerdrop): # skip the layer
continue
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
class TFHubertEncoderStableLayerNorm(tf.keras.layers.Layer):
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.pos_conv_embed = TFHubertPositionalConvEmbedding(config, name="pos_conv_embed")
self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout)
self.layer = [
TFHubertEncoderLayerStableLayerNorm(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)
]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
hidden_states = hidden_states * tf.expand_dims(attention_mask, -1)
attention_mask = _expand_mask(attention_mask)
else:
attention_mask = None
position_embeddings = self.pos_conv_embed(hidden_states)
hidden_states = hidden_states + position_embeddings
hidden_states = self.dropout(hidden_states, training=training)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = np.random.uniform(0, 1)
if training and (dropout_probability < self.config.layerdrop): # skip the layer
continue
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@keras_serializable
class TFHubertMainLayer(tf.keras.layers.Layer):
config_class = HubertConfig
def __init__(self, config: HubertConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.feature_extractor = TFHubertFeatureEncoder(config, name="feature_extractor")
self.feature_projection = TFHubertFeatureProjection(config, name="feature_projection")
if config.do_stable_layer_norm:
self.encoder = TFHubertEncoderStableLayerNorm(config, name="encoder")
else:
self.encoder = TFHubertEncoder(config, name="encoder")
def build(self, input_shape: tf.TensorShape):
self.masked_spec_embed = self.add_weight(
shape=(self.config.hidden_size,), initializer="uniform", trainable=True, name="masked_spec_embed"
)
super().build(input_shape)
def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (input_length - kernel_size) // stride + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
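    # Editorial note (not part of the original module): assuming the default HubertConfig convolution
    # stack (kernels (10, 3, 3, 3, 3, 2, 2) with strides (5, 2, 2, 2, 2, 2, 2)), the formula above maps
    # 16000 input samples (1 s of 16 kHz audio) to 49 frames, i.e. roughly one feature vector every 20 ms.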
def _mask_hidden_states(self, hidden_states: tf.Tensor, mask_time_indices: Optional[tf.Tensor] = None):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://arxiv.org/abs/1904.08779).
"""
batch_size, sequence_length, hidden_size = shape_list(hidden_states)
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states = tf.where(
tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
hidden_states,
)
elif self.config.mask_time_prob > 0:
# generate indices & apply SpecAugment along time axis
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
min_masks=2,
)
hidden_states = tf.where(
tf.cast(mask_time_indices[:, :, tf.newaxis], tf.bool),
self.masked_spec_embed[tf.newaxis, tf.newaxis, :],
hidden_states,
)
# apply SpecAugment along feature axis
if self.config.mask_feature_prob > 0:
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
)
hidden_states = tf.where(mask_feature_indices[:, tf.newaxis, :], hidden_states, 0)
return hidden_states
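    # Editorial note (not part of the original module): the method above applies SpecAugment-style
    # masking (the main layer only calls it when training): spans of `mask_time_length` timesteps are
    # replaced with the learned `masked_spec_embed` vector and, when `mask_feature_prob` > 0, spans
    # along the feature axis are masked across the whole sequence.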
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[tf.Tensor] = None,
output_hidden_states: Optional[tf.Tensor] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs: Any,
):
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
hidden_states = self.feature_extractor(
tf.cast(inputs["input_values"], tf.float32), training=inputs["training"]
)
if inputs["attention_mask"] is not None:
# compute real output lengths according to convolution formula
output_lengths = self._get_feat_extract_output_lengths(tf.reduce_sum(inputs["attention_mask"], -1))
attention_mask = tf.sequence_mask(
output_lengths, maxlen=shape_list(hidden_states)[1], dtype=hidden_states.dtype
)
hidden_states = self.feature_projection(hidden_states, training=inputs["training"])
mask_time_indices = kwargs.get("mask_time_indices", None)
if inputs["training"]:
hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = encoder_outputs[0]
if not inputs["return_dict"]:
return (hidden_states,) + encoder_outputs[1:]
return TFBaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class TFHubertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = HubertConfig
base_model_prefix = "hubert"
main_input_name = "input_values"
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
pad_token = 0.0
input_values = tf.convert_to_tensor(np.random.rand(1, 16000), tf.float32)
dummy_inputs = {
"input_values": input_values,
"attention_mask": tf.cast(tf.not_equal(input_values, pad_token), tf.float32),
}
return dummy_inputs
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
logger.warning(
f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish "
"to train/fine-tine this model, you need a GPU or a TPU"
)
@tf.function
def serving(self, inputs):
output = self.call(input_values=inputs, training=False)
return self.serving_output(output)
HUBERT_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
    tensors in the first argument of the model call function: `model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
first positional argument :
    - a single Tensor with `input_values` only and nothing else: `model(input_values)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_values": input_values, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
HUBERT_INPUTS_DOCSTRING = r"""
Args:
        input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_values` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare TFHubert Model transformer outputing raw hidden-states without any specific head on top.",
HUBERT_START_DOCSTRING,
)
class TFHubertModel(TFHubertPreTrainedModel):
def __init__(self, config: HubertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.config = config
self.hubert = TFHubertMainLayer(config, name="hubert")
@add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
"""
Returns:
Example:
```python
>>> from transformers import Wav2Vec2Processor, TFHubertModel
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h")
>>> model = TFHubertModel.from_pretrained("facebook/hubert-base-960h")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
>>> hidden_states = model(input_values).last_hidden_state
```"""
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
inputs["output_hidden_states"] = (
inputs["output_hidden_states"] if inputs["output_hidden_states"] else self.config.output_hidden_states
)
inputs["output_attentions"] = (
inputs["output_attentions"] if inputs["output_attentions"] else self.config.output_attentions
)
inputs["return_dict"] = inputs["return_dict"] if inputs["return_dict"] else self.config.return_dict
outputs = self.hubert(
input_values=inputs["input_values"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""TFHubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
HUBERT_START_DOCSTRING,
)
class TFHubertForCTC(TFHubertPreTrainedModel):
def __init__(self, config: HubertConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.hubert = TFHubertMainLayer(config, name="hubert")
self.dropout = tf.keras.layers.Dropout(config.final_dropout)
self.lm_head = tf.keras.layers.Dense(config.vocab_size, name="lm_head")
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5."
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.hubert.feature_extractor.trainable = False
@add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_values: tf.Tensor,
attention_mask: Optional[tf.Tensor] = None,
token_type_ids: Optional[tf.Tensor] = None,
position_ids: Optional[tf.Tensor] = None,
head_mask: Optional[tf.Tensor] = None,
inputs_embeds: Optional[tf.Tensor] = None,
output_attentions: Optional[bool] = None,
labels: Optional[tf.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the connectionist temporal classification (CTC) loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_values` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Returns:
Example:
```python
>>> import tensorflow as tf
>>> from transformers import Wav2Vec2Processor, TFHubertForCTC
>>> from datasets import load_dataset
>>> import soundfile as sf
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/hubert-base-960h")
>>> model = TFHubertForCTC.from_pretrained("facebook/hubert-base-960h")
>>> def map_to_array(batch):
... speech, _ = sf.read(batch["file"])
... batch["speech"] = speech
... return batch
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> ds = ds.map(map_to_array)
>>> input_values = processor(ds["speech"][0], return_tensors="tf").input_values # Batch size 1
>>> logits = model(input_values).logits
>>> predicted_ids = tf.argmax(logits, axis=-1)
>>> transcription = processor.decode(predicted_ids[0])
>>> # compute loss
>>> target_transcription = "A MAN SAID TO THE UNIVERSE SIR I EXIST"
>>> # wrap processor as target processor to encode labels
>>> with processor.as_target_processor():
        ...     labels = processor(target_transcription, return_tensors="tf").input_ids
>>> loss = model(input_values, labels=labels).loss
```"""
inputs = input_values_processing(
func=self.call,
config=self.config,
input_values=input_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
outputs = self.hubert(
input_values=inputs["input_values"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
hidden_states = outputs[0]
hidden_states = self.dropout(hidden_states, training=inputs["training"])
logits = self.lm_head(hidden_states)
if labels is not None:
if tf.reduce_max(labels) >= self.config.vocab_size:
raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")
attention_mask = (
inputs["attention_mask"]
if inputs["attention_mask"] is not None
else tf.ones_like(inputs["input_values"], dtype=tf.float32)
)
input_lengths = self.hubert._get_feat_extract_output_lengths(tf.reduce_sum(attention_mask, axis=-1))
# assuming that padded tokens are filled with -100
# when not being attended to
labels_mask = tf.cast(labels >= 0, tf.int32)
target_lengths = tf.reduce_sum(labels_mask, axis=-1)
loss = tf.nn.ctc_loss(
logits=logits,
labels=labels,
logit_length=input_lengths,
label_length=target_lengths,
blank_index=self.config.pad_token_id,
logits_time_major=False,
)
if self.config.ctc_loss_reduction == "sum":
loss = tf.reduce_sum(loss)
if self.config.ctc_loss_reduction == "mean":
loss = tf.reduce_mean(loss)
else:
loss = None
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
| [((171, 17, 171, 59), 'tensorflow.nn.top_k', 'tf.nn.top_k', ({(171, 29, 171, 45): 'distribution + z', (171, 47, 171, 58): 'num_samples'}, {}), '(distribution + z, num_samples)', True, 'import tensorflow as tf\n'), ((233, 20, 233, 75), 'tensorflow.zeros', 'tf.zeros', (), '', True, 'import tensorflow as tf\n'), ((236, 19, 236, 77), 'tensorflow.ones', 'tf.ones', ({(236, 27, 236, 76): '(batch_size, sequence_length - (mask_length - 1))'}, {}), '((batch_size, sequence_length - (mask_length - 1)))', True, 'import tensorflow as tf\n'), ((242, 25, 242, 63), 'tensorflow.expand_dims', 'tf.expand_dims', ({(242, 40, 242, 58): 'spec_aug_mask_idxs', (242, 60, 242, 62): '-1'}, {}), '(spec_aug_mask_idxs, -1)', True, 'import tensorflow as tf\n'), ((243, 25, 243, 73), 'tensorflow.tile', 'tf.tile', ({(243, 33, 243, 51): 'spec_aug_mask_idxs', (243, 53, 243, 72): '(1, 1, mask_length)'}, {}), '(spec_aug_mask_idxs, (1, 1, mask_length))', True, 'import tensorflow as tf\n'), ((244, 25, 244, 101), 'tensorflow.reshape', 'tf.reshape', ({(244, 36, 244, 54): 'spec_aug_mask_idxs', (244, 56, 244, 100): '(batch_size, num_masked_spans * mask_length)'}, {}), '(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length))', True, 'import tensorflow as tf\n'), ((247, 14, 247, 65), 'tensorflow.tile', 'tf.tile', ({(247, 22, 247, 29): 'offsets', (247, 31, 247, 64): '(batch_size, num_masked_spans, 1)'}, {}), '(offsets, (batch_size, num_masked_spans, 1))', True, 'import tensorflow as tf\n'), ((248, 14, 248, 79), 'tensorflow.reshape', 'tf.reshape', ({(248, 25, 248, 32): 'offsets', (248, 34, 248, 78): '(batch_size, num_masked_spans * mask_length)'}, {}), '(offsets, (batch_size, num_masked_spans * mask_length))', True, 'import tensorflow as tf\n'), ((267, 14, 267, 30), 'tensorflow.constant', 'tf.constant', ({(267, 26, 267, 29): '1.0'}, {}), '(1.0)', True, 'import tensorflow as tf\n'), ((268, 11, 268, 45), 'tensorflow.cast', 'tf.cast', (), '', True, 'import tensorflow as tf\n'), ((269, 20, 269, 71), 'tensorflow.tile', 'tf.tile', ({(269, 28, 269, 50): 'mask[:, (None), (None), :]', (269, 52, 269, 70): '(1, 1, tgt_len, 1)'}, {}), '(mask[:, (None), (None), :], (1, 1, tgt_len, 1))', True, 'import tensorflow as tf\n'), ((188, 39, 188, 63), 'tensorflow.reshape', 'tf.reshape', ({(188, 50, 188, 56): 'values', (188, 58, 188, 62): '[-1]'}, {}), '(values, [-1])', True, 'import tensorflow as tf\n'), ((246, 14, 246, 35), 'tensorflow.range', 'tf.range', ({(246, 23, 246, 34): 'mask_length'}, {}), '(mask_length)', True, 'import tensorflow as tf\n'), ((254, 8, 254, 40), 'tensorflow.ones_like', 'tf.ones_like', ({(254, 21, 254, 39): 'spec_aug_mask_idxs'}, {}), '(spec_aug_mask_idxs)', True, 'import tensorflow as tf\n'), ((302, 32, 302, 75), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', ({(302, 58, 302, 74): 'beta_initializer'}, {}), '(beta_initializer)', True, 'import tensorflow as tf\n'), ((303, 33, 303, 77), 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', ({(303, 59, 303, 76): 'gamma_initializer'}, {}), '(gamma_initializer)', True, 'import tensorflow as tf\n'), ((304, 32, 304, 75), 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', ({(304, 58, 304, 74): 'beta_regularizer'}, {}), '(beta_regularizer)', True, 'import tensorflow as tf\n'), ((305, 33, 305, 77), 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', ({(305, 59, 305, 76): 'gamma_regularizer'}, {}), '(gamma_regularizer)', True, 'import tensorflow as tf\n'), ((306, 31, 306, 72), 'tensorflow.keras.constraints.get', 
'tf.keras.constraints.get', ({(306, 56, 306, 71): 'beta_constraint'}, {}), '(beta_constraint)', True, 'import tensorflow as tf\n'), ((307, 32, 307, 74), 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', ({(307, 57, 307, 73): 'gamma_constraint'}, {}), '(gamma_constraint)', True, 'import tensorflow as tf\n'), ((324, 22, 324, 56), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', ({(324, 49, 324, 55): 'inputs'}, {}), '(inputs)', True, 'import tensorflow as tf\n'), ((325, 29, 325, 45), 'tensorflow.shape', 'tf.shape', ({(325, 38, 325, 44): 'inputs'}, {}), '(inputs)', True, 'import tensorflow as tf\n'), ((374, 22, 374, 65), 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', ({(374, 49, 374, 64): 'reshaped_inputs'}, {}), '(reshaped_inputs)', True, 'import tensorflow as tf\n'), ((383, 25, 383, 92), 'tensorflow.nn.moments', 'tf.nn.moments', (), '', True, 'import tensorflow as tf\n'), ((386, 28, 393, 9), 'tensorflow.nn.batch_normalization', 'tf.nn.batch_normalization', (), '', True, 'import tensorflow as tf\n'), ((510, 32, 510, 51), 'tensorflow.constant', 'tf.constant', ({(510, 44, 510, 50): '[0, 1]'}, {}), '([0, 1])', True, 'import tensorflow as tf\n'), ((520, 22, 520, 42), 'tensorflow.transpose', 'tf.transpose', ({(520, 35, 520, 41): 'kernel'}, {}), '(kernel)', True, 'import tensorflow as tf\n'), ((548, 24, 548, 104), 'tensorflow.pad', 'tf.pad', ({(548, 31, 548, 37): 'inputs', (548, 39, 548, 103): '((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0))'}, {}), '(inputs, ((0, 0), (self.explicit_padding, self.explicit_padding), (0, 0))\n )', True, 'import tensorflow as tf\n'), ((561, 20, 567, 9), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (), '', True, 'import tensorflow as tf\n'), ((583, 20, 589, 9), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (), '', True, 'import tensorflow as tf\n'), ((590, 26, 590, 110), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((607, 20, 613, 9), 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', (), '', True, 'import tensorflow as tf\n'), ((678, 24, 678, 56), 'tensorflow.expand_dims', 'tf.expand_dims', ({(678, 39, 678, 51): 'input_values', (678, 53, 678, 55): '-1'}, {}), '(input_values, -1)', True, 'import tensorflow as tf\n'), ((687, 8, 692, 9), 'warnings.warn', 'warnings.warn', ({(688, 12, 690, 68): 'f"""The class `{self.__class__.__name__}` has been depreciated and will be removed in Transformers v5. Use `{self.__class__.__bases__[0].__name__}` instead."""', (691, 12, 691, 25): 'FutureWarning'}, {}), "(\n f'The class `{self.__class__.__name__}` has been depreciated and will be removed in Transformers v5. 
Use `{self.__class__.__bases__[0].__name__}` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((699, 26, 699, 110), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((706, 23, 706, 77), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (), '', True, 'import tensorflow as tf\n'), ((732, 23, 732, 55), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(732, 47, 732, 54): 'dropout'}, {}), '(dropout)', True, 'import tensorflow as tf\n'), ((742, 22, 742, 84), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((743, 22, 743, 84), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((744, 22, 744, 84), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((745, 24, 745, 88), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((800, 21, 800, 55), 'tensorflow.reshape', 'tf.reshape', ({(800, 32, 800, 42): 'key_states', (800, 44, 800, 54): 'proj_shape'}, {}), '(key_states, proj_shape)', True, 'import tensorflow as tf\n'), ((801, 23, 801, 59), 'tensorflow.reshape', 'tf.reshape', ({(801, 34, 801, 46): 'value_states', (801, 48, 801, 58): 'proj_shape'}, {}), '(value_states, proj_shape)', True, 'import tensorflow as tf\n'), ((804, 23, 804, 76), 'tensorflow.matmul', 'tf.matmul', (), '', True, 'import tensorflow as tf\n'), ((808, 11, 808, 33), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((829, 23, 829, 59), 'tensorflow.nn.softmax', 'tf.nn.softmax', (), '', True, 'import tensorflow as tf\n'), ((847, 22, 847, 57), 'tensorflow.matmul', 'tf.matmul', ({(847, 32, 847, 42): 'attn_probs', (847, 44, 847, 56): 'value_states'}, {}), '(attn_probs, value_states)', True, 'import tensorflow as tf\n'), ((851, 11, 851, 33), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((861, 22, 861, 72), 'tensorflow.reshape', 'tf.reshape', ({(861, 33, 861, 44): 'attn_output', (861, 46, 861, 71): '(bsz, tgt_len, embed_dim)'}, {}), '(attn_output, (bsz, tgt_len, embed_dim))', True, 'import tensorflow as tf\n'), ((864, 34, 864, 99), 'tensorflow.reshape', 'tf.reshape', ({(864, 45, 864, 57): 'attn_weights', (864, 59, 864, 98): '(bsz, self.num_heads, tgt_len, src_len)'}, {}), '(attn_weights, (bsz, self.num_heads, tgt_len, src_len))', True, 'import tensorflow as tf\n'), ((874, 36, 874, 86), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(874, 60, 874, 85): 'config.activation_dropout'}, {}), '(config.activation_dropout)', True, 'import tensorflow as tf\n'), ((890, 30, 890, 76), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(890, 54, 890, 75): 'config.hidden_dropout'}, {}), '(config.hidden_dropout)', True, 'import tensorflow as tf\n'), ((913, 23, 913, 69), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(913, 47, 913, 68): 'config.hidden_dropout'}, {}), '(config.hidden_dropout)', True, 'import tensorflow as tf\n'), ((914, 26, 914, 110), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((916, 32, 918, 9), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((957, 23, 957, 69), 'tensorflow.keras.layers.Dropout', 
'tf.keras.layers.Dropout', ({(957, 47, 957, 68): 'config.hidden_dropout'}, {}), '(config.hidden_dropout)', True, 'import tensorflow as tf\n'), ((958, 26, 958, 110), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((960, 32, 962, 9), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((994, 26, 994, 110), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((995, 23, 995, 69), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(995, 47, 995, 68): 'config.hidden_dropout'}, {}), '(config.hidden_dropout)', True, 'import tensorflow as tf\n'), ((1060, 26, 1060, 110), 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', (), '', True, 'import tensorflow as tf\n'), ((1061, 23, 1061, 69), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(1061, 47, 1061, 68): 'config.hidden_dropout'}, {}), '(config.hidden_dropout)', True, 'import tensorflow as tf\n'), ((1505, 23, 1505, 68), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ({(1505, 47, 1505, 67): 'config.final_dropout'}, {}), '(config.final_dropout)', True, 'import tensorflow as tf\n'), ((1506, 23, 1506, 79), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (), '', True, 'import tensorflow as tf\n'), ((1513, 8, 1517, 9), 'warnings.warn', 'warnings.warn', ({(1514, 12, 1515, 80): '"""The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.Please use the equivalent `freeze_feature_encoder` method instead."""', (1516, 12, 1516, 25): 'FutureWarning'}, {}), "(\n 'The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5.Please use the equivalent `freeze_feature_encoder` method instead.'\n , FutureWarning)", False, 'import warnings\n'), ((69, 21, 69, 44), 'inspect.signature', 'inspect.signature', ({(69, 39, 69, 43): 'func'}, {}), '(func)', False, 'import inspect\n'), ((225, 71, 225, 94), 'tensorflow.random.uniform', 'tf.random.uniform', ({(225, 89, 225, 93): '(1,)'}, {}), '((1,))', True, 'import tensorflow as tf\n'), ((333, 22, 333, 71), 'tensorflow.reshape', 'tf.reshape', ({(333, 33, 333, 50): 'normalized_inputs', (333, 52, 333, 70): 'tensor_input_shape'}, {}), '(normalized_inputs, tensor_input_shape)', True, 'import tensorflow as tf\n'), ((346, 32, 346, 86), 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', ({(346, 64, 346, 85): 'self.beta_initializer'}, {}), '(self.beta_initializer)', True, 'import tensorflow as tf\n'), ((347, 33, 347, 88), 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', ({(347, 65, 347, 87): 'self.gamma_initializer'}, {}), '(self.gamma_initializer)', True, 'import tensorflow as tf\n'), ((348, 32, 348, 86), 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', ({(348, 64, 348, 85): 'self.beta_regularizer'}, {}), '(self.beta_regularizer)', True, 'import tensorflow as tf\n'), ((349, 33, 349, 88), 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', ({(349, 65, 349, 87): 'self.gamma_regularizer'}, {}), '(self.gamma_regularizer)', True, 'import tensorflow as tf\n'), ((350, 31, 350, 83), 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', ({(350, 62, 350, 82): 'self.beta_constraint'}, {}), '(self.beta_constraint)', True, 'import tensorflow 
as tf\n'), ((351, 32, 351, 85), 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', ({(351, 63, 351, 84): 'self.gamma_constraint'}, {}), '(self.gamma_constraint)', True, 'import tensorflow as tf\n'), ((366, 26, 366, 47), 'tensorflow.stack', 'tf.stack', ({(366, 35, 366, 46): 'group_shape'}, {}), '(group_shape)', True, 'import tensorflow as tf\n'), ((367, 30, 367, 61), 'tensorflow.reshape', 'tf.reshape', ({(367, 41, 367, 47): 'inputs', (367, 49, 367, 60): 'group_shape'}, {}), '(inputs, group_shape)', True, 'import tensorflow as tf\n'), ((401, 20, 401, 59), 'tensorflow.reshape', 'tf.reshape', ({(401, 31, 401, 41): 'self.gamma', (401, 43, 401, 58): 'broadcast_shape'}, {}), '(self.gamma, broadcast_shape)', True, 'import tensorflow as tf\n'), ((404, 19, 404, 57), 'tensorflow.reshape', 'tf.reshape', ({(404, 30, 404, 39): 'self.beta', (404, 41, 404, 56): 'broadcast_shape'}, {}), '(self.beta, broadcast_shape)', True, 'import tensorflow as tf\n'), ((519, 17, 519, 78), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (), '', True, 'import tensorflow as tf\n'), ((519, 81, 519, 108), 'tensorflow.transpose', 'tf.transpose', ({(519, 94, 519, 107): 'self.weight_g'}, {}), '(self.weight_g)', True, 'import tensorflow as tf\n'), ((748, 28, 748, 93), 'tensorflow.reshape', 'tf.reshape', ({(748, 39, 748, 45): 'tensor', (748, 47, 748, 92): '(bsz, seq_len, self.num_heads, self.head_dim)'}, {}), '(tensor, (bsz, seq_len, self.num_heads, self.head_dim))', True, 'import tensorflow as tf\n'), ((818, 15, 818, 37), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((825, 29, 825, 78), 'tensorflow.cast', 'tf.cast', (), '', True, 'import tensorflow as tf\n'), ((827, 27, 827, 93), 'tensorflow.reshape', 'tf.reshape', ({(827, 38, 827, 50): 'attn_weights', (827, 52, 827, 92): '(bsz * self.num_heads, tgt_len, src_len)'}, {}), '(attn_weights, (bsz * self.num_heads, tgt_len, src_len))', True, 'import tensorflow as tf\n'), ((834, 15, 834, 37), 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((844, 27, 844, 93), 'tensorflow.reshape', 'tf.reshape', ({(844, 38, 844, 50): 'attn_weights', (844, 52, 844, 92): '(bsz * self.num_heads, tgt_len, src_len)'}, {}), '(attn_weights, (bsz * self.num_heads, tgt_len, src_len))', True, 'import tensorflow as tf\n'), ((859, 12, 859, 82), 'tensorflow.reshape', 'tf.reshape', ({(859, 23, 859, 34): 'attn_output', (859, 36, 859, 81): '(bsz, self.num_heads, tgt_len, self.head_dim)'}, {}), '(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim))', True, 'import tensorflow as tf\n'), ((1026, 34, 1026, 57), 'numpy.random.uniform', 'np.random.uniform', ({(1026, 52, 1026, 53): '0', (1026, 55, 1026, 56): '1'}, {}), '(0, 1)', True, 'import numpy as np\n'), ((1093, 34, 1093, 57), 'numpy.random.uniform', 'np.random.uniform', ({(1093, 52, 1093, 53): '0', (1093, 55, 1093, 56): '1'}, {}), '(0, 1)', True, 'import numpy as np\n'), ((1199, 28, 1199, 94), 'tensorflow.where', 'tf.where', ({(1199, 37, 1199, 75): 'mask_feature_indices[:, (tf.newaxis), :]', (1199, 77, 1199, 90): 'hidden_states', (1199, 92, 1199, 93): '0'}, {}), '(mask_feature_indices[:, (tf.newaxis), :], hidden_states, 0)', True, 'import tensorflow as tf\n'), ((1234, 12, 1234, 55), 'tensorflow.cast', 'tf.cast', ({(1234, 20, 1234, 42): "inputs['input_values']", (1234, 44, 1234, 54): 'tf.float32'}, {}), "(inputs['input_values'], tf.float32)", True, 'import tensorflow as tf\n'), ((1284, 44, 1284, 68), 
'numpy.random.rand', 'np.random.rand', ({(1284, 59, 1284, 60): '1', (1284, 62, 1284, 67): '16000'}, {}), '(1, 16000)', True, 'import numpy as np\n'), ((1490, 13, 1490, 55), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(1490, 34, 1490, 54): 'output.hidden_states'}, {}), '(output.hidden_states)', True, 'import tensorflow as tf\n'), ((1491, 16, 1491, 55), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(1491, 37, 1491, 54): 'output.attentions'}, {}), '(output.attentions)', True, 'import tensorflow as tf\n'), ((1633, 26, 1633, 56), 'tensorflow.cast', 'tf.cast', ({(1633, 34, 1633, 45): 'labels >= 0', (1633, 47, 1633, 55): 'tf.int32'}, {}), '(labels >= 0, tf.int32)', True, 'import tensorflow as tf\n'), ((1634, 29, 1634, 64), 'tensorflow.reduce_sum', 'tf.reduce_sum', (), '', True, 'import tensorflow as tf\n'), ((1636, 19, 1643, 13), 'tensorflow.nn.ctc_loss', 'tf.nn.ctc_loss', (), '', True, 'import tensorflow as tf\n'), ((1664, 13, 1664, 55), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(1664, 34, 1664, 54): 'output.hidden_states'}, {}), '(output.hidden_states)', True, 'import tensorflow as tf\n'), ((1665, 16, 1665, 55), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', ({(1665, 37, 1665, 54): 'output.attentions'}, {}), '(output.attentions)', True, 'import tensorflow as tf\n'), ((102, 12, 105, 13), 'warnings.warn', 'warnings.warn', ({(103, 16, 103, 122): '"""The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead."""', (104, 16, 104, 29): 'FutureWarning'}, {}), "(\n 'The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((110, 12, 113, 13), 'warnings.warn', 'warnings.warn', ({(111, 16, 111, 140): '"""The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', (112, 16, 112, 29): 'FutureWarning'}, {}), "(\n 'The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.'\n , FutureWarning)", False, 'import warnings\n'), ((183, 39, 183, 65), 'tensorflow.range', 'tf.range', ({(183, 48, 183, 64): 'indices_shape[0]'}, {}), '(indices_shape[0])', True, 'import tensorflow as tf\n'), ((186, 68, 186, 102), 'tensorflow.reshape', 'tf.reshape', ({(186, 79, 186, 92): 'batch_indices', (186, 94, 186, 101): '[1, -1]'}, {}), '(batch_indices, [1, -1])', True, 'import tensorflow as tf\n'), ((514, 44, 514, 68), 'tensorflow.square', 'tf.square', ({(514, 54, 514, 67): 'self.weight_v'}, {}), '(self.weight_v)', True, 'import tensorflow as tf\n'), ((529, 38, 529, 63), 'tensorflow.transpose', 'tf.transpose', ({(529, 51, 529, 62): 'self.kernel'}, {}), '(self.kernel)', True, 'import tensorflow as tf\n'), ((826, 27, 826, 92), 'tensorflow.reshape', 'tf.reshape', ({(826, 38, 826, 50): 'attn_weights', (826, 52, 826, 91): '(bsz, self.num_heads, tgt_len, src_len)'}, {}), '(attn_weights, (bsz, self.num_heads, tgt_len, src_len))', True, 'import tensorflow as tf\n'), ((841, 27, 841, 69), 'tensorflow.reshape', 'tf.reshape', ({(841, 38, 841, 53): 'layer_head_mask', (841, 55, 841, 68): '(1, -1, 1, 1)'}, {}), '(layer_head_mask, (1, -1, 1, 1))', True, 'import tensorflow as tf\n'), ((841, 72, 843, 13), 'tensorflow.reshape', 'tf.reshape', ({(842, 16, 842, 28): 'attn_weights', (842, 30, 842, 69): '(bsz, self.num_heads, tgt_len, src_len)'}, {}), '(attn_weights, (bsz, self.num_heads, tgt_len, src_len))', True, 'import tensorflow 
as tf\n'), ((1011, 44, 1011, 78), 'tensorflow.expand_dims', 'tf.expand_dims', ({(1011, 59, 1011, 73): 'attention_mask', (1011, 75, 1011, 77): '(-1)'}, {}), '(attention_mask, -1)', True, 'import tensorflow as tf\n'), ((1079, 44, 1079, 78), 'tensorflow.expand_dims', 'tf.expand_dims', ({(1079, 59, 1079, 73): 'attention_mask', (1079, 75, 1079, 77): '(-1)'}, {}), '(attention_mask, -1)', True, 'import tensorflow as tf\n'), ((1173, 16, 1173, 69), 'tensorflow.cast', 'tf.cast', ({(1173, 24, 1173, 59): 'mask_time_indices[:, :, (tf.newaxis)]', (1173, 61, 1173, 68): 'tf.bool'}, {}), '(mask_time_indices[:, :, (tf.newaxis)], tf.bool)', True, 'import tensorflow as tf\n'), ((1239, 67, 1239, 110), 'tensorflow.reduce_sum', 'tf.reduce_sum', ({(1239, 81, 1239, 105): "inputs['attention_mask']", (1239, 107, 1239, 109): '-1'}, {}), "(inputs['attention_mask'], -1)", True, 'import tensorflow as tf\n'), ((1287, 38, 1287, 75), 'tensorflow.not_equal', 'tf.not_equal', ({(1287, 51, 1287, 63): 'input_values', (1287, 65, 1287, 74): 'pad_token'}, {}), '(input_values, pad_token)', True, 'import tensorflow as tf\n'), ((1621, 15, 1621, 36), 'tensorflow.reduce_max', 'tf.reduce_max', ({(1621, 29, 1621, 35): 'labels'}, {}), '(labels)', True, 'import tensorflow as tf\n'), ((1627, 21, 1627, 75), 'tensorflow.ones_like', 'tf.ones_like', (), '', True, 'import tensorflow as tf\n'), ((1629, 73, 1629, 111), 'tensorflow.reduce_sum', 'tf.reduce_sum', (), '', True, 'import tensorflow as tf\n'), ((1646, 23, 1646, 42), 'tensorflow.reduce_sum', 'tf.reduce_sum', ({(1646, 37, 1646, 41): 'loss'}, {}), '(loss)', True, 'import tensorflow as tf\n'), ((1648, 23, 1648, 43), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(1648, 38, 1648, 42): 'loss'}, {}), '(loss)', True, 'import tensorflow as tf\n'), ((781, 25, 781, 75), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((782, 27, 782, 79), 'tensorflow.concat', 'tf.concat', (), '', True, 'import tensorflow as tf\n'), ((1187, 16, 1187, 69), 'tensorflow.cast', 'tf.cast', ({(1187, 24, 1187, 59): 'mask_time_indices[:, :, (tf.newaxis)]', (1187, 61, 1187, 68): 'tf.bool'}, {}), '(mask_time_indices[:, :, (tf.newaxis)], tf.bool)', True, 'import tensorflow as tf\n')] |
marioluan/mit-opencourseware-cs | 600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py | 5de013f8e321fed2ff3b7a13e8929a44805db78b | # 6.00 Problem Set 2
#
# Hangman
# Name : Solutions
# Collaborators : <your collaborators>
# Time spent : <total time>
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
def choose_word(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# load the list of words into the wordlist variable
# so that it can be accessed from anywhere in the program
wordlist = load_words()
def partial_word(secret_word, guessed_letters):
"""
Return the secret_word in user-visible format, with underscores used
to replace characters that have not yet been guessed.
"""
result = ''
for letter in secret_word:
if letter in guessed_letters:
result = result + letter
else:
result = result + '_'
return result
def hangman():
"""
Runs the hangman game.
"""
print 'Welcome to the game, Hangman!'
secret_word = choose_word(wordlist)
print 'I am thinking of a word that is ' + str(len(secret_word)) + ' letters long.'
num_guesses = 8
word_guessed = False
guessed_letters = ''
available_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',
'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# Letter-guessing loop. Ask the user to guess a letter and respond to the
# user based on whether the word has yet been correctly guessed.
while num_guesses > 0 and not word_guessed:
print '-------------'
print 'You have ' + str(num_guesses) + ' guesses left.'
print 'Available letters: ' + ''.join(available_letters)
guess = raw_input('Please guess a letter:')
if guess not in available_letters:
print 'Oops! You\'ve already guessed that letter: ' + partial_word(secret_word, guessed_letters)
elif guess not in secret_word:
num_guesses -= 1
available_letters.remove(guess)
print 'Oops! That letter is not in my word: ' + partial_word(secret_word, guessed_letters)
else:
available_letters.remove(guess)
guessed_letters += guess
print 'Good guess: ' + partial_word(secret_word, guessed_letters)
if secret_word == partial_word(secret_word, guessed_letters):
word_guessed = True
if word_guessed:
print 'Congratulations, you won!'
else:
print 'Game over.'
| [] |
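The masking rule that partial_word implements is easiest to see with a tiny example; the sketch below is a hypothetical Python 3 rendering of the same contract and is not part of the original assignment file.

# Hypothetical Python 3 equivalent of partial_word, shown only to illustrate its contract.
def partial_word_py3(secret_word, guessed_letters):
    # Keep guessed letters, mask everything else with an underscore.
    return ''.join(ch if ch in guessed_letters else '_' for ch in secret_word)

# partial_word_py3('apple', 'pl') -> '_ppl_'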
forestsheep/middleman | top/api/rest/FenxiaoRefundMessageAddRequest.py | 34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd | '''
Created by auto_sdk on 2016.04.13
'''
from top.api.base import RestApi
class FenxiaoRefundMessageAddRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.image = None
self.message_content = None
self.sub_order_id = None
def getapiname(self):
return 'taobao.fenxiao.refund.message.add'
def getMultipartParas(self):
return ['image']
| [((7, 2, 7, 37), 'top.api.base.RestApi.__init__', 'RestApi.__init__', ({(7, 19, 7, 23): 'self', (7, 24, 7, 30): 'domain', (7, 32, 7, 36): 'port'}, {}), '(self, domain, port)', False, 'from top.api.base import RestApi\n')] |
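A hedged usage sketch for the request class above: the attribute assignments mirror the fields defined in __init__, while the surrounding SDK executor that would actually send the request is assumed and not shown here.

# Hypothetical call site; only FenxiaoRefundMessageAddRequest comes from the file above.
req = FenxiaoRefundMessageAddRequest()
req.sub_order_id = 123456789             # distributor sub-order the message belongs to
req.message_content = 'Refund evidence attached.'
req.image = open('evidence.jpg', 'rb')   # multipart field, as reported by getMultipartParas()
# req would then be passed to whatever execute/sign helper the Taobao SDK provides.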
AaratiAkkapeddi/nnabla-examples | image-generation/slegan/args.py | db9e5ad850303c158773aeb275e5c3821b4a3935 | # Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def get_args(batch_size=8, image_size=256, max_iter=100000):
"""
Get command line arguments.
    The function arguments provide the default values for the corresponding command line arguments.
"""
import argparse
import os
description = "Example of Lightweight GAN."
parser = argparse.ArgumentParser(description)
parser.add_argument("-d", "--device-id", type=str, default="0",
help="Device id.")
parser.add_argument("-c", "--context", type=str, default="cudnn",
help="Context.")
parser.add_argument("--type-config", "-t", type=str, default='float',
help='Type of computation. e.g. "float", "half".')
parser.add_argument("--img-path", type=str,
default="~/AnimalFace-dog",
help="Image path.")
parser.add_argument("--image-size", type=int, default=image_size,
help="Image size.")
parser.add_argument("--batch-size", "-b", type=int, default=batch_size,
help="Batch size.")
parser.add_argument("--max-iter", "-i", type=int, default=max_iter,
help="Max iterations.")
parser.add_argument("--save-interval", type=int, default=50000,
help="Interval for saving models.")
parser.add_argument("--test-interval", type=int, default=5000,
help="Interval for testing models.")
parser.add_argument("--latent", type=int, default=256,
help="Number of latent variables.")
parser.add_argument("--monitor-path", type=str, default="./result/tmp",
help="Monitor path.")
parser.add_argument("--model-load-path", type=str, default=".",
help="Path to load parameters from")
parser.add_argument("--train-samples", type=int, default=-1,
help="Number of data to be used. When -1 is set all data is used.")
parser.add_argument("--lr", type=float, default=2e-4,
help="Learning rate")
parser.add_argument("--aug-list", nargs="+",
default=["lrflip", "translation", "color"])
args = parser.parse_args()
return args
def save_args(args, mode="train"):
from nnabla import logger
import os
if not os.path.exists(args.monitor_path):
os.makedirs(args.monitor_path)
path = "{}/Arguments-{}.txt".format(args.monitor_path, mode)
logger.info("Arguments are saved to {}.".format(path))
with open(path, "w") as fp:
for k, v in sorted(vars(args).items()):
logger.info("{}={}".format(k, v))
fp.write("{}={}\n".format(k, v))
| [((27, 13, 27, 49), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({(27, 37, 27, 48): 'description'}, {}), '(description)', False, 'import argparse\n'), ((69, 11, 69, 44), 'os.path.exists', 'os.path.exists', ({(69, 26, 69, 43): 'args.monitor_path'}, {}), '(args.monitor_path)', False, 'import os\n'), ((70, 8, 70, 38), 'os.makedirs', 'os.makedirs', ({(70, 20, 70, 37): 'args.monitor_path'}, {}), '(args.monitor_path)', False, 'import os\n')] |
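A short sketch of how get_args and save_args are typically combined at the top of a training script; the call site itself is an assumption, but the parameter names come from the parser above.

# Hypothetical entry point wiring the two helpers defined above.
args = get_args(batch_size=16, image_size=128, max_iter=50000)
save_args(args, mode="train")   # writes Arguments-train.txt under args.monitor_path
# args.device_id, args.lr, args.aug_list, etc. would then be handed to the training loop.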
grenn72/pynet-ons-feb19 | day1/files_ex1.py | 5aff7dfa6a697214dc24818819a60b46a261d0d3 | #!/usr/bin/env python
from __future__ import print_function
# READ ####
f = open("my_file.txt")
print("\nLoop directly over file")
print("-" * 60)
for line in f:
print(line.strip())
print("-" * 60)
f.seek(0)
my_content = f.readlines()
print("\nUse readlines method")
print("-" * 60)
for line in my_content:
print(line.strip())
print("-" * 60)
f.seek(0)
my_content = f.read()
print("\nUse read + splitlines")
print("-" * 60)
for line in my_content.splitlines():
print(line)
print("-" * 60)
f.close()
with open("my_file.txt") as f:
print("\nUse with and loop over file")
print("-" * 60)
for line in f:
print(line.strip())
print("-" * 60)
# WRITE ####
print("\nWriting file.")
f = open("new_file.txt", "w")
f.write("whatever2\n")
f.close()
# APPEND ####
print("\nAppending file.")
with open("new_file.txt", "a") as f:
f.write("something else\n")
print()
| [] |
self-host/selfhost-python-client | test/integration_tests/test_integration_datasets_client.py | 95797ef819099174d916b10e82878c370b1cd972 | import uuid
from typing import List, Dict, Any
import unittest
from selfhost_client import SelfHostClient, DatasetType
class TestIntegrationDatasetsClient(unittest.TestCase):
"""
Run these tests individually because Self-Host will return HTTP 429 Too Many Requests otherwise.
"""
@classmethod
def setUpClass(cls) -> None:
cls.client: SelfHostClient = SelfHostClient(
base_url='http://127.0.0.1:8080',
username='test',
password='root'
)
cls.unique_name: str = str(uuid.uuid4())
cls.created_dataset: DatasetType = cls.client.create_dataset(
name=cls.unique_name,
dataset_format='ini',
content='aGVsbG8sIHdvcmxkIQ==',
tags=['test_tag']
)
@classmethod
def tearDownClass(cls) -> None:
cls.client.delete_dataset(cls.created_dataset['uuid'])
def test_get_datasets(self) -> None:
params: Dict[str, int] = {
'limit': 20,
'offset': 0
}
datasets: List[DatasetType] = self.client.get_datasets(**params)
self.assertIsNotNone(datasets)
def test_create_and_delete_dataset(self) -> None:
        # Creation and deletion happen in the setUpClass and tearDownClass methods.
self.assertEqual(self.created_dataset['name'], self.unique_name)
def test_get_dataset(self) -> None:
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], self.created_dataset['name'])
def test_update_dataset(self) -> None:
self.client.update_dataset(
dataset_uuid=self.created_dataset['uuid'],
name=f'{self.created_dataset["name"]} Updated',
dataset_format='json',
tags=['updated']
)
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], f'{self.created_dataset["name"]} Updated')
self.assertEqual(fetched_dataset['format'], 'json')
self.assertEqual(fetched_dataset['tags'], ['updated'])
def test_get_dataset_raw_content(self) -> None:
fetched_content: Any = self.client.get_dataset_raw_content(self.created_dataset['uuid'])
self.assertIsNotNone(fetched_content)
| [((16, 37, 20, 9), 'selfhost_client.SelfHostClient', 'SelfHostClient', (), '', False, 'from selfhost_client import SelfHostClient, DatasetType\n'), ((21, 35, 21, 47), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n')] |
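Because of the rate-limit note in the class docstring, one way to exercise a single test at a time is to build a one-test suite; the snippet below is a sketch that assumes it runs in the same module as the test class above.

import unittest

# Run exactly one integration test to stay under the Self-Host rate limit (a sketch).
suite = unittest.TestSuite()
suite.addTest(TestIntegrationDatasetsClient('test_get_datasets'))
unittest.TextTestRunner(verbosity=2).run(suite)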
pnxenopoulos/soccer-data-gen | setup.py | bdc31be973eb12cdd9f58b04ab61ea9d5d1aa7a5 | from setuptools import setup, find_packages
setup(
name="soccergen",
version="0.1",
packages=find_packages(),
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires=["gfootball>=2.8",],
# metadata to display on PyPI
author="Peter Xenopoulos",
author_email="[email protected]",
description="Soccer trajectory and event data generation",
keywords="soccer data-generation foootball",
url="https://github.com/pnxenopoulos/soccer-data-gen", # project home page, if any
project_urls={
"Issues": "https://github.com/pnxenopoulos/soccer-data-gen/issues",
"Documentation": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
"Github": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
},
classifiers=["License :: OSI Approved :: MIT License"],
)
| [((6, 13, 6, 28), 'setuptools.find_packages', 'find_packages', ({}, {}), '()', False, 'from setuptools import setup, find_packages\n')] |
METASPACE2020/METASPACE | metaspace/engine/sm/engine/tests/test_fdr.py | e1acd9a409f84a78eed7ca9713258c09b0e137ca | from itertools import product
from unittest.mock import patch
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers
FDR_CONFIG = {'decoy_sample_size': 2}
@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H', '+K', '[M]+'],
analysis_version=1,
)
exp_target_decoy_df = pd.DataFrame(
[
('H2O', '+H', '+He'),
('H2O', '+H', '+Li'),
('H2O', '+K', '+He'),
('H2O', '+K', '+Li'),
('H2O', '', '+He'),
('H2O', '', '+Li'),
],
columns=['formula', 'tm', 'dm'],
)
fdr.decoy_adducts_selection(target_formulas=['H2O'])
assert_frame_equal(
fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
)
@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs):
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=analysis_version,
)
fdr.fdr_levels = [0.2, 0.8]
fdr.td_df = pd.DataFrame(
[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
['H2O', '+Cu', 0.5],
['H2O', '+Co', 0.5],
['C2H2', '+Ag', 0.75],
['C2H2', '+Ar', 0.0],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
],
columns=['formula', 'modifier', 'msm'],
).assign(fdr=expected_fdrs)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_estimate_fdr_digitize_works():
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=1,
)
fdr.fdr_levels = [0.4, 0.8]
fdr.td_df = pd.DataFrame(
[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['C1', '+H', 1.0],
['C2', '+H', 0.75],
['C3', '+H', 0.5],
['C4', '+H', 0.25],
['C1', '+Cu', 0.75],
['C2', '+Ag', 0.3],
['C3', '+Cl', 0.25],
['C4', '+Co', 0.1],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['C1', '+H', 1.0, 0.4],
['C2', '+H', 0.75, 0.4],
['C3', '+H', 0.5, 0.4],
['C4', '+H', 0.25, 0.8],
],
columns=['formula', 'modifier', 'msm', 'fdr'],
)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_ions():
formulas = ['H2O', 'C5H2OH']
target_adducts = ['+H', '+Na']
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
assert (
len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
< len(ions)
<= len(formulas) * len(target_adducts) * decoy_sample_size
+ len(formulas) * len(target_adducts)
)
target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_chem_mods_and_neutral_losses():
formulas = ['H2O', 'C5H2OH']
chem_mods = ['-H+C']
neutral_losses = ['-O', '-C']
target_adducts = ['+H', '+Na', '[M]+']
target_modifiers = [
format_modifiers(cm, nl, ta)
for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
]
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=chem_mods,
neutral_losses=neutral_losses,
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
min_count = len(formulas) * len(target_modifiers)
max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
assert min_count < len(ions) <= max_count
target_ions = list(product(formulas, target_modifiers))
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_run_fdr_ranking():
target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
expected_fdr = n_decoys / n_targets
expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)
expected_fdr_mono = pd.Series(
[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
)
fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)
fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)
assert np.isclose(fdr, expected_fdr).all()
assert np.isclose(fdr_ros, expected_fdr_ros).all()
assert np.isclose(fdr_mono, expected_fdr_mono).all()
| [((15, 1, 15, 64), 'unittest.mock.patch', 'patch', ({(15, 7, 15, 47): '"""sm.engine.annotation.fdr.DECOY_ADDUCTS"""', (15, 49, 15, 63): "['+He', '+Li']"}, {}), "('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])", False, 'from unittest.mock import patch\n'), ((45, 1, 45, 98), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(45, 25, 45, 57): '"""analysis_version,expected_fdrs"""', (45, 59, 45, 97): '[(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])]'}, {}), "('analysis_version,expected_fdrs', [(1, [0.2, 0.8]),\n (3, [1 / 4, 2 / 3])])", False, 'import pytest\n'), ((17, 10, 23, 5), 'sm.engine.annotation.fdr.FDR', 'FDR', (), '', False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((25, 26, 35, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((47, 10, 53, 5), 'sm.engine.annotation.fdr.FDR', 'FDR', (), '', False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((55, 16, 58, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((60, 13, 70, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((84, 10, 90, 5), 'sm.engine.annotation.fdr.FDR', 'FDR', (), '', False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((92, 16, 95, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((97, 13, 109, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((110, 16, 118, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((129, 10, 135, 5), 'sm.engine.annotation.fdr.FDR', 'FDR', (), '', False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((163, 10, 169, 5), 'sm.engine.annotation.fdr.FDR', 'FDR', (), '', False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((183, 20, 183, 86), 'pandas.Series', 'pd.Series', ({(183, 30, 183, 85): '[1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]'}, {}), '([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])', True, 'import pandas as pd\n'), ((184, 19, 184, 51), 'pandas.Series', 'pd.Series', ({(184, 29, 184, 50): '[0.8, 0.55, 0.2, 0.1]'}, {}), '([0.8, 0.55, 0.2, 0.1])', True, 'import pandas as pd\n'), ((185, 16, 185, 62), 'pandas.Series', 'pd.Series', ({(185, 26, 185, 61): '[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]'}, {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])', True, 'import pandas as pd\n'), ((186, 15, 186, 59), 'pandas.Series', 'pd.Series', ({(186, 25, 186, 58): '[0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4]'}, {}), '([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])', True, 'import pandas as pd\n'), ((189, 24, 191, 5), 'pandas.Series', 'pd.Series', ({(190, 8, 190, 87): '[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]'}, {}), '([0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 /\n 11, 4 / 11])', True, 'import pandas as pd\n'), ((193, 10, 193, 71), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', ({(193, 26, 193, 39): 'target_scores', (193, 41, 193, 53): 'decoy_scores', (193, 55, 193, 56): '1', (193, 58, 193, 63): 'False', (193, 65, 193, 70): 'False'}, {}), '(target_scores, decoy_scores, 1, False, False)', False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((194, 14, 194, 74), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', ({(194, 30, 194, 43): 'target_scores', (194, 45, 194, 57): 'decoy_scores', (194, 59, 194, 60): '1', (194, 62, 194, 66): 'True', (194, 68, 194, 73): 'False'}, {}), '(target_scores, decoy_scores, 1, True, False)', False, 'from 
sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((195, 15, 195, 75), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', ({(195, 31, 195, 44): 'target_scores', (195, 46, 195, 58): 'decoy_scores', (195, 60, 195, 61): '1', (195, 63, 195, 68): 'False', (195, 70, 195, 74): 'True'}, {}), '(target_scores, decoy_scores, 1, False, True)', False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((157, 8, 157, 36), 'sm.engine.formula_parser.format_modifiers', 'format_modifiers', ({(157, 25, 157, 27): 'cm', (157, 29, 157, 31): 'nl', (157, 33, 157, 35): 'ta'}, {}), '(cm, nl, ta)', False, 'from sm.engine.formula_parser import format_modifiers\n'), ((178, 23, 178, 58), 'itertools.product', 'product', ({(178, 31, 178, 39): 'formulas', (178, 41, 178, 57): 'target_modifiers'}, {}), '(formulas, target_modifiers)', False, 'from itertools import product\n'), ((71, 16, 77, 5), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((147, 60, 147, 93), 'itertools.product', 'product', ({(147, 68, 147, 76): 'formulas', (147, 78, 147, 92): 'target_adducts'}, {}), '(formulas, target_adducts)', False, 'from itertools import product\n'), ((158, 26, 158, 90), 'itertools.product', 'product', ({(158, 34, 158, 50): "['', *chem_mods]", (158, 52, 158, 73): "['', *neutral_losses]", (158, 75, 158, 89): 'target_adducts'}, {}), "(['', *chem_mods], ['', *neutral_losses], target_adducts)", False, 'from itertools import product\n'), ((197, 11, 197, 40), 'numpy.isclose', 'np.isclose', ({(197, 22, 197, 25): 'fdr', (197, 27, 197, 39): 'expected_fdr'}, {}), '(fdr, expected_fdr)', True, 'import numpy as np\n'), ((198, 11, 198, 48), 'numpy.isclose', 'np.isclose', ({(198, 22, 198, 29): 'fdr_ros', (198, 31, 198, 47): 'expected_fdr_ros'}, {}), '(fdr_ros, expected_fdr_ros)', True, 'import numpy as np\n'), ((199, 11, 199, 50), 'numpy.isclose', 'np.isclose', ({(199, 22, 199, 30): 'fdr_mono', (199, 32, 199, 49): 'expected_fdr_mono'}, {}), '(fdr_mono, expected_fdr_mono)', True, 'import numpy as np\n')] |
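The ranking convention asserted by test_run_fdr_ranking can be reproduced directly: for every target score, count the decoys and targets scoring at least as high and take their ratio. The sketch below recomputes expected_fdr that way without touching run_fdr_ranking itself.

import numpy as np

# Recompute the plain FDR ranking from the test above by brute force.
target_scores = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
decoy_scores = np.array([0.8, 0.55, 0.2, 0.1])
n_targets = np.array([(target_scores >= s).sum() for s in target_scores])
n_decoys = np.array([(decoy_scores >= s).sum() for s in target_scores])
fdr = n_decoys / n_targets   # equals expected_fdr in test_run_fdr_ranking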
acarl005/plotille | tests/__init__.py | 44089a88f20b71b3314416947ae724bebbdc7739 | from logging import getLogger
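# Keep flake8's log records from propagating to the root logger so they do not clutter test output.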
getLogger('flake8').propagate = False
| [((3, 0, 3, 19), 'logging.getLogger', 'getLogger', ({(3, 10, 3, 18): '"""flake8"""'}, {}), "('flake8')", False, 'from logging import getLogger\n')] |
penguinwang96825/Umigame | umigame/nlp/labelling.py | 98d647ab6f40df08fe31d6b3bc444afe229a914e | import math
import numpy as np
import pandas as pd
def fixed_time_horizon(df, column='close', lookback=20):
"""
Fixed-time Horizon
As it relates to finance, virtually all ML papers label observations using the fixed-time horizon method.
Fixed-time horizon is presented as one of the main procedures to label data when it comes to processing
financial time series for machine learning.
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
    lookback: int
The number of days to look ahead.
References
----------
1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html
2. https://arxiv.org/pdf/1603.08604.pdf
3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/
4. De Prado, Advances in financial machine learning, 2018
5. Dixon et al., Classification-based financial markets prediction using deep neural networks, 2017
"""
price = df[column]
label = (price.shift(-lookback) / price > 1).astype(int)
return label
def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True):
"""
Triple Barrier
The idea is to consider the full dynamics of a trading strategy and not a simple performance proxy.
The rationale for this extension is that often money managers implement P&L triggers that cash in
when gains are sufficient or opt out to stop their losses. Upon inception of the strategy,
three barriers are fixed (De Prado, 2018).
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
ub: float
It stands for upper bound, e.g. 0.07 is a 7% profit taking.
lb: float
It stands for lower bound, e.g. 0.03 is a 3% stop loss.
    lookback: int
        Maximum holding time (number of bars).
References
----------
1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/
2. http://www.mlfactor.com/Data.html#the-triple-barrier-method
3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/
4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e
5. De Prado, Advances in financial machine learning, 2018
"""
ub = 1 + ub
    lb = 1 - lb
def end_price(s):
return np.append(s[(s / s[0] > ub) | (s / s[0] < lb)], s[-1])[0]/s[0]
r = np.array(range(lookback))
def end_time(s):
return np.append(r[(s / s[0] > ub) | (s / s[0] < lb)], lookback-1)[0]
price = df[column]
p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1)
t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1)
t = pd.Series(
[t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT')
for i, k in enumerate(t)], index=t.index
).dropna()
label = pd.Series(0, p.index)
label.loc[p > ub] = 1
label.loc[p < lb] = -1
if binary_classification:
label = np.where(label == 1, 1, 0)
return pd.Series(label, index=price.index)
def get_continuous_trading_signals(df, column='close', lookahead=5):
"""
Continuous Trading Signal
A hybrid stock trading framework integrating technical analysis with machine learning techniques.
Parameters
----------
df: pd.DataFrame
column: str
Choose from "open", "high", "low", and "close."
    lookahead: int
The number of days to look ahead.
References
----------
1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf
2. Dash and Dash, A hybrid stock trading framework integrating technical analysis with machine learning techniques, 2016
"""
    price = df[column]
OTr = []
trends = []
for idx in range(len(price)-lookahead+1):
arr_window = price[idx:(idx+lookahead)]
if price[idx+lookahead-1] > price[idx]:
coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
y_t = coef * 0.5 + 0.5
elif price[idx+lookahead-1] <= price[idx]:
coef = (price[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
y_t = coef * 0.5
OTr.append(y_t)
OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr))))
trends = (OTr >= np.mean(OTr)).astype(int)
return pd.Series(OTr, index=price.index), pd.Series(trends, index=price.index) | [((81, 12, 81, 33), 'pandas.Series', 'pd.Series', ({(81, 22, 81, 23): '0', (81, 25, 81, 32): 'p.index'}, {}), '(0, p.index)', True, 'import pandas as pd\n'), ((87, 11, 87, 46), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((85, 16, 85, 42), 'numpy.where', 'np.where', ({(85, 25, 85, 35): 'label == 1', (85, 37, 85, 38): '1', (85, 40, 85, 41): '0'}, {}), '(label == 1, 1, 0)', True, 'import numpy as np\n'), ((122, 11, 122, 44), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((122, 46, 122, 82), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((71, 15, 71, 74), 'numpy.append', 'np.append', ({(71, 25, 71, 61): 'r[(s / s[0] > ub) | (s / s[0] < lb)]', (71, 63, 71, 73): '(lookback - 1)'}, {}), '(r[(s / s[0] > ub) | (s / s[0] < lb)], lookback - 1)', True, 'import numpy as np\n'), ((66, 15, 66, 69), 'numpy.append', 'np.append', ({(66, 25, 66, 61): 's[(s / s[0] > ub) | (s / s[0] < lb)]', (66, 63, 66, 68): 's[-1]'}, {}), '(s[(s / s[0] > ub) | (s / s[0] < lb)], s[-1])', True, 'import numpy as np\n'), ((121, 21, 121, 33), 'numpy.mean', 'np.mean', ({(121, 29, 121, 32): 'OTr'}, {}), '(OTr)', True, 'import numpy as np\n'), ((77, 55, 77, 75), 'numpy.datetime64', 'np.datetime64', ({(77, 69, 77, 74): '"""NaT"""'}, {}), "('NaT')", True, 'import numpy as np\n'), ((77, 34, 77, 49), 'math.isnan', 'math.isnan', ({(77, 45, 77, 48): 'k + i'}, {}), '(k + i)', False, 'import math\n')] |
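A brief, hedged usage sketch for the three labelling helpers above; the price DataFrame is synthetic and exists only to show the expected input shape (a DatetimeIndex and a 'close' column).

import numpy as np
import pandas as pd

# Synthetic close prices on a business-day index, purely for illustration.
idx = pd.date_range('2021-01-01', periods=250, freq='B')
prices = pd.DataFrame({'close': 100 + np.cumsum(np.random.randn(250))}, index=idx)

y_fixed = fixed_time_horizon(prices, 'close', lookback=20)                 # 1 if the price is higher 20 bars ahead
y_triple = triple_barrier(prices, 'close', ub=0.07, lb=0.03, lookback=20)  # 1 when the profit-take barrier is hit first, else 0
otr, trend = get_continuous_trading_signals(prices, 'close', lookahead=5)  # continuous signal and its binarised trend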
Dave360-crypto/mayan-edms | mayan/apps/converter/api.py | 9cd37537461347f79ff0429e4b8b16fd2446798d | from __future__ import absolute_import
import hashlib
import logging
import os
from django.utils.encoding import smart_str
from common.conf.settings import TEMPORARY_DIRECTORY
from common.utils import fs_cleanup
from .exceptions import OfficeConversionError, UnknownFileFormat
from .literals import (DEFAULT_PAGE_NUMBER,
DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT)
from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE,
TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR,
FILE_FORMATS)
from .runtime import backend, office_converter
HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()
logger = logging.getLogger(__name__)
def cache_cleanup(input_filepath, *args, **kwargs):
try:
os.remove(create_image_cache_filename(input_filepath, *args, **kwargs))
except OSError:
pass
def create_image_cache_filename(input_filepath, *args, **kwargs):
if input_filepath:
hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)]))
return os.path.join(TEMPORARY_DIRECTORY, hash_value)
else:
return None
def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs):
size = kwargs.get('size')
file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT)
zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL)
rotation = kwargs.get('rotation', DEFAULT_ROTATION)
page = kwargs.get('page', DEFAULT_PAGE_NUMBER)
transformations = kwargs.get('transformations', [])
if transformations is None:
transformations = []
if output_filepath is None:
output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs)
if os.path.exists(output_filepath):
return output_filepath
if office_converter:
try:
office_converter.convert(input_filepath, mimetype=mimetype)
if office_converter.exists:
input_filepath = office_converter.output_filepath
mimetype = 'application/pdf'
else:
# Recycle the already detected mimetype
mimetype = office_converter.mimetype
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
if size:
transformations.append(
{
'transformation': TRANSFORMATION_RESIZE,
'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR)))
}
)
if zoom != 100:
transformations.append(
{
'transformation': TRANSFORMATION_ZOOM,
'arguments': {'percent': zoom}
}
)
if rotation != 0 and rotation != 360:
transformations.append(
{
'transformation': TRANSFORMATION_ROTATE,
'arguments': {'degrees': rotation}
}
)
try:
backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype)
finally:
if cleanup_files:
fs_cleanup(input_filepath)
return output_filepath
def get_page_count(input_filepath):
logger.debug('office_converter: %s' % office_converter)
if office_converter:
try:
office_converter.convert(input_filepath)
logger.debug('office_converter.exists: %s' % office_converter.exists)
if office_converter.exists:
input_filepath = office_converter.output_filepath
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
return backend.get_page_count(input_filepath)
def get_available_transformations_choices():
result = []
for transformation in backend.get_available_transformations():
result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label']))
return result
def get_format_list():
return [(format, FILE_FORMATS.get(format, u'')) for format in backend.get_format_list()]
| [((22, 9, 22, 36), 'logging.getLogger', 'logging.getLogger', ({(22, 27, 22, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((54, 7, 54, 38), 'os.path.exists', 'os.path.exists', ({(54, 22, 54, 37): 'output_filepath'}, {}), '(output_filepath)', False, 'import os\n'), ((35, 15, 35, 60), 'os.path.join', 'os.path.join', ({(35, 28, 35, 47): 'TEMPORARY_DIRECTORY', (35, 49, 35, 59): 'hash_value'}, {}), '(TEMPORARY_DIRECTORY, hash_value)', False, 'import os\n'), ((20, 26, 20, 43), 'hashlib.sha256', 'hashlib.sha256', ({(20, 41, 20, 42): 'x'}, {}), '(x)', False, 'import hashlib\n'), ((98, 12, 98, 38), 'common.utils.fs_cleanup', 'fs_cleanup', ({(98, 23, 98, 37): 'input_filepath'}, {}), '(input_filepath)', False, 'from common.utils import fs_cleanup\n'), ((34, 59, 34, 84), 'django.utils.encoding.smart_str', 'smart_str', ({(34, 69, 34, 83): 'input_filepath'}, {}), '(input_filepath)', False, 'from django.utils.encoding import smart_str\n')] |
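A hedged sketch of calling convert() for a single page image. The paths are placeholders, 'jpg' is assumed to be a valid entry of FILE_FORMATS, and the size string assumes DIMENSION_SEPARATOR is 'x'; when output_filepath is omitted the result lands in the hash-named cache file.

# Hypothetical call; the cache filename is derived from the input path and arguments.
cached_image = convert(
    '/tmp/incoming/document.pdf',
    size='800x600',      # handled by TRANSFORMATION_RESIZE (split on DIMENSION_SEPARATOR, assumed 'x')
    zoom=150,            # percent; added as TRANSFORMATION_ZOOM because it differs from 100
    rotation=90,         # degrees; added as TRANSFORMATION_ROTATE
    page=1,
    file_format='jpg',   # assumed to be a key of FILE_FORMATS
)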
ValYouW/DeepLearningCourse | LogisticRegression/learn.py | d7d9edc60075f9078ec3f41074c958eaa7854964 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import utils
def plot_data(x_mat, y, db_x, db_y):
plt.figure()
plt.title('Data')
admitted = (y == 1).flatten()
rejected = (y == 0).flatten()
# plot decision boundary
plt.plot(db_x, db_y)
# plot admitted
plt.scatter(x_mat[admitted, 0], x_mat[admitted, 1], color='blue', marker='+')
# plot rejected
plt.scatter(x_mat[rejected, 0], x_mat[rejected, 1], edgecolors='red', facecolors='none', marker='o')
plt.xlabel('exam 1 score')
plt.ylabel('exam 2 score')
plt.legend(['boundary', 'admitted', 'rejected'])
def main():
print('Loading dataset...')
# data is: exam 1 score, exam 2 score, bool whether admitted
frame = pd.read_csv('ex2data1.csv', header=None)
data = frame.values
x_mat = data[:, 0:2] # exam scores
y = data[:, 2:3] # admitted or not
	# normalize input (the raw exam scores are large, which makes the sigmoid saturate at 0 or 1)
x_mean = np.mean(x_mat, axis=0)
x_std = np.std(x_mat, axis=0)
x_norm = (x_mat - x_mean) / x_std
# add intercept
x_norm = np.insert(x_norm, 0, 1, axis=1)
# Learn model
print('starting to learn...')
(loss, reg_loss, theta) = utils.learn(x_norm, y, 5000, 0.1)
print('Final loss %s' % loss[-1])
print('Final theta \n%s' % theta)
# predict for student
joe = np.array([[45, 85]])
joe_norm = (joe - x_mean) / x_std
joe_norm = np.insert(joe_norm, 0, 1, axis=1)
p = utils.sigmoid(joe_norm.dot(theta))
print('Student with grades %s and %s has admission probability: %s' % (45, 85, p[0, 0]))
# Predict on train set
prediction = (utils.sigmoid(x_norm.dot(theta)) >= 0.5)
actual = (y == 1)
predict_success = np.sum(prediction == actual)
print('Model evaluation on training set has success of %s/%s' % (predict_success, y.shape[0]))
# calc decision boundary
# The decision boundary is the threshold line that separates true/false predictions,
# this means that on this line the prediction is exactly 0.5, meaning:
# p = sigmoid(x_mat.dot(theta)) = 0.5 ====> x_mat.dot(theta) = 0
# so our line equation is: theta0 + theta1*x1 + theta2*x2 = 0
# x2 = -theta0 / theta2 - (theta1/theta2)*x1
theta = theta.flatten()
# calc 2 points on the line
plot_x = np.array([np.min(x_norm[:, 1]), np.max(x_norm[:, 1])])
plot_y = -1 * (theta[0] / theta[2]) - (theta[1] / theta[2]) * plot_x
# denormalize the points
plot_x = plot_x * x_std[0] + x_mean[0]
plot_y = plot_y * x_std[1] + x_mean[1]
plot_data(x_mat, y, plot_x, plot_y)
utils.plot_loss(loss)
plt.show()
if __name__ == '__main__':
main()
| [((8, 4, 8, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((9, 4, 9, 21), 'matplotlib.pyplot.title', 'plt.title', ({(9, 14, 9, 20): '"""Data"""'}, {}), "('Data')", True, 'import matplotlib.pyplot as plt\n'), ((14, 4, 14, 24), 'matplotlib.pyplot.plot', 'plt.plot', ({(14, 13, 14, 17): 'db_x', (14, 19, 14, 23): 'db_y'}, {}), '(db_x, db_y)', True, 'import matplotlib.pyplot as plt\n'), ((17, 4, 17, 81), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((20, 4, 20, 104), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((22, 4, 22, 30), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(22, 15, 22, 29): '"""exam 1 score"""'}, {}), "('exam 1 score')", True, 'import matplotlib.pyplot as plt\n'), ((23, 4, 23, 30), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(23, 15, 23, 29): '"""exam 2 score"""'}, {}), "('exam 2 score')", True, 'import matplotlib.pyplot as plt\n'), ((24, 4, 24, 52), 'matplotlib.pyplot.legend', 'plt.legend', ({(24, 15, 24, 51): "['boundary', 'admitted', 'rejected']"}, {}), "(['boundary', 'admitted', 'rejected'])", True, 'import matplotlib.pyplot as plt\n'), ((30, 12, 30, 52), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((36, 13, 36, 35), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((37, 12, 37, 33), 'numpy.std', 'np.std', (), '', True, 'import numpy as np\n'), ((41, 13, 41, 44), 'numpy.insert', 'np.insert', (), '', True, 'import numpy as np\n'), ((45, 30, 45, 63), 'utils.learn', 'utils.learn', ({(45, 42, 45, 48): 'x_norm', (45, 50, 45, 51): 'y', (45, 53, 45, 57): '5000', (45, 59, 45, 62): '0.1'}, {}), '(x_norm, y, 5000, 0.1)', False, 'import utils\n'), ((50, 10, 50, 30), 'numpy.array', 'np.array', ({(50, 19, 50, 29): '[[45, 85]]'}, {}), '([[45, 85]])', True, 'import numpy as np\n'), ((52, 15, 52, 48), 'numpy.insert', 'np.insert', (), '', True, 'import numpy as np\n'), ((59, 22, 59, 50), 'numpy.sum', 'np.sum', ({(59, 29, 59, 49): 'prediction == actual'}, {}), '(prediction == actual)', True, 'import numpy as np\n'), ((79, 4, 79, 25), 'utils.plot_loss', 'utils.plot_loss', ({(79, 20, 79, 24): 'loss'}, {}), '(loss)', False, 'import utils\n'), ((81, 4, 81, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((71, 23, 71, 43), 'numpy.min', 'np.min', ({(71, 30, 71, 42): 'x_norm[:, (1)]'}, {}), '(x_norm[:, (1)])', True, 'import numpy as np\n'), ((71, 45, 71, 65), 'numpy.max', 'np.max', ({(71, 52, 71, 64): 'x_norm[:, (1)]'}, {}), '(x_norm[:, (1)])', True, 'import numpy as np\n')] |
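The utils module imported above is not part of this file; the sketch below shows one plausible shape for its sigmoid and learn helpers, consistent with how they are called here, and is an assumption rather than the course's actual implementation.

import numpy as np

# Assumed shapes: x is (m, n+1) including the intercept column, y is (m, 1).
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def learn(x, y, iterations, alpha):
    m = y.shape[0]
    theta = np.zeros((x.shape[1], 1))
    loss_history = []
    for _ in range(iterations):
        h = sigmoid(x.dot(theta))
        loss_history.append(-np.mean(y * np.log(h) + (1 - y) * np.log(1 - h)))  # cross-entropy
        theta -= (alpha / m) * x.T.dot(h - y)   # batch gradient-descent step
    # The real utils.learn also returns a regularised loss curve; it is simply mirrored here.
    return loss_history, loss_history, theta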
iamhardikat11/ignite | ignite/handlers/time_profilers.py | 0666b407f7cdba81842014c6026e33b66113bb94
import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor]
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we can not use BasicTimeProfiler in this case."
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
# compute on non-zero data:
data = data[data > 0]
out = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
if len(data) > 1:
out += [
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
return OrderedDict(out)
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
) # type: Union[int, torch.Tensor]
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)] # type: ignore[list-item]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
EVENT_FILTER_THESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = [] # type: List[float]
self.processing_times = [] # type: List[float]
self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]]
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered" # type: Union[str, float]
min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
mean = "None" # type: Union[str, float]
std = "None" # type: Union[str, float]
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
# adopted implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
asmodehn/aiokraken | bellmanford.py | b260bd41d5aa091e6a4f1818328426fbe6f625c0
"""
Bellman Ford Arbitrage implementation over websocket API.
"""
from __future__ import annotations
from collections import namedtuple
from datetime import datetime
from decimal import Decimal
from math import log
import pandas as pd
import numpy as np
import asyncio
import typing
from aiokraken.model.assetpair import AssetPair
from aiokraken.rest import AssetPairs, Assets
from aiokraken.model.asset import Asset
from aiokraken.rest.client import RestClient
from aiokraken.websockets.publicapi import ticker
import networkx as nx
client = RestClient()
async def ticker_updates(pairs: typing.Union[AssetPairs, typing.Iterable[AssetPair]], pmatrix):
# For required pairs, get ticket updates
if isinstance(pairs, AssetPairs): # TODO : we need to unify iterable of pairs somehow...
properpairs = pairs
pairs = [p for p in pairs.values()]
else:
properpairs = AssetPairs({p.wsname: p for p in pairs})
tkrs = await client.ticker(pairs=[p for p in pairs])
# TODO : build price matrix
for p, tk in tkrs.items():
# retrieve the actual pair
pair = properpairs[p]
fee = pair.fees[0].get('fee')
# TODO : pick the right fee depending on total traded volume !
await pmatrix(base=pair.base, quote=pair.quote, ask_price=tk.ask.price, bid_price=tk.bid.price, fee_pct=fee)
# TODO : 2 levels :
# - slow updates with wide list of pairs and potential interest (no fees - small data for quick compute)
# - websockets with potential arbitrage (including fees - detailed data & precise compute)
async for upd in ticker(pairs=pairs, restclient=client):
print(f"wss ==> tick: {upd}")
# update pricematrix
base = upd.pairname.base
quote = upd.pairname.quote
fee = properpairs[upd.pairname].fees[0].get('fee')
await pmatrix(base=base, quote=quote, ask_price=upd.ask.price, bid_price=upd.bid.price, fee_pct=fee)
class PriceMatrix:
# Note This matrix is square
# since we want to do arbitrage and find cycles...
df: pd.DataFrame
# we also need to be careful that only one writer can modify data at a time...
wlock: asyncio.Lock
assets: typing.Optional[Assets]
def __init__(self, assets: typing.Union[Assets, typing.Iterable[Asset]]):
self.wlock = asyncio.Lock()
if isinstance(assets, Assets):
assets = [a for a in assets.values()]
        # accept plain string codes as well as Asset objects (see test_pricematrix_mapping below)
        self.df = pd.DataFrame(data={getattr(c, "restname", c): {getattr(r, "restname", r): None for r in assets} for c in assets}, columns=[getattr(c, "restname", c) for c in assets], dtype='float64')
self.assets = None
async def __call__(self, base: Asset, ask_price: Decimal, quote: Asset, bid_price: Decimal, fee_pct: Decimal):
if self.assets is None: # retrieve assets for filtering calls params, only once.
self.assets = await client.retrieve_assets()
async with self.wlock: # careful with concurrent control.
if not isinstance(base, Asset):
base = self.assets[base].restname
if not isinstance(quote, Asset):
quote = self.assets[quote].restname
# These are done with decimal, but stored as numpy floats for faster compute
self.df[quote][base] = bid_price * ((100 - fee_pct) /100) # bid price to get: quote_curr -- (buy_price - fee) --> base_curr
self.df[base][quote] = ((100 - fee_pct)/100) / ask_price # ask price to get: base_curr -- (sell_price - fee) --> quote_curr
def __getitem__(self, item):
if item not in self.df.columns:
raise KeyError(f"{item} not found")
if item not in self.df:
            return pd.Series(dtype=object)  # pandas has no Decimal dtype; fall back to object
return self.df[item]
def __len__(self):
return len(self.df.columns)
def __str__(self):
return self.df.to_string()
def neglog(self):
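        """Return a new PriceMatrix with every price replaced by -log(price).

        Taking -log turns the product of conversion rates along a cycle into a sum of
        edge weights, so a cycle whose rate product is greater than 1 (an arbitrage
        opportunity) becomes a cycle with negative total weight, which Bellman-Ford
        can detect. Returns False until assets have been resolved (i.e. before the
        first price update).
        """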
if not self.assets:
return False
newpm = PriceMatrix(assets=[self.assets[c] for c in self.df.columns])
# copy all values and take -log()
for c in self.df.columns:
# TODO : fix this : is it on row, or columns ? which is best ??
newpm.df[c] = np.negative(np.log(self.df[c]))
return newpm
def to_graph(self):
G = nx.from_pandas_adjacency(self.df, create_using=nx.DiGraph)
# from bokeh.io import output_file, show
# from bokeh.plotting import figure, from_networkx
#
# plot = figure(title="Networkx Integration Demonstration", x_range=(-1.1, 1.1), y_range=(-1.1, 1.1),
# tools="", toolbar_location=None)
#
# graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0))
# plot.renderers.append(graph)
#
# output_file("networkx_graph.html")
# show(plot)
return G
def test_pricematrix_mapping():
# testing with string for simplicity for now
pm = PriceMatrix(["EUR", "BTC"])
pm["EUR"]["BTC"] = Decimal(1.234)
pm["BTC"]["EUR"] = Decimal(4.321)
assert pm["EUR"]["BTC"] == Decimal(1.234)
assert pm["BTC"]["EUR"] == Decimal(4.321)
async def arbiter(user_assets):
assets = await client.retrieve_assets()
proper_userassets = Assets(assets_as_dict={assets[a].restname: assets[a] for a in user_assets})
assetpairs = await client.retrieve_assetpairs()
proper_userpairs = AssetPairs(assetpairs_as_dict={p.wsname:p for p in assetpairs.values()
if p.wsname is not None and (
p.base in proper_userassets or p.quote in proper_userassets
)})
# retrieving widely related assets
related_assets = set(assets[p.base] for p in proper_userpairs.values()) | set(assets[p.quote] for p in proper_userpairs.values())
proper_related_assets = Assets({a.restname: a for a in related_assets})
pmtx = PriceMatrix(assets=proper_related_assets)
# running ticker updates in background
bgtsk = asyncio.create_task(ticker_updates(pairs=proper_userpairs, pmatrix=pmtx))
try:
# observe pricematrix changes
while True:
# TODO : efficient TUI lib !
# print(pmtx)
# pricegraph = pmtx.to_graph() # display...
neglog = pmtx.neglog()
if neglog:
negcycle = bellmanford(neglog)
if len(negcycle):
amnt = 1 # arbitrary starting amount
pred = negcycle[-1]
dscr = f"{amnt} {pred}"
for cn in reversed(negcycle[:-1]):
amnt = amnt * pmtx[pred][cn]
pred = cn
dscr = dscr + f" -> {amnt} {pred}"
print(f"ARBITRAGE POSSIBLE: {dscr}")
# TODO : from these we can extract market making opportunities ??
# Another way :
# negloggraph = neglog.to_graph()
#
# negcycle = list()
#
# if nx.negative_edge_cycle(negloggraph):
# # find it !
# print("NEGATIVE CYCLE FOUND !")
#
# # Now find it
# print(f"computing cycles... {datetime.now()}")
#
# for cycle in nx.simple_cycles(negloggraph):
# # for cycle in nx.cycle_basis(negloggraph): # NOT implemented !
# # find negative weight sum (cycle need to be more than one node)
# if sum(negloggraph[n][m].get('weight') for n, m in zip(cycle, cycle[1:])) < 0:
# print(f"Found one: {cycle}")
# negcycle.append(cycle)
# print(negcycle)
# print(f"computing cycles DONE ! {datetime.now()}")
await asyncio.sleep(5)
finally:
# in every case cancel the background task now
bgtsk.cancel()
# TODO: react !
def bellmanford(pmatrix_neglog: PriceMatrix, source='ZEUR'):
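    """Detect a negative-weight cycle (an arbitrage loop) in a -log price matrix.

    Runs Bellman-Ford from `source`, relaxing only vertices already reached, then checks
    whether any edge can still be relaxed. If so, the predecessor chain is followed until
    an asset repeats, and that cycle is returned as a tuple of asset codes (same asset at
    both ends, listed in predecessor order, i.e. reversed with respect to the trade
    direction). Returns an empty tuple when no such cycle is reachable from `source`.
    """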
n = len(pmatrix_neglog)
min_dist = {source: 0}
min_pred = {}
# Relax edges |V - 1| times
for i in range(n - 1): # iterations
for v in pmatrix_neglog.df.columns: # vertex source
if v in min_dist.keys(): # otherwise distance infinite until we know it...
for w in pmatrix_neglog.df.columns: # vertex target
if w not in min_dist.keys() or min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
min_dist[w] = min_dist[v] + pmatrix_neglog[v][w]
min_pred[w] = v
# If we can still relax edges, then we have a negative cycle
for v in pmatrix_neglog.df.columns:
if v in min_dist.keys(): # otherwise node is not yet relevant here
for w in pmatrix_neglog.df.columns:
if min_dist[w] > min_dist[v] + pmatrix_neglog[v][w]:
# print(f"{min_dist[w]} > {min_dist[v]} + {pmatrix_neglog[v][w]}")
path = (w, min_pred[w])
while len(set(path)) == len(path): # while no duplicates, cycle is not complete...
path = (*path, min_pred[path[-1]])
# First cycle retrieved is *likely* (?) to be the minimal one -> the only one we are interested in
return path[path.index(path[-1]):]
return ()
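# --- Illustrative sketch: exercise bellmanford() on a hand-built matrix.
# `_StaticNegLog` and `_demo_bellmanford` are hypothetical helpers for illustration only;
# the class mimics just the attributes bellmanford() touches (.df, __getitem__, __len__).
class _StaticNegLog:
    def __init__(self, rates: pd.DataFrame):
        # rates[v][w] is the conversion rate for the edge v -> w; weights are -log(rate)
        self.df = -np.log(rates)

    def __getitem__(self, item):
        return self.df[item]

    def __len__(self):
        return len(self.df.columns)

def _demo_bellmanford():
    # EUR -> USD -> JPY -> EUR multiplies to about 1.03, so the -log weights around
    # that cycle sum to a negative number and a cycle should be reported.
    rates = pd.DataFrame({
        "EUR": {"EUR": 1.0, "USD": 1.10, "JPY": 148.0},
        "USD": {"EUR": 0.90, "USD": 1.0, "JPY": 140.0},
        "JPY": {"EUR": 0.0067, "USD": 0.0071, "JPY": 1.0},
    })
    cycle = bellmanford(_StaticNegLog(rates), source="EUR")
    print(f"arbitrage cycle: {cycle}")  # expected to contain EUR, USD and JPY, same asset at both ends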
if __name__ == '__main__':
asyncio.run(arbiter(user_assets=["XTZ", "ETH", "XBT", "EUR"]), debug=True)
borys-kupar/smart-home | custom_components/snowtire/__init__.py | f9c5ac949106e09278b97f49d5e08f0d495b24ef
#
# Copyright (c) 2020, Andrey "Limych" Khrolenok <[email protected]>
# Creative Commons BY-NC-SA 4.0 International Public License
# (see LICENSE.md or https://creativecommons.org/licenses/by-nc-sa/4.0/)
#
"""
The Snowtire binary sensor.
For more details about this platform, please refer to the documentation at
https://github.com/Limych/ha-snowtire/
"""
manishgit138/pomegranate | tests/test_bayes_classifier.py | 3457dcefdd623483b8efec7e9d87fd1bf4c115b0
from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy as np
nan = numpy.nan
def setup_multivariate_gaussian():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
mu, cov = [2, 2, 2], numpy.eye(3)
d2 = MultivariateGaussianDistribution(mu, cov)
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[-1.2, -1.8, -1.5],
[-1.8, 0.3, 0.5],
[ 0.7, -1.3, -0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[-1.2, -1.8, -1.5],
[ nan, 0.3, 0.5],
[ nan, -1.3, nan]])
def setup_multivariate_mixed():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
d21 = ExponentialDistribution(5)
d22 = LogNormalDistribution(0.2, 0.8)
d23 = PoissonDistribution(3)
d2 = IndependentComponentsDistribution([d21, d22, d23])
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[ 1.2, 1.8, 1.5],
[ 1.8, 0.3, 0.5],
[ 0.7, 1.3, 0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[ 1.2, 1.8, 1.5],
[ nan, 0.3, 0.5],
[ nan, 1.3, nan]])
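# setup_hmm builds three coin-flip HMMs for the classifier tests below:
# hmm1 always emits from a rigged coin (P(H)=0.8), hmm2 from a fair coin, and hmm3
# switches between the rigged and fair states with probability 0.5, so the
# BayesClassifier built from them labels sequences by their most likely generator.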
def setup_hmm():
global model
global hmm1
global hmm2
global hmm3
rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )
hmm1 = HiddenMarkovModel()
hmm1.start = rigged
hmm1.add_transition(rigged, rigged, 1)
hmm1.bake()
hmm2 = HiddenMarkovModel()
hmm2.start = unrigged
hmm2.add_transition(unrigged, unrigged, 1)
hmm2.bake()
hmm3 = HiddenMarkovModel()
hmm3.add_transition(hmm3.start, unrigged, 0.5)
hmm3.add_transition(hmm3.start, rigged, 0.5)
hmm3.add_transition(rigged, rigged, 0.5)
hmm3.add_transition(rigged, unrigged, 0.5)
hmm3.add_transition(unrigged, rigged, 0.5)
hmm3.add_transition(unrigged, unrigged, 0.5)
hmm3.bake()
model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
pass
def teardown():
pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 9.60834277e-01, 3.91657228e-02],
[ 3.10025519e-01, 6.89974481e-01],
[ 1.79862100e-02, 9.82013790e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 5.54485247e-05, 9.99944551e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 1.67981615e-01, 8.32018385e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.16827304e-01, 8.31726965e-02],
[ 9.90048198e-01, 9.95180187e-03]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 6.99086440e-01, 3.00913560e-01],
[ 2.98988163e-01, 7.01011837e-01],
[ 7.84134838e-01, 2.15865162e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 8.98102888e-05, 9.99910190e-01],
[ 9.99370443e-01, 6.29556825e-04],
[ 2.68992964e-01, 7.31007036e-01],
[ 7.69692511e-01, 2.30307489e-01],
[ 7.94751748e-01, 2.05248252e-01],
[ 3.09439547e-01, 6.90560453e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
y_hat = model.predict(X)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
y_hat = model.predict(X)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.28333333, 0.21666666]
cov1_t = [[1.3088888, 0.9272222, 0.6227777],
[0.9272222, 2.2513888, 1.3402777],
[0.6227777, 1.3402777, 0.9547222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687499, 0.23687499, 0.4793750],
[0.23687499, 0.40187499, 0.5318749],
[0.47937500, 0.53187499, 0.7868750]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [1.033333, 1.3166667, 0.75]
cov1_t = [[0.242222, 0.0594444, 0.178333],
[0.059444, 0.5980555, 0.414166],
[0.178333, 0.4141666, 0.439166]]
d21 = model.distributions[1].distributions[0]
d22 = model.distributions[1].distributions[1]
d23 = model.distributions[1].distributions[2]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(d21.parameters, [0.34188034])
assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.2833333, 0.21666666]
cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
[0.927222222, 2.251388888, 1.340277777],
[0.622777777, 1.340277777, 0.9547222222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687500, 0.23687499, 0.47937500],
[0.23687499, 0.40187499, 0.53187499],
[0.47937500, 0.53187499, 0.78687500]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(logs[0][0], -0.89097292388986515)
assert_almost_equal(logs[0][1], -1.3609765531356006)
assert_almost_equal(logs[0][2], -1.0986122886681096)
assert_almost_equal(logs[1][0], -0.93570553121744293)
assert_almost_equal(logs[1][1], -1.429425687080494)
assert_almost_equal(logs[1][2], -0.9990078376167526)
assert_almost_equal(logs[2][0], -3.9007882563128864)
assert_almost_equal(logs[2][1], -0.23562532881626597)
assert_almost_equal(logs[2][2], -1.6623251045711958)
assert_almost_equal(logs[3][0], -3.1703366478831185)
assert_almost_equal(logs[3][1], -0.49261403211260379)
assert_almost_equal(logs[3][2], -1.058478108940049)
assert_almost_equal(logs[4][0], -1.3058441172130273)
assert_almost_equal(logs[4][1], -1.4007102236822906)
assert_almost_equal(logs[4][2], -0.7284958836972919)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(probs[0][0], 0.41025641025641024)
assert_almost_equal(probs[0][1], 0.25641025641025639)
assert_almost_equal(probs[0][2], 0.33333333333333331)
assert_almost_equal(probs[1][0], 0.39230898163446098)
assert_almost_equal(probs[1][1], 0.23944639992337707)
assert_almost_equal(probs[1][2], 0.36824461844216183)
assert_almost_equal(probs[2][0], 0.020225961918306088)
assert_almost_equal(probs[2][1], 0.79007663743383105)
assert_almost_equal(probs[2][2], 0.18969740064786292)
assert_almost_equal(probs[3][0], 0.041989459861032523)
assert_almost_equal(probs[3][1], 0.61102706038265642)
assert_almost_equal(probs[3][2], 0.346983479756311)
assert_almost_equal(probs[4][0], 0.27094373022369794)
assert_almost_equal(probs[4][1], 0.24642188711704707)
assert_almost_equal(probs[4][2], 0.48263438265925512)
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_equal(predicts[0], 0)
assert_equal(predicts[1], 0)
assert_equal(predicts[2], 1)
assert_equal(predicts[3], 1)
assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
logp1 = model.log_probability(X)
logp2 = model.log_probability(X2)
logp3 = model.log_probability(X3)
assert_array_almost_equal(logp1, logp2)
assert_array_almost_equal(logp1, logp3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict(X)
y_hat2 = model.predict(X2)
y_hat3 = model.predict(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_proba(X)
y_hat2 = model.predict_proba(X2)
y_hat3 = model.predict_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_log_proba(X)
y_hat2 = model.predict_log_proba(X2)
y_hat3 = model.predict_log_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2) | [((140, 1, 140, 50), 'nose.tools.with_setup', 'with_setup', ({(140, 12, 140, 39): 'setup_multivariate_gaussian', (140, 41, 140, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((146, 1, 146, 47), 'nose.tools.with_setup', 'with_setup', ({(146, 12, 146, 36): 'setup_multivariate_mixed', (146, 38, 146, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((153, 1, 153, 50), 'nose.tools.with_setup', 'with_setup', ({(153, 12, 153, 39): 'setup_multivariate_gaussian', (153, 41, 153, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((170, 1, 170, 47), 'nose.tools.with_setup', 'with_setup', ({(170, 12, 170, 36): 'setup_multivariate_mixed', (170, 38, 170, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((187, 1, 187, 50), 'nose.tools.with_setup', 'with_setup', ({(187, 12, 187, 39): 'setup_multivariate_gaussian', (187, 41, 187, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((204, 1, 204, 47), 'nose.tools.with_setup', 'with_setup', ({(204, 12, 204, 36): 'setup_multivariate_mixed', (204, 38, 204, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((221, 1, 221, 50), 'nose.tools.with_setup', 'with_setup', ({(221, 12, 221, 39): 'setup_multivariate_gaussian', (221, 41, 221, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((238, 1, 238, 47), 'nose.tools.with_setup', 'with_setup', ({(238, 12, 238, 36): 'setup_multivariate_mixed', (238, 38, 238, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((255, 1, 255, 50), 'nose.tools.with_setup', 'with_setup', ({(255, 12, 255, 39): 'setup_multivariate_gaussian', (255, 41, 255, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((272, 1, 272, 47), 'nose.tools.with_setup', 'with_setup', ({(272, 12, 272, 36): 'setup_multivariate_mixed', (272, 38, 272, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((289, 1, 289, 50), 'nose.tools.with_setup', 'with_setup', ({(289, 12, 289, 39): 'setup_multivariate_gaussian', (289, 41, 289, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((306, 1, 306, 47), 'nose.tools.with_setup', 'with_setup', ({(306, 12, 306, 36): 'setup_multivariate_mixed', (306, 38, 306, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((323, 1, 323, 50), 'nose.tools.with_setup', 'with_setup', ({(323, 12, 323, 39): 'setup_multivariate_gaussian', (323, 41, 323, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((340, 1, 340, 47), 'nose.tools.with_setup', 'with_setup', ({(340, 12, 340, 36): 'setup_multivariate_mixed', (340, 38, 340, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((357, 1, 357, 50), 'nose.tools.with_setup', 'with_setup', ({(357, 12, 357, 39): 'setup_multivariate_gaussian', (357, 41, 357, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', 
False, 'from nose.tools import with_setup\n'), ((365, 1, 365, 47), 'nose.tools.with_setup', 'with_setup', ({(365, 12, 365, 36): 'setup_multivariate_mixed', (365, 38, 365, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((373, 1, 373, 50), 'nose.tools.with_setup', 'with_setup', ({(373, 12, 373, 39): 'setup_multivariate_gaussian', (373, 41, 373, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((381, 1, 381, 47), 'nose.tools.with_setup', 'with_setup', ({(381, 12, 381, 36): 'setup_multivariate_mixed', (381, 38, 381, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((389, 1, 389, 50), 'nose.tools.with_setup', 'with_setup', ({(389, 12, 389, 39): 'setup_multivariate_gaussian', (389, 41, 389, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((397, 1, 397, 47), 'nose.tools.with_setup', 'with_setup', ({(397, 12, 397, 36): 'setup_multivariate_mixed', (397, 38, 397, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((405, 1, 405, 50), 'nose.tools.with_setup', 'with_setup', ({(405, 12, 405, 39): 'setup_multivariate_gaussian', (405, 41, 405, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((430, 1, 430, 47), 'nose.tools.with_setup', 'with_setup', ({(430, 12, 430, 36): 'setup_multivariate_mixed', (430, 38, 430, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((452, 1, 452, 50), 'nose.tools.with_setup', 'with_setup', ({(452, 12, 452, 39): 'setup_multivariate_gaussian', (452, 41, 452, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((476, 1, 476, 50), 'nose.tools.with_setup', 'with_setup', ({(476, 12, 476, 39): 'setup_multivariate_gaussian', (476, 41, 476, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((486, 1, 486, 47), 'nose.tools.with_setup', 'with_setup', ({(486, 12, 486, 36): 'setup_multivariate_mixed', (486, 38, 486, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((496, 1, 496, 50), 'nose.tools.with_setup', 'with_setup', ({(496, 12, 496, 39): 'setup_multivariate_gaussian', (496, 41, 496, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((506, 1, 506, 47), 'nose.tools.with_setup', 'with_setup', ({(506, 12, 506, 36): 'setup_multivariate_mixed', (506, 38, 506, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((516, 1, 516, 50), 'nose.tools.with_setup', 'with_setup', ({(516, 12, 516, 39): 'setup_multivariate_gaussian', (516, 41, 516, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((526, 1, 526, 47), 'nose.tools.with_setup', 'with_setup', ({(526, 12, 526, 36): 'setup_multivariate_mixed', (526, 38, 526, 46): 'teardown'}, {}), '(setup_multivariate_mixed, teardown)', False, 'from nose.tools import with_setup\n'), ((536, 1, 536, 32), 'nose.tools.with_setup', 'with_setup', ({(536, 12, 536, 21): 'setup_hmm', (536, 23, 536, 31): 'teardown'}, {}), '(setup_hmm, teardown)', False, 'from nose.tools import 
with_setup\n'), ((561, 1, 561, 32), 'nose.tools.with_setup', 'with_setup', ({(561, 12, 561, 21): 'setup_hmm', (561, 23, 561, 31): 'teardown'}, {}), '(setup_hmm, teardown)', False, 'from nose.tools import with_setup\n'), ((586, 1, 586, 32), 'nose.tools.with_setup', 'with_setup', ({(586, 12, 586, 21): 'setup_hmm', (586, 23, 586, 31): 'teardown'}, {}), '(setup_hmm, teardown)', False, 'from nose.tools import with_setup\n'), ((611, 1, 611, 32), 'nose.tools.with_setup', 'with_setup', ({(611, 12, 611, 21): 'setup_hmm', (611, 23, 611, 31): 'teardown'}, {}), '(setup_hmm, teardown)', False, 'from nose.tools import with_setup\n'), ((621, 1, 621, 50), 'nose.tools.with_setup', 'with_setup', ({(621, 12, 621, 39): 'setup_multivariate_gaussian', (621, 41, 621, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((633, 1, 633, 50), 'nose.tools.with_setup', 'with_setup', ({(633, 12, 633, 39): 'setup_multivariate_gaussian', (633, 41, 633, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((645, 1, 645, 50), 'nose.tools.with_setup', 'with_setup', ({(645, 12, 645, 39): 'setup_multivariate_gaussian', (645, 41, 645, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((657, 1, 657, 50), 'nose.tools.with_setup', 'with_setup', ({(657, 12, 657, 39): 'setup_multivariate_gaussian', (657, 41, 657, 49): 'teardown'}, {}), '(setup_multivariate_gaussian, teardown)', False, 'from nose.tools import with_setup\n'), ((142, 1, 142, 25), 'nose.tools.assert_equal', 'assert_equal', ({(142, 14, 142, 21): 'model.d', (142, 23, 142, 24): '(3)'}, {}), '(model.d, 3)', False, 'from nose.tools import assert_equal\n'), ((143, 1, 143, 25), 'nose.tools.assert_equal', 'assert_equal', ({(143, 14, 143, 21): 'model.n', (143, 23, 143, 24): '(2)'}, {}), '(model.n, 2)', False, 'from nose.tools import assert_equal\n'), ((144, 1, 144, 34), 'nose.tools.assert_equal', 'assert_equal', ({(144, 14, 144, 26): 'model.is_vl_', (144, 28, 144, 33): '(False)'}, {}), '(model.is_vl_, False)', False, 'from nose.tools import assert_equal\n'), ((148, 1, 148, 25), 'nose.tools.assert_equal', 'assert_equal', ({(148, 14, 148, 21): 'model.d', (148, 23, 148, 24): '(3)'}, {}), '(model.d, 3)', False, 'from nose.tools import assert_equal\n'), ((149, 1, 149, 25), 'nose.tools.assert_equal', 'assert_equal', ({(149, 14, 149, 21): 'model.n', (149, 23, 149, 24): '(2)'}, {}), '(model.n, 2)', False, 'from nose.tools import assert_equal\n'), ((150, 1, 150, 34), 'nose.tools.assert_equal', 'assert_equal', ({(150, 14, 150, 26): 'model.is_vl_', (150, 28, 150, 33): '(False)'}, {}), '(model.is_vl_, False)', False, 'from nose.tools import assert_equal\n'), ((167, 1, 167, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(167, 27, 167, 28): 'y', (167, 30, 167, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((184, 1, 184, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(184, 27, 184, 28): 'y', (184, 30, 184, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((201, 1, 201, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(201, 27, 201, 28): 'y', (201, 30, 201, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((218, 1, 218, 36), 'numpy.testing.assert_array_almost_equal', 
'assert_array_almost_equal', ({(218, 27, 218, 28): 'y', (218, 30, 218, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((235, 1, 235, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(235, 27, 235, 28): 'y', (235, 30, 235, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((252, 1, 252, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(252, 27, 252, 28): 'y', (252, 30, 252, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((269, 1, 269, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(269, 27, 269, 28): 'y', (269, 30, 269, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((286, 1, 286, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(286, 27, 286, 28): 'y', (286, 30, 286, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((303, 1, 303, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(303, 27, 303, 28): 'y', (303, 30, 303, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((320, 1, 320, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(320, 27, 320, 28): 'y', (320, 30, 320, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((337, 1, 337, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(337, 27, 337, 28): 'y', (337, 30, 337, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((354, 1, 354, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(354, 27, 354, 28): 'y', (354, 30, 354, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((362, 1, 362, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(362, 27, 362, 28): 'y', (362, 30, 362, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((370, 1, 370, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(370, 27, 370, 28): 'y', (370, 30, 370, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((378, 1, 378, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(378, 27, 378, 28): 'y', (378, 30, 378, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((386, 1, 386, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(386, 27, 386, 28): 'y', (386, 30, 386, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((394, 1, 394, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(394, 27, 394, 28): 'y', (394, 30, 394, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((402, 1, 402, 36), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(402, 27, 402, 28): 'y', (402, 30, 402, 35): 'y_hat'}, {}), '(y, y_hat)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((424, 1, 424, 38), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(424, 27, 424, 30): 'mu1', 
(424, 32, 424, 37): 'mu1_t'}, {}), '(mu1, mu1_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((425, 1, 425, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(425, 27, 425, 31): 'cov1', (425, 33, 425, 39): 'cov1_t'}, {}), '(cov1, cov1_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((426, 1, 426, 38), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(426, 27, 426, 30): 'mu2', (426, 32, 426, 37): 'mu2_t'}, {}), '(mu2, mu2_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((427, 1, 427, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(427, 27, 427, 31): 'cov2', (427, 33, 427, 39): 'cov2_t'}, {}), '(cov2, cov2_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((445, 1, 445, 38), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(445, 27, 445, 30): 'mu1', (445, 32, 445, 37): 'mu1_t'}, {}), '(mu1, mu1_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((446, 1, 446, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(446, 27, 446, 31): 'cov1', (446, 33, 446, 39): 'cov1_t'}, {}), '(cov1, cov1_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((447, 1, 447, 56), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(447, 27, 447, 41): 'd21.parameters', (447, 43, 447, 55): '[0.34188034]'}, {}), '(d21.parameters, [0.34188034])', False, 'from numpy.testing import assert_array_almost_equal\n'), ((448, 1, 448, 68), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(448, 27, 448, 41): 'd22.parameters', (448, 43, 448, 67): '[1.01294275, 0.22658346]'}, {}), '(d22.parameters, [1.01294275, 0.22658346])', False, 'from numpy.testing import assert_array_almost_equal\n'), ((449, 1, 449, 51), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(449, 27, 449, 41): 'd23.parameters', (449, 43, 449, 50): '[2.625]'}, {}), '(d23.parameters, [2.625])', False, 'from numpy.testing import assert_array_almost_equal\n'), ((470, 1, 470, 38), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(470, 27, 470, 30): 'mu1', (470, 32, 470, 37): 'mu1_t'}, {}), '(mu1, mu1_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((471, 1, 471, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(471, 27, 471, 31): 'cov1', (471, 33, 471, 39): 'cov1_t'}, {}), '(cov1, cov1_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((472, 1, 472, 38), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(472, 27, 472, 30): 'mu2', (472, 32, 472, 37): 'mu2_t'}, {}), '(mu2, mu2_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((473, 1, 473, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(473, 27, 473, 31): 'cov2', (473, 33, 473, 39): 'cov2_t'}, {}), '(cov2, cov2_t)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((483, 1, 483, 57), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(483, 27, 483, 40): 'model.weights', (483, 42, 483, 56): 'model2.weights'}, {}), '(model.weights, model2.weights)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((493, 1, 493, 57), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(493, 27, 493, 40): 'model.weights', (493, 42, 493, 56): 'model2.weights'}, {}), 
'(model.weights, model2.weights)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((503, 1, 503, 57), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(503, 27, 503, 40): 'model.weights', (503, 42, 503, 56): 'model2.weights'}, {}), '(model.weights, model2.weights)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((513, 1, 513, 57), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(513, 27, 513, 40): 'model.weights', (513, 42, 513, 56): 'model2.weights'}, {}), '(model.weights, model2.weights)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((523, 1, 523, 57), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(523, 27, 523, 40): 'model.weights', (523, 42, 523, 56): 'model2.weights'}, {}), '(model.weights, model2.weights)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((533, 1, 533, 57), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(533, 27, 533, 40): 'model.weights', (533, 42, 533, 56): 'model2.weights'}, {}), '(model.weights, model2.weights)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((558, 1, 558, 25), 'nose.tools.assert_equal', 'assert_equal', ({(558, 14, 558, 21): 'model.d', (558, 23, 558, 24): '(1)'}, {}), '(model.d, 1)', False, 'from nose.tools import assert_equal\n'), ((565, 1, 565, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(565, 21, 565, 31): 'logs[0][0]', (565, 33, 565, 53): '(-0.8909729238898652)'}, {}), '(logs[0][0], -0.8909729238898652)', False, 'from nose.tools import assert_almost_equal\n'), ((566, 1, 566, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(566, 21, 566, 31): 'logs[0][1]', (566, 33, 566, 52): '(-1.3609765531356006)'}, {}), '(logs[0][1], -1.3609765531356006)', False, 'from nose.tools import assert_almost_equal\n'), ((567, 1, 567, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(567, 21, 567, 31): 'logs[0][2]', (567, 33, 567, 52): '(-1.0986122886681096)'}, {}), '(logs[0][2], -1.0986122886681096)', False, 'from nose.tools import assert_almost_equal\n'), ((569, 1, 569, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(569, 21, 569, 31): 'logs[1][0]', (569, 33, 569, 53): '(-0.9357055312174429)'}, {}), '(logs[1][0], -0.9357055312174429)', False, 'from nose.tools import assert_almost_equal\n'), ((570, 1, 570, 52), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(570, 21, 570, 31): 'logs[1][1]', (570, 33, 570, 51): '(-1.429425687080494)'}, {}), '(logs[1][1], -1.429425687080494)', False, 'from nose.tools import assert_almost_equal\n'), ((571, 1, 571, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(571, 21, 571, 31): 'logs[1][2]', (571, 33, 571, 52): '(-0.9990078376167526)'}, {}), '(logs[1][2], -0.9990078376167526)', False, 'from nose.tools import assert_almost_equal\n'), ((573, 1, 573, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(573, 21, 573, 31): 'logs[2][0]', (573, 33, 573, 52): '(-3.9007882563128864)'}, {}), '(logs[2][0], -3.9007882563128864)', False, 'from nose.tools import assert_almost_equal\n'), ((574, 1, 574, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(574, 21, 574, 31): 'logs[2][1]', (574, 33, 574, 53): '(-0.23562532881626597)'}, {}), '(logs[2][1], -0.23562532881626597)', False, 'from nose.tools import assert_almost_equal\n'), ((575, 1, 575, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(575, 21, 575, 31): 
'logs[2][2]', (575, 33, 575, 52): '(-1.6623251045711958)'}, {}), '(logs[2][2], -1.6623251045711958)', False, 'from nose.tools import assert_almost_equal\n'), ((577, 1, 577, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(577, 21, 577, 31): 'logs[3][0]', (577, 33, 577, 52): '(-3.1703366478831185)'}, {}), '(logs[3][0], -3.1703366478831185)', False, 'from nose.tools import assert_almost_equal\n'), ((578, 1, 578, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(578, 21, 578, 31): 'logs[3][1]', (578, 33, 578, 53): '(-0.4926140321126038)'}, {}), '(logs[3][1], -0.4926140321126038)', False, 'from nose.tools import assert_almost_equal\n'), ((579, 1, 579, 52), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(579, 21, 579, 31): 'logs[3][2]', (579, 33, 579, 51): '(-1.058478108940049)'}, {}), '(logs[3][2], -1.058478108940049)', False, 'from nose.tools import assert_almost_equal\n'), ((581, 1, 581, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(581, 21, 581, 31): 'logs[4][0]', (581, 33, 581, 52): '(-1.3058441172130273)'}, {}), '(logs[4][0], -1.3058441172130273)', False, 'from nose.tools import assert_almost_equal\n'), ((582, 1, 582, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(582, 21, 582, 31): 'logs[4][1]', (582, 33, 582, 52): '(-1.4007102236822906)'}, {}), '(logs[4][1], -1.4007102236822906)', False, 'from nose.tools import assert_almost_equal\n'), ((583, 1, 583, 53), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(583, 21, 583, 31): 'logs[4][2]', (583, 33, 583, 52): '(-0.7284958836972919)'}, {}), '(logs[4][2], -0.7284958836972919)', False, 'from nose.tools import assert_almost_equal\n'), ((590, 1, 590, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(590, 21, 590, 32): 'probs[0][0]', (590, 34, 590, 53): '(0.41025641025641024)'}, {}), '(probs[0][0], 0.41025641025641024)', False, 'from nose.tools import assert_almost_equal\n'), ((591, 1, 591, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(591, 21, 591, 32): 'probs[0][1]', (591, 34, 591, 53): '(0.2564102564102564)'}, {}), '(probs[0][1], 0.2564102564102564)', False, 'from nose.tools import assert_almost_equal\n'), ((592, 1, 592, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(592, 21, 592, 32): 'probs[0][2]', (592, 34, 592, 53): '(0.3333333333333333)'}, {}), '(probs[0][2], 0.3333333333333333)', False, 'from nose.tools import assert_almost_equal\n'), ((594, 1, 594, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(594, 21, 594, 32): 'probs[1][0]', (594, 34, 594, 53): '(0.392308981634461)'}, {}), '(probs[1][0], 0.392308981634461)', False, 'from nose.tools import assert_almost_equal\n'), ((595, 1, 595, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(595, 21, 595, 32): 'probs[1][1]', (595, 34, 595, 53): '(0.23944639992337707)'}, {}), '(probs[1][1], 0.23944639992337707)', False, 'from nose.tools import assert_almost_equal\n'), ((596, 1, 596, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(596, 21, 596, 32): 'probs[1][2]', (596, 34, 596, 53): '(0.36824461844216183)'}, {}), '(probs[1][2], 0.36824461844216183)', False, 'from nose.tools import assert_almost_equal\n'), ((598, 1, 598, 55), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(598, 21, 598, 32): 'probs[2][0]', (598, 34, 598, 54): '(0.020225961918306088)'}, {}), '(probs[2][0], 0.020225961918306088)', False, 'from nose.tools import assert_almost_equal\n'), ((599, 1, 599, 54), 
'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(599, 21, 599, 32): 'probs[2][1]', (599, 34, 599, 53): '(0.790076637433831)'}, {}), '(probs[2][1], 0.790076637433831)', False, 'from nose.tools import assert_almost_equal\n'), ((600, 1, 600, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(600, 21, 600, 32): 'probs[2][2]', (600, 34, 600, 53): '(0.18969740064786292)'}, {}), '(probs[2][2], 0.18969740064786292)', False, 'from nose.tools import assert_almost_equal\n'), ((602, 1, 602, 55), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(602, 21, 602, 32): 'probs[3][0]', (602, 34, 602, 54): '(0.04198945986103252)'}, {}), '(probs[3][0], 0.04198945986103252)', False, 'from nose.tools import assert_almost_equal\n'), ((603, 1, 603, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(603, 21, 603, 32): 'probs[3][1]', (603, 34, 603, 53): '(0.6110270603826564)'}, {}), '(probs[3][1], 0.6110270603826564)', False, 'from nose.tools import assert_almost_equal\n'), ((604, 1, 604, 52), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(604, 21, 604, 32): 'probs[3][2]', (604, 34, 604, 51): '(0.346983479756311)'}, {}), '(probs[3][2], 0.346983479756311)', False, 'from nose.tools import assert_almost_equal\n'), ((606, 1, 606, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(606, 21, 606, 32): 'probs[4][0]', (606, 34, 606, 53): '(0.27094373022369794)'}, {}), '(probs[4][0], 0.27094373022369794)', False, 'from nose.tools import assert_almost_equal\n'), ((607, 1, 607, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(607, 21, 607, 32): 'probs[4][1]', (607, 34, 607, 53): '(0.24642188711704707)'}, {}), '(probs[4][1], 0.24642188711704707)', False, 'from nose.tools import assert_almost_equal\n'), ((608, 1, 608, 54), 'nose.tools.assert_almost_equal', 'assert_almost_equal', ({(608, 21, 608, 32): 'probs[4][2]', (608, 34, 608, 53): '(0.4826343826592551)'}, {}), '(probs[4][2], 0.4826343826592551)', False, 'from nose.tools import assert_almost_equal\n'), ((615, 1, 615, 29), 'nose.tools.assert_equal', 'assert_equal', ({(615, 14, 615, 25): 'predicts[0]', (615, 27, 615, 28): '(0)'}, {}), '(predicts[0], 0)', False, 'from nose.tools import assert_equal\n'), ((616, 1, 616, 29), 'nose.tools.assert_equal', 'assert_equal', ({(616, 14, 616, 25): 'predicts[1]', (616, 27, 616, 28): '(0)'}, {}), '(predicts[1], 0)', False, 'from nose.tools import assert_equal\n'), ((617, 1, 617, 29), 'nose.tools.assert_equal', 'assert_equal', ({(617, 14, 617, 25): 'predicts[2]', (617, 27, 617, 28): '(1)'}, {}), '(predicts[2], 1)', False, 'from nose.tools import assert_equal\n'), ((618, 1, 618, 29), 'nose.tools.assert_equal', 'assert_equal', ({(618, 14, 618, 25): 'predicts[3]', (618, 27, 618, 28): '(1)'}, {}), '(predicts[3], 1)', False, 'from nose.tools import assert_equal\n'), ((619, 1, 619, 29), 'nose.tools.assert_equal', 'assert_equal', ({(619, 14, 619, 25): 'predicts[4]', (619, 27, 619, 28): '(2)'}, {}), '(predicts[4], 2)', False, 'from nose.tools import assert_equal\n'), ((623, 6, 623, 22), 'pomegranate.io.DataGenerator', 'DataGenerator', ({(623, 20, 623, 21): 'X'}, {}), '(X)', False, 'from pomegranate.io import DataGenerator\n'), ((630, 1, 630, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(630, 27, 630, 32): 'logp1', (630, 34, 630, 39): 'logp2'}, {}), '(logp1, logp2)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((631, 1, 631, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(631, 
27, 631, 32): 'logp1', (631, 34, 631, 39): 'logp3'}, {}), '(logp1, logp3)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((635, 6, 635, 22), 'pomegranate.io.DataGenerator', 'DataGenerator', ({(635, 20, 635, 21): 'X'}, {}), '(X)', False, 'from pomegranate.io import DataGenerator\n'), ((642, 1, 642, 42), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(642, 27, 642, 33): 'y_hat1', (642, 35, 642, 41): 'y_hat2'}, {}), '(y_hat1, y_hat2)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((643, 1, 643, 42), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(643, 27, 643, 33): 'y_hat1', (643, 35, 643, 41): 'y_hat3'}, {}), '(y_hat1, y_hat3)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((647, 6, 647, 22), 'pomegranate.io.DataGenerator', 'DataGenerator', ({(647, 20, 647, 21): 'X'}, {}), '(X)', False, 'from pomegranate.io import DataGenerator\n'), ((654, 1, 654, 42), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(654, 27, 654, 33): 'y_hat1', (654, 35, 654, 41): 'y_hat2'}, {}), '(y_hat1, y_hat2)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((655, 1, 655, 42), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(655, 27, 655, 33): 'y_hat1', (655, 35, 655, 41): 'y_hat3'}, {}), '(y_hat1, y_hat3)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((659, 6, 659, 22), 'pomegranate.io.DataGenerator', 'DataGenerator', ({(659, 20, 659, 21): 'X'}, {}), '(X)', False, 'from pomegranate.io import DataGenerator\n'), ((666, 1, 666, 42), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(666, 27, 666, 33): 'y_hat1', (666, 35, 666, 41): 'y_hat2'}, {}), '(y_hat1, y_hat2)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((667, 1, 667, 42), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(667, 27, 667, 33): 'y_hat1', (667, 35, 667, 41): 'y_hat3'}, {}), '(y_hat1, y_hat3)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((673, 18, 673, 46), 'pomegranate.io.DataGenerator', 'DataGenerator', ({(673, 32, 673, 33): 'X', (673, 35, 673, 42): 'weights', (673, 44, 673, 45): 'y'}, {}), '(X, weights, y)', False, 'from pomegranate.io import DataGenerator\n'), ((692, 1, 692, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(692, 27, 692, 32): 'logp1', (692, 34, 692, 39): 'logp2'}, {}), '(logp1, logp2)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((698, 18, 698, 46), 'pomegranate.io.DataGenerator', 'DataGenerator', ({(698, 32, 698, 33): 'X', (698, 35, 698, 42): 'weights', (698, 44, 698, 45): 'y'}, {}), '(X, weights, y)', False, 'from pomegranate.io import DataGenerator\n'), ((708, 1, 708, 40), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', ({(708, 27, 708, 32): 'logp1', (708, 34, 708, 39): 'logp2'}, {}), '(logp1, logp2)', False, 'from numpy.testing import assert_array_almost_equal\n'), ((478, 23, 478, 42), 'pickle.dumps', 'pickle.dumps', ({(478, 36, 478, 41): 'model'}, {}), '(model)', False, 'import pickle\n'), ((488, 23, 488, 42), 'pickle.dumps', 'pickle.dumps', ({(488, 36, 488, 41): 'model'}, {}), '(model)', False, 'import pickle\n'), ((624, 25, 624, 44), 'pandas.DataFrame', 'pandas.DataFrame', ({(624, 42, 624, 43): 'X'}, {}), '(X)', False, 'import pandas\n'), ((636, 25, 636, 44), 'pandas.DataFrame', 'pandas.DataFrame', ({(636, 42, 636, 43): 'X'}, {}), '(X)', False, 'import 
pandas\n'), ((648, 25, 648, 44), 'pandas.DataFrame', 'pandas.DataFrame', ({(648, 42, 648, 43): 'X'}, {}), '(X)', False, 'import pandas\n'), ((660, 25, 660, 44), 'pandas.DataFrame', 'pandas.DataFrame', ({(660, 42, 660, 43): 'X'}, {}), '(X)', False, 'import pandas\n')] |
FilippoRanza/ks.py | ks_engine/variable_scoring.py | 47d909fb70fec50f8d3174855bf5d0c05527bf03 | #! /usr/bin/python
from .solution import Solution
try:
import gurobipy
except ImportError:
print("Gurobi not found: error ignored to allow tests")
def variable_score_factory(sol: Solution, base_kernel: dict, config: dict):
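    """Pick the scoring strategy from config: VariableRanking when the
    VARIABLE_RANKING option is set, ReducedCostScoring otherwise."""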
if config.get("VARIABLE_RANKING"):
output = VariableRanking(sol, base_kernel)
else:
output = ReducedCostScoring(sol, base_kernel)
return output
class AbstactVariableScoring:
def __init__(self, solution: Solution, base_kernel: dict):
self.score = {k: 0 if base_kernel[k] else v for k, v in solution.vars.items()}
def get_value(self, var_name):
return self.score[var_name]
def success_update_score(self, curr_kernel, curr_bucket):
raise NotImplementedError
def failure_update_score(self, curr_kernel, curr_bucket):
raise NotImplementedError
class ReducedCostScoring(AbstactVariableScoring):
def success_update_score(self, curr_kernel, curr_bucket):
pass
def failure_update_score(self, curr_kernel, curr_bucket):
pass
class VariableRanking(AbstactVariableScoring):
def cb_update_score(self, name, value):
if value == 0:
self.score[name] += 0.1
else:
self.score[name] -= 0.1
def success_update_score(self, curr_kernel, curr_bucket):
for var in curr_bucket:
if curr_kernel[var]:
self.score[var] -= 15
else:
self.score[var] += 15
def failure_update_score(self, curr_kernel, curr_bucket):
for var in curr_bucket:
if curr_kernel[var]:
self.score[var] += 1
else:
self.score[var] -= 1
def callback_factory(scoring: AbstactVariableScoring):
if isinstance(scoring, VariableRanking):
output = __build_callback__(scoring)
else:
output = None
return output
def __build_callback__(scoring):
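    """Build a Gurobi callback that, each time a new incumbent solution is
    found (MIPSOL), feeds the incumbent variable values into the scoring."""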
def callback(model, where):
if where == gurobipy.GRB.Callback.MIPSOL:
for var in model.getVars():
value = model.cbGetSolution(var)
scoring.cb_update_score(var.varName, value)
return callback
| [] |
quepop/fetchcode | src/fetchcode/vcs/pip/_internal/utils/entrypoints.py | ac2461bdf7a249d8815987b4d421dbc615c043b9 | import sys
from fetchcode.vcs.pip._internal.cli.main import main
from fetchcode.vcs.pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, List
def _wrapper(args=None):
# type: (Optional[List[str]]) -> int
"""Central wrapper for all old entrypoints.
Historically pip has had several entrypoints defined. Because of issues
arising from PATH, sys.path, multiple Pythons, their interactions, and most
of them having a pip installed, users suffer every time an entrypoint gets
moved.
To alleviate this pain, and provide a mechanism for warning users and
directing them to an appropriate place for help, we now define all of
our old entrypoints as wrappers for the current one.
"""
sys.stderr.write(
"WARNING: pip is being invoked by an old script wrapper. This will "
"fail in a future version of pip.\n"
"Please see https://github.com/pypa/pip/issues/5599 for advice on "
"fixing the underlying issue.\n"
"To avoid this problem you can invoke Python with '-m pip' instead of "
"running pip directly.\n"
)
return main(args)
| [((23, 4, 30, 5), 'sys.stderr.write', 'sys.stderr.write', ({(24, 8, 29, 33): '"""WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\nPlease see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\nTo avoid this problem you can invoke Python with \'-m pip\' instead of running pip directly.\n"""'}, {}), '(\n """WARNING: pip is being invoked by an old script wrapper. This will fail in a future version of pip.\nPlease see https://github.com/pypa/pip/issues/5599 for advice on fixing the underlying issue.\nTo avoid this problem you can invoke Python with \'-m pip\' instead of running pip directly.\n"""\n )', False, 'import sys\n'), ((31, 11, 31, 21), 'fetchcode.vcs.pip._internal.cli.main.main', 'main', ({(31, 16, 31, 20): 'args'}, {}), '(args)', False, 'from fetchcode.vcs.pip._internal.cli.main import main\n')] |
bvbohnen/x4-projects | Support/Make_Documentation.py | 2c9db75a720ddb52ddb9e4160c330d7bb1986aa3 | '''
Support for generating documentation readmes for the extensions.
Extracts from decorated lua block comments and xml comments.
'''
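# Illustrative sketch of the decorations this script scans for (hypothetical
# snippets, not taken from a real extension):
#   MD xml:  <!--@doc-title My Extension-->
#            <!--@doc-cue Fires when the player docks.--> placed immediately
#            before the <cue name="..."> element it documents.
#   Lua:     --[[ @doc-overview Describes the module. ]]
#            -- @doc-functions Lists the module's public functions.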
from pathlib import Path
from lxml import etree
import sys
from itertools import chain
project_dir = Path(__file__).resolve().parents[1]
# Set up an import from the customizer for some text processing.
x4_customizer_dir = str(project_dir.parent / 'X4_Customizer')
if x4_customizer_dir not in sys.path:
sys.path.append(x4_customizer_dir)
from Framework.Make_Documentation import Merge_Lines
#from Framework.Make_Documentation import Get_BB_Text
# Grab the project specifications.
from Release_Specs import release_specs
def Make():
for spec in release_specs:
# Update all of the content.xml files.
spec.Update_Content_Version()
# Make each of the doc files (if any).
# (Note: this function not included in the class methods to avoid
# import issues with the text helper functions below.)
for rel_path, file_list in spec.doc_specs.items():
# Set up the full path.
doc_path = spec.root_path / rel_path
# Get lines for all files.
doc_lines = []
for file_path in file_list:
if file_path.suffix == '.xml':
doc_lines += Get_XML_Cue_Text(file_path)
elif file_path.suffix == '.lua':
doc_lines += Get_Lua_Text(file_path)
with open(doc_path, 'w') as file:
file.write('\n'.join(doc_lines))
return
def Sections_To_Lines(doc_text_sections):
'''
Converts a dict of {section label: text} to a list of text lines,
with labelling and formatting applied.
Expects the input to start with a 'title', then 'overview', then
a series of names of cues or functions.
'''
# Transfer to annotated/indented lines.
functions_started = False
title = ''
ret_text_lines = []
for key, text in doc_text_sections:
# Extract the title and continue; this isn't printed directly.
if key == 'title':
title = text.strip()
continue
# Header gets an 'overview' label.
if key == 'overview':
ret_text_lines += ['', '### {} Overview'.format(title), '']
indent = ''
# Lua functions are in one lump, like overview.
elif key == 'functions':
ret_text_lines += ['', '### {} Functions'.format(title), '']
indent = ''
# Sections may be multiple.
elif key == 'section':
ret_text_lines += ['','']
indent = ''
# Otherwise these are md cues.
else:
indent = ' '
            # Stick a label line when starting the cue section.
if not functions_started:
functions_started = True
ret_text_lines += ['', '### {} Cues'.format(title), '']
# Bullet the function name.
ret_text_lines.append('* **{}**'.format(key))
# Process the text a bit.
text = Merge_Lines(text)
# Add indents to functions, and break into convenient lines.
text_lines = [indent + line for line in text.splitlines()]
# Record for output.
ret_text_lines += text_lines
return ret_text_lines
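# Rough input/output sketch for the function above (hypothetical values, and
# assuming Merge_Lines returns short strings unchanged):
#   Sections_To_Lines([('title', 'My Ext'),
#                      ('overview', 'Does things.'),
#                      ('My_Cue', 'Fires on X.')])
#   -> ['', '### My Ext Overview', '', 'Does things.',
#       '', '### My Ext Cues', '', '* **My_Cue**', '  Fires on X.']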
def Get_XML_Cue_Text(xml_path):
'''
Returns a list of lines holding the documentation extracted
from a decorated MD xml file.
'''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Read the xml and pick out the cues.
tree = etree.parse(str(xml_path))
root = tree.xpath('/*')[0]
cues = tree.xpath('/*/cues')[0]
# Stride through comments/cues in the list.
# Looking for decorated comments.
for node in chain(root.iterchildren(), cues.iterchildren()):
# Skip non-comments.
# Kinda awkward how lxml checks this (isinstance doesn't work).
if node.tag is not etree.Comment:
continue
# Handle title declarations.
if '@doc-title' in node.text:
label = 'title'
text = node.text.replace('@doc-title','')
elif '@doc-overview' in node.text:
label = 'overview'
text = node.text.replace('@doc-overview','')
elif '@doc-section' in node.text:
label = 'section'
text = node.text.replace('@doc-section','')
elif '@doc-cue' in node.text:
label = node.getnext().get('name')
text = node.text.replace('@doc-cue','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
def Get_Lua_Text(lua_path):
'''
Extract documentation text from a decorated lua file.
'''
text = lua_path.read_text()
ret_text_lines = []
# Extract non-indented comments.
# TODO: maybe regex this.
comment_blocks = []
lua_lines = text.splitlines()
i = 0
while i < len(lua_lines):
this_line = lua_lines[i]
if this_line.startswith('--[['):
# Scan until the closing ]].
these_lines = []
# Record the first line.
these_lines.append(this_line.replace('--[[',''))
i += 1
# Only search to the end of the doc.
while i < len(lua_lines):
next_line = lua_lines[i]
if next_line.startswith(']]'):
# Found the last line; skip it.
break
these_lines.append(next_line)
i += 1
comment_blocks.append('\n'.join(these_lines))
# Check single-line comments after block comments, to avoid
# -- confusion.
elif this_line.startswith('--'):
comment_blocks.append(this_line.replace('--',''))
# Always one increment per loop.
i += 1
# Title to put on label lines.
# Starts blank, filled by decorator.
title = ''
# List of tuples of (label, text) hold the extracted text lines.
doc_text_sections = []
# Go through the comments looking for decorators.
for comment in comment_blocks:
# Handle title declarations.
if '@doc-title' in comment:
label = 'title'
text = comment.replace('@doc-title','')
# Text blocks are either overview or cue.
elif '@doc-overview' in comment:
label = 'overview'
text = comment.replace('@doc-overview','')
# For now, all functions are lumped together in one comment.
elif '@doc-functions' in comment:
label = 'functions'
text = comment.replace('@doc-functions','')
else:
# Unwanted comment; skip.
continue
# Record it.
doc_text_sections.append((label, text))
# Process into lines and return.
return Sections_To_Lines(doc_text_sections)
#-Removed; generally avoiding putting main docs on the forum.
#def Make_BB_Code(doc_dir, header_lines = []):
# '''
# Turn the ext_dir's readme into a bbcode txt file.
# Output is placed in the release folder.
# '''
# release_dir = project_dir / 'Release'
# if not release_dir.exists():
# release_dir.mkdir()
#
# # Grab the readme contents.
# doc_lines = (doc_dir / 'Readme.md').read_text().splitlines()
# # Generate a bbcode version, prefixing with custom header.
# bb_lines = header_lines + Get_BB_Text(doc_lines)
# (release_dir / (doc_dir.name + '_bb_readme.txt')).write_text('\n'.join(bb_lines))
# return
if __name__ == '__main__':
Make()
| [((16, 4, 16, 38), 'sys.path.append', 'sys.path.append', ({(16, 20, 16, 37): 'x4_customizer_dir'}, {}), '(x4_customizer_dir)', False, 'import sys\n'), ((94, 15, 94, 32), 'Framework.Make_Documentation.Merge_Lines', 'Merge_Lines', ({(94, 27, 94, 31): 'text'}, {}), '(text)', False, 'from Framework.Make_Documentation import Merge_Lines\n'), ((11, 14, 11, 28), 'pathlib.Path', 'Path', ({(11, 19, 11, 27): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n')] |
alex-dsouza777/Python-Basics | Chapter 2 - Variables & Data Types/05_pr_set_add_two_no.py | 8f1c406f2319cd65b5d54dfea990d09fa69d9adf | #Addition of two numbers
a = 30
b = 17
print("Sum of a and b is",a + b) | [] |
andersonssh/aprendendo-pyqt5 | curso 1/04 - caixa de texto/a4.py | d15ad7378d4573410c11fc39042df19048c656e4 | import sys
from PyQt5.QtWidgets import (QApplication,
QMainWindow,
QPushButton,
QToolTip,
QLabel,
QLineEdit)
from PyQt5 import QtGui
class Janela(QMainWindow):
def __init__(self):
super().__init__()
self.topo = 50
self.esquerda = 50
self.largura = 800
self.altura = 600
self.titulo = 'Primeira janela'
self.gera_labels()
self.gera_botoes()
self.gera_imagens()
self.gera_caixas_de_texto()
def carregar_janela(self):
self.setGeometry(self.esquerda, self.topo, self.largura, self.altura)
self.setWindowTitle(self.titulo)
self.show()
def gera_botoes(self):
        # buttons
botao1 = QPushButton('Botao 1', self)
botao1.move(100, 100)
botao1.resize(100, 50)
botao1.setStyleSheet(
'QPushButton{background-color: white; color: black;} QPushButton:hover{ background: orange; font-weight: 600;}')
botao1.clicked.connect(self.b1)
botao2 = QPushButton('Botao 2', self)
botao2.move(300, 100)
botao2.resize(100, 50)
botao2.setStyleSheet(
'QPushButton{background-color: blue; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')
botao2.clicked.connect(self.b2)
botao3 = QPushButton('Texto', self)
botao3.move(500, 100)
botao3.resize(100, 50)
botao3.setStyleSheet('QPushButton{background-color: black; color: white;} QPushButton:hover{ background: orange; font-weight: 600}')
botao3.clicked.connect(self.b3)
def gera_labels(self):
self.l1 = QLabel(self)
self.l1.setText('Clique em um botao')
self.l1.move(50, 50)
self.l1.setStyleSheet('QLabel{font: bold; font-size: 20px;}')
self.l1.resize(250, 50)
self.l2 = QLabel(self)
self.l2.setText('Digitou: ')
self.l2.move(300, 30)
self.l2.resize(260, 50)
self.l2.setStyleSheet('QLabel{font: bold; font-size: 30px;}')
def gera_imagens(self):
self.carro = QLabel(self)
self.carro.move(25, 200)
self.carro.resize(450, 337)
self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))
def gera_caixas_de_texto(self):
self.caixa_texto = QLineEdit(self)
self.caixa_texto.move(25, 10)
self.caixa_texto.resize(150, 50)
def b1(self):
        # option 1: show the first image
self.carro.setPixmap(QtGui.QPixmap('carro.jpg'))
def b2(self, l):
        # option 2: show the second image
self.carro.setPixmap(QtGui.QPixmap('carro2.jpg'))
def b3(self):
conteudo = self.caixa_texto.text()
self.l2.setText('Digitou: {}'.format(conteudo))
if __name__ == '__main__':
app = QApplication(sys.argv)
janela = Janela()
janela.carregar_janela()
sys.exit(app.exec_()) | [((91, 10, 91, 32), 'PyQt5.QtWidgets.QApplication', 'QApplication', ({(91, 23, 91, 31): 'sys.argv'}, {}), '(sys.argv)', False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((32, 17, 32, 45), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ({(32, 29, 32, 38): '"""Botao 1"""', (32, 40, 32, 44): 'self'}, {}), "('Botao 1', self)", False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((39, 17, 39, 45), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ({(39, 29, 39, 38): '"""Botao 2"""', (39, 40, 39, 44): 'self'}, {}), "('Botao 2', self)", False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((46, 17, 46, 43), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ({(46, 29, 46, 36): '"""Texto"""', (46, 38, 46, 42): 'self'}, {}), "('Texto', self)", False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((53, 18, 53, 30), 'PyQt5.QtWidgets.QLabel', 'QLabel', ({(53, 25, 53, 29): 'self'}, {}), '(self)', False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((59, 18, 59, 30), 'PyQt5.QtWidgets.QLabel', 'QLabel', ({(59, 25, 59, 29): 'self'}, {}), '(self)', False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((66, 21, 66, 33), 'PyQt5.QtWidgets.QLabel', 'QLabel', ({(66, 28, 66, 32): 'self'}, {}), '(self)', False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((72, 27, 72, 42), 'PyQt5.QtWidgets.QLineEdit', 'QLineEdit', ({(72, 37, 72, 41): 'self'}, {}), '(self)', False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QToolTip, QLabel, QLineEdit\n'), ((69, 29, 69, 55), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', ({(69, 43, 69, 54): '"""carro.jpg"""'}, {}), "('carro.jpg')", False, 'from PyQt5 import QtGui\n'), ((79, 29, 79, 55), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', ({(79, 43, 79, 54): '"""carro.jpg"""'}, {}), "('carro.jpg')", False, 'from PyQt5 import QtGui\n'), ((84, 29, 84, 56), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', ({(84, 43, 84, 55): '"""carro2.jpg"""'}, {}), "('carro2.jpg')", False, 'from PyQt5 import QtGui\n')] |
codeunik/stylus_labs_write_pdf_importer | pdf2write.py | 25d7aa037647a86284c24527bda7b222cf95bb62 | import base64
import os
import sys
import PyPDF2
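# Renders each PDF page to a PNG with pdftoppm, embeds the images base64-encoded
# into a Stylus Labs Write style SVG document, then gzips it (producing .svgz).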
svg = '''<svg id="write-document" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<rect id="write-doc-background" width="100%" height="100%" fill="#808080"/>
<defs id="write-defs">
<script type="text/writeconfig">
<int name="docFormatVersion" value="2" />
<int name="pageColor" value="-1" />
<int name="pageNum" value="0" />
<int name="ruleColor" value="0" />
<float name="marginLeft" value="0" />
<float name="xOffset" value="-380.701752" />
<float name="xRuling" value="0" />
<float name="yOffset" value="1536.84216" />
<float name="yRuling" value="0" />
</script>
</defs>
'''
pdf_path = sys.argv[1]
pdf = PyPDF2.PdfFileReader(pdf_path, "rb")
img_width = 720
n_pages = pdf.getNumPages()
page = pdf.getPage(0)
width = page.mediaBox.getWidth()
height = page.mediaBox.getHeight()
aspect_ratio = height/width
img_height = int(aspect_ratio * img_width)
os.system('mkdir -p /tmp/pdf2write')
new_page_height = 0
for page in range(1, n_pages + 1):  # pdftoppm page numbering is 1-based
print(f"Processing {page}/{n_pages}", end='\r')
os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')
with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f:
base64_data = base64.b64encode(f.read()).decode('utf-8')
tmp_svg = f'''<svg class="write-page" color-interpolation="linearRGB" x="10" y="{new_page_height+10}" width="{img_width}px" height="{img_height}px" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g class="write-content write-v3" width="{img_width}" height="{img_height}" xruling="0" yruling="0" marginLeft="0" papercolor="#FFFFFF" rulecolor="#00000000">
<g class="ruleline write-std-ruling write-scale-down" fill="none" stroke="none" stroke-width="1" shape-rendering="crispEdges" vector-effect="non-scaling-stroke">
<rect class="pagerect" fill="#FFFFFF" stroke="none" x="0" y="0" width="{img_width}" height="{img_height}" />
</g>
<image x="0" y="0" width="{img_width}" height="{img_height}" xlink:href="data:image/png;base64,{base64_data}"/>
</g>
</svg>'''
new_page_height += (img_height+10)
svg += tmp_svg
svg += '''</svg>'''
os.system('rm -rf /tmp/pdf2write')
with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg', 'w') as f:
f.write(svg)
os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg')
| [((24, 6, 24, 42), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', ({(24, 27, 24, 35): 'pdf_path', (24, 37, 24, 41): '"""rb"""'}, {}), "(pdf_path, 'rb')", False, 'import PyPDF2\n'), ((34, 0, 34, 36), 'os.system', 'os.system', ({(34, 10, 34, 35): '"""mkdir -p /tmp/pdf2write"""'}, {}), "('mkdir -p /tmp/pdf2write')", False, 'import os\n'), ((59, 0, 59, 34), 'os.system', 'os.system', ({(59, 10, 59, 33): '"""rm -rf /tmp/pdf2write"""'}, {}), "('rm -rf /tmp/pdf2write')", False, 'import os\n'), ((42, 4, 42, 89), 'os.system', 'os.system', ({(42, 14, 42, 88): 'f"""pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile"""'}, {}), "(\n f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')", False, 'import os\n'), ((64, 23, 64, 48), 'os.path.dirname', 'os.path.dirname', ({(64, 39, 64, 47): 'pdf_path'}, {}), '(pdf_path)', False, 'import os\n'), ((61, 13, 61, 38), 'os.path.dirname', 'os.path.dirname', ({(61, 29, 61, 37): 'pdf_path'}, {}), '(pdf_path)', False, 'import os\n'), ((64, 51, 64, 77), 'os.path.basename', 'os.path.basename', ({(64, 68, 64, 76): 'pdf_path'}, {}), '(pdf_path)', False, 'import os\n'), ((61, 41, 61, 67), 'os.path.basename', 'os.path.basename', ({(61, 58, 61, 66): 'pdf_path'}, {}), '(pdf_path)', False, 'import os\n')] |
hq9000/py-headless-daw | py_headless_daw/project/having_parameters.py | 33e08727c25d3f00b2556adf5f25c9f7ff4d4304 | from typing import Dict, List, cast
from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType
class HavingParameters:
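    """Base class for objects that expose a collection of named Parameters,
    with typed accessors for float and enum parameter values."""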
def __init__(self):
self._parameters: Dict[str, Parameter] = {}
super().__init__()
def has_parameter(self, name: str) -> bool:
return name in self._parameters
def add_parameter(self,
name: str,
value: ParameterValueType,
param_type: str,
value_range: ParameterRangeType):
if name in self._parameters:
raise Exception('parameter named ' + name + ' already added to this object')
parameter = Parameter(name, value, param_type, value_range)
self._parameters[name] = parameter
def add_parameter_object(self, parameter: Parameter) -> None:
self._parameters[parameter.name] = parameter
def get_parameter(self, name: str) -> Parameter:
for parameter in self.parameters:
if parameter.name == name:
return parameter
list_of_names: List[str] = [p.name for p in self.parameters]
# noinspection PyTypeChecker
available_names: List[str] = cast(List[str], list_of_names)
raise Exception('parameter named ' + name + ' not found. Available: ' + ', '.join(available_names))
def get_parameter_value(self, name: str) -> ParameterValueType:
param = self.get_parameter(name)
return param.value
def get_float_parameter_value(self, name: str) -> float:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_FLOAT:
raise ValueError(f"parameter {name} was expected to be float (error: f009d0ef)")
value = self.get_parameter_value(name)
cast_value = cast(float, value)
return cast_value
def get_enum_parameter_value(self, name: str) -> str:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_ENUM:
raise ValueError(f"parameter {name} was expected to be enum (error: 80a1d180)")
value = self.get_parameter_value(name)
cast_value = cast(str, value)
return cast_value
def set_parameter_value(self, name: str, value: ParameterValueType):
param = self.get_parameter(name)
param.value = value
@property
def parameters(self) -> List[Parameter]:
return list(self._parameters.values())
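if __name__ == '__main__':
    # Minimal usage sketch (illustration only, not part of the original module).
    # It assumes a (min, max) float tuple is an acceptable ParameterRangeType.
    host = HavingParameters()
    host.add_parameter('cutoff', 0.5, Parameter.TYPE_FLOAT, (0.0, 1.0))
    host.set_parameter_value('cutoff', 0.7)
    assert host.has_parameter('cutoff')
    assert host.get_parameter_value('cutoff') == 0.7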
| [((24, 20, 24, 67), 'py_headless_daw.project.parameter.Parameter', 'Parameter', ({(24, 30, 24, 34): 'name', (24, 36, 24, 41): 'value', (24, 43, 24, 53): 'param_type', (24, 55, 24, 66): 'value_range'}, {}), '(name, value, param_type, value_range)', False, 'from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType\n'), ((38, 37, 38, 67), 'typing.cast', 'cast', ({(38, 42, 38, 51): 'List[str]', (38, 53, 38, 66): 'list_of_names'}, {}), '(List[str], list_of_names)', False, 'from typing import Dict, List, cast\n'), ((51, 21, 51, 39), 'typing.cast', 'cast', ({(51, 26, 51, 31): 'float', (51, 33, 51, 38): 'value'}, {}), '(float, value)', False, 'from typing import Dict, List, cast\n'), ((59, 21, 59, 37), 'typing.cast', 'cast', ({(59, 26, 59, 29): 'str', (59, 31, 59, 36): 'value'}, {}), '(str, value)', False, 'from typing import Dict, List, cast\n')] |
adiravishankara/Wasatch.PY | wasatch/ROI.py | 058b3de2c9399e9aea6347fa360f9c7dbbf296aa | ##
# This class encapsulates a Region Of Interest, which may be either horizontal
# (pixels) or vertical (rows/lines).
class ROI:
def __init__(self, start, end):
self.start = start
self.end = end
self.len = end - start + 1
def valid(self):
return self.start >= 0 and self.start < self.end
def crop(self, spectrum):
return spectrum[self.start:self.end+1]
def contains(self, value):
return self.start <= value <= self.end
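if __name__ == "__main__":
    # Minimal usage sketch (illustration only, not part of the original module).
    spectrum = list(range(100))        # stand-in for real spectral data
    roi = ROI(10, 20)                  # pixels 10..20 inclusive
    assert roi.valid() and roi.len == 11
    assert roi.crop(spectrum) == list(range(10, 21))
    assert roi.contains(15) and not roi.contains(50)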
| [] |
whpenner/upm | examples/python/oled_ssd1327.py | 3168c61d8613da62ecc7598517a1decf533d5fe7 | #!/usr/bin/python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Load i2clcd display module
import time, signal, sys
import pyupm_i2clcd as upmLCD
myLCD = upmLCD.SSD1327(0, 0x3C);
logoArr = [0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x08, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xC0, 0x06, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x01, 0xC0, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x01, 0xC0, 0x07, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x03, 0x80, 0x03, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0x80, 0x03, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x07, 0x80, 0x03, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x07, 0x80, 0x01, 0xC0,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20,
0x07, 0x80, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x20, 0x0F, 0x80, 0x01, 0xE0,
0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x0F, 0x00, 0x01, 0xE0, 0x08, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x30, 0x0F, 0x00, 0x01, 0xE0,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x0F, 0x00, 0x01, 0xE0, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x38, 0x0F, 0x00, 0x01, 0xE0,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x0F, 0x80, 0x01, 0xE0, 0x38, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3C, 0x0F, 0x80, 0x01, 0xE0,
0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3E,
0x0F, 0x80, 0x03, 0xE0, 0x78, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1E, 0x07, 0x80, 0x03, 0xE0,
0xF8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1E,
0x07, 0x80, 0x03, 0xE0, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1F, 0x07, 0x80, 0x03, 0xC1,
0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,
0x87, 0xC0, 0x07, 0xC1, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0F, 0x83, 0xC0, 0x07, 0x83,
0xE0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F,
0xC3, 0xC0, 0x07, 0x87, 0xE0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x07, 0xE1, 0xE0, 0x07, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xF0, 0xE0, 0x0F, 0x0F, 0x80, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x01, 0xF8, 0xF0, 0x0E, 0x1F,
0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xF8, 0x70, 0x1C, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xFC, 0x30, 0x18, 0x7E,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7F, 0x18, 0x30, 0xFC, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x1F, 0x88, 0x21, 0xF0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0F, 0xC4, 0x47, 0xE0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x03, 0xE0, 0x0F, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xF8, 0x3E, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0E, 0xE0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00,
0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x06, 0x00, 0x00, 0x6C, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x06,
0x00, 0x00, 0x60, 0x00, 0x7E, 0x3F, 0x0F, 0xC3,
0xF0, 0xFA, 0x0F, 0xDF, 0xE1, 0x9F, 0xEC, 0x7E,
0xE6, 0x73, 0x9C, 0xE7, 0x39, 0xCE, 0x1C, 0xDF,
0xE1, 0xB9, 0xEC, 0xE7, 0xE0, 0x61, 0xD8, 0x66,
0x1B, 0x86, 0x1C, 0x06, 0x61, 0xB0, 0x6D, 0xC3,
0x7C, 0x7F, 0xFF, 0xFF, 0xFF, 0x06, 0x0F, 0x86,
0x61, 0xB0, 0x6D, 0x83, 0x3E, 0x7F, 0xFF, 0xFF,
0xFF, 0x06, 0x07, 0xC6, 0x61, 0xB0, 0x6D, 0x83,
0xC3, 0x61, 0x18, 0x46, 0x03, 0x86, 0x18, 0x66,
0x61, 0xB0, 0x6D, 0xC3, 0xFE, 0x7F, 0x9F, 0xE7,
0xF9, 0xFE, 0x1F, 0xE6, 0x3F, 0x9F, 0xEC, 0xFE,
0x7E, 0x3F, 0x0F, 0xC3, 0xF0, 0xFA, 0x0F, 0xC6,
0x3F, 0x9F, 0xEC, 0x7E, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7C, 0x00,
0x00, 0x20, 0x82, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x44, 0x00, 0x00, 0x20, 0x82, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6C, 0xF3,
0xCF, 0x70, 0x9E, 0x79, 0xE7, 0x80, 0x00, 0x00,
0x00, 0x00, 0x7D, 0x9E, 0x68, 0x20, 0xB2, 0xC8,
0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x9E,
0x6F, 0x20, 0xB2, 0xF9, 0xE7, 0x80, 0x00, 0x00,
0x00, 0x00, 0x46, 0x9A, 0x61, 0x20, 0xB2, 0xCB,
0x60, 0x80, 0x00, 0x00, 0x00, 0x00, 0x7C, 0xF3,
0xCF, 0x30, 0x9E, 0x79, 0xE7, 0x90, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7C, 0x02, 0x00, 0x00, 0x82, 0x60, 0x00, 0x00,
0xF8, 0x00, 0x00, 0x40, 0x40, 0x02, 0x00, 0x00,
0x83, 0x60, 0x00, 0x00, 0x8C, 0x00, 0x00, 0x40,
0x60, 0xB7, 0x79, 0xE7, 0x81, 0xC7, 0x92, 0x70,
0x89, 0xE7, 0x9E, 0x78, 0x7C, 0xE2, 0xC9, 0x2C,
0x81, 0xCC, 0xD2, 0x40, 0xFB, 0x21, 0xB2, 0x48,
0x40, 0x62, 0xF9, 0x2C, 0x80, 0x8C, 0xD2, 0x40,
0x8B, 0xE7, 0xB0, 0x48, 0x40, 0xE2, 0xC9, 0x2C,
0x80, 0x84, 0xD2, 0x40, 0x8B, 0x2D, 0x92, 0x48,
0x7D, 0xB3, 0x79, 0x27, 0x80, 0x87, 0x9E, 0x40,
0x8D, 0xE7, 0x9E, 0x48, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
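# Copy the monochrome logo bitmap (96x96 pixels, 1 bit per pixel) into a UPM
# byte array so it can be passed to SSD1327.draw() below.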
SeeedLogo = upmLCD.uint8Array(len(logoArr))
for x in range(len(logoArr)):
SeeedLogo.__setitem__(x, logoArr[x])
# If you don't set the display to be white, the seeed logo will appear jagged
myLCD.setGrayLevel(12)
myLCD.draw(SeeedLogo, 96 * 96 / 8);
for i in range(12):
myLCD.setCursor(i, 0)
myLCD.setGrayLevel(i)
myLCD.write('Hello World')
print "Exiting"
| [] |
juansdev/digital_image_processing | digital_image_processing/algorithms/edge_detection_algorithms/threshold/adaptive_thresholding_methods/__init__.py | a0fe429c0664d81063dc76502a3e4874eea901a7 | from .bernsen import bernsen_thresholding_method
from .bradley_roth import bradley_thresholding_method
from .contrast import contrast_thresholding_method
from .feng import feng_thresholding_method
from .gaussian import threshold_value_gaussian
from .johannsen import johannsen_thresholding_method
from .kapur import kapur_thresholding_method
from .mean import threshold_value_mean
from .minimum_error import minimum_err_thresholding_method
from .niblack import niblack_thresholding_method
from .nick import nick_thresholding_method
from .otsu import otsu_thresholding_method
from .p_tile import p_tile_thresholding_method
from .pun import pun_thresholding_method
from .rosin import rosin_thresholding_method
from .sauvola import sauvola_thresholding_method
from .singh import singh_thresholding_method
from .two_peaks import two_peaks_thresholding_method
from .wolf import wolf_thresholding_method
| [] |
harshp8l/deep-learning-lang-detection | data/train/python/be1d04203f18e6f16b60a723e614122b48a08671celeryconfig.py | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | import os
from kombu import Queue, Exchange
## Broker settings.
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:guest@localhost:5672')
#BROKER_URL = "amqp://guest:guest@localhost:5672/"
#BROKER_URL = os.getenv('BROKER_URL', 'redis://guest@localhost:6379')
#BROKER_HOST = "localhost"
#BROKER_PORT = 27017
#BROKER_TRANSPORT = 'mongodb'
#BROKER_VHOST = 'celery'
CELERY_DEFAULT_QUEUE = 'default'
CELERY_QUEUES = (
Queue('default', exchange=Exchange('default'), routing_key='default'),
# Queue('aws_uploads', routing_key='video.uploads'),
)
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
CELERY_IMPORTS = ('celeryservice.tasks',)
#CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')
## Using the database to store task state and results.
#CELERY_RESULT_BACKEND = "mongodb"
#CELERY_MONGODB_BACKEND_SETTINGS = {
# "host": "localhost",
# "port": 27017,
# "database": "celery",
# "taskmeta_collection": "celery_taskmeta",
#}
| [((5, 13, 5, 73), 'os.getenv', 'os.getenv', ({(5, 23, 5, 35): '"""BROKER_URL"""', (5, 37, 5, 72): '"""amqp://guest:guest@localhost:5672"""'}, {}), "('BROKER_URL', 'amqp://guest:guest@localhost:5672')", False, 'import os\n'), ((27, 24, 27, 66), 'os.getenv', 'os.getenv', ({(27, 34, 27, 57): '"""CELERY_RESULT_BACKEND"""', (27, 59, 27, 65): '"""amqp"""'}, {}), "('CELERY_RESULT_BACKEND', 'amqp')", False, 'import os\n'), ((17, 30, 17, 49), 'kombu.Exchange', 'Exchange', ({(17, 39, 17, 48): '"""default"""'}, {}), "('default')", False, 'from kombu import Queue, Exchange\n')] |
dgollub/timesheet-google-thingy | timesheet.py | 3ffab402444dba520ff3416b2327f6d2ceeeac39 | # -*- coding: utf-8 -*-
#
#
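# Pull a timesheet from a Google spreadsheet (via gsheets) and turn it into a
# daily report e-mail body, monthly hour statistics, or a per-task CSV export.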
from __future__ import print_function
import csv
import os
import re
import sys
import arrow
from gsheets import Sheets
CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
DEBUG = os.environ.get('DEBUG', "0") == "1"
AS_CSV = os.environ.get('CSV', "0") == "1"
COL_DATE = 0
COL_WEEKDAY = 1
COL_TIME_START = 2
COL_TIME_END = 3
COL_LUNCH = 4
COL_TIME = 5 # includes lunch
COL_TIME_FIXED = 6 # does not include lunch
COL_MOVE = 7
COL_WORK_FROM_HOME = 8
COL_NOTES = 9
COL_TASKS_START = 10
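# Values that may appear in the time columns instead of a clock time to mark a
# special day (sick, absent, day off, work from home, holiday).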
SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"]
SATURDAY = 5
SUNDAY = 6
def calc(hour, half_it=False, split_char = ":"):
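    # Split an "H:MM"-style value (or "H.M" when split_char is ".") into integer
    # hours and minutes; when half_it is set, halve both parts (half-day entries).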
parts = str(hour).split(split_char)
try:
local_hours = int(parts[0])
local_minutes = int(parts[1])
if half_it:
            local_hours = local_hours // 2
            local_minutes = local_minutes // 2
return local_hours, local_minutes
except:
if len(parts) == 1:
try:
return int(parts[0]), 0
except:
return 0, 0
def get_client_secret_filenames():
filename = os.path.join(CURRENT_PATH, "client-secrets.json")
cachefile = os.path.join(CURRENT_PATH, "client-secrets-cache.json")
if not os.path.exists(filename):
filename = os.path.expanduser(os.path.join("~", "client-secrets.json"))
cachefile = os.path.expanduser(os.path.join("~", "client-secrets-cache.json"))
if not os.path.exists(filename):
raise Exception("Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart")
return filename, cachefile
def load_first_sheet_rows(api, timesheet_url, date=None):
    # Resolve the default at call time; an eager default argument would be
    # frozen at import time.
    if date is None:
        date = arrow.now().format('YYYYMMDD')
    print("Opening timesheet for %s ..." % (date))
sheets = api.get(timesheet_url)
sheet = sheets.sheets[0]
print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..." % (sheets.title or "???", sheet.title or "???"))
rows = sheet.values()
return rows
def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):
now = arrow.now()
today = now.format('YYYYMMDD')
try:
other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')
except arrow.parser.ParserError:
other_date = today
use_date = other_date
rows = load_first_sheet_rows(api, timesheet_url, use_date)
timesheet = get_timesheet_for_date(rows, use_date, user_full_name)
if timesheet:
print("\n\n")
print("Timesheet for %s" % (use_date))
print(timesheet)
print("\n")
else:
print("No entry found for %s" % use_date)
def get_timesheet_for_date(rows, date, user_full_name):
# find the row with the first column that has today's date in it
result_rows = [row for row in rows if row and str(row[COL_DATE]) == date]
if result_rows is None or not result_rows:
return None
if len(result_rows) != 1:
print("More than one entry (%d) found for date %s! Please fix your sheet!" % (len(result_rows), date))
return None
found_row = result_rows[0]
found_index = rows.index(found_row)
start_val = found_row[COL_TIME_START]
end_val = found_row[COL_TIME_END]
duration_val = found_row[COL_TIME_FIXED]
max_cols = len(found_row)
if not start_val:
if start_val in SPECIAL_VALUES:
print("You forgot to add your start time.")
return None
if not end_val:
if end_val in SPECIAL_VALUES:
print("You forgot to add your end time.")
return None
#if max_cols >= COL_NOTES:
# print("No notes/tasks entered yet.")
# return None
def parse_hours(val):
try:
return arrow.get(val, "HH:mm")
except arrow.parser.ParserError:
return arrow.get(val, "H:mm")
start = parse_hours(start_val).format("HH:mm")
end = parse_hours(end_val).format("HH:mm")
duration = str(duration_val)
notes_str = found_row[COL_NOTES]
notes = notes_str.split('\n')
# check the previous Friday entry (if today is not Friday), to see what work from home
# days were were selected
weekday = (found_row[COL_WEEKDAY] or "").lower()
check_start_index = found_index if weekday.startswith("fr") else found_index - 7
check_row = found_row
while (check_start_index < found_index):
check_row = rows[check_start_index]
if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or "").lower().startswith("fr"):
break
check_start_index += 1
is_same_day = None
if check_start_index != found_index:
# print("HA! GOT PREVS FRIDAY.")
is_same_day = False
else:
# print("SAME DAY")
is_same_day = True
wfh = u"" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME]
wfh = wfh.replace("Mon", "Monday")
wfh = wfh.replace("Tue", "Tuesday")
wfh = wfh.replace("Wed", "Wednesday")
wfh = wfh.replace("Thu", "Thursday")
wfh = wfh.replace("Fri", "Friday")
wfh = wfh.replace(", ", ",").replace(",", " and ")
wfh_extra = "Next week" if is_same_day else "This week"
wfh_info = """%s %s""" % (wfh_extra, wfh) if wfh != "" else "all days"
# 2021-01-04 just make this the default for now
wfh_info = "at all times, unless mentioned otherwise below"
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
total_time_minutes_from_tasks = 0
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = found_row[idx].strip()
if task:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, task_duration] = g
hours, half_hours = calc(task_duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
total_time_minutes_from_tasks += minutes
other_lines = task.split('\n')[1:]
tasks.append("%s %s\n%s" % (task_number.strip(), task_details[:-2].strip(), '\n'.join(other_lines)))
def format_tasks(tasks):
if not tasks:
return ''
result = 'Tasks:\n'
for task in tasks:
if '\n' in task:
sub_tasks = task.split('\n')
if len(sub_tasks) > 1:
result += '\n* ' + sub_tasks[0] # main task
for sub_task in sub_tasks[1:]: # actual sub tasks
result += '\n\t' + sub_task
result += '\n'
else:
result += '\n* ' + task
else:
result += '\n* ' + task
return result
def format_notes(notes):
if not notes or (len(notes) == 1 and not notes[0]):
return ''
result = 'Additional Notes:\n'
for note in notes:
result += '\n* ' + note
return result
total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2)
total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2)
total_duration = "%s:%s" % (total_hours, total_minutes)
test_duration = duration
if len(test_duration) <= 4:
test_duration = "0%s" % duration
if total_duration != test_duration:
print("")
print("")
print("The task times do not add up! Tasks vs time entered: %s != %s" % (total_duration, test_duration))
print("")
print("")
# Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s])
msg = """
[Daily Report] %(date)s
WFH: %(wfh_info)s
Hi,
Daily Report for Date: %(date)s
%(tasks)s
%(notes)s
Kind regards,
%(user_full_name)s
""".strip() % {
"date": date,
"user_full_name": user_full_name,
"start": start,
"end": end,
"duration": duration,
"wfh_info": wfh_info,
"tasks": format_tasks(tasks) if tasks else "",
"notes": format_notes(notes) if notes else "",
"total_hours": total_hours,
"total_minutes": total_minutes,
}
print("Total time for all tasks (%s): %s - %s:%s" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes))
return msg
def _load_sheet_data(api, timesheet_url, arg_date=None):
try:
date = arrow.get(arg_date, 'YYYYMM')
except Exception: # pylint: disable=W0703
now = arrow.now()
date = now.format('YYYYMM')
rows = load_first_sheet_rows(api, timesheet_url, date)
date_str = str(date.format('YYYYMM'))
return (rows, date_str)
def export_csv(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
csv_filename = os.path.join(os.getcwd(), "%s.csv" % (arg_date))
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
print("Writing to %s" % (csv_filename))
with open(csv_filename, mode='w') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# f.writerow(['John Smith', 'Accounting', 'November'])
f.writerow(["username", "date", "task", "duration", "work_type", "details"])
def w(task, duration_minutes, details = ""):
work_type = "Meeting" if "meeting" in details.lower() else "Development"
# Needed CSV columns
# username|date|task|duration|work_type|details
f.writerow(["daniel", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, "%dm" % (duration_minutes), work_type, details])
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if time_start is None or time_end is None or date is None:
continue
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
if len(tasks) == 0:
print("%s: no tasks found! %s" % (date, time_start))
continue
print("%s: %d tasks found!" % (date, len(tasks)))
for task in tasks:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, duration] = g
hours, half_hours = calc(duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
if DEBUG:
print("time: %s, %s $ %s $ %s" % (hours, half_hours, duration, minutes))
details = "%s %s" % (task_number, task_details[:-1].strip())
w(task_number, minutes, details.strip())
print("")
print("CSV output to: %s" % (csv_filename))
def calc_daily_hours_for_month(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
minutes = 0
days = 0
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
worked_at = row[COL_MOVE] if max_cols >= COL_MOVE else None
notes = row[COL_NOTES] if max_cols >= COL_NOTES else ""
if time_start is None or time_end is None or date is None:
continue
start_hours, start_minutes = calc(time_start)
end_hours, end_minutes = calc(time_end)
if start_hours == 0:
print("%s: Day off because of %s" % (date, "whatever" if time_start == 0 else time_start))
continue
extra_info = ""
the_date = arrow.get(str(date), 'YYYYMMDD')
if the_date.weekday() in [SATURDAY, SUNDAY]:
extra_info += " - Weekend work"
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
extra_info += " - half day PTO"
if worked_at in ['o', 'O'] or "OFFICE" in notes.upper():
extra_info += " - Commute to office"
minutes_day = abs(end_hours - start_hours) * 60
minutes_day += end_minutes - start_minutes
minutes += minutes_day
hours_day = int(minutes_day / 60)
hours_day_without_lunch = hours_day - 1
minutes_day = minutes_day % 60
total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2)
days += 1
no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2)
print("%s: %s to %s = %s (without lunch: %s)%s" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info))
    hours = str(minutes // 60).zfill(2)
minutes = str(minutes % 60).zfill(2)
lunch_hours = str(int(float(hours)) - days).zfill(2)
print("")
print("Total days worked: %s" % str(days))
print("Total hours: %s:%s (with 1 hour lunch: %s:%s)" % (hours, minutes, lunch_hours, minutes))
print("")
def calc_stats(api, timesheet_url, arg_date=None):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
# find the rows for the given month
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
if not AS_CSV:
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
dates, hours = [], []
half_days = {}
first = None
last = None
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if day_type is None:
continue
if day_type in SPECIAL_VALUES:
time = day_type
hours.append(time)
dates.append(date)
continue
elif not tasks:
continue
# If it was a half day, meaning I took half a day off, then only count half the time
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
half_days[date] = time
hours.append(time)
dates.append(date)
if first is None:
first = row
else:
last = row
total_hours, total_minutes, total_time = 0, 0, ""
for index, hour in enumerate(hours):
date = dates[index]
local_hours, local_minutes = calc(hour, date in half_days)
total_hours += local_hours
total_minutes += local_minutes
if total_minutes >= 60:
            total_hours += (total_minutes // 60)
total_minutes = total_minutes % 60
total_time = "%d:%d hours:minutes" % (total_hours, total_minutes)
expected = 0
actual_h, actual_m = 0, 0
if not AS_CSV:
print("*" * 50)
print("")
print("Valid hours entries: %s\t[required vs actual]" % len(hours))
deduct_work_hours = 0
work_hours = 0
work_minutes = 0
days = 0
expected_hours_accumulated_total = 0
for index, worked_date in enumerate(dates):
days += 1
if hours[index] in SPECIAL_VALUES:
if not AS_CSV:
print(" %s: Off, because %s" % (worked_date, hours[index]))
else:
pass
else:
half_day = worked_date in half_days
# each workday has 8 hours of work, but on half days it is only half of 8, aka 4.
work_hours_for_the_day = 8 if not half_day else 4
expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day)
expected_minutes_accumulated_total = expected_hours_accumulated_total * 60
# hours[index] is the actual time worked, e.g. 6:30 means 6 hours and 30 minutes
local_h, local_m = calc(hours[index])
work_hours += local_h
work_minutes += local_m
actual_h = work_hours
# 330 minutes = 6 hours and 30 minutes
actual_h += int(work_minutes / 60)
actual_m = work_minutes % 60
if AS_CSV:
print("%s;%s;" % (worked_date, hours[index]))
else:
print(" %s: %s\t[%s:00 vs %s:%s] %s" % (worked_date, hours[index], expected_hours_accumulated_total,
str(actual_h).zfill(2), str(actual_m).zfill(2),
"Half day" if half_day else ""))
if not AS_CSV:
print("")
print("First:", "<first> not found" if first is None else first[COL_DATE])
print("Last:", "<last> not found" if last is None else last[COL_DATE])
print("")
print("Total time in %s: %s" % (date, total_time))
print("")
print("*" * 50)
def main():
# print("Checking environment variable TIMESHEET_URL for spreadsheet URL...")
timesheet_url = os.environ.get('TIMESHEET_URL', "").strip()
if not timesheet_url:
raise Exception("Please set the TIMESHEET_URL environment variable accordingly.")
# print("Checking environment variable USER_FULL_NAME for spreadsheet URL...")
user_full_name = os.environ.get('USER_FULL_NAME', "").strip()
if not user_full_name:
print("Warning: USER_FULL_NAME environment variable not set!")
user_full_name = "Herman Toothrot"
print("")
print("Usage: python timesheet.py [command|date] [date]")
print("Example: python timesheet.py stats 202011")
print("Example: python timesheet.py 20201130")
print("")
print("Available commands:")
print("- stats: show summed up hours and minutes for the given/current month")
print(" use \"CSV=1 python timesheet.py stats\" to format the output")
print(" as CSV")
print("- daily: same as stats, except ready to email to HR")
print("- csv: task breakdown for the month and time spend on each task")
print("")
print("""Tip: use "DEBUG=1 timesheet <parameter>" to enable debug output""")
print("")
print("Trying to load client-secrets.json file ...")
secrets_file, cache_file = get_client_secret_filenames()
sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False)
print("Success.")
date = None if len(sys.argv) < 3 else sys.argv[2].strip()
arg = "read today" if len(sys.argv) < 2 else sys.argv[1].strip()
if arg == "stats":
calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "daily":
calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "csv":
export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
else:
date_to_use = "read today" if arg == '' else arg
load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name)
print("Done.")
if __name__ == "__main__":
main()
| [((17, 8, 17, 36), 'os.environ.get', 'os.environ.get', ({(17, 23, 17, 30): '"""DEBUG"""', (17, 32, 17, 35): '"""0"""'}, {}), "('DEBUG', '0')", False, 'import os\n'), ((18, 9, 18, 35), 'os.environ.get', 'os.environ.get', ({(18, 24, 18, 29): '"""CSV"""', (18, 31, 18, 34): '"""0"""'}, {}), "('CSV', '0')", False, 'import os\n'), ((54, 15, 54, 64), 'os.path.join', 'os.path.join', ({(54, 28, 54, 40): 'CURRENT_PATH', (54, 42, 54, 63): '"""client-secrets.json"""'}, {}), "(CURRENT_PATH, 'client-secrets.json')", False, 'import os\n'), ((55, 16, 55, 71), 'os.path.join', 'os.path.join', ({(55, 29, 55, 41): 'CURRENT_PATH', (55, 43, 55, 70): '"""client-secrets-cache.json"""'}, {}), "(CURRENT_PATH, 'client-secrets-cache.json')", False, 'import os\n'), ((81, 10, 81, 21), 'arrow.now', 'arrow.now', ({}, {}), '()', False, 'import arrow\n'), ((186, 8, 186, 60), 're.compile', 're.compile', ({(186, 19, 186, 59): '"""([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))"""'}, {}), "('([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))')", False, 'import re\n'), ((589, 13, 589, 76), 'gsheets.Sheets.from_files', 'Sheets.from_files', (), '', False, 'from gsheets import Sheets\n'), ((16, 47, 16, 73), 'os.path.realpath', 'os.path.realpath', ({(16, 64, 16, 72): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((57, 11, 57, 35), 'os.path.exists', 'os.path.exists', ({(57, 26, 57, 34): 'filename'}, {}), '(filename)', False, 'import os\n'), ((60, 11, 60, 35), 'os.path.exists', 'os.path.exists', ({(60, 26, 60, 34): 'filename'}, {}), '(filename)', False, 'import os\n'), ((293, 15, 293, 44), 'arrow.get', 'arrow.get', ({(293, 25, 293, 33): 'arg_date', (293, 35, 293, 43): '"""YYYYMM"""'}, {}), "(arg_date, 'YYYYMM')", False, 'import arrow\n'), ((311, 32, 311, 43), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((318, 12, 318, 82), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n'), ((334, 12, 334, 64), 're.compile', 're.compile', ({(334, 23, 334, 63): '"""([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))"""'}, {}), "('([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))')", False, 'import re\n'), ((58, 38, 58, 78), 'os.path.join', 'os.path.join', ({(58, 51, 58, 54): '"""~"""', (58, 56, 58, 77): '"""client-secrets.json"""'}, {}), "('~', 'client-secrets.json')", False, 'import os\n'), ((59, 39, 59, 85), 'os.path.join', 'os.path.join', ({(59, 52, 59, 55): '"""~"""', (59, 57, 59, 84): '"""client-secrets-cache.json"""'}, {}), "('~', 'client-secrets-cache.json')", False, 'import os\n'), ((66, 51, 66, 62), 'arrow.now', 'arrow.now', ({}, {}), '()', False, 'import arrow\n'), ((136, 19, 136, 42), 'arrow.get', 'arrow.get', ({(136, 29, 136, 32): 'val', (136, 34, 136, 41): '"""HH:mm"""'}, {}), "(val, 'HH:mm')", False, 'import arrow\n'), ((295, 14, 295, 25), 'arrow.now', 'arrow.now', ({}, {}), '()', False, 'import arrow\n'), ((563, 20, 563, 55), 'os.environ.get', 'os.environ.get', ({(563, 35, 563, 50): '"""TIMESHEET_URL"""', (563, 52, 563, 54): '""""""'}, {}), "('TIMESHEET_URL', '')", False, 'import os\n'), ((567, 21, 567, 57), 'os.environ.get', 'os.environ.get', ({(567, 36, 567, 52): '"""USER_FULL_NAME"""', (567, 54, 567, 56): '""""""'}, {}), "('USER_FULL_NAME', '')", False, 'import os\n'), ((85, 21, 85, 55), 'arrow.get', 'arrow.get', ({(85, 31, 85, 42): 'commandline', (85, 44, 85, 54): '"""YYYYMMDD"""'}, {}), "(commandline, 'YYYYMMDD')", False, 'import arrow\n'), ((138, 19, 138, 41), 'arrow.get', 'arrow.get', ({(138, 29, 138, 32): 'val', (138, 34, 138, 40): '"""H:mm"""'}, {}), "(val, 'H:mm')", False, 'import arrow\n'), ((596, 
50, 596, 61), 'arrow.now', 'arrow.now', ({}, {}), '()', False, 'import arrow\n'), ((598, 66, 598, 77), 'arrow.now', 'arrow.now', ({}, {}), '()', False, 'import arrow\n'), ((600, 50, 600, 61), 'arrow.now', 'arrow.now', ({}, {}), '()', False, 'import arrow\n')] |
Orpheon/All-in | league/game.py | 016901953904250226f388422318ef2f739bf82e | import numpy as np
import pickle
import treys
import constants
FULL_DECK = np.array(treys.Deck.GetFullDeck())
class GameEngine:
def __init__(self, BATCH_SIZE, INITIAL_CAPITAL, SMALL_BLIND, BIG_BLIND, logger):
self.BATCH_SIZE = BATCH_SIZE
self.INITIAL_CAPITAL = INITIAL_CAPITAL
self.SMALL_BLIND = SMALL_BLIND
self.BIG_BLIND = BIG_BLIND
self.logger = logger
self.N_PLAYERS = 6
def generate_cards(self):
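    # Shuffle a full 52-card deck for every game in the batch; the first five
    # cards form the community board, the next 2 * N_PLAYERS are the hole cards.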
cards = np.tile(np.arange(52), (self.BATCH_SIZE, 1))
for i in range(self.BATCH_SIZE):
cards[i, :] = FULL_DECK[np.random.permutation(cards[i, :])]
community_cards = cards[:, :5]
hole_cards = np.reshape(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.N_PLAYERS, 2))
return community_cards, hole_cards
def run_game(self, players):
if len(players) != self.N_PLAYERS:
raise ValueError('Only {} players allowed'.format(self.N_PLAYERS))
community_cards, hole_cards = self.generate_cards()
folded = np.zeros((self.BATCH_SIZE, len(players)), dtype=bool)
prev_round_investment = np.zeros((self.BATCH_SIZE, len(players)), dtype=int)
for player in players:
player.initialize(self.BATCH_SIZE, self.INITIAL_CAPITAL, self.N_PLAYERS)
# Pre-flop
bets, _ = self.run_round(players, prev_round_investment, folded, constants.PRE_FLOP, hole_cards, community_cards[:, :0])
prev_round_investment += bets
# Flop
bets, _ = self.run_round(players, prev_round_investment, folded, constants.FLOP, hole_cards, community_cards[:, :3])
prev_round_investment += bets
# Turn
bets, _ = self.run_round(players, prev_round_investment, folded, constants.TURN, hole_cards, community_cards[:, :4])
prev_round_investment += bets
# River
bets, end_state = self.run_round(players, prev_round_investment, folded, constants.RIVER, hole_cards, community_cards)
prev_round_investment += bets
# Showdown
pool = np.sum(prev_round_investment, axis=1)
total_winnings = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=float)
hand_scores = self.evaluate_hands(community_cards, hole_cards, np.logical_not(folded))
ranks = np.argsort(hand_scores, axis=1)
sorted_hands = np.take_along_axis(hand_scores, indices=ranks, axis=1)
# Get everyone who has the best hand and among which pots will be split
participants = hand_scores == sorted_hands[:, 0][:, None]
# Get the number of times each pot will be split
n_splits_per_game = participants.sum(axis=1)
# Split and distribute the money
gains = pool / n_splits_per_game
total_winnings += participants * gains[:, None]
total_winnings -= prev_round_investment
self.logger.log(constants.EV_END_GAME, (hand_scores, total_winnings, [str(p) for p in players], folded, hole_cards))
self.logger.save_to_file()
for player_idx, player in enumerate(players):
round, current_bets, min_raise, prev_round_investment, folded, last_raiser = end_state
player.end_trajectory(player_idx, round, current_bets, min_raise, prev_round_investment, folded, last_raiser,
hole_cards[:, player_idx, :], community_cards, total_winnings[:, player_idx])
return total_winnings
def run_round(self, players, prev_round_investment, folded, round, hole_cards, community_cards):
"""
:param players: [Player]
:param prev_round_investment: np.ndarray(batchsize, n_players) = int
:param folded: np.ndarray(batchsize, n_players) = bool
:param round: int ∈ {0..3}
:param hole_cards: np.ndarray(batchsize, n_players, 2) = treys.Card
:param community_cards: np.ndarray(batchsize, n_players, {0,3,4,5}) = treys.Card
:return: current_bets: np.ndarray(batchsize, n_players)=int {0-200}
"""
current_bets = np.zeros((self.BATCH_SIZE, self.N_PLAYERS), dtype=int)
max_bets = np.zeros(self.BATCH_SIZE, dtype=int)
min_raise = np.zeros(self.BATCH_SIZE, dtype=int)
min_raise[:] = self.BIG_BLIND
last_raiser = np.zeros(self.BATCH_SIZE, dtype=int)
player_order = list(enumerate(players))
round_countdown = np.zeros(self.BATCH_SIZE, dtype=int)
round_countdown[:] = self.N_PLAYERS
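    # round_countdown tracks, per game, how many players still have to act before
    # the betting round closes; every raise resets it to N_PLAYERS.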
if round == constants.PRE_FLOP:
current_bets[:, 0] = self.SMALL_BLIND
current_bets[:, 1] = self.BIG_BLIND
max_bets[:] = self.BIG_BLIND
player_order = player_order[2:] + player_order[:2]
while True:
running_games = np.nonzero(round_countdown > 0)[0]
for player_idx, player in player_order:
actions, amounts = player.act(player_idx, round, round_countdown > 0, current_bets, min_raise,
prev_round_investment, folded, last_raiser, hole_cards[:, player_idx, :],
community_cards)
# Disabled when not necessary because it bloats the log size (by ~500 kB or so, which triples the size)
# self.logger.log(constants.EV_PLAYER_ACTION, (round, player_idx, actions, amounts, round_countdown, folded[:, player_idx]))
# People who have already folded continue to fold
actions[folded[:, player_idx] == 1] = constants.FOLD
# People who have gone all-in continue to be all-in
actions[prev_round_investment[:, player_idx] + current_bets[:, player_idx] == self.INITIAL_CAPITAL] = constants.CALL
###########
# CALLING #
###########
calls = np.where(np.logical_and(round_countdown > 0, actions == constants.CALL))[0]
if calls.size > 0:
investment = np.minimum(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx], max_bets[calls])
# Reset the bets and countdown
current_bets[calls, player_idx] = investment
###########
# RAISING #
###########
raises = np.where(np.logical_and(round_countdown > 0, actions == constants.RAISE))[0]
if raises.size > 0:
# print("True raises", raises, amounts[raises])
investment = np.maximum(current_bets[raises, player_idx] + amounts[raises], max_bets[raises] + min_raise[raises])
min_raise[raises] = investment - max_bets[raises]
max_bets[raises] = investment
# Reset the bets and countdown
current_bets[raises, player_idx] = np.minimum(investment, self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx])
round_countdown[raises] = self.N_PLAYERS
last_raiser[raises] = player_idx
###########
# FOLDING #
###########
folded[np.where(np.logical_and(round_countdown > 0, actions == constants.FOLD))[0], player_idx] = 1
round_countdown[running_games] -= 1
      # TODO: zeroing the countdown once all but one player have folded ends the hand
      # early; this improves performance but was suspected of breaking tests.
      # (The test does not appear to be broken -- is there another reason?)
round_countdown[folded.sum(axis=1) == self.N_PLAYERS-1] = 0
if np.max(round_countdown[running_games]) <= 0:
return current_bets, (round, current_bets, min_raise, prev_round_investment, folded, last_raiser)
def evaluate_hands(self, community_cards, hole_cards, contenders):
evaluator = treys.Evaluator()
# 7463 = 1 lower than the lowest score a hand can have (scores are descending to 1)
results = np.full((self.BATCH_SIZE, self.N_PLAYERS), 7463, dtype=int)
for game_idx,community in enumerate(community_cards):
for player_idx,hole in enumerate(hole_cards[game_idx]):
if contenders[game_idx, player_idx]:
results[game_idx, player_idx] = evaluator.evaluate(community.tolist(), hole.tolist())
return results
| [((7, 21, 7, 45), 'treys.Deck.GetFullDeck', 'treys.Deck.GetFullDeck', ({}, {}), '()', False, 'import treys\n'), ((24, 17, 24, 101), 'numpy.reshape', 'np.reshape', ({(24, 28, 24, 62): 'cards[:, 5:5 + 2 * self.N_PLAYERS]', (24, 64, 24, 100): '(self.BATCH_SIZE, self.N_PLAYERS, 2)'}, {}), '(cards[:, 5:5 + 2 * self.N_PLAYERS], (self.BATCH_SIZE, self.\n N_PLAYERS, 2))', True, 'import numpy as np\n'), ((56, 11, 56, 48), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((57, 21, 57, 77), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((61, 12, 61, 43), 'numpy.argsort', 'np.argsort', (), '', True, 'import numpy as np\n'), ((62, 19, 62, 73), 'numpy.take_along_axis', 'np.take_along_axis', (), '', True, 'import numpy as np\n'), ((93, 19, 93, 73), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((94, 15, 94, 51), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((95, 16, 95, 52), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((97, 18, 97, 54), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((101, 22, 101, 58), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((164, 16, 164, 33), 'treys.Evaluator', 'treys.Evaluator', ({}, {}), '()', False, 'import treys\n'), ((166, 14, 166, 73), 'numpy.full', 'np.full', (), '', True, 'import numpy as np\n'), ((20, 20, 20, 33), 'numpy.arange', 'np.arange', ({(20, 30, 20, 32): '52'}, {}), '(52)', True, 'import numpy as np\n'), ((59, 67, 59, 89), 'numpy.logical_not', 'np.logical_not', ({(59, 82, 59, 88): 'folded'}, {}), '(folded)', True, 'import numpy as np\n'), ((111, 22, 111, 53), 'numpy.nonzero', 'np.nonzero', ({(111, 33, 111, 52): '(round_countdown > 0)'}, {}), '(round_countdown > 0)', True, 'import numpy as np\n'), ((22, 30, 22, 64), 'numpy.random.permutation', 'np.random.permutation', ({(22, 52, 22, 63): 'cards[(i), :]'}, {}), '(cards[(i), :])', True, 'import numpy as np\n'), ((131, 23, 131, 115), 'numpy.minimum', 'np.minimum', ({(131, 34, 131, 97): 'self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx]', (131, 99, 131, 114): 'max_bets[calls]'}, {}), '(self.INITIAL_CAPITAL - prev_round_investment[calls, player_idx],\n max_bets[calls])', True, 'import numpy as np\n'), ((142, 23, 142, 123), 'numpy.maximum', 'np.maximum', ({(142, 34, 142, 84): 'current_bets[raises, player_idx] + amounts[raises]', (142, 86, 142, 122): 'max_bets[raises] + min_raise[raises]'}, {}), '(current_bets[raises, player_idx] + amounts[raises], max_bets[\n raises] + min_raise[raises])', True, 'import numpy as np\n'), ((146, 45, 146, 133), 'numpy.minimum', 'np.minimum', ({(146, 56, 146, 66): 'investment', (146, 68, 146, 132): 'self.INITIAL_CAPITAL - prev_round_investment[raises, player_idx]'}, {}), '(investment, self.INITIAL_CAPITAL - prev_round_investment[raises,\n player_idx])', True, 'import numpy as np\n'), ((160, 11, 160, 49), 'numpy.max', 'np.max', ({(160, 18, 160, 48): 'round_countdown[running_games]'}, {}), '(round_countdown[running_games])', True, 'import numpy as np\n'), ((129, 25, 129, 87), 'numpy.logical_and', 'np.logical_and', ({(129, 40, 129, 59): '(round_countdown > 0)', (129, 61, 129, 86): '(actions == constants.CALL)'}, {}), '(round_countdown > 0, actions == constants.CALL)', True, 'import numpy as np\n'), ((139, 26, 139, 89), 'numpy.logical_and', 'np.logical_and', ({(139, 41, 139, 60): '(round_countdown > 0)', (139, 62, 139, 88): '(actions == constants.RAISE)'}, {}), '(round_countdown > 0, actions == constants.RAISE)', True, 'import numpy as np\n'), 
((154, 24, 154, 86), 'numpy.logical_and', 'np.logical_and', ({(154, 39, 154, 58): '(round_countdown > 0)', (154, 60, 154, 85): '(actions == constants.FOLD)'}, {}), '(round_countdown > 0, actions == constants.FOLD)', True, 'import numpy as np\n')] |
miloprice/django-cms | cms/admin/views.py | c6f548f0983a7488609e07a57552b47675d8d78e | # -*- coding: utf-8 -*-
from cms.models import Page, Title, CMSPlugin, Placeholder
from cms.utils import get_language_from_request
from django.http import Http404
from django.shortcuts import get_object_or_404
def revert_plugins(request, version_id, obj):
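    """Restore the placeholders, plugins and titles captured in the given
    reversion version back onto obj (the page being reverted)."""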
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
cms_plugin_list = []
placeholders = {}
plugin_list = []
titles = []
others = []
page = obj
lang = get_language_from_request(request)
for rev in revs:
obj = rev.object
if obj.__class__ == Placeholder:
placeholders[obj.pk] = obj
if obj.__class__ == CMSPlugin:
cms_plugin_list.append(obj)
elif hasattr(obj, 'cmsplugin_ptr_id'):
plugin_list.append(obj)
elif obj.__class__ == Page:
pass
#page = obj #Page.objects.get(pk=obj.pk)
elif obj.__class__ == Title:
titles.append(obj)
else:
others.append(rev)
if not page.has_change_permission(request):
raise Http404
current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page))
for pk, placeholder in placeholders.items():
# admin has already created the placeholders/ get them instead
try:
placeholders[pk] = page.placeholders.get(slot=placeholder.slot)
except Placeholder.DoesNotExist:
placeholders[pk].save()
page.placeholders.add(placeholders[pk])
for plugin in cms_plugin_list:
# connect plugins to the correct placeholder
plugin.placeholder = placeholders[plugin.placeholder_id]
plugin.save(no_signals=True)
for plugin in cms_plugin_list:
plugin.save()
for p in plugin_list:
if int(p.cmsplugin_ptr_id) == int(plugin.pk):
plugin.set_base_attr(p)
p.save()
for old in current_plugins:
if old.pk == plugin.pk:
plugin.save()
current_plugins.remove(old)
for title in titles:
title.page = page
try:
title.save()
except:
title.pk = Title.objects.get(page=page, language=title.language).pk
title.save()
for other in others:
other.object.save()
for plugin in current_plugins:
plugin.delete() | [((10, 14, 10, 55), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404\n'), ((18, 11, 18, 45), 'cms.utils.get_language_from_request', 'get_language_from_request', ({(18, 37, 18, 44): 'request'}, {}), '(request)', False, 'from cms.utils import get_language_from_request\n'), ((36, 27, 36, 75), 'cms.models.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', (), '', False, 'from cms.models import Page, Title, CMSPlugin, Placeholder\n'), ((63, 23, 63, 76), 'cms.models.Title.objects.get', 'Title.objects.get', (), '', False, 'from cms.models import Page, Title, CMSPlugin, Placeholder\n')] |
lvwuyunlifan/crop | delete.py | 7392d007a8271ff384c5c66ed5717afbc4172b4d | import os
from PIL import Image, ImageFilter
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# import seaborn as sns
import pandas as pd
import numpy as np
import random
train_path = './AgriculturalDisease_trainingset/'
valid_path = './AgriculturalDisease_validationset/'
def genImage(gpath, datatype):
if datatype == 'train':
        gen_number = 0 # count of generated images
if not os.path.exists(gpath+'delete'):
os.makedirs(gpath+'delete')
label = pd.read_csv(gpath + 'label.csv')
        label_gen_dict = {'img_path':[], 'label':[]} # labels for the generated images
for i in range(61):
li = label[label['label'] == i]
imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
imagelist = np.array(li['img_path']).tolist()
img_path_gen, label_gen = [], []
# for imagefile in imagelist:
for aa in range(len(imagelist)):
if aa <= 40:
print(aa)
path, imagename = os.path.split(imagelist[aa])
im = Image.open(imagelist[aa])
im = im.convert('RGB')
im_detail = im.transpose(Image.ROTATE_180)
                    # im_detail = im.filter(ImageFilter.DETAIL) # detail enhancement
img_path_gen.append(gpath + 'delete/' +'idetail_'+imagename)
label_gen.extend([int(i)])
im_detail.save(gpath + 'delete/' +'idetail_'+imagename)
gen_number += 1
label_dict = {'img_path':img_path_gen, 'label':label_gen}
label_gen_dict['img_path'].extend(img_path_gen)
label_gen_dict['label'].extend(label_gen)
label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd) # append the generated labels to the original labels
            # label['label'] = label[['label']].astype('int64') # cast to int64
# print(label)
label_gen_p = pd.DataFrame(label_gen_dict)
label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)
# label_gen_p = pd.DataFrame(label_gen_dict)
# label_gen_p.to_csv(gpath + 'label_gen.csv', index=False)
        print('Generated %d images in total for the training set' % gen_number)
if datatype == 'valid':
gen_number = 0
if not os.path.exists(gpath+'delete'):
os.makedirs(gpath+'delete')
label = pd.read_csv(gpath + 'label.csv')
label_gen_dict = {'img_path':[], 'label':[]}
for i in range(61):
li = label[label['label'] == i]
imagenum = li['label'].count()
            print('Class %d: %d images in total' % (i, imagenum))
imagelist = np.array(li['img_path']).tolist()
img_path_gen, label_gen = [], []
# for imagefile in imagelist:
for aa in range(len(imagelist)):
if aa <= 20:
print(aa)
path, imagename = os.path.split(imagelist[aa])
im = Image.open(imagelist[aa])
im = im.convert('RGB')
im_detail = im.transpose(Image.ROTATE_180)
                    #im_detail = im.filter(ImageFilter.DETAIL) # detail enhancement
img_path_gen.append(gpath + 'delete/' + 'idetail_' + imagename)
label_gen.extend([int(i)])
im_detail.save(gpath + 'delete/' + 'idetail_' + imagename)
gen_number += 1
label_dict = {'img_path': img_path_gen, 'label': label_gen}
label_gen_dict['img_path'].extend(img_path_gen)
label_gen_dict['label'].extend(label_gen)
label_gen_pd = pd.DataFrame(label_dict)
            # label = label.append(label_gen_pd) # append the generated labels to the original labels
            # label['label'] = label[['label']].astype('int64') # cast to int64
# print(label)
label_gen_p = pd.DataFrame(label_gen_dict)
label_gen_p.to_csv(gpath + 'label_delete.csv', index=False)
        print('Generated %d images in total for the validation set' % gen_number)
if __name__ == '__main__':
genImage(train_path, 'train')
genImage(valid_path, 'valid')
| [((23, 16, 23, 48), 'pandas.read_csv', 'pd.read_csv', ({(23, 28, 23, 47): "gpath + 'label.csv'"}, {}), "(gpath + 'label.csv')", True, 'import pandas as pd\n'), ((59, 22, 59, 50), 'pandas.DataFrame', 'pd.DataFrame', ({(59, 35, 59, 49): 'label_gen_dict'}, {}), '(label_gen_dict)', True, 'import pandas as pd\n'), ((72, 16, 72, 48), 'pandas.read_csv', 'pd.read_csv', ({(72, 28, 72, 47): "gpath + 'label.csv'"}, {}), "(gpath + 'label.csv')", True, 'import pandas as pd\n'), ((107, 22, 107, 50), 'pandas.DataFrame', 'pd.DataFrame', ({(107, 35, 107, 49): 'label_gen_dict'}, {}), '(label_gen_dict)', True, 'import pandas as pd\n'), ((20, 15, 20, 45), 'os.path.exists', 'os.path.exists', ({(20, 30, 20, 44): "(gpath + 'delete')"}, {}), "(gpath + 'delete')", False, 'import os\n'), ((21, 12, 21, 39), 'os.makedirs', 'os.makedirs', ({(21, 24, 21, 38): "(gpath + 'delete')"}, {}), "(gpath + 'delete')", False, 'import os\n'), ((55, 27, 55, 51), 'pandas.DataFrame', 'pd.DataFrame', ({(55, 40, 55, 50): 'label_dict'}, {}), '(label_dict)', True, 'import pandas as pd\n'), ((70, 15, 70, 45), 'os.path.exists', 'os.path.exists', ({(70, 30, 70, 44): "(gpath + 'delete')"}, {}), "(gpath + 'delete')", False, 'import os\n'), ((71, 12, 71, 39), 'os.makedirs', 'os.makedirs', ({(71, 24, 71, 38): "(gpath + 'delete')"}, {}), "(gpath + 'delete')", False, 'import os\n'), ((103, 27, 103, 51), 'pandas.DataFrame', 'pd.DataFrame', ({(103, 40, 103, 50): 'label_dict'}, {}), '(label_dict)', True, 'import pandas as pd\n'), ((29, 24, 29, 48), 'numpy.array', 'np.array', ({(29, 33, 29, 47): "li['img_path']"}, {}), "(li['img_path'])", True, 'import numpy as np\n'), ((36, 38, 36, 66), 'os.path.split', 'os.path.split', ({(36, 52, 36, 65): 'imagelist[aa]'}, {}), '(imagelist[aa])', False, 'import os\n'), ((38, 25, 38, 50), 'PIL.Image.open', 'Image.open', ({(38, 36, 38, 49): 'imagelist[aa]'}, {}), '(imagelist[aa])', False, 'from PIL import Image, ImageFilter\n'), ((78, 24, 78, 48), 'numpy.array', 'np.array', ({(78, 33, 78, 47): "li['img_path']"}, {}), "(li['img_path'])", True, 'import numpy as np\n'), ((84, 38, 84, 66), 'os.path.split', 'os.path.split', ({(84, 52, 84, 65): 'imagelist[aa]'}, {}), '(imagelist[aa])', False, 'import os\n'), ((86, 25, 86, 50), 'PIL.Image.open', 'Image.open', ({(86, 36, 86, 49): 'imagelist[aa]'}, {}), '(imagelist[aa])', False, 'from PIL import Image, ImageFilter\n')] |
likedeke/python-spider-study | 数据分析/matplotlib/03.demo.py | 09bee3cbe833234a86efcc28d62ace000e2fbb4b | # - - - - - - - - - - -
# @author like
# @since 2021-02-23 11:08
# @email [email protected]
# Temperature change from 10:00 to 12:00 (one reading per minute)
from matplotlib import pyplot as plt
from matplotlib import rc
from matplotlib import font_manager
import random
x = range(0, 120)
y = [random.randint(20, 35) for i in range(120)]
plt.figure(figsize=(20, 8), dpi=80)
plt.plot(x, y)
# Chinese font (SimHei), needed so the CJK tick labels and titles render correctly
chFont = font_manager.FontProperties(family="SimHei") # SimHei
# chFont = font_manager.FontProperties(fname="C:/Windows/Fonts/SIMHEI.TTF")
# Tick-related settings
step = 10
xLabels = ["10点,{}分".format(i) for i in range(60)]
xLabels += ["11点,{}分".format(i) for i in range(60)]
plt.xticks(list(x)[::step], xLabels[::step], rotation=25, fontProperties=chFont)
# Add descriptive info: axis labels and title
plt.xlabel("时间", fontProperties=chFont)
plt.ylabel("温度 单位(℃)", fontProperties=chFont)
plt.title("10点到12点每分钟的气温变化", fontProperties=chFont)
plt.show()
| [((16, 0, 16, 35), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'from matplotlib import pyplot as plt\n'), ((18, 0, 18, 14), 'matplotlib.pyplot.plot', 'plt.plot', ({(18, 9, 18, 10): 'x', (18, 12, 18, 13): 'y'}, {}), '(x, y)', True, 'from matplotlib import pyplot as plt\n'), ((21, 9, 21, 53), 'matplotlib.font_manager.FontProperties', 'font_manager.FontProperties', (), '', False, 'from matplotlib import font_manager\n'), ((31, 0, 31, 43), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (), '', True, 'from matplotlib import pyplot as plt\n'), ((32, 0, 32, 55), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (), '', True, 'from matplotlib import pyplot as plt\n'), ((33, 0, 33, 73), 'matplotlib.pyplot.title', 'plt.title', (), '', True, 'from matplotlib import pyplot as plt\n'), ((35, 0, 35, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((14, 5, 14, 27), 'random.randint', 'random.randint', ({(14, 20, 14, 22): '(20)', (14, 24, 14, 26): '(35)'}, {}), '(20, 35)', False, 'import random\n')] |
xylar/cdat | testing/vcs/test_vcs_isoline_labels.py | 8a5080cb18febfde365efc96147e25f51494a2bf | import os, sys, cdms2, vcs, vcs.testing.regression as regression
dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data = dataset("clt")
canvas = regression.init()
isoline = canvas.createisoline()
isoline.label="y"
texts=[]
colors = []
for i in range(10):
text = canvas.createtext()
text.color = 50 + 12 * i
text.height = 12
colors.append(100 + 12 * i)
if i%2 == 0:
texts.append(text.name)
else:
texts.append(text)
isoline.text = texts
# First test using isoline.text[...].color
canvas.plot(data, isoline, bg=1)
baseline = os.path.splitext(sys.argv[1])
baselineImage = "%s%s"%baseline
ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage)
# Now set isoline.linecolors and test again.
canvas.clear()
isoline.linecolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels2.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
# Now set isoline.textcolors and test again.
canvas.clear()
isoline.textcolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels3.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
sys.exit(ret)
| [((5, 9, 5, 26), 'vcs.testing.regression.init', 'regression.init', ({}, {}), '()', True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((24, 11, 24, 40), 'os.path.splitext', 'os.path.splitext', ({(24, 28, 24, 39): 'sys.argv[1]'}, {}), '(sys.argv[1])', False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((26, 6, 26, 87), 'vcs.testing.regression.run_wo_terminate', 'regression.run_wo_terminate', ({(26, 34, 26, 40): 'canvas', (26, 42, 26, 71): '"""test_vcs_isoline_labels.png"""', (26, 73, 26, 86): 'baselineImage'}, {}), "(canvas, 'test_vcs_isoline_labels.png',\n baselineImage)", True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((33, 12, 33, 59), 'os.path.abspath', 'os.path.abspath', ({(33, 28, 33, 58): '"""test_vcs_isoline_labels2.png"""'}, {}), "('test_vcs_isoline_labels2.png')", False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((34, 7, 34, 68), 'vcs.testing.regression.run_wo_terminate', 'regression.run_wo_terminate', ({(34, 35, 34, 41): 'canvas', (34, 43, 34, 52): 'testImage', (34, 54, 34, 67): 'baselineImage'}, {}), '(canvas, testImage, baselineImage)', True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((42, 12, 42, 59), 'os.path.abspath', 'os.path.abspath', ({(42, 28, 42, 58): '"""test_vcs_isoline_labels3.png"""'}, {}), "('test_vcs_isoline_labels3.png')", False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((43, 7, 43, 68), 'vcs.testing.regression.run_wo_terminate', 'regression.run_wo_terminate', ({(43, 35, 43, 41): 'canvas', (43, 43, 43, 52): 'testImage', (43, 54, 43, 67): 'baselineImage'}, {}), '(canvas, testImage, baselineImage)', True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((45, 0, 45, 13), 'sys.exit', 'sys.exit', ({(45, 9, 45, 12): 'ret'}, {}), '(ret)', False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((3, 21, 3, 59), 'os.path.join', 'os.path.join', ({(3, 34, 3, 49): 'vcs.sample_data', (3, 50, 3, 58): '"""clt.nc"""'}, {}), "(vcs.sample_data, 'clt.nc')", False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n')] |
ds-utilities/ICE | src/Python_version/ICE_py36.py | 9461bbb8d6c7b3d3b32eac8ee29bd4ae3ccb286f | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 05:47:03 2018
@author: zg
"""
import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing
def RWR(A, nSteps, laziness, p0 = None):
'''
% the random walk algorithm.
    % A is the input network matrix, with the diagonal set to 0.
    % nSteps: how many steps to walk
    % laziness: the probability of going back (restarting at p0).
% p0: the initial probability. usually it is a zero matrix with the diag to
% be 1.
%
% for example, A could be:
% A = [0,2,2,0,0,0,0;...
% 2,0,1,1,0,0,0;...
% 2,1,0,0,1,0,0;...
% 0,1,0,0,0,1,1;...
% 0,0,1,0,0,0,0;...
% 0,0,0,1,0,0,1;...
% 0,0,0,1,0,1,0]
%
% if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
% [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
% 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
% 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
% 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
% 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
%
% Each column represents the propability for each node. each element in the
% column means the probability to go to that node.
    % This algorithm will converge. For example, for the above matrix, nSteps =
    % 100, 1000 or 10000 will give the same result.
'''
n = len(A)
    if p0 is None:
p0 = np.eye(n)
'''
% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
% 0.2500 0 0 0 0 0 0
% 0 0.2500 0 0 0 0 0
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0
% 0 0 0 0 1.0000 0 0
% 0 0 0 0 0 0.5000 0
% 0 0 0 0 0 0 0.5000
% W will be:
% 0 0.5000 0.5000 0 0 0 0
% 0.5000 0 0.2500 0.3333 0 0 0
% 0.5000 0.2500 0 0 1.0000 0 0
% 0 0.2500 0 0 0 0.5000 0.5000
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0.5000
% 0 0 0 0.3333 0 0.5000 0
'''
#W = A * spdiags(sum(A)'.^(-1), 0, n, n);
#W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
0, n, n).toarray() )
p = p0
pl2norm = np.inf
unchanged = 0
for i in range(1, nSteps+1):
if i % 100 == 0:
print(' done rwr ' + str(i-1) )
pnew = (1-laziness) * W.dot(p) + laziness * p0
l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
p = pnew
if l2norm < np.finfo(float).eps:
break
else:
if l2norm == pl2norm:
unchanged = unchanged +1
if unchanged > 10:
break
else:
unchanged = 0
pl2norm = l2norm
return p
# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
[2,0,1,1,0,0,0],\
[2,1,0,0,1,0,0],\
[0,1,0,0,0,1,1],\
[0,0,1,0,0,0,0],\
[0,0,0,1,0,0,1],\
[0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
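# Editorial sketch (not from the original repo): the core update inside RWR()
# is p_new = (1 - laziness) * W.dot(p) + laziness * p0, where W is the
# column-normalised adjacency matrix. A tiny self-contained illustration:
#
# import numpy as np
# from scipy.sparse import spdiags
# A = np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0.]])
# n = len(A)
# W = A.dot(spdiags(np.power(A.sum(axis=0), -1.0)[np.newaxis], 0, n, n).toarray())
# p = np.eye(n)
# for _ in range(200):
#     p = 0.7 * W.dot(p) + 0.3 * np.eye(n)   # laziness = 0.3
# # each column of p now sums to ~1: the stationary visit probabilities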
# test
#dst = distance.euclidean(A)
# corrent, the same as in Matlab
def f_sim_2_aRankNet(sim, k=3):
'''
% Convert the similarity matrix to a network graph where each node
% has k edges to other nodes (aRank).
'''
    # delete the diagonal values.
# sim = sim-diag(diag(sim) );
np.fill_diagonal(sim, 0)
# [~, I] = sort(sim-diag(diag(sim) ) );
I = np.argsort(sim, kind='mergesort') + 1
# [~, I2] = sort(I);
I2 = (np.argsort(I, kind='mergesort').T + 1).T
# for every column, just keep the top k edges.
#aRankNet = (I2 >length(sim)-k);
aRankNet = I2 > (len(sim) - k)
# make it a diagonal matrix
# aRankNet = max(aRankNet, aRankNet');
aRankNet = np.logical_or(aRankNet, aRankNet.T)
# remove the diagonal 1s.
# aRankNet = aRankNet-diag(diag(aRankNet) );
np.fill_diagonal(aRankNet, False)
return aRankNet
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
# [0.5566, 0, -0.0842, -0.0170], \
# [0.6448, -0.0842, 0, 0.8405], \
# [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False, True, True, False],
# [ True, False, False, False],
# [ True, False, False, True],
# [False, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, False],
# [ True, False, False, True],
# [ True, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, True],
# [ True, False, False, True],
# [ True, True, True, False]])
def f_find_centers_rwMat(rw_mat, k):
'''
    % On the rw_mat matrix, find some nodes to act as the centroids for soft
    % clustering. Randomly picking some nodes as centroids would not work
    % well for fuzzy clusters.
% k is the number of centroids.
'''
ixs = []
# 1. find the most connected center node as the first centroid.
a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
# % most connected node.
ix = np.argmax(a)
ixs.append(ix)
# % 2. iteratively find the rest nodes
for i in range(1, k):
tmp = rw_mat[:, ixs]
b = np.sum(tmp, axis=1)
b[ixs] = np.inf
# % find the farthest node
ix = np.argmin(b)
ixs.append(ix)
return ixs
# test
#tmp = f_find_centers_rwMat(rw_mat, 10)
def getCutoff(rw_mat, avgNeighborsSize):
tmp = rw_mat.flatten('F')
a = np.flip(np.sort(tmp), 0)
len1 = len(rw_mat)
#cutoffs = []
all_neibs = int( avgNeighborsSize * len1 )
print( all_neibs)
ct = a[all_neibs]
return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''
def f_len_of_each_ele(c1):
#% Assume c1 is a 1-dimension cell array, and each element is a 1d double
#% array. This function counts the length of each double array.
lens = np.zeros(len(c1))
for i in range(0, len(c1)):
lens[i] = len(c1[i])
return lens
def f_eu_dist(X):
'''
calculate the euclidean distance between instances
'''
sim = np.zeros(( len(X), len(X) ))
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
np.fill_diagonal(sim, 0)
return sim
#test
#sim = f_eu_dist(X)
def f_eu_dist2(X1, X2):
'''
calculate the euclidean distance between instances from two datasets
'''
sim = np.zeros(( len(X1), len(X2) ))
for i in range(0, len(X1) ):
for j in range(0, len(X2) ):
tmp = distance.euclidean(X1[i], X2[j])
sim[i][j] = tmp
sim = -sim
return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
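# Editorial note: the nested Python loops above are O(n1*n2) interpreted calls.
# An equivalent vectorised form (kept here only as a sketch, not used by the
# code below) would be, using the already-imported scipy.spatial.distance:
# sim = -distance.cdist(X1, X2, metric='euclidean')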
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
# X: data
# k: number of clusters
'''
The return variable clus stores the instance indices for each cluster.
    However, this data structure does not make it easy to look up, for a given
    instance, which clusters it belongs to, thus we also need to convert clus
    to a true-false matrix.
'''
    if each_clus_sz is None:
# on average, how many clusters does one inst belongs to.
#overlap_factor = 2;
# the estimated size of each cluster. default is half the number of
# instances.
each_clus_sz=len(X)/3
print('RWR-based fuzzy clustering starts...')
print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )
# sim = squareform(pdist(X));
# sim = -sim;
sim = np.zeros((len(X), len(X) ) )
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
print(' done calculating the Euclidean distance matrix')
# ---------------------------------------------------------------
aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
print(' done calculating the A-rank KNN graph')
# % -------- RWR --------
nSteps = 1000
lazi = 0.3
rw = RWR(ori_graph, nSteps, lazi)
# remove probability of returning start node
np.fill_diagonal(rw, 0)
rw_mat = rw
print(' done RWR')
# ---------------------------------------------------------------
ixs_centers = f_find_centers_rwMat(rw_mat, k)
ct = getCutoff(rw_mat, each_clus_sz)
rw_net = rw_mat > ct
    # % set the diagonal to 1
np.fill_diagonal(rw_net, True)
clus = []
for i in range(0, k):
tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
clus.append(tmp)
# ---------------------------------------------------------------
# % sort the clusters
lens = f_len_of_each_ele(clus)
ix = np.argsort(lens)[::-1]
clus_ordered = [clus[i] for i in ix]
print(' center inst. index of each cluster: ')
ixs_centers = np.array(ixs_centers)
print(ixs_centers[ix])
print(' size of each cluster: ')
print(lens[ix])
print(' done RWR clustering')
return clus_ordered
#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass
def f_clus_to_tfs(clus, n_inst):
    #% Convert the cluster information from a cell array to a matrix. But for
    #% each instance the cluster-rank information will be lost - you won't know
    #% which is the top 1/2/3 cluster it belongs to.
#%
#% clus e.g:
#% 1x5 cell
#% 1x195 double 1x193 double 1x169 double 1x161 double 1x62 double
#%
#% tfs e.g:
#% 295x5 double
#% 1 0 0 0 0
#% 1 1 1 1 0
#% 1 1 1 0 0
#% 1 1 0 0 0
#% 1 1 1 1 0
#% ...
#% 1 1 1 1 1
#% 1 0 0 0 0
#% 1 1 1 0 0
tfs = np.zeros((n_inst, len(clus)), dtype=bool)
for i in range(0, len(clus)):
tfs[clus[i], i] = True
return tfs
# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass
def f_tfs_2_instClus(tfs):
'''
    Convert the boolean table representation of the clustering result into,
    for each instance, the list of clusters it belongs to.
'''
inst_clus = []
for i in range(0, len(tfs)):
row = list( np.where(tfs[i, :] ) [0] )
inst_clus.append(row)
return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)
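# Worked example (illustrative values only):
# tfs = np.array([[True, False, True],
#                 [False, True, False]])
# f_tfs_2_instClus(tfs) -> [[0, 2], [1]]
# i.e. instance 0 belongs to clusters 0 and 2, instance 1 to cluster 1.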
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''
#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
# '''
# corresponds to f_weka_bg_svm_tr_te() in Matlab version
# '''
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
def f_tr(X_tr, y_tr, model):
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
return model_inner
def f_te(X_te, model):
y_pred = model.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
return y_pred
def f_tr_te(X_tr, y_tr, X_te, model):
'''
corresponds to f_weka_bg_svm_tr_te() in Matlab version
'''
#bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
y_pred = model_inner.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
#auc = roc_auc_score(y_te.flatten(), y_pred)
return y_pred
def f_k_fo(X, y, model, k_fold=10):
'''
corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
'''
y = y.flatten()
y_pred = np.zeros(y.size)
skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
skf.get_n_splits(X, y)
for train_index, test_index in skf.split(X, y):
#print("TRAIN: ", train_index, " TEST: ", test_index)
X_tr, X_te = X[train_index], X[test_index]
#y_tr, y_te = y[train_index], y[test_index]
y_tr = y[train_index]
if np.unique(y_tr).size == 1:
y_pred_fo = np.zeros( len(test_index) )
#print len(X_te)
#print len(test_index)
#print y_pred_fo
y_pred_fo.fill(np.unique(y_tr)[0] )
#print y_pred_fo
else:
y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
y_pred[test_index] = y_pred_fo
#auc = roc_auc_score(y.flatten(), y_pred)
return y_pred
# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.
def f_quantileNorm(templete, target):
'''
    templete is the reference distribution; the values of target are replaced
    by the corresponding (rank-matched) values from templete.
    Target may have a very different range than the templete.
    templete and target should be 1d arrays of the same length.
    Corresponds to f_my_quantileNorm() in the Matlab version.
'''
ix_target = np.argsort(target, kind='mergesort')
ix_templete = np.argsort(templete, kind='mergesort')
target[ix_target] = templete[ix_templete]
new = target
return new
# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
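# Worked example (illustrative values only):
# templete = np.array([0.1, 0.5, 0.9]); target = np.array([30., 10., 20.])
# f_quantileNorm(templete, target) -> array([0.9, 0.1, 0.5])
# The target keeps its ordering but adopts the templete's value distribution.
# Note that `target` is modified in place and also returned.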
#def f_bg_k_fo_3(X, y, k_fold=10):
# '''
# corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
# corresponds to f_k_fo()
# '''
# y_pred = np.zeros((y.size, 1))
#
# skf = StratifiedKFold(n_splits=k_fold)
# skf.get_n_splits(X, y)
#
# for train_index, test_index in skf.split(X, y):
# #print("TRAIN:", train_index, "TEST:", test_index)
# X_tr, X_te = X[train_index], X[test_index]
# y_tr, y_te = y[train_index], y[test_index]
def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
'''
    % Use each cluster's data to predict all instances, while self
    % prediction uses fo_inner-fold CV.
corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
'''
n_clusters = len(clus)
y_pred_multi = np.zeros((y.size, n_clusters) )
models = []
for j in range(0, n_clusters):
# for each cluster
Xj = X[clus[j].flatten(), :]
yj = y[clus[j].flatten() ]
model_a_clust = copy.deepcopy(model)
print(' Cluster '+str(j)+' started...')
#if len(yj) > 10:
if len(yj) > 15 and np.unique(yj).size != 1:
# ------------------ for self ------------------
#if np.unique(yj).size == 1:
# y_pred = np.zeros(yj.size)
# y_pred.fill(np.unique(yj)[0])
#else:
try:
y_pred = f_k_fo(Xj, yj, model, fo_inner)
# quantileNorm
templete = y_pred_whole[clus[j].flatten()]
target = y_pred
y_pred = f_quantileNorm(templete, target)
# copy the normed prediction to the whole data.
y_pred_multi[clus[j].flatten(), j] = y_pred
print(' c-'+str(j)+' done predicting local instances')
# ------------------ for other -----------------
ix_other = set(range(0, y.size)) - set(clus[j].flatten())
ix_other = list(ix_other)
#print ix_other
X_other = X[ix_other , :]
#y_other = y[ix_other ]
# predict
#y_pred = f_tr_te(Xj, yj, X_other, model)
#if np.unique(yj).size != 1:
model_a_clust.fit(Xj, yj)
y_pred = model_a_clust.predict_proba(X_other)
y_pred = y_pred[:, 1].flatten()
# quantileNorm
templete = y_pred_whole[ix_other]
target = y_pred
y_pred = f_quantileNorm(templete, target)
#else:
# y_pred = np.zeros(X_other.size)
# y_pred.fill(np.unique(yj)[0])
# copy to the whole array
y_pred_multi[ix_other, j] = y_pred
print(' c-'+str(j)+' done predicting remote instances')
except ValueError as e:
print(e)
print(' skip this cluster')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
else:
if len(yj) <= 15:
print (' '+str(len(yj))+' insts in cluster, <= 15, skip...')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
if np.unique(yj).size == 1:
print (' warning, #unique class label(s) == 1')
y_pred = np.zeros(y.size)
y_pred.fill(np.unique(yj)[0])
y_pred_multi[:, j] = y_pred
model_a_clust = np.unique(yj)[0]
models.append(model_a_clust)
return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)
#def f_dec_tab_4_bg_svm(X, y, clus):
# '''
# Calculate the decision table
# % This version changed from the cluster-cluster dec_mat to instance-cluster
# % dec_mat. This solution will avoid the case that if one cluster decision
# % is wrong leading entrie cluster prediction is wrong, which is the reason
# % of instability. However, we cannot use a systematic evaluation criteria
# % such as AUC, I will try using the predicted prob at first.
#
# % This version 3 adds the support for fuzzy clustering - one instance may
# % belongs to more than one cluster.
# % This updated version also outputs the predicted values of y.
# % support more than 3 clusters
# % normalization take place in y_pred_self and y_pred_other, thus do not
# % need normalization when predict y_pred_ICE.
# % ixsp is another cluster form.
#
# corresponds to f_dec_tab_4_bg_svm() in Matlab version
# '''
# #n_clusters = len(clus)
# ## dec_mat stores the prediction error.
# #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# #
# ## k_fold of inner cross-validation
# #fo_inner = 10
# # --------------------------- WHOLE -------------------------
#
# # --------------------------- SELF -------------------------
def f_err_mat(X, y, clus, model):
'''
Calculate the decision table
corresponds to f_dec_tab_4_bg_svm() in Matlab version
'''
n_clusters = len(clus)
# err_mat stores the prediction error.
pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# col 0 to col n_clusters-1 store the predictions by each cluster
# the last col stores the pred by whole data
#models = []
# k_fold of inner cross-validation
fo_inner = 5
# --------------------------- WHOLE -------------------------
    # Predict all instances using the whole data (via k-fold CV).
model_whole = copy.deepcopy(model)
y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
model_whole.fit(X, y) # fit a model using all data rather than only a fold
pred_prob_mat[:, n_clusters] = y_pred_whole
print (' Done evaluation using whole instances')
print (' Start to evaluate each cluster ')
# --------------------------- SELF -------------------------
    # predict all instances using each cluster's data, while self
    # prediction uses fo_inner-fold CV.
[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
y_pred_whole, model, fo_inner)
print (' Done evaluation using each cluster')
models.append(model_whole)
pred_prob_mat[:, 0:n_clusters] = y_pred_multi
# make a tmp array a stores y
tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
err_mat = abs(pred_prob_mat - tmp )
print (' Done calculating error table and fitting ICE models')
return [err_mat, models]
"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
# '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""
def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
'''
Convert the err table to decision table.
'''
dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)
# dec_ixs: for each instance, which clusters should be used.
dec_ixs = []
inst_clus = f_tfs_2_instClus(tfs)
for i in range(0, len(err_mat)):
# Matlab code:
#dec_row = dec_mat(cur_nb_ix, :);
#dec_row(:, end ) = dec_row(:, end ) - adv_whole;
#dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
row = np.copy( err_mat[i, :] )
#print row
row[-1] = row[-1] - adv_whole
inst_i_clus = inst_clus[i]
if len(inst_i_clus) > 0:
row[inst_i_clus] = row[inst_i_clus] - adv_self
#print row
ix_good_clus = list( np.where( row < row[-1] ) [0] )
#print ix_good_clus
if len(ix_good_clus) > 0:
dec_mat[i, ix_good_clus] = True
dec_ixs.append(ix_good_clus)
else:
dec_ixs.append([])
return [dec_mat, dec_ixs]
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
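# Worked example (illustrative numbers): suppose err_mat[i, :] = [0.2, 0.9, 0.6]
# (clusters 0 and 1, then the whole-data model) and instance i belongs only to
# cluster 0. With adv_whole=0.4 and adv_self=0.5 the adjusted row becomes
# [0.2-0.5, 0.9, 0.6-0.4] = [-0.3, 0.9, 0.2], so only cluster 0 beats the
# whole-data model and dec_ixs[i] == [0].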
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
'''
Use the training data to predict the testing data.
Use whole training data to predict
Use each cluster of training data to predict the testing data.
'''
y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
# the first col is the prediction using the whole data
model_whole = models[-1]
y_pred_all[:, 0] = f_te(X_te, model_whole)
#y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
#print 'whole model good '
# start from the second col, the result is by each cluster
for i in range(0, len(clus)):
#Xi = X_tr[clus[i].flatten(), :]
#yi = y_tr[clus[i].flatten() ]
model_i = models[i]
#model_a_clust = copy.deepcopy(model)
try:
y_pred_te = f_te(X_te, model_i)
except :
if model_i == 0:
y_pred_te = np.zeros(len(X_te))
elif model_i == 1:
y_pred_te = np.ones(len(X_te))
else:
y_pred_te = np.zeros(len(X_te))
y_pred_te.fill(np.nan)
#except NotFittedError as e:
# print(repr(e))
# y_pred_te = np.zeros(len(X_te))
# y_pred_te.fill(np.nan)
#print 'model '+str(i)+' good '
#y_pred_te = f_tr_te(Xi, yi, X_te, model)
if doNorm == True:
templete = y_pred_all[:, 0]
target = y_pred_te
y_pred = f_quantileNorm(templete, target)
else:
y_pred = y_pred_te
y_pred_all[:, i+1] = y_pred
return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)
def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
    # train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating decision table')
return [clus, models, dec_ixs]
#def_deal_miss_v_1(d):
'''
deal with missing values by replacing them by mean.
'''
def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
    This version uses the err_mat for re-clustering.
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
    # train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# ******************** re-clustering ********************
n_iter = 2
for i in range(0, n_iter):
clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
tfs = f_clus_to_tfs(clus, len(X_tr))
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# *******************************************************
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print (' Done calculating decision table')
return [clus, models, dec_ixs]
def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
'''
    clus and inst_clus contain the same information: clus holds the instance
    ids for each cluster, while inst_clus stores, for each instance, which
    cluster(s) it belongs to.
    dec_ixs stores the good cluster(s) for each instance, which may include
    even a remote cluster. The clusters in dec_ixs are partial models and do
    not contain the whole set of instances.
'''
# the first col is the prediction using the whole data
# start from the second col, the result is by each cluster
y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
y_pred_ICE = np.zeros( len(X_te) )
neighbour_mat = f_eu_dist2(X_tr, X_te)
# ---------- for each testing instance ----------
#n_partials = np.zeros( len(X_te) )
#n_wholes = np.zeros( len(X_te) )
for j in range(0, len(X_te) ):
# for each testing instance
# find the top 10 neighbors for each test instance
neighbour_col = neighbour_mat[:, j].flatten()
ix = np.argsort(neighbour_col )
ix = ix[::-1]
ix_top_neighbors = ix[0:N]
#print 'testing inst ' + str(j)
#print ' ix of top neighbors:'
#print ix_top_neighbors
# ---------- find all neighbors' picks ----------
clus_ids_to_use = []
nei_labels = []
for cur_nb in range(0, N):
# for each neighbour
# find each neighbour's pick
cur_nb_ix = ix_top_neighbors[cur_nb]
clus_id_to_use = list( dec_ixs[cur_nb_ix] )
clus_ids_to_use = clus_ids_to_use + clus_id_to_use
# also find neighbor's label. maybe will be used later as KNN pred
# instead of using whole to pred.
nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
#print ' clus_ids_to_use:'
#print clus_ids_to_use
# cluster id + 1 to make the ix fit the col id in y_pred_all
a = clus_ids_to_use
a = list( np.array(a) + 1 )
clus_ids_to_use = a
# number of partial models used
n_partial = len(clus_ids_to_use)
# number of whole models used, based on parameters alpha, beta and N.
n_whole = int( round( alpha*n_partial + beta*N ) )
clus_ids_to_use = clus_ids_to_use + [0] * n_whole
#print ' clus_ids_to_use:'
#print clus_ids_to_use
#print nei_labels
y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])
print ('Done predicting testing instances.')
return y_pred_ICE
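# Sizing note (editorial): with the defaults N=5, alpha=1, beta=1, if the N
# nearest training neighbours contribute e.g. 3 cluster ids in total, then
# n_partial = 3 and n_whole = round(1*3 + 1*5) = 8, so the prediction is the
# nanmean over those 3 cluster-model columns plus the whole-data column
# counted 8 times.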
# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []
# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')
#for j in range(1, 50):
for j in range(1, 49):
try:
X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
#imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
#plt.show()
#sim = np.corrcoef(X)
#np.fill_diagonal(sim, 0)
#n_clus = 100
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
model = svm.SVC(kernel='linear', probability = True)
skf = StratifiedKFold(n_splits=k_fold)
skf.get_n_splits(X, y)
y_preds_ICE = np.zeros( y.size )
y_preds_whole = np.zeros( y.size )
fold_i = 1
for train_index, test_index in skf.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
X_tr, X_te = X[train_index], X[test_index]
y_tr, y_te = y[train_index], y[test_index]
[clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
#[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)
y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
y_preds_ICE[test_index] = y_pred_ICE
y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
y_preds_whole[test_index] = y_pred_whole
print( j)
print( 'fold ' + str(fold_i) + ' finished')
fold_i = fold_i + 1
auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
print (auc_ICE, auc_whole)
aucs_ICE.append(auc_ICE)
aucs_whole.append(auc_whole)
f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n')
except:
continue
| [((140, 4, 140, 28), 'numpy.fill_diagonal', 'np.fill_diagonal', ({(140, 21, 140, 24): 'sim', (140, 26, 140, 27): '(0)'}, {}), '(sim, 0)', True, 'import numpy as np\n'), ((154, 15, 154, 50), 'numpy.logical_or', 'np.logical_or', ({(154, 29, 154, 37): 'aRankNet', (154, 39, 154, 49): 'aRankNet.T'}, {}), '(aRankNet, aRankNet.T)', True, 'import numpy as np\n'), ((158, 4, 158, 37), 'numpy.fill_diagonal', 'np.fill_diagonal', ({(158, 21, 158, 29): 'aRankNet', (158, 31, 158, 36): '(False)'}, {}), '(aRankNet, False)', True, 'import numpy as np\n'), ((198, 8, 198, 30), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((200, 9, 200, 21), 'numpy.argmax', 'np.argmax', ({(200, 19, 200, 20): 'a'}, {}), '(a)', True, 'import numpy as np\n'), ((264, 4, 264, 28), 'numpy.fill_diagonal', 'np.fill_diagonal', ({(264, 21, 264, 24): 'sim', (264, 26, 264, 27): '(0)'}, {}), '(sim, 0)', True, 'import numpy as np\n'), ((334, 4, 334, 27), 'numpy.fill_diagonal', 'np.fill_diagonal', ({(334, 21, 334, 23): 'rw', (334, 25, 334, 26): '(0)'}, {}), '(rw, 0)', True, 'import numpy as np\n'), ((344, 4, 344, 34), 'numpy.fill_diagonal', 'np.fill_diagonal', ({(344, 21, 344, 27): 'rw_net', (344, 29, 344, 33): '(True)'}, {}), '(rw_net, True)', True, 'import numpy as np\n'), ((359, 18, 359, 39), 'numpy.array', 'np.array', ({(359, 27, 359, 38): 'ixs_centers'}, {}), '(ixs_centers)', True, 'import numpy as np\n'), ((460, 18, 460, 38), 'copy.deepcopy', 'copy.deepcopy', ({(460, 32, 460, 37): 'model'}, {}), '(model)', False, 'import copy\n'), ((480, 18, 480, 38), 'copy.deepcopy', 'copy.deepcopy', ({(480, 32, 480, 37): 'model'}, {}), '(model)', False, 'import copy\n'), ((498, 13, 498, 29), 'numpy.zeros', 'np.zeros', ({(498, 22, 498, 28): 'y.size'}, {}), '(y.size)', True, 'import numpy as np\n'), ((500, 10, 500, 75), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (), '', False, 'from sklearn.model_selection import StratifiedKFold\n'), ((553, 16, 553, 52), 'numpy.argsort', 'np.argsort', (), '', True, 'import numpy as np\n'), ((554, 18, 554, 56), 'numpy.argsort', 'np.argsort', (), '', True, 'import numpy as np\n'), ((595, 19, 595, 50), 'numpy.zeros', 'np.zeros', ({(595, 28, 595, 48): '(y.size, n_clusters)'}, {}), '((y.size, n_clusters))', True, 'import numpy as np\n'), ((723, 18, 723, 50), 'numpy.zeros', 'np.zeros', ({(723, 27, 723, 49): '(y.size, n_clusters + 1)'}, {}), '((y.size, n_clusters + 1))', True, 'import numpy as np\n'), ((734, 18, 734, 38), 'copy.deepcopy', 'copy.deepcopy', ({(734, 32, 734, 37): 'model'}, {}), '(model)', False, 'import copy\n'), ((68, 13, 68, 22), 'numpy.eye', 'np.eye', ({(68, 20, 68, 21): 'n'}, {}), '(n)', True, 'import numpy as np\n'), ((143, 8, 143, 41), 'numpy.argsort', 'np.argsort', (), '', True, 'import numpy as np\n'), ((206, 12, 206, 31), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((210, 13, 210, 25), 'numpy.argmin', 'np.argmin', ({(210, 23, 210, 24): 'b'}, {}), '(b)', True, 'import numpy as np\n'), ((219, 16, 219, 28), 'numpy.sort', 'np.sort', ({(219, 24, 219, 27): 'tmp'}, {}), '(tmp)', True, 'import numpy as np\n'), ((354, 9, 354, 25), 'numpy.argsort', 'np.argsort', ({(354, 20, 354, 24): 'lens'}, {}), '(lens)', True, 'import numpy as np\n'), ((602, 24, 602, 44), 'copy.deepcopy', 'copy.deepcopy', ({(602, 38, 602, 43): 'model'}, {}), '(model)', False, 'import copy\n'), ((806, 14, 806, 38), 'numpy.copy', 'np.copy', ({(806, 23, 806, 36): 'err_mat[(i), :]'}, {}), '(err_mat[(i), :])', True, 'import numpy as np\n'), ((970, 13, 970, 39), 'numpy.argsort', 'np.argsort', 
({(970, 24, 970, 37): 'neighbour_col'}, {}), '(neighbour_col)', True, 'import numpy as np\n'), ((1010, 24, 1010, 66), 'numpy.nanmean', 'np.nanmean', ({(1010, 35, 1010, 65): 'y_pred_all[j, clus_ids_to_use]'}, {}), '(y_pred_all[j, clus_ids_to_use])', True, 'import numpy as np\n'), ((1060, 16, 1060, 60), 'sklearn.svm.SVC', 'svm.SVC', (), '', False, 'from sklearn import svm\n'), ((1062, 14, 1062, 46), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (), '', False, 'from sklearn.model_selection import StratifiedKFold\n'), ((1065, 22, 1065, 40), 'numpy.zeros', 'np.zeros', ({(1065, 32, 1065, 38): 'y.size'}, {}), '(y.size)', True, 'import numpy as np\n'), ((1066, 24, 1066, 42), 'numpy.zeros', 'np.zeros', ({(1066, 34, 1066, 40): 'y.size'}, {}), '(y.size)', True, 'import numpy as np\n'), ((260, 18, 260, 48), 'scipy.spatial.distance.euclidean', 'distance.euclidean', ({(260, 37, 260, 41): 'X[i]', (260, 43, 260, 47): 'X[j]'}, {}), '(X[i], X[j])', False, 'from scipy.spatial import distance\n'), ((280, 18, 280, 50), 'scipy.spatial.distance.euclidean', 'distance.euclidean', ({(280, 37, 280, 42): 'X1[i]', (280, 44, 280, 49): 'X2[j]'}, {}), '(X1[i], X2[j])', False, 'from scipy.spatial import distance\n'), ((317, 18, 317, 48), 'scipy.spatial.distance.euclidean', 'distance.euclidean', ({(317, 37, 317, 41): 'X[i]', (317, 43, 317, 47): 'X[j]'}, {}), '(X[i], X[j])', False, 'from scipy.spatial import distance\n'), ((102, 20, 102, 35), 'numpy.finfo', 'np.finfo', ({(102, 29, 102, 34): 'float'}, {}), '(float)', True, 'import numpy as np\n'), ((146, 10, 146, 41), 'numpy.argsort', 'np.argsort', (), '', True, 'import numpy as np\n'), ((348, 14, 348, 54), 'numpy.argwhere', 'np.argwhere', ({(348, 26, 348, 52): 'rw_net[:, (ixs_centers[i])]'}, {}), '(rw_net[:, (ixs_centers[i])])', True, 'import numpy as np\n'), ((410, 20, 410, 40), 'numpy.where', 'np.where', ({(410, 29, 410, 38): 'tfs[(i), :]'}, {}), '(tfs[(i), :])', True, 'import numpy as np\n'), ((509, 11, 509, 26), 'numpy.unique', 'np.unique', ({(509, 21, 509, 25): 'y_tr'}, {}), '(y_tr)', True, 'import numpy as np\n'), ((661, 25, 661, 41), 'numpy.zeros', 'np.zeros', ({(661, 34, 661, 40): 'y.size'}, {}), '(y.size)', True, 'import numpy as np\n'), ((667, 25, 667, 41), 'numpy.zeros', 'np.zeros', ({(667, 34, 667, 40): 'y.size'}, {}), '(y.size)', True, 'import numpy as np\n'), ((815, 29, 815, 54), 'numpy.where', 'np.where', ({(815, 39, 815, 52): 'row < row[-1]'}, {}), '(row < row[-1])', True, 'import numpy as np\n'), ((998, 18, 998, 29), 'numpy.array', 'np.array', ({(998, 27, 998, 28): 'a'}, {}), '(a)', True, 'import numpy as np\n'), ((514, 27, 514, 42), 'numpy.unique', 'np.unique', ({(514, 37, 514, 41): 'y_tr'}, {}), '(y_tr)', True, 'import numpy as np\n'), ((606, 30, 606, 43), 'numpy.unique', 'np.unique', ({(606, 40, 606, 42): 'yj'}, {}), '(yj)', True, 'import numpy as np\n'), ((654, 25, 654, 41), 'numpy.zeros', 'np.zeros', ({(654, 34, 654, 40): 'y.size'}, {}), '(y.size)', True, 'import numpy as np\n'), ((665, 15, 665, 28), 'numpy.unique', 'np.unique', ({(665, 25, 665, 27): 'yj'}, {}), '(yj)', True, 'import numpy as np\n'), ((671, 32, 671, 45), 'numpy.unique', 'np.unique', ({(671, 42, 671, 44): 'yj'}, {}), '(yj)', True, 'import numpy as np\n'), ((668, 28, 668, 41), 'numpy.unique', 'np.unique', ({(668, 38, 668, 40): 'yj'}, {}), '(yj)', True, 'import numpy as np\n'), ((90, 36, 90, 49), 'numpy.float64', 'np.float64', ({(90, 47, 90, 48): 'A'}, {}), '(A)', True, 'import numpy as np\n')] |
FireFox317/symbiflow-arch-defs | xc/common/utils/prjxray_routing_import.py | f0e7b4212544e1d55da776fb7a2ff79117e01454 | #!/usr/bin/env python3
""" Imports 7-series routing fabric to the rr graph.
For ROI configurations, this also connects the synthetic IO tiles to the routing
node specified.
Rough structure:
Add rr_nodes for CHANX and CHANY from the database. IPIN and OPIN rr_nodes
should already be present from the input rr_graph.
Create a mapping between database graph_nodes and IPIN, OPIN, CHANX and CHANY
rr_node ids in the rr_graph.
Add rr_edge for each row in the graph_edge table.
Import channel XML node from connection database and serialize output to
rr_graph XML.
"""
import argparse
import os.path
from hilbertcurve.hilbertcurve import HilbertCurve
import math
import prjxray.db
from prjxray.roi import Roi
import prjxray.grid as grid
from lib.rr_graph import graph2
from lib.rr_graph import tracks
from lib.connection_database import get_wire_pkey, get_track_model
import lib.rr_graph_capnp.graph2 as capnp_graph2
from prjxray_constant_site_pins import feature_when_routed
from prjxray_tile_import import remove_vpr_tile_prefix
import simplejson as json
from lib import progressbar_utils
import datetime
import re
import functools
import pickle
import sqlite3
now = datetime.datetime.now
HCLK_CK_BUFHCLK_REGEX = re.compile('HCLK_CK_BUFHCLK[0-9]+')
CLK_HROW_CK_MUX_REGEX = re.compile('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)')
CASCOUT_REGEX = re.compile('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)')
CONNECTION_BOX_FILTER = re.compile('([^0-9]+)[0-9]*')
BUFG_CLK_IN_REGEX = re.compile('CLK_HROW_CK_IN_[LR][0-9]+')
BUFG_CLK_OUT_REGEX = re.compile('CLK_HROW_R_CK_GCLK[0-9]+')
CCIO_ACTIVE_REGEX = re.compile('HCLK_CMT_CCIO[0-9]+')
HCLK_OUT = re.compile('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)')
IOI_OCLK = re.compile('IOI_OCLK_([01])')
# Regex for [LR]IOI_SING tiles
IOI_SITE_PIPS = ['OLOGIC', 'ILOGIC', 'IDELAY', 'OCLK_', 'OCLKM_']
IOI_SING_REGEX = re.compile(
r'([RL]IOI3_SING_X[0-9]+Y)([0-9]+)(\.IOI_)({})([01])(.*)'.format(
"|".join(IOI_SITE_PIPS)
)
)
def reduce_connection_box(box):
""" Reduce the number of connection boxes by merging some.
Examples:
>>> reduce_connection_box('IMUX0')
'IMUX'
>>> reduce_connection_box('IMUX1')
'IMUX'
>>> reduce_connection_box('IMUX10')
'IMUX'
>>> reduce_connection_box('BRAM_ADDR')
'IMUX'
>>> reduce_connection_box('A_L10')
'A'
>>> reduce_connection_box('B')
'B'
>>> reduce_connection_box('B_L')
'B'
"""
box = CONNECTION_BOX_FILTER.match(box).group(1)
if 'BRAM_ADDR' in box:
box = 'IMUX'
if box.endswith('_L'):
box = box.replace('_L', '')
return box
REBUF_NODES = {}
REBUF_SOURCES = {}
def get_clk_hrow_and_rebuf_tiles_sorted(cur):
"""
Finds all CLK_HROW_TOP_R, CLK_HROW_BOT_T and REBUF tiles.
returns them in a list sorted according to their Y coordinates.
"""
cur.execute(
"""
SELECT name
FROM phy_tile
WHERE
name LIKE "CLK_HROW_BOT_R_%"
OR
name LIKE "CLK_HROW_TOP_R_%"
OR
name LIKE "CLK_BUFG_REBUF_%"
ORDER BY grid_y DESC;
"""
)
return [t[0] for t in cur.fetchall()]
def populate_bufg_rebuf_map(conn):
global REBUF_NODES
REBUF_NODES = {}
global REBUF_SOURCES
REBUF_SOURCES = {}
rebuf_wire_regexp = re.compile(
'CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)'
)
cur = conn.cursor()
# Find CLK_HROW_TOP_R, CLK_HROW_TOP_R and REBUF tiles.
rebuf_and_hrow_tiles = get_clk_hrow_and_rebuf_tiles_sorted(cur)
# Append None on both ends of the list to simplify the code below.
rebuf_and_hrow_tiles = [None] + rebuf_and_hrow_tiles + [None]
def maybe_get_clk_hrow(i):
"""
Returns a name of CLK_HROW tile only if its there on the list.
"""
tile = rebuf_and_hrow_tiles[i]
if tile is not None and tile.startswith("CLK_HROW"):
return tile
return None
# Assign each REBUF tile its above and below CLK_HROW tile. Note that in
# VPR coords terms. "above" and "below" mean the opposite...
rebuf_to_hrow_map = {}
for i, tile_name in enumerate(rebuf_and_hrow_tiles):
if tile_name is not None and tile_name.startswith("CLK_BUFG_REBUF"):
rebuf_to_hrow_map[tile_name] = {
"above": maybe_get_clk_hrow(i - 1),
"below": maybe_get_clk_hrow(i + 1),
}
# Find nodes touching rebuf wires.
cur.execute(
"""
WITH
rebuf_wires(wire_in_tile_pkey) AS (
SELECT pkey
FROM wire_in_tile
WHERE
name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_BOT"
OR
name LIKE "CLK_BUFG_REBUF_R_CK_GCLK%_TOP"
),
rebuf_nodes(node_pkey) AS (
SELECT DISTINCT node_pkey
FROM wire
WHERE wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
)
SELECT rebuf_nodes.node_pkey, phy_tile.name, wire_in_tile.name
FROM rebuf_nodes
INNER JOIN wire ON wire.node_pkey = rebuf_nodes.node_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
WHERE wire.wire_in_tile_pkey IN (SELECT wire_in_tile_pkey FROM rebuf_wires)
ORDER BY rebuf_nodes.node_pkey;"""
)
for node_pkey, rebuf_tile, rebuf_wire_name in cur:
if node_pkey not in REBUF_NODES:
REBUF_NODES[node_pkey] = []
m = rebuf_wire_regexp.fullmatch(rebuf_wire_name)
if m.group(2) == 'TOP':
REBUF_NODES[node_pkey].append(
'{}.GCLK{}_ENABLE_BELOW'.format(rebuf_tile, m.group(1))
)
hrow_tile = rebuf_to_hrow_map[rebuf_tile]["below"]
if hrow_tile is not None:
REBUF_NODES[node_pkey].append(
"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
hrow_tile, m.group(1)
)
)
elif m.group(2) == 'BOT':
REBUF_NODES[node_pkey].append(
'{}.GCLK{}_ENABLE_ABOVE'.format(rebuf_tile, m.group(1))
)
hrow_tile = rebuf_to_hrow_map[rebuf_tile]["above"]
if hrow_tile is not None:
REBUF_NODES[node_pkey].append(
"{}.CLK_HROW_R_CK_GCLK{}_ACTIVE".format(
hrow_tile, m.group(1)
)
)
else:
assert False, (rebuf_tile, rebuf_wire_name)
for node_pkey in REBUF_NODES:
cur.execute(
"""
SELECT phy_tile.name, wire_in_tile.name
FROM wire
INNER JOIN phy_tile ON phy_tile.pkey = wire.phy_tile_pkey
INNER JOIN wire_in_tile ON wire_in_tile.pkey = wire.wire_in_tile_pkey
WHERE wire.node_pkey = ?;""", (node_pkey, )
)
for tile, wire_name in cur:
REBUF_SOURCES[(tile, wire_name)] = node_pkey
HCLK_CMT_TILES = {}
def populate_hclk_cmt_tiles(db):
global HCLK_CMT_TILES
HCLK_CMT_TILES = {}
grid = db.grid()
_, x_max, _, _ = grid.dims()
for tile in grid.tiles():
gridinfo = grid.gridinfo_at_tilename(tile)
if gridinfo.tile_type not in ['CLK_HROW_BOT_R', 'CLK_HROW_TOP_R']:
continue
hclk_x, hclk_y = grid.loc_of_tilename(tile)
hclk_cmt_x = hclk_x
hclk_cmt_y = hclk_y
while hclk_cmt_x > 0:
hclk_cmt_x -= 1
gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
if gridinfo.tile_type == 'HCLK_CMT':
HCLK_CMT_TILES[tile, 'L'] = grid.tilename_at_loc(
(hclk_cmt_x, hclk_cmt_y)
)
break
hclk_cmt_x = hclk_x
while hclk_cmt_x < x_max:
hclk_cmt_x += 1
gridinfo = grid.gridinfo_at_loc((hclk_cmt_x, hclk_cmt_y))
if gridinfo.tile_type == 'HCLK_CMT_L':
HCLK_CMT_TILES[tile, 'R'] = grid.tilename_at_loc(
(hclk_cmt_x, hclk_cmt_y)
)
break
def find_hclk_cmt_hclk_feature(hclk_tile, lr, hclk_number):
if (hclk_tile, lr) not in HCLK_CMT_TILES:
return []
hclk_cmt_tile = HCLK_CMT_TILES[(hclk_tile, lr)]
return ['{}.HCLK_CMT_CK_BUFHCLK{}_USED'.format(hclk_cmt_tile, hclk_number)]
def check_feature(feature):
""" Check if enabling this feature requires other features to be enabled.
Some pips imply other features. Example:
.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10
implies:
.ENABLE_BUFFER.HCLK_CK_BUFHCLK10
"""
# IOI_SING tiles have bits in common with the IOI tiles.
#
# The difference is that the TOP IOI_SING tile shares bits with
# the bottom half of a normal IOI tile, while the BOTTOM IOI_SING
    # shares bits with the top half of a normal IOI tile.
#
    # The following is to change the edge feature to accommodate this
# need, as the IOI_SING tiles have the same wire, and pip names
# despite they are found on the TOP or BOTTOM of an IOI column
m = IOI_SING_REGEX.fullmatch(feature)
if m:
# Each clock region spans a total of 50 IOBs.
        # The IOI_SING tiles are found at the top or bottom of the whole
        # IOI/IOB column. The Y coordinate identified by the
        # second capture group is divided by 50 to get the relative
        # position of the IOI_SING within the clock region column
is_bottom_sing = int(m.group(2)) % 50 == 0
# This is the value to attach to the source pip name that
# changes based on which IOI_SING is selected (top or bottom)
#
# Example: IOI_OLOGIC0_D1.IOI_IMUX34_0 -> IOI_OLOGIC0_D1.IOI_IMUX34_1
src_value = '1' if is_bottom_sing else '0'
# This is the value to attach to the IOI_SITE_PIPS names
# in the destination wire of the pip
#
# Example: IOI_OLOGIC0 -> IOI_OLOGIC1
dst_value = '0' if is_bottom_sing else '1'
unchanged_feature = "{}{}{}{}".format(
m.group(1), m.group(2), m.group(3), m.group(4)
)
src_wire = m.group(6).replace('_SING', '')
for pip in ['IMUX', 'LOGIC_OUTS', 'CTRL', 'FAN', 'BYP']:
if pip in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(src_value))
if 'IOI_OCLK' in src_wire:
src_wire = src_wire.replace('_0', '_{}'.format(dst_value))
changed_feature = "{}{}".format(dst_value, src_wire)
feature = "{}{}".format(unchanged_feature, changed_feature)
feature_path = feature.split('.')
# IOB_DIFFO_OUT0->IOB_DIFFO_IN1
#
# When this PIP is active the IOB operates in the differential output mode.
    # There is no feature associated with that PIP in the prjxray db but there
# is a tile-wide feature named "DIFF_OUT".
#
# The "DIFF_OUT" cannot be set in the architecture as it is defined one
    # level up in the hierarchy (it's tile-wide, not site-wide). So here we
# map the PIP's feature to "DIFF_OUT"
if feature_path[2] == "IOB_DIFFO_OUT0" and \
feature_path[1] == "IOB_DIFFO_IN1":
return '{}.OUT_DIFF'.format(feature_path[0])
# IOB_PADOUT0->IOB_DIFFI_IN1
# IOB_PADOUT1->IOB_DIFFI_IN0
#
# These connections are hard wires that connect IOB33M and IOB33S sites.
# They are used in differential input mode.
#
# Vivado does not report this connection as a PIP but in the prjxray db it
# is a pip. Instead of making it a pseudo-pip we simply reject fasm
# features here.
if feature_path[2] == "IOB_PADOUT0" and feature_path[1] == "IOB_DIFFI_IN1":
return ''
if feature_path[2] == "IOB_PADOUT1" and feature_path[1] == "IOB_DIFFI_IN0":
return ''
# REBUF stuff
rebuf_key = (feature_path[0], feature_path[1])
if rebuf_key in REBUF_SOURCES:
return ' '.join([feature] + REBUF_NODES[REBUF_SOURCES[rebuf_key]])
m = IOI_OCLK.fullmatch(feature_path[1])
if m:
enable_oclkm_feature = '{}.IOI_OCLKM_{}.{}'.format(
feature_path[0], m.group(1), feature_path[-1]
)
return ' '.join((feature, enable_oclkm_feature))
if HCLK_CK_BUFHCLK_REGEX.fullmatch(feature_path[-1]):
enable_buffer_feature = '{}.ENABLE_BUFFER.{}'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_buffer_feature))
    # BUFHCE sites are now routed through, without the need of placing them; therefore,
    # when the corresponding pip is traversed, the correct fasm features need to be added.
# The relevant features are:
# - IN_USE: to enable the BUFHCE site
# - ZINV_CE: to disable the inverter on CE input which is connected to VCC.
# This sets the CE signal to constant 1
m = CLK_HROW_CK_MUX_REGEX.fullmatch(feature_path[-1])
if m:
x_loc_str = m.group(1)
if 'L' in x_loc_str:
x_loc = 0
elif 'R' in x_loc_str:
x_loc = 1
else:
assert False, "Impossible to determine X location of BUFHCE"
y_loc = m.group(2)
bufhce_loc = 'BUFHCE_X{}Y{}'.format(x_loc, y_loc)
enable_bufhce_in_use = '{}.BUFHCE.{}.IN_USE'.format(
feature_path[0], bufhce_loc
)
enable_bufhce_zinv_ce = '{}.BUFHCE.{}.ZINV_CE=1\'b1'.format(
feature_path[0], bufhce_loc
)
return ' '.join((feature, enable_bufhce_in_use, enable_bufhce_zinv_ce))
if BUFG_CLK_IN_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if BUFG_CLK_OUT_REGEX.fullmatch(feature_path[-1]):
enable_feature = '{}.{}_ACTIVE'.format(
feature_path[0], feature_path[-1]
)
return ' '.join((feature, enable_feature))
if CCIO_ACTIVE_REGEX.fullmatch(feature_path[-1]):
features = [feature]
features.append(
'{}.{}_ACTIVE'.format(feature_path[0], feature_path[-1])
)
features.append('{}.{}_USED'.format(feature_path[0], feature_path[-1]))
return ' '.join(features)
m = HCLK_OUT.fullmatch(feature_path[-1])
if m:
return ' '.join(
[feature] + find_hclk_cmt_hclk_feature(
feature_path[0], m.group(1), m.group(2)
)
)
m = CASCOUT_REGEX.fullmatch(feature_path[-2])
if m:
enable_cascout = '{}.CASCOUT_{}_ACTIVE'.format(
feature_path[0], m.group(1)
)
return ' '.join((feature, enable_cascout))
parts = feature.split('.')
wire_feature = feature_when_routed(parts[1])
if wire_feature is not None:
return '{} {}.{}'.format(feature, parts[0], wire_feature)
return feature
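# Illustrative trace (the tile name is hypothetical, and the wire is assumed
# not to be a REBUF source): for a feature such as
# "HCLK_R_X1Y2.HCLK_LEAF_CLK_B_BOTL0.HCLK_CK_BUFHCLK10" the last path element
# matches HCLK_CK_BUFHCLK_REGEX, so check_feature() returns the original
# feature plus "HCLK_R_X1Y2.ENABLE_BUFFER.HCLK_CK_BUFHCLK10", space separated.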
# CLBLL_L.CLBLL_LL_A1[0] -> (CLBLL_L, CLBLL_LL_A1)
PIN_NAME_TO_PARTS = re.compile(r'^([^\.]+)\.([^\]]+)\[0\]$')
def set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
):
""" Assign a connection box to an IPIN node. """
node_dict = graph.nodes[node_idx]._asdict()
node_dict['connection_box'] = graph2.ConnectionBox(
x=grid_x,
y=grid_y,
id=box_id,
site_pin_delay=site_pin_delay,
)
graph.nodes[node_idx] = graph2.Node(**node_dict)
def update_connection_box(
conn, graph, graph_node_pkey, node_idx, connection_box_map
):
""" Update connection box of IPIN node if needed. """
cur = conn.cursor()
cur.execute(
"""
SELECT connection_box_wire_pkey
FROM graph_node WHERE pkey = ?""", (graph_node_pkey, )
)
connection_box_wire_pkey = cur.fetchone()[0]
if connection_box_wire_pkey is not None:
cur.execute(
"""
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
SELECT phy_tile_pkey FROM wire WHERE pkey = ?
)""", (connection_box_wire_pkey, )
)
grid_x, grid_y = cur.fetchone()
cur.execute(
"SELECT wire_in_tile_pkey FROM wire WHERE pkey = ?",
(connection_box_wire_pkey, )
)
wire_in_tile_pkey = cur.fetchone()[0]
box_id = connection_box_map[wire_in_tile_pkey]
cur.execute(
"""
SELECT switch.intrinsic_delay
FROM switch
WHERE pkey = (
SELECT site_pin_switch_pkey
FROM wire_in_tile
WHERE pkey = (
SELECT wire_in_tile_pkey
FROM wire
WHERE pkey = (
SELECT site_wire_pkey
FROM node
WHERE pkey = (
SELECT node_pkey
FROM graph_node
WHERE pkey = ?
)
)
)
)""", (graph_node_pkey, )
)
site_pin_delay = cur.fetchone()[0]
set_connection_box(
graph, node_idx, grid_x, grid_y, box_id, site_pin_delay
)
def create_get_tile_and_site_as_tile_pkey(cur):
tiles = {}
for tile_pkey, site_as_tile_pkey, grid_x, grid_y in cur.execute("""
SELECT pkey, site_as_tile_pkey, grid_x, grid_y FROM tile;"""):
tiles[(grid_x, grid_y)] = (tile_pkey, site_as_tile_pkey)
def get_tile_and_site_as_tile_pkey(x, y):
return tiles[(x, y)]
return get_tile_and_site_as_tile_pkey
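# Typical usage (as done later in import_graph_nodes): build the lookup once
# per cursor, then resolve VPR grid coordinates to database pkeys:
# get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur)
# tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey(x, y)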
def create_get_site_as_tile_wire(cur):
@functools.lru_cache(maxsize=0)
def get_site_from_site_as_tile(site_as_tile_pkey):
cur.execute(
"""
SELECT site.site_type_pkey, site_as_tile.site_pkey
FROM site_as_tile
INNER JOIN site ON site.pkey = site_as_tile.site_pkey
WHERE site_as_tile.pkey = ?""", (site_as_tile_pkey, )
)
results = cur.fetchall()
assert len(results) == 1, site_as_tile_pkey
return results[0]
@functools.lru_cache(maxsize=0)
def get_site_as_tile_wire(site_as_tile_pkey, pin):
site_type_pkey, site_pkey = get_site_from_site_as_tile(
site_as_tile_pkey
)
cur.execute(
"""
SELECT
pkey
FROM
wire_in_tile
WHERE
site_pin_pkey = (
SELECT
pkey
FROM
site_pin
WHERE
site_type_pkey = ?
AND name = ?
)
AND
site_pkey = ?
;""", (site_type_pkey, pin, site_pkey)
)
results = cur.fetchall()
assert len(results) == 1
wire_in_tile_pkey = results[0][0]
return wire_in_tile_pkey
return get_site_as_tile_wire
def import_graph_nodes(conn, graph, node_mapping, connection_box_map):
cur = conn.cursor()
get_tile_and_site_as_tile_pkey = create_get_tile_and_site_as_tile_pkey(cur)
get_site_as_tile_wire = create_get_site_as_tile_wire(cur)
for node_idx, node in enumerate(graph.nodes):
if node.type not in (graph2.NodeType.IPIN, graph2.NodeType.OPIN):
continue
gridloc = graph.loc_map[(node.loc.x_low, node.loc.y_low)]
pin_name = graph.pin_ptc_to_name_map[
(gridloc.block_type_id, node.loc.ptc)]
# Synthetic blocks are handled below.
if pin_name.startswith('SYN-'):
set_connection_box(
graph,
node_idx,
node.loc.x_low,
node.loc.y_low,
box_id=graph.maybe_add_connection_box('IMUX'),
site_pin_delay=0.,
)
continue
m = PIN_NAME_TO_PARTS.match(pin_name)
assert m is not None, pin_name
tile_type = m.group(1)
tile_type = remove_vpr_tile_prefix(tile_type)
pin = m.group(2)
tile_pkey, site_as_tile_pkey = get_tile_and_site_as_tile_pkey(
node.loc.x_low, node.loc.y_low
)
if site_as_tile_pkey is not None:
wire_in_tile_pkey = get_site_as_tile_wire(site_as_tile_pkey, pin)
else:
cur.execute(
"""
SELECT
pkey
FROM
wire_in_tile
WHERE
name = ?
AND
phy_tile_type_pkey IN (
SELECT tile_type_pkey FROM phy_tile WHERE pkey IN (
SELECT phy_tile_pkey FROM tile_map WHERE tile_pkey = ?
)
);""", (pin, tile_pkey)
)
results = cur.fetchall()
assert len(results) == 1
wire_in_tile_pkey = results[0][0]
tile_pkey, _ = get_tile_and_site_as_tile_pkey(gridloc[0], gridloc[1])
cur.execute(
"""
SELECT
top_graph_node_pkey, bottom_graph_node_pkey,
left_graph_node_pkey, right_graph_node_pkey FROM wire
WHERE
wire_in_tile_pkey = ? AND tile_pkey = ?;""",
(wire_in_tile_pkey, tile_pkey)
)
result = cur.fetchone()
assert result is not None, (wire_in_tile_pkey, tile_pkey)
(
top_graph_node_pkey, bottom_graph_node_pkey, left_graph_node_pkey,
right_graph_node_pkey
) = result
side = node.loc.side
if side == tracks.Direction.LEFT:
assert left_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[left_graph_node_pkey] = node.id
update_connection_box(
conn, graph, left_graph_node_pkey, node_idx, connection_box_map
)
elif side == tracks.Direction.RIGHT:
assert right_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[right_graph_node_pkey] = node.id
update_connection_box(
conn, graph, right_graph_node_pkey, node_idx,
connection_box_map
)
elif side == tracks.Direction.TOP:
assert top_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[top_graph_node_pkey] = node.id
update_connection_box(
conn, graph, top_graph_node_pkey, node_idx, connection_box_map
)
elif side == tracks.Direction.BOTTOM:
assert bottom_graph_node_pkey is not None, (tile_type, pin_name)
node_mapping[bottom_graph_node_pkey] = node.id
update_connection_box(
conn, graph, bottom_graph_node_pkey, node_idx,
connection_box_map
)
else:
assert False, side
def import_tracks(conn, alive_tracks, node_mapping, graph, default_segment_id):
cur = conn.cursor()
cur2 = conn.cursor()
for (graph_node_pkey, track_pkey, graph_node_type, x_low, x_high, y_low,
y_high, ptc, capacitance,
resistance) in progressbar_utils.progressbar(cur.execute("""
SELECT
pkey,
track_pkey,
graph_node_type,
x_low,
x_high,
y_low,
y_high,
ptc,
capacitance,
resistance
FROM
graph_node WHERE track_pkey IS NOT NULL;""")):
if track_pkey not in alive_tracks:
continue
cur2.execute(
"""
SELECT name FROM segment WHERE pkey = (
SELECT segment_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
)
result = cur2.fetchone()
if result is not None:
segment_name = result[0]
segment_id = graph.get_segment_id_from_name(segment_name)
else:
segment_id = default_segment_id
node_type = graph2.NodeType(graph_node_type)
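# Channel coordinate 0 carries no routing in VPR's grid, so the low
# coordinate of CHANX/CHANY nodes is clamped to 1 below.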
if node_type == graph2.NodeType.CHANX:
direction = 'X'
x_low = max(x_low, 1)
elif node_type == graph2.NodeType.CHANY:
direction = 'Y'
y_low = max(y_low, 1)
else:
assert False, node_type
canonical_loc = None
cur2.execute(
"""
SELECT grid_x, grid_y FROM phy_tile WHERE pkey = (
SELECT canon_phy_tile_pkey FROM track WHERE pkey = ?
)""", (track_pkey, )
)
result = cur2.fetchone()
if result:
canonical_loc = graph2.CanonicalLoc(x=result[0], y=result[1])
track = tracks.Track(
direction=direction,
x_low=x_low,
x_high=x_high,
y_low=y_low,
y_high=y_high,
)
assert graph_node_pkey not in node_mapping
node_mapping[graph_node_pkey] = graph.add_track(
track=track,
segment_id=segment_id,
ptc=ptc,
timing=graph2.NodeTiming(
r=resistance,
c=capacitance,
),
canonical_loc=canonical_loc
)
def create_track_rr_graph(
conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
):
cur = conn.cursor()
cur.execute("""SELECT count(*) FROM track;""")
(num_channels, ) = cur.fetchone()
print('{} Import alive tracks'.format(now()))
alive_tracks = set()
for (track_pkey,
) in cur.execute("SELECT pkey FROM track WHERE alive = 1;"):
alive_tracks.add(track_pkey)
print('{} Importing alive tracks'.format(now()))
import_tracks(conn, alive_tracks, node_mapping, graph, segment_id)
print('original {} final {}'.format(num_channels, len(alive_tracks)))
def add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles):
cur = conn.cursor()
delayless_switch = graph.get_switch_id('__vpr_delayless_switch__')
for tile_name, synth_tile in synth_tiles['tiles'].items():
num_inpad = len(
list(
filter(
lambda t: t['port_type'] == 'output', synth_tile['pins']
)
)
)
num_outpad = len(
list(
filter(
lambda t: t['port_type'] == 'input', synth_tile['pins']
)
)
)
for pin in synth_tile['pins']:
if pin['port_type'] in ['input', 'output']:
wire_pkey = get_wire_pkey(conn, tile_name, pin['wire'])
cur.execute(
"""
SELECT
track_pkey
FROM
node
WHERE
pkey = (
SELECT
node_pkey
FROM
wire
WHERE
pkey = ?
);""", (wire_pkey, )
)
(track_pkey, ) = cur.fetchone()
assert track_pkey is not None, (
tile_name, pin['wire'], wire_pkey
)
elif pin['port_type'] == 'VCC':
cur.execute('SELECT vcc_track_pkey FROM constant_sources')
(track_pkey, ) = cur.fetchone()
elif pin['port_type'] == 'GND':
cur.execute('SELECT gnd_track_pkey FROM constant_sources')
(track_pkey, ) = cur.fetchone()
else:
assert False, pin['port_type']
tracks_model, track_nodes = get_track_model(conn, track_pkey)
option = list(
tracks_model.get_tracks_for_wire_at_coord(
tuple(synth_tile['loc'])
).values()
)
assert len(option) > 0, (pin, len(option))
if pin['port_type'] == 'input':
tile_type = synth_tile['tile_name']
wire = 'outpad'
elif pin['port_type'] == 'output':
tile_type = synth_tile['tile_name']
wire = 'inpad'
elif pin['port_type'] == 'VCC':
tile_type = 'SYN-VCC'
wire = 'VCC'
elif pin['port_type'] == 'GND':
tile_type = 'SYN-GND'
wire = 'GND'
else:
assert False, pin
track_node = track_nodes[option[0]]
assert track_node in node_mapping, (track_node, track_pkey)
if wire == 'inpad' and num_inpad > 1:
pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
tile_type, pin['z_loc'], wire
)
elif wire == 'outpad' and num_outpad > 1:
pin_name = graph.create_pin_name_from_tile_type_sub_tile_num_and_pin(
tile_type, (pin['z_loc'] - num_inpad), wire
)
else:
pin_name = graph.create_pin_name_from_tile_type_and_pin(
tile_type, wire
)
pin_node = graph.get_nodes_for_pin(
tuple(synth_tile['loc']), pin_name
)
if pin['port_type'] == 'input':
graph.add_edge(
src_node=node_mapping[track_node],
sink_node=pin_node[0][0],
switch_id=delayless_switch,
name='synth_{}_{}'.format(tile_name, pin['wire']),
)
elif pin['port_type'] in ['VCC', 'GND', 'output']:
graph.add_edge(
src_node=pin_node[0][0],
sink_node=node_mapping[track_node],
switch_id=delayless_switch,
name='synth_{}_{}'.format(tile_name, pin['wire']),
)
else:
assert False, pin
def get_switch_name(conn, graph, switch_name_map, switch_pkey):
assert switch_pkey is not None
if switch_pkey not in switch_name_map:
cur = conn.cursor()
cur.execute(
"""SELECT name FROM switch WHERE pkey = ?;""", (switch_pkey, )
)
(switch_name, ) = cur.fetchone()
switch_id = graph.get_switch_id(switch_name)
switch_name_map[switch_pkey] = switch_id
else:
switch_id = switch_name_map[switch_pkey]
return switch_id
def create_get_tile_name(conn):
cur = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_tile_name(tile_pkey):
cur.execute(
"""
SELECT name FROM phy_tile WHERE pkey = ?;
""", (tile_pkey, )
)
return cur.fetchone()[0]
return get_tile_name
def create_get_pip_wire_names(conn):
cur = conn.cursor()
@functools.lru_cache(maxsize=None)
def get_pip_wire_names(pip_pkey):
cur.execute(
"""SELECT src_wire_in_tile_pkey, dest_wire_in_tile_pkey
FROM pip_in_tile WHERE pkey = ?;""", (pip_pkey, )
)
src_wire_in_tile_pkey, dest_wire_in_tile_pkey = cur.fetchone()
cur.execute(
"""SELECT name FROM wire_in_tile WHERE pkey = ?;""",
(src_wire_in_tile_pkey, )
)
(src_net, ) = cur.fetchone()
cur.execute(
"""SELECT name FROM wire_in_tile WHERE pkey = ?;""",
(dest_wire_in_tile_pkey, )
)
(dest_net, ) = cur.fetchone()
return (src_net, dest_net)
return get_pip_wire_names
def get_number_graph_edges(conn, graph, node_mapping):
num_edges = len(graph.edges)
print('{} Counting edges.'.format(now()))
cur = conn.cursor()
cur.execute("SELECT count() FROM graph_edge;" "")
for src_graph_node, dest_graph_node in cur.execute("""
SELECT
src_graph_node_pkey,
dest_graph_node_pkey
FROM
graph_edge;
"""):
if src_graph_node not in node_mapping:
continue
if dest_graph_node not in node_mapping:
continue
num_edges += 1
return num_edges
def import_graph_edges(conn, graph, node_mapping):
# First yield existing edges
print('{} Importing existing edges.'.format(now()))
for edge in graph.edges:
yield (edge.src_node, edge.sink_node, edge.switch_id, None)
# Then yield edges from database.
cur = conn.cursor()
cur.execute("SELECT count() FROM graph_edge;" "")
(num_edges, ) = cur.fetchone()
get_tile_name = create_get_tile_name(conn)
get_pip_wire_names = create_get_pip_wire_names(conn)
switch_name_map = {}
print('{} Importing edges from database.'.format(now()))
with progressbar_utils.ProgressBar(max_value=num_edges) as bar:
for idx, (src_graph_node, dest_graph_node, switch_pkey, phy_tile_pkey,
pip_pkey, backward) in enumerate(cur.execute("""
SELECT
src_graph_node_pkey,
dest_graph_node_pkey,
switch_pkey,
phy_tile_pkey,
pip_in_tile_pkey,
backward
FROM
graph_edge;
""")):
if src_graph_node not in node_mapping:
continue
if dest_graph_node not in node_mapping:
continue
if pip_pkey is not None:
tile_name = get_tile_name(phy_tile_pkey)
src_net, dest_net = get_pip_wire_names(pip_pkey)
if not backward:
pip_name = '{}.{}.{}'.format(tile_name, dest_net, src_net)
else:
pip_name = '{}.{}.{}'.format(tile_name, src_net, dest_net)
else:
pip_name = None
switch_id = get_switch_name(
conn, graph, switch_name_map, switch_pkey
)
src_node = node_mapping[src_graph_node]
sink_node = node_mapping[dest_graph_node]
if pip_name is not None:
feature = check_feature(pip_name)
if feature:
yield (
src_node, sink_node, switch_id,
(('fasm_features', feature), )
)
else:
yield (src_node, sink_node, switch_id, ())
else:
yield (src_node, sink_node, switch_id, ())
if idx % 1024 == 0:
bar.update(idx)
def create_channels(conn):
cur = conn.cursor()
cur.execute(
"""
SELECT chan_width_max, x_min, x_max, y_min, y_max FROM channel;"""
)
chan_width_max, x_min, x_max, y_min, y_max = cur.fetchone()
cur.execute('SELECT idx, info FROM x_list;')
x_list = []
for idx, info in cur:
x_list.append(graph2.ChannelList(idx, info))
cur.execute('SELECT idx, info FROM y_list;')
y_list = []
for idx, info in cur:
y_list.append(graph2.ChannelList(idx, info))
return graph2.Channels(
chan_width_max=chan_width_max,
x_min=x_min,
y_min=y_min,
x_max=x_max,
y_max=y_max,
x_list=x_list,
y_list=y_list,
)
def create_connection_boxes(conn, graph):
""" Assign connection box ids for all connection box types. """
cur = conn.cursor()
cur.execute(
"""
SELECT pkey, tile_type_pkey, name FROM wire_in_tile WHERE pkey IN (
SELECT DISTINCT wire_in_tile_pkey FROM wire WHERE pkey IN (
SELECT connection_box_wire_pkey FROM graph_node
WHERE connection_box_wire_pkey IS NOT NULL
)
);"""
)
connection_box_map = {}
for wire_in_tile_pkey, tile_type_pkey, wire_name in cur:
connection_box_map[wire_in_tile_pkey] = graph.maybe_add_connection_box(
reduce_connection_box(wire_name)
)
return connection_box_map
def yield_nodes(nodes):
with progressbar_utils.ProgressBar(max_value=len(nodes)) as bar:
for idx, node in enumerate(nodes):
yield node
if idx % 1024 == 0:
bar.update(idx)
def phy_grid_dims(conn):
""" Returns physical grid dimensions. """
cur = conn.cursor()
cur.execute("SELECT grid_x FROM phy_tile ORDER BY grid_x DESC LIMIT 1;")
x_max = cur.fetchone()[0]
cur.execute("SELECT grid_y FROM phy_tile ORDER BY grid_y DESC LIMIT 1;")
y_max = cur.fetchone()[0]
return x_max + 1, y_max + 1
def find_constant_network(graph):
""" Find VCC and GND tiles and create synth_tiles input.
All arches should have these synthetic tiles; search the input rr graph
for the SYN-GND and SYN-VCC tiles.
"""
block_types = {}
for block_type in graph.block_types:
block_types[block_type.name] = block_type.id
assert 'SYN-GND' in block_types
assert 'SYN-VCC' in block_types
gnd_block_id = block_types['SYN-GND']
vcc_block_id = block_types['SYN-VCC']
gnd_loc = None
vcc_loc = None
for grid_loc in graph.grid:
if gnd_block_id == grid_loc.block_type_id:
assert gnd_loc is None
gnd_loc = (grid_loc.x, grid_loc.y)
if vcc_block_id == grid_loc.block_type_id:
assert vcc_loc is None
vcc_loc = (grid_loc.x, grid_loc.y)
assert gnd_loc is not None
assert vcc_loc is not None
synth_tiles = {
'tiles':
{
"VCC":
{
'loc':
vcc_loc,
'pins':
[
{
'wire': 'VCC',
'pad': 'VCC',
'port_type': 'VCC',
'is_clock': False,
},
],
},
"GND":
{
'loc':
gnd_loc,
'pins':
[
{
'wire': 'GND',
'pad': 'GND',
'port_type': 'GND',
'is_clock': False,
},
],
},
}
}
return synth_tiles
def create_node_remap(nodes, channels_obj):
N = 2
p = math.ceil(math.log2(max(channels_obj.x_max, channels_obj.y_max)))
point_map = {}
for node in nodes:
x = node.loc.x_low
y = node.loc.y_low
if (x, y) not in point_map:
point_map[(x, y)] = []
point_map[(x, y)].append(node.id)
hilbert_curve = HilbertCurve(p, N)
idx = 0
id_map = {}
for h in range(hilbert_curve.max_h + 1):
coord = tuple(hilbert_curve.coordinates_from_distance(h))
if coord not in point_map:
continue
for old_id in point_map[coord]:
id_map[old_id] = idx
idx += 1
del point_map[coord]
return lambda x: id_map[x]
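# Note: the Hilbert-curve ordering above renumbers rr nodes so that nodes which
# are physically close end up with nearby ids, which should help cache locality
# when the graph is walked. Rough usage (mirrors main() below):
#
#   remap = create_node_remap(graph.nodes, channels_obj)
#   new_id = remap(old_id)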
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--db_root', required=True, help='Project X-Ray Database'
)
parser.add_argument('--part', required=True, help='FPGA part')
parser.add_argument(
'--read_rr_graph', required=True, help='Input rr_graph file'
)
parser.add_argument(
'--write_rr_graph', required=True, help='Output rr_graph file'
)
parser.add_argument(
'--write_rr_node_map',
required=True,
help='Output map of graph_node_pkey to rr inode file'
)
parser.add_argument(
'--connection_database',
help='Database of fabric connectivity',
required=True
)
parser.add_argument(
'--synth_tiles',
help='If using an ROI, synthetic tile definition from prjxray-arch-import'
)
parser.add_argument(
'--graph_limit',
help='Limit grid to specified dimensions in x_min,y_min,x_max,y_max',
)
parser.add_argument(
'--vpr_capnp_schema_dir',
help='Directory containing VPR schema files',
)
print('{} Starting routing import'.format(now()))
args = parser.parse_args()
db = prjxray.db.Database(args.db_root, args.part)
populate_hclk_cmt_tiles(db)
synth_tiles = None
if args.synth_tiles:
use_roi = True
with open(args.synth_tiles) as f:
synth_tiles = json.load(f)
roi = Roi(
db=db,
x1=synth_tiles['info']['GRID_X_MIN'],
y1=synth_tiles['info']['GRID_Y_MIN'],
x2=synth_tiles['info']['GRID_X_MAX'],
y2=synth_tiles['info']['GRID_Y_MAX'],
)
print('{} generating routing graph for ROI.'.format(now()))
elif args.graph_limit:
use_roi = True
x_min, y_min, x_max, y_max = map(int, args.graph_limit.split(','))
roi = Roi(
db=db,
x1=x_min,
y1=y_min,
x2=x_max,
y2=y_max,
)
else:
use_roi = False
roi = None
synth_tiles = None
capnp_graph = capnp_graph2.Graph(
rr_graph_schema_fname=os.path.join(
args.vpr_capnp_schema_dir, 'rr_graph_uxsdcxx.capnp'
),
input_file_name=args.read_rr_graph,
progressbar=progressbar_utils.progressbar,
output_file_name=args.write_rr_graph,
)
graph = capnp_graph.graph
if synth_tiles is None:
synth_tiles = find_constant_network(graph)
with sqlite3.connect("file:{}?mode=ro".format(args.connection_database),
uri=True) as conn:
populate_bufg_rebuf_map(conn)
cur = conn.cursor()
for name, internal_capacitance, drive_resistance, intrinsic_delay, penalty_cost, \
switch_type in cur.execute("""
SELECT
name,
internal_capacitance,
drive_resistance,
intrinsic_delay,
penalty_cost,
switch_type
FROM
switch;"""):
# Add back missing switches, which were unused in arch xml, and so
# were not emitted in rrgraph XML.
#
# TODO: This can be removed once
# https://github.com/verilog-to-routing/vtr-verilog-to-routing/issues/354
# is fixed.
try:
graph.get_switch_id(name)
continue
except KeyError:
capnp_graph.add_switch(
graph2.Switch(
id=None,
name=name,
type=graph2.SwitchType[switch_type.upper()],
timing=graph2.SwitchTiming(
r=drive_resistance,
c_in=0.0,
c_out=0.0,
c_internal=internal_capacitance,
t_del=intrinsic_delay,
p_cost=penalty_cost,
),
sizing=graph2.SwitchSizing(
mux_trans_size=0,
buf_size=0,
),
)
)
# Mapping of graph_node.pkey to rr node id.
node_mapping = {}
print('{} Creating connection box list'.format(now()))
connection_box_map = create_connection_boxes(conn, graph)
# Match site pins rr nodes with graph_node's in the connection_database.
print('{} Importing graph nodes'.format(now()))
import_graph_nodes(conn, graph, node_mapping, connection_box_map)
# Walk all track graph nodes and add them.
print('{} Creating tracks'.format(now()))
segment_id = graph.get_segment_id_from_name('dummy')
create_track_rr_graph(
conn, graph, node_mapping, use_roi, roi, synth_tiles, segment_id
)
# Set of (src, sink, switch_id) tuples for pip edges that have been sent to
# VPR. VPR cannot handle duplicate paths with the same switch id.
print('{} Adding synthetic edges'.format(now()))
add_synthetic_edges(conn, graph, node_mapping, grid, synth_tiles)
print('{} Creating channels.'.format(now()))
channels_obj = create_channels(conn)
node_remap = create_node_remap(capnp_graph.graph.nodes, channels_obj)
x_dim, y_dim = phy_grid_dims(conn)
connection_box_obj = graph.create_connection_box_object(
x_dim=x_dim, y_dim=y_dim
)
num_edges = get_number_graph_edges(conn, graph, node_mapping)
print('{} Serializing to disk.'.format(now()))
capnp_graph.serialize_to_capnp(
channels_obj=channels_obj,
connection_box_obj=connection_box_obj,
num_nodes=len(capnp_graph.graph.nodes),
nodes_obj=yield_nodes(capnp_graph.graph.nodes),
num_edges=num_edges,
edges_obj=import_graph_edges(conn, graph, node_mapping),
node_remap=node_remap,
)
for k in node_mapping:
node_mapping[k] = node_remap(node_mapping[k])
print('{} Writing node map.'.format(now()))
with open(args.write_rr_node_map, 'wb') as f:
pickle.dump(node_mapping, f)
print('{} Done writing node map.'.format(now()))
if __name__ == '__main__':
main()
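# Example invocation (all file names and the part number below are placeholders;
# the required flags come from the argparse definition in main()):
#
#   python3 this_script.py \
#       --db_root /path/to/prjxray-db/artix7 \
#       --part xc7a50tfgg484-1 \
#       --connection_database channels.db \
#       --read_rr_graph rr_graph_virt.bin \
#       --write_rr_graph rr_graph_real.bin \
#       --write_rr_node_map rr_node_map.pickle \
#       --vpr_capnp_schema_dir /path/to/capnp/schemas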
| [((46, 24, 46, 59), 're.compile', 're.compile', ({(46, 35, 46, 58): '"""HCLK_CK_BUFHCLK[0-9]+"""'}, {}), "('HCLK_CK_BUFHCLK[0-9]+')", False, 'import re\n'), ((47, 24, 47, 72), 're.compile', 're.compile', ({(47, 35, 47, 71): '"""CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)"""'}, {}), "('CLK_HROW_CK_MUX_OUT_([LR])([0-9]+)')", False, 'import re\n'), ((48, 16, 48, 77), 're.compile', 're.compile', ({(48, 27, 48, 76): '"""BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)"""'}, {}), "('BRAM_CASCOUT_ADDR((?:BWR)|(?:ARD))ADDRU([0-9]+)')", False, 'import re\n'), ((49, 24, 49, 53), 're.compile', 're.compile', ({(49, 35, 49, 52): '"""([^0-9]+)[0-9]*"""'}, {}), "('([^0-9]+)[0-9]*')", False, 'import re\n'), ((50, 20, 50, 59), 're.compile', 're.compile', ({(50, 31, 50, 58): '"""CLK_HROW_CK_IN_[LR][0-9]+"""'}, {}), "('CLK_HROW_CK_IN_[LR][0-9]+')", False, 'import re\n'), ((51, 21, 51, 59), 're.compile', 're.compile', ({(51, 32, 51, 58): '"""CLK_HROW_R_CK_GCLK[0-9]+"""'}, {}), "('CLK_HROW_R_CK_GCLK[0-9]+')", False, 'import re\n'), ((52, 20, 52, 53), 're.compile', 're.compile', ({(52, 31, 52, 52): '"""HCLK_CMT_CCIO[0-9]+"""'}, {}), "('HCLK_CMT_CCIO[0-9]+')", False, 'import re\n'), ((53, 11, 53, 60), 're.compile', 're.compile', ({(53, 22, 53, 59): '"""CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)"""'}, {}), "('CLK_HROW_CK_HCLK_OUT_([LR])([0-9]+)')", False, 'import re\n'), ((54, 11, 54, 40), 're.compile', 're.compile', ({(54, 22, 54, 39): '"""IOI_OCLK_([01])"""'}, {}), "('IOI_OCLK_([01])')", False, 'import re\n'), ((474, 20, 474, 60), 're.compile', 're.compile', ({(474, 31, 474, 59): '"""^([^\\\\.]+)\\\\.([^\\\\]]+)\\\\[0\\\\]$"""'}, {}), "('^([^\\\\.]+)\\\\.([^\\\\]]+)\\\\[0\\\\]$')", False, 'import re\n'), ((132, 24, 134, 5), 're.compile', 're.compile', ({(133, 8, 133, 52): '"""CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)"""'}, {}), "('CLK_BUFG_REBUF_R_CK_GCLK([0-9]+)_(BOT|TOP)')", False, 'import re\n'), ((466, 19, 466, 48), 'prjxray_constant_site_pins.feature_when_routed', 'feature_when_routed', ({(466, 39, 466, 47): 'parts[1]'}, {}), '(parts[1])', False, 'from prjxray_constant_site_pins import feature_when_routed\n'), ((482, 34, 487, 5), 'lib.rr_graph.graph2.ConnectionBox', 'graph2.ConnectionBox', (), '', False, 'from lib.rr_graph import graph2\n'), ((488, 28, 488, 52), 'lib.rr_graph.graph2.Node', 'graph2.Node', ({}, {}), '(**node_dict)', False, 'from lib.rr_graph import graph2\n'), ((563, 5, 563, 35), 'functools.lru_cache', 'functools.lru_cache', (), '', False, 'import functools\n'), ((577, 5, 577, 35), 'functools.lru_cache', 'functools.lru_cache', (), '', False, 'import functools\n'), ((951, 5, 951, 38), 'functools.lru_cache', 'functools.lru_cache', (), '', False, 'import functools\n'), ((966, 5, 966, 38), 'functools.lru_cache', 'functools.lru_cache', (), '', False, 'import functools\n'), ((1106, 11, 1114, 5), 'lib.rr_graph.graph2.Channels', 'graph2.Channels', (), '', False, 'from lib.rr_graph import graph2\n'), ((1244, 20, 1244, 38), 'hilbertcurve.hilbertcurve.HilbertCurve', 'HilbertCurve', ({(1244, 33, 1244, 34): 'p', (1244, 36, 1244, 37): 'N'}, {}), '(p, N)', False, 'from hilbertcurve.hilbertcurve import HilbertCurve\n'), ((1264, 13, 1264, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((641, 20, 641, 53), 'prjxray_tile_import.remove_vpr_tile_prefix', 'remove_vpr_tile_prefix', ({(641, 43, 641, 52): 'tile_type'}, {}), '(tile_type)', False, 'from prjxray_tile_import import remove_vpr_tile_prefix\n'), ((760, 20, 760, 52), 'lib.rr_graph.graph2.NodeType', 
'graph2.NodeType', ({(760, 36, 760, 51): 'graph_node_type'}, {}), '(graph_node_type)', False, 'from lib.rr_graph import graph2\n'), ((782, 16, 788, 9), 'lib.rr_graph.tracks.Track', 'tracks.Track', (), '', False, 'from lib.rr_graph import tracks\n'), ((1034, 9, 1034, 59), 'lib.progressbar_utils.ProgressBar', 'progressbar_utils.ProgressBar', (), '', False, 'from lib import progressbar_utils\n'), ((1310, 14, 1316, 9), 'prjxray.roi.Roi', 'Roi', (), '', False, 'from prjxray.roi import Roi\n'), ((780, 28, 780, 73), 'lib.rr_graph.graph2.CanonicalLoc', 'graph2.CanonicalLoc', (), '', False, 'from lib.rr_graph import graph2\n'), ((871, 40, 871, 73), 'lib.connection_database.get_track_model', 'get_track_model', ({(871, 56, 871, 60): 'conn', (871, 62, 871, 72): 'track_pkey'}, {}), '(conn, track_pkey)', False, 'from lib.connection_database import get_wire_pkey, get_track_model\n'), ((1099, 22, 1099, 51), 'lib.rr_graph.graph2.ChannelList', 'graph2.ChannelList', ({(1099, 41, 1099, 44): 'idx', (1099, 46, 1099, 50): 'info'}, {}), '(idx, info)', False, 'from lib.rr_graph import graph2\n'), ((1104, 22, 1104, 51), 'lib.rr_graph.graph2.ChannelList', 'graph2.ChannelList', ({(1104, 41, 1104, 44): 'idx', (1104, 46, 1104, 50): 'info'}, {}), '(idx, info)', False, 'from lib.rr_graph import graph2\n'), ((1308, 26, 1308, 38), 'simplejson.load', 'json.load', ({(1308, 36, 1308, 37): 'f'}, {}), '(f)', True, 'import simplejson as json\n'), ((1322, 14, 1328, 9), 'prjxray.roi.Roi', 'Roi', (), '', False, 'from prjxray.roi import Roi\n'), ((1446, 12, 1446, 40), 'pickle.dump', 'pickle.dump', ({(1446, 24, 1446, 36): 'node_mapping', (1446, 38, 1446, 39): 'f'}, {}), '(node_mapping, f)', False, 'import pickle\n'), ((794, 19, 797, 13), 'lib.rr_graph.graph2.NodeTiming', 'graph2.NodeTiming', (), '', False, 'from lib.rr_graph import graph2\n'), ((842, 28, 842, 71), 'lib.connection_database.get_wire_pkey', 'get_wire_pkey', ({(842, 42, 842, 46): 'conn', (842, 48, 842, 57): 'tile_name', (842, 59, 842, 70): "pin['wire']"}, {}), "(conn, tile_name, pin['wire'])", False, 'from lib.connection_database import get_wire_pkey, get_track_model\n'), ((1381, 31, 1388, 25), 'lib.rr_graph.graph2.SwitchTiming', 'graph2.SwitchTiming', (), '', False, 'from lib.rr_graph import graph2\n'), ((1389, 31, 1392, 25), 'lib.rr_graph.graph2.SwitchSizing', 'graph2.SwitchSizing', (), '', False, 'from lib.rr_graph import graph2\n')] |
andrewbowen19/ClusterEclipsingBinaries | testing/onQuest/longClusters/m67/OLD-analyseEBLSSTm67.py | e554cb6bb613e0d3703314e50fcf5289f50bf572 | #########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from quest - want to take p and ecc from each population (all, obs, rec) and put them into separate files
# Doing this so we don't have to re-run the analysis each time
# Can write separate script for p-ecc plots
# Quest paths in this version of script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
def file_len(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
return Phs.decompose().to(units.day)
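# Usage sketch (mirrors the call in the main loop below): the hard-soft binary
# boundary period, in days, for a cluster velocity dispersion sigma is
# getPhs(sigma_km_s*units.km/units.s) with the default component masses.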
#similar to field, but limiting by the hard-soft boundary
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
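# fitRagfb() returns an astropy PowerLaw1D model fitted to the tabulated
# binary fractions above; calling it at a primary mass, e.g. fbFit(1.0),
# gives the assumed binary fraction (roughly 0.5 near 1 Msun).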
def RagNormal(x, cdf = False):
mean = 5.03
std = 2.28
if (cdf):
return scipy.stats.norm.cdf(x,mean,std)
return scipy.stats.norm.pdf(x,mean,std)
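# RagNormal is the log-normal period distribution (mean log10(P/day) = 5.03,
# sigma = 2.28, as in Raghavan et al. 2010). With cdf=True it returns the
# fraction of binaries with log10(P) below x, e.g. RagNormal(np.log10(Phs),
# cdf=True) as used below for the hard-soft cut.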
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']):
c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
c2 = '#A62B1F' #Dali Red
c3 = '#BF8A26' #Dali Beige
fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2
histAll = np.insert(histAll,0,0)
histObs = np.insert(histObs,0,0)
for f in filters:
histRec[f] = np.insert(histRec[f],0,0)
#PDF
ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
for f in filters:
lw = 1
if (f == 'all'):
lw = 0.5
ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
ax1.set_ylabel('PDF')
ax1.set_yscale('log')
ax1.set_title('M67 - Baseline, No Crowding', fontsize = 16)
ax1.set_xlabel(xtitle)
#CDF
#cdfAll = []
#cdfObs = []
#cdfRec = dict()
#for f in filters:
# cdfRec[f] = []
# for i in range(len(histAll)):
# cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
# for i in range(len(histObs)):
# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
# for f in filters:
# for i in range(len(histRec[f])):
# cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
#ax2.step(bin_edges, cdfAll, color=c1)
#ax2.step(bin_edges, cdfObs, color=c2)
#for f in filters:
# lw = 1
# if (f == 'all'):
# lw = 0.5
# ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
#ax2.set_ylabel('CDF')
#ax2.set_xlabel(xtitle)
fig.subplots_adjust(hspace=0)
fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight')
#write to a text file
with open('./eblsst_files/' + fname+'.csv','w') as fl:
outline = 'binEdges,histAll,histObs'
for f in filters:
outline += ','+f+'histRec'
outline += '\n'
fl.write(outline)
for i in range(len(bin_edges)):
outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
for f in filters:
outline += ','+str(histRec[f][i])
outline += '\n'
fl.write(outline)
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
# Dataframes to write to files later; one file for each sub-population - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
fb *= RagNormal(np.log10(Phs), cdf = True)
print("fb, Phs = ", fb, Phs)
Nmult *= fb
m1hAll += m1hAll0/Nall*Nmult
qhAll += qhAll0/Nall*Nmult
ehAll += ehAll0/Nall*Nmult
lphAll += lphAll0/Nall*Nmult
dhAll += dhAll0/Nall*Nmult
maghAll += maghAll0/Nall*Nmult
rhAll += rhAll0/Nall*Nmult
#Obs
obs = data.loc[data['LSM_PERIOD'] != -999]
Nobs = len(obs.index)
prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
NobsPrsa = len(prsaObs.index)
# Appending for Andrew's files
eccObs.append(prsaObs['e'].values)
pObs.append(prsaObs['p'].values)
if (Nobs >= Nlim):
m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
ehObs0, eb = np.histogram(obs["e"], bins=ebins)
lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
dhObs0, db = np.histogram(obs["d"], bins=dbins)
maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
m1hObs += m1hObs0/Nall*Nmult
qhObs += qhObs0/Nall*Nmult
ehObs += ehObs0/Nall*Nmult
lphObs += lphObs0/Nall*Nmult
dhObs += dhObs0/Nall*Nmult
maghObs += maghObs0/Nall*Nmult
rhObs += rhObs0/Nall*Nmult
#Rec
recCombined = pd.DataFrame()
prsaRecCombined = pd.DataFrame()
for filt in filters:
key = filt+'LSS_PERIOD'
if (filt == 'all'):
key = 'LSM_PERIOD'
fullP = abs(data[key] - data['p'])/data['p']
halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
Nrec = len(rec.index)
#I'd like to account for all filters here to have more accurate numbers
recCombined = recCombined.append(rec)
prsaRecCombined = prsaRecCombined.append(prsaRec)
# Going to use prsaRecCombined for ecc-p plots to account for all filters
eccRec.append(prsaRec['e'].values)
pRec.append(prsaRec['p'].values)
if (filt == 'all'):
recCombined.drop_duplicates(inplace=True)
prsaRecCombined.drop_duplicates(inplace=True)
if (Nrec >= Nlim):
m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
ehRec0, eb = np.histogram(rec["e"], bins=ebins)
lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
dhRec0, db = np.histogram(rec["d"], bins=dbins)
maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
m1hRec[filt] += m1hRec0/Nall*Nmult
qhRec[filt] += qhRec0/Nall*Nmult
ehRec[filt] += ehRec0/Nall*Nmult
lphRec[filt] += lphRec0/Nall*Nmult
dhRec[filt] += dhRec0/Nall*Nmult
maghRec[filt] += maghRec0/Nall*Nmult
rhRec[filt] += rhRec0/Nall*Nmult
#for the mollweide
if (filt == 'all'):
Nrec = len(recCombined.index)
rF = Nrec/Nall
rN = Nrec/Nall*Nmult
raN = Nmult
obN = Nobs/Nall*Nmult
fiN = Nall
fioN = Nobs
firN = Nrec
NrecPrsa = len(prsaRecCombined.index)
NrecPrsa = NrecPrsa/Nall*Nmult
NobsPrsa = NobsPrsa/Nall*Nmult
NallPrsa = NallPrsa/Nall*Nmult
recFrac.append(rF)
recN.append(rN)
rawN.append(raN)
obsN.append(obN)
fileN.append(fiN)
fileObsN.append(fioN)
fileRecN.append(firN)
allNPrsa.append(NallPrsa)
obsNPrsa.append(NobsPrsa)
recNPrsa.append(NrecPrsa)
#print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
# Concatenating p and ecc lists
eccAll = np.concatenate(eccAll)
eccObs = np.concatenate(eccObs)
eccRec = np.concatenate(eccRec)
pAll = np.concatenate(pAll)
pObs = np.concatenate(pObs)
pRec = np.concatenate(pRec)
# print('Ecc lists:', eccAll, eccObs, eccRec)
# print('P lists:', pAll, pObs, pRec)
# Appending lists with all the p/ecc values to our dataframes
# All dataframe
peccAll['e'] = eccAll
peccAll['p'] = pAll
# Observable dataframe
peccObs['e'] = eccObs
peccObs['p'] = pObs
# Recovered dataframe
peccRec['e'] = eccRec
peccRec['p'] = pRec
# print('Final Dataframes:', peccAll, peccObs, peccRec)
# print(peccRec.columns)
# 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])
peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p'])
peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p'])
#plot and save the histograms
saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')
#make the mollweide
coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs')
lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
RAwrap = coords.ra.wrap_at(180.*units.degree).degree
Decwrap = coords.dec.wrap_at(180.*units.degree).degree
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'% recovered')
f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight')
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'log10(N) recovered')
f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight')
if (doIndividualPlots):
fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight')
fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight')
fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight')
flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight')
fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight')
fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight')
frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight')
print("###################")
print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN)))
print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN)))
print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN)))
print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.)
print("###################")
print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN)))
print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN)))
print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN)))
print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.)
print("###################")
print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))
print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa)))
print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa)))
print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
| [((24, 0, 24, 21), 'matplotlib.use', 'matplotlib.use', ({(24, 15, 24, 20): '"""Agg"""'}, {}), "('Agg')", False, 'import matplotlib\n'), ((44, 8, 44, 58), 'astropy.modeling.models.PowerLaw1D', 'models.PowerLaw1D', (), '', False, 'from astropy.modeling import models, fitting\n'), ((45, 10, 45, 35), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ({}, {}), '()', False, 'from astropy.modeling import models, fitting\n'), ((63, 11, 63, 51), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'from matplotlib import pyplot as plt\n'), ((65, 11, 65, 33), 'numpy.insert', 'np.insert', ({(65, 21, 65, 28): 'histAll', (65, 29, 65, 30): '0', (65, 31, 65, 32): '0'}, {}), '(histAll, 0, 0)', True, 'import numpy as np\n'), ((66, 11, 66, 33), 'numpy.insert', 'np.insert', ({(66, 21, 66, 28): 'histObs', (66, 29, 66, 30): '0', (66, 31, 66, 32): '0'}, {}), '(histObs, 0, 0)', True, 'import numpy as np\n'), ((132, 15, 132, 39), 'scipy.integrate.quad', 'quad', ({(132, 20, 132, 29): 'RagNormal', (132, 31, 132, 34): '-20', (132, 36, 132, 38): '20'}, {}), '(RagNormal, -20, 20)', False, 'from scipy.integrate import quad\n'), ((156, 9, 156, 44), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((157, 9, 157, 43), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((158, 9, 158, 48), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((159, 10, 159, 47), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((160, 9, 160, 43), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((161, 11, 161, 46), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((162, 9, 162, 46), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((223, 11, 223, 45), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((224, 11, 224, 45), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((225, 11, 225, 45), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((229, 9, 229, 22), 'os.listdir', 'os.listdir', ({(229, 20, 229, 21): 'd'}, {}), '(d)', False, 'import os\n'), ((407, 10, 407, 32), 'numpy.concatenate', 'np.concatenate', ({(407, 25, 407, 31): 'eccAll'}, {}), '(eccAll)', True, 'import numpy as np\n'), ((408, 10, 408, 32), 'numpy.concatenate', 'np.concatenate', ({(408, 25, 408, 31): 'eccObs'}, {}), '(eccObs)', True, 'import numpy as np\n'), ((409, 10, 409, 32), 'numpy.concatenate', 'np.concatenate', ({(409, 25, 409, 31): 'eccRec'}, {}), '(eccRec)', True, 'import numpy as np\n'), ((411, 8, 411, 28), 'numpy.concatenate', 'np.concatenate', ({(411, 23, 411, 27): 'pAll'}, {}), '(pAll)', True, 'import numpy as np\n'), ((412, 8, 412, 28), 'numpy.concatenate', 'np.concatenate', ({(412, 23, 412, 27): 'pObs'}, {}), '(pObs)', True, 'import numpy as np\n'), ((413, 8, 413, 28), 'numpy.concatenate', 'np.concatenate', ({(413, 23, 413, 27): 'pRec'}, {}), '(pRec)', True, 'import numpy as np\n'), ((450, 10, 450, 75), 'astropy.coordinates.SkyCoord', 'SkyCoord', (), '', False, 'from astropy.coordinates import SkyCoord\n'), ((456, 9, 456, 76), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'from matplotlib import pyplot as plt\n'), ((468, 9, 468, 76), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'from matplotlib import pyplot as plt\n'), ((68, 15, 68, 40), 'numpy.insert', 'np.insert', ({(68, 25, 68, 35): 'histRec[f]', (68, 36, 68, 37): '0', (68, 38, 68, 39): '0'}, {}), '(histRec[f], 0, 0)', True, 'import numpy as 
np\n'), ((133, 36, 133, 53), 'numpy.log10', 'np.log10', ({(133, 45, 133, 52): '365 * 10.0'}, {}), '(365 * 10.0)', True, 'import numpy as np\n'), ((146, 18, 146, 32), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((147, 18, 147, 32), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((148, 16, 148, 30), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((149, 18, 149, 32), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((150, 18, 150, 32), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((151, 16, 151, 30), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((152, 16, 152, 30), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((166, 10, 166, 30), 'numpy.zeros_like', 'np.zeros_like', ({(166, 24, 166, 29): 'mbins'}, {}), '(mbins)', True, 'import numpy as np\n'), ((167, 9, 167, 29), 'numpy.zeros_like', 'np.zeros_like', ({(167, 23, 167, 28): 'qbins'}, {}), '(qbins)', True, 'import numpy as np\n'), ((168, 9, 168, 29), 'numpy.zeros_like', 'np.zeros_like', ({(168, 23, 168, 28): 'ebins'}, {}), '(ebins)', True, 'import numpy as np\n'), ((169, 10, 169, 31), 'numpy.zeros_like', 'np.zeros_like', ({(169, 24, 169, 30): 'lpbins'}, {}), '(lpbins)', True, 'import numpy as np\n'), ((170, 9, 170, 29), 'numpy.zeros_like', 'np.zeros_like', ({(170, 23, 170, 28): 'dbins'}, {}), '(dbins)', True, 'import numpy as np\n'), ((171, 11, 171, 33), 'numpy.zeros_like', 'np.zeros_like', ({(171, 25, 171, 32): 'magbins'}, {}), '(magbins)', True, 'import numpy as np\n'), ((172, 9, 172, 29), 'numpy.zeros_like', 'np.zeros_like', ({(172, 23, 172, 28): 'rbins'}, {}), '(rbins)', True, 'import numpy as np\n'), ((174, 10, 174, 30), 'numpy.zeros_like', 'np.zeros_like', ({(174, 24, 174, 29): 'mbins'}, {}), '(mbins)', True, 'import numpy as np\n'), ((175, 9, 175, 29), 'numpy.zeros_like', 'np.zeros_like', ({(175, 23, 175, 28): 'qbins'}, {}), '(qbins)', True, 'import numpy as np\n'), ((176, 9, 176, 29), 'numpy.zeros_like', 'np.zeros_like', ({(176, 23, 176, 28): 'ebins'}, {}), '(ebins)', True, 'import numpy as np\n'), ((177, 10, 177, 31), 'numpy.zeros_like', 'np.zeros_like', ({(177, 24, 177, 30): 'lpbins'}, {}), '(lpbins)', True, 'import numpy as np\n'), ((178, 9, 178, 29), 'numpy.zeros_like', 'np.zeros_like', ({(178, 23, 178, 28): 'dbins'}, {}), '(dbins)', True, 'import numpy as np\n'), ((179, 11, 179, 33), 'numpy.zeros_like', 'np.zeros_like', ({(179, 25, 179, 32): 'magbins'}, {}), '(magbins)', True, 'import numpy as np\n'), ((180, 9, 180, 29), 'numpy.zeros_like', 'np.zeros_like', ({(180, 23, 180, 28): 'rbins'}, {}), '(rbins)', True, 'import numpy as np\n'), ((490, 55, 490, 68), 'numpy.sum', 'np.sum', ({(490, 62, 490, 67): 'fileN'}, {}), '(fileN)', True, 'import numpy as np\n'), ((491, 62, 491, 78), 'numpy.sum', 'np.sum', ({(491, 69, 491, 77): 'fileObsN'}, {}), '(fileObsN)', True, 'import numpy as np\n'), ((492, 65, 492, 81), 'numpy.sum', 'np.sum', ({(492, 72, 492, 80): 'fileRecN'}, {}), '(fileRecN)', True, 'import numpy as np\n'), ((495, 37, 495, 49), 'numpy.sum', 'np.sum', ({(495, 44, 495, 48): 'rawN'}, {}), '(rawN)', True, 'import numpy as np\n'), ((496, 38, 496, 50), 'numpy.sum', 'np.sum', ({(496, 45, 496, 
49): 'obsN'}, {}), '(obsN)', True, 'import numpy as np\n'), ((497, 37, 497, 49), 'numpy.sum', 'np.sum', ({(497, 44, 497, 48): 'recN'}, {}), '(recN)', True, 'import numpy as np\n'), ((500, 62, 500, 78), 'numpy.sum', 'np.sum', ({(500, 69, 500, 77): 'allNPrsa'}, {}), '(allNPrsa)', True, 'import numpy as np\n'), ((501, 73, 501, 89), 'numpy.sum', 'np.sum', ({(501, 80, 501, 88): 'obsNPrsa'}, {}), '(obsNPrsa)', True, 'import numpy as np\n'), ((502, 72, 502, 88), 'numpy.sum', 'np.sum', ({(502, 79, 502, 87): 'recNPrsa'}, {}), '(recNPrsa)', True, 'import numpy as np\n'), ((71, 29, 71, 44), 'numpy.sum', 'np.sum', ({(71, 36, 71, 43): 'histAll'}, {}), '(histAll)', True, 'import numpy as np\n'), ((72, 29, 72, 44), 'numpy.sum', 'np.sum', ({(72, 36, 72, 43): 'histObs'}, {}), '(histObs)', True, 'import numpy as np\n'), ((77, 32, 77, 50), 'numpy.sum', 'np.sum', ({(77, 39, 77, 49): 'histRec[f]'}, {}), '(histRec[f])', True, 'import numpy as np\n'), ((190, 14, 190, 34), 'numpy.zeros_like', 'np.zeros_like', ({(190, 28, 190, 33): 'mbins'}, {}), '(mbins)', True, 'import numpy as np\n'), ((191, 13, 191, 33), 'numpy.zeros_like', 'np.zeros_like', ({(191, 27, 191, 32): 'qbins'}, {}), '(qbins)', True, 'import numpy as np\n'), ((192, 13, 192, 33), 'numpy.zeros_like', 'np.zeros_like', ({(192, 27, 192, 32): 'ebins'}, {}), '(ebins)', True, 'import numpy as np\n'), ((193, 14, 193, 35), 'numpy.zeros_like', 'np.zeros_like', ({(193, 28, 193, 34): 'lpbins'}, {}), '(lpbins)', True, 'import numpy as np\n'), ((194, 13, 194, 33), 'numpy.zeros_like', 'np.zeros_like', ({(194, 27, 194, 32): 'dbins'}, {}), '(dbins)', True, 'import numpy as np\n'), ((195, 15, 195, 37), 'numpy.zeros_like', 'np.zeros_like', ({(195, 29, 195, 36): 'magbins'}, {}), '(magbins)', True, 'import numpy as np\n'), ((196, 13, 196, 33), 'numpy.zeros_like', 'np.zeros_like', ({(196, 27, 196, 32): 'rbins'}, {}), '(rbins)', True, 'import numpy as np\n'), ((236, 12, 236, 37), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((490, 79, 490, 92), 'numpy.sum', 'np.sum', ({(490, 86, 490, 91): 'fileN'}, {}), '(fileN)', True, 'import numpy as np\n'), ((491, 89, 491, 105), 'numpy.sum', 'np.sum', ({(491, 96, 491, 104): 'fileObsN'}, {}), '(fileObsN)', True, 'import numpy as np\n'), ((492, 92, 492, 108), 'numpy.sum', 'np.sum', ({(492, 99, 492, 107): 'fileRecN'}, {}), '(fileRecN)', True, 'import numpy as np\n'), ((495, 60, 495, 72), 'numpy.sum', 'np.sum', ({(495, 67, 495, 71): 'rawN'}, {}), '(rawN)', True, 'import numpy as np\n'), ((496, 61, 496, 73), 'numpy.sum', 'np.sum', ({(496, 68, 496, 72): 'obsN'}, {}), '(obsN)', True, 'import numpy as np\n'), ((497, 60, 497, 72), 'numpy.sum', 'np.sum', ({(497, 67, 497, 71): 'recN'}, {}), '(recN)', True, 'import numpy as np\n'), ((500, 89, 500, 105), 'numpy.sum', 'np.sum', ({(500, 96, 500, 104): 'allNPrsa'}, {}), '(allNPrsa)', True, 'import numpy as np\n'), ((501, 100, 501, 116), 'numpy.sum', 'np.sum', ({(501, 107, 501, 115): 'obsNPrsa'}, {}), '(obsNPrsa)', True, 'import numpy as np\n'), ((502, 99, 502, 115), 'numpy.sum', 'np.sum', ({(502, 106, 502, 114): 'recNPrsa'}, {}), '(recNPrsa)', True, 'import numpy as np\n'), ((271, 19, 271, 55), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((272, 17, 272, 64), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((273, 17, 273, 52), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((275, 17, 275, 52), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((276, 21, 276, 69), 
'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((277, 17, 277, 64), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((289, 10, 289, 22), 'numpy.diff', 'np.diff', ({(289, 18, 289, 21): 'm1b'}, {}), '(m1b)', True, 'import numpy as np\n'), ((463, 95, 463, 112), 'numpy.array', 'np.array', ({(463, 104, 463, 111): 'recFrac'}, {}), '(recFrac)', True, 'import numpy as np\n'), ((475, 104, 475, 118), 'numpy.array', 'np.array', ({(475, 113, 475, 117): 'recN'}, {}), '(recN)', True, 'import numpy as np\n'), ((493, 47, 493, 63), 'numpy.sum', 'np.sum', ({(493, 54, 493, 62): 'fileRecN'}, {}), '(fileRecN)', True, 'import numpy as np\n'), ((493, 64, 493, 80), 'numpy.sum', 'np.sum', ({(493, 71, 493, 79): 'fileObsN'}, {}), '(fileObsN)', True, 'import numpy as np\n'), ((498, 35, 498, 47), 'numpy.sum', 'np.sum', ({(498, 42, 498, 46): 'recN'}, {}), '(recN)', True, 'import numpy as np\n'), ((498, 48, 498, 60), 'numpy.sum', 'np.sum', ({(498, 55, 498, 59): 'obsN'}, {}), '(obsN)', True, 'import numpy as np\n'), ((503, 47, 503, 63), 'numpy.sum', 'np.sum', ({(503, 54, 503, 62): 'recNPrsa'}, {}), '(recNPrsa)', True, 'import numpy as np\n'), ((503, 64, 503, 80), 'numpy.sum', 'np.sum', ({(503, 71, 503, 79): 'obsNPrsa'}, {}), '(obsNPrsa)', True, 'import numpy as np\n'), ((37, 25, 37, 36), 'numpy.sqrt', 'np.sqrt', ({(37, 33, 37, 35): '(2.0)'}, {}), '(2.0)', True, 'import numpy as np\n'), ((247, 10, 247, 38), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((294, 20, 294, 33), 'numpy.log10', 'np.log10', ({(294, 29, 294, 32): 'Phs'}, {}), '(Phs)', True, 'import numpy as np\n'), ((318, 20, 318, 55), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((319, 18, 319, 63), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((320, 18, 320, 52), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((322, 18, 322, 52), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((323, 22, 323, 69), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((324, 18, 324, 63), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((334, 19, 334, 33), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((335, 23, 335, 37), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((463, 18, 463, 34), 'numpy.array', 'np.array', ({(463, 27, 463, 33): 'RAwrap'}, {}), '(RAwrap)', True, 'import numpy as np\n'), ((463, 55, 463, 72), 'numpy.array', 'np.array', ({(463, 64, 463, 71): 'Decwrap'}, {}), '(Decwrap)', True, 'import numpy as np\n'), ((475, 18, 475, 34), 'numpy.array', 'np.array', ({(475, 27, 475, 33): 'RAwrap'}, {}), '(RAwrap)', True, 'import numpy as np\n'), ((475, 55, 475, 72), 'numpy.array', 'np.array', ({(475, 64, 475, 71): 'Decwrap'}, {}), '(Decwrap)', True, 'import numpy as np\n'), ((274, 32, 274, 61), 'numpy.ma.log10', 'np.ma.log10', ({(274, 44, 274, 60): "data['p'].values"}, {}), "(data['p'].values)", True, 'import numpy as np\n'), ((280, 36, 280, 51), 'numpy.sum', 'np.sum', ({(280, 43, 280, 50): 'm1hAll0'}, {}), '(m1hAll0)', True, 'import numpy as np\n'), ((281, 34, 281, 48), 'numpy.sum', 'np.sum', ({(281, 41, 281, 47): 'qhAll0'}, {}), '(qhAll0)', True, 'import numpy as np\n'), ((282, 33, 282, 47), 'numpy.sum', 'np.sum', ({(282, 40, 282, 46): 'ehAll0'}, {}), '(ehAll0)', True, 'import numpy as np\n'), ((283, 36, 283, 51), 'numpy.sum', 'np.sum', ({(283, 43, 283, 50): 
'lphAll0'}, {}), '(lphAll0)', True, 'import numpy as np\n'), ((284, 34, 284, 48), 'numpy.sum', 'np.sum', ({(284, 41, 284, 47): 'dhAll0'}, {}), '(dhAll0)', True, 'import numpy as np\n'), ((285, 37, 285, 53), 'numpy.sum', 'np.sum', ({(285, 44, 285, 52): 'maghAll0'}, {}), '(maghAll0)', True, 'import numpy as np\n'), ((286, 33, 286, 47), 'numpy.sum', 'np.sum', ({(286, 40, 286, 46): 'rhAll0'}, {}), '(rhAll0)', True, 'import numpy as np\n'), ((361, 22, 361, 57), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((362, 20, 362, 65), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((363, 20, 363, 54), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((365, 20, 365, 54), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((366, 24, 366, 71), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((367, 20, 367, 65), 'numpy.histogram', 'np.histogram', (), '', True, 'import numpy as np\n'), ((321, 33, 321, 61), 'numpy.ma.log10', 'np.ma.log10', ({(321, 45, 321, 60): "obs['p'].values"}, {}), "(obs['p'].values)", True, 'import numpy as np\n'), ((364, 35, 364, 63), 'numpy.ma.log10', 'np.ma.log10', ({(364, 47, 364, 62): "rec['p'].values"}, {}), "(rec['p'].values)", True, 'import numpy as np\n')] |
ckamtsikis/cmssw | CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | import FWCore.ParameterSet.Config as cms
import os
process = cms.Process("summary")
process.MessageLogger = cms.Service( "MessageLogger",
debugModules = cms.untracked.vstring( "*" ),
cout = cms.untracked.PSet( threshold = cms.untracked.string( "DEBUG" ) ),
destinations = cms.untracked.vstring( "cout" )
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi")
### 2018 Prompt
process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
process.BeamSpotRcdPrinter.startIOV = 1350646955507767
process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.BeamSpotRcdPrinter.output = "summary2018_Prompt.txt"
### 2017 ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1275820035276801
#process.BeamSpotRcdPrinter.endIOV = 1316235677532161
### 2018 ABC ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1354018504835073
#process.BeamSpotRcdPrinter.endIOV = 1374668707594734
### 2018D Prompt
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
#process.BeamSpotRcdPrinter.startIOV = 1377280047710242
#process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.p = cms.Path(process.BeamSpotRcdPrinter)
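# This configuration is meant to be run inside a CMSSW environment with the
# standard driver, e.g.:
#   cmsRun BeamSpotRcdPrinter_cfg.py
# The IOV summary should end up in the file set in process.BeamSpotRcdPrinter.output.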
| [((4, 10, 4, 32), 'FWCore.ParameterSet.Config.Process', 'cms.Process', ({(4, 22, 4, 31): '"""summary"""'}, {}), "('summary')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((44, 12, 44, 48), 'FWCore.ParameterSet.Config.Path', 'cms.Path', ({(44, 21, 44, 47): 'process.BeamSpotRcdPrinter'}, {}), '(process.BeamSpotRcdPrinter)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((7, 52, 7, 80), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ({(7, 75, 7, 78): '"""*"""'}, {}), "('*')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((9, 52, 9, 83), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ({(9, 75, 9, 81): '"""cout"""'}, {}), "('cout')", True, 'import FWCore.ParameterSet.Config as cms\n'), ((13, 12, 13, 34), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', ({(13, 32, 13, 33): '1'}, {}), '(1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((16, 24, 16, 47), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', ({(16, 45, 16, 46): '1'}, {}), '(1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((17, 15, 17, 38), 'FWCore.ParameterSet.Config.untracked.uint32', 'cms.untracked.uint32', ({(17, 36, 17, 37): '1'}, {}), '(1)', True, 'import FWCore.ParameterSet.Config as cms\n'), ((8, 76, 8, 107), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', ({(8, 98, 8, 105): '"""DEBUG"""'}, {}), "('DEBUG')", True, 'import FWCore.ParameterSet.Config as cms\n')] |
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project | django/authentication/api/urls.py | f29ed189b988a2d46d76b3c58cf77d1ed58ca64d | from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
urlpatterns = [
path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('verify/', TokenVerifyView.as_view(), name='token_verify'),
]
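# Typical flow with these simplejwt endpoints (illustrative, default settings
# assumed): POST username/password to 'obtain/' to receive an access/refresh
# token pair, POST the refresh token to 'refresh/' to get a new access token,
# and POST any token to 'verify/' to check that it is still valid.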
| [((9, 20, 9, 49), 'rest_framework_simplejwt.views.TokenObtainPairView.as_view', 'TokenObtainPairView.as_view', ({}, {}), '()', False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n'), ((10, 21, 10, 47), 'rest_framework_simplejwt.views.TokenRefreshView.as_view', 'TokenRefreshView.as_view', ({}, {}), '()', False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n'), ((11, 20, 11, 45), 'rest_framework_simplejwt.views.TokenVerifyView.as_view', 'TokenVerifyView.as_view', ({}, {}), '()', False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n')] |
nsetzer/YueMusicPlayer | yue/core/explorer/ftpsource.py | feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf |
from ftplib import FTP,error_perm, all_errors
import posixpath
from io import BytesIO,SEEK_SET
from .source import DataSource
import sys
import re
reftp = re.compile('(ssh|ftp)\:\/\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\/(.*)')
def parseFTPurl( url ):
m = reftp.match( url )
if m:
g = m.groups()
result = {
"mode" : g[0],
"username" : g[2] or "",
"password" : g[3] or "",
"hostname" : g[4] or "",
"port" : int(g[5][1:]) if g[5] else 0,
"path" : g[6] or "/",
}
if result['port'] == 0:
            if result['mode'] == 'ssh':
result['port'] = 22
else:
result['port'] = 21 # ftp port default
return result
raise ValueError("invalid: %s"%url)
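# Illustrative example (values are hypothetical): given the regex above,
# parseFTPurl('ftp://user:pass@host:2121/music/song.mp3') should return
# {'mode': 'ftp', 'username': 'user', 'password': 'pass',
#  'hostname': 'host', 'port': 2121, 'path': 'music/song.mp3'};
# when no port is present, ftp defaults to 21 and ssh to 22.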
def utf8_fix(s):
return ''.join([ a if ord(a)<128 else "%02X"%ord(a) for a in s])
class FTPWriter(object):
    """File-like writer that buffers data in memory and uploads it to the FTP path on close."""
def __init__(self, ftp, path):
super(FTPWriter, self).__init__()
self.ftp = ftp
self.path = path
self.file = BytesIO()
def write(self,data):
return self.file.write(data)
def seek(self,pos,whence=SEEK_SET):
return self.file.seek(pos,whence)
def tell(self):
return self.file.tell()
def close(self):
self.file.seek(0)
text = "STOR " + utf8_fix(self.path)
self.ftp.storbinary(text, self.file)
def __enter__(self):
return self
def __exit__(self,typ,val,tb):
if typ is None:
self.close()
class FTPReader(object):
    """File-like reader that downloads the FTP path into an in-memory buffer."""
def __init__(self, ftp, path):
super(FTPReader, self).__init__()
self.ftp = ftp
self.path = path
self.file = BytesIO()
# open the file
text = "RETR " + utf8_fix(self.path)
self.ftp.retrbinary(text, self.file.write)
self.file.seek(0)
def read(self,n=None):
return self.file.read(n)
def seek(self,pos,whence=SEEK_SET):
return self.file.seek(pos,whence)
def tell(self):
return self.file.tell()
def close(self):
self.file.close()
def __enter__(self):
return self
def __exit__(self,typ,val,tb):
if typ is None:
self.close()
class FTPSource(DataSource):
"""
    there is some sort of problem with utf-8/latin-1 and ftplib:
    storbinary must accept a STRING, since it builds a cmd and adds
    the CRLF to the input argument using the plus operator.
    the command fails when given unicode text (ord > 127) and also
    fails when given a byte string.
"""
# TODO: turn this into a directory generator
# which first loads the directory, then loops over
# loaded items.
# TODO: on windows we need a way to view available
# drive letters
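    # Sketch of the workaround (illustrative filename): utf8_fix() replaces any
    # character with ord() >= 128 by its two-digit hex code, e.g.
    # utf8_fix('caf\xe9.mp3') -> 'cafE9.mp3', so the strings passed to
    # storbinary/retrbinary stay plain ASCII.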
def __init__(self, host, port, username="", password=""):
super(FTPSource, self).__init__()
self.ftp = FTP()
self.ftp.connect(host,port)
self.ftp.login(username,password)
self.hostname = "%s:%d"%(host,port)
def root(self):
return "/"
def close(self):
try:
self.ftp.quit()
except all_errors as e:
sys.stderr.write("Error Closing FTP connection\n")
sys.stderr.write("%s\n"%e)
super().close()
def fix(self, path):
return utf8_fix(path)
def join(self,*args):
return posixpath.join(*args)
def breakpath(self,path):
return [ x for x in path.replace("/","\\").split("\\") if x ]
def relpath(self,path,base):
return posixpath.relpath(path,base)
def normpath(self,path,root=None):
if root and not path.startswith("/"):
path = posixpath.join(root,path)
return posixpath.normpath( path )
def listdir(self,path):
return self.ftp.nlst(path)
def parent(self,path):
# TODO: if path is C:\\ return empty string ?
# empty string returns drives
p,_ = posixpath.split(path)
return p
def move(self,oldpath,newpath):
self.ftp.rename(oldpath,newpath)
def delete(self,path):
# todo support removing directory rmdir()
path = utf8_fix(path)
if self.exists( path ):
if self.isdir(path):
try:
self.ftp.rmd(path)
except Exception as e:
print("ftp delete error: %s"%e)
else:
try:
self.ftp.delete(path)
except Exception as e:
print("ftp delete error: %s"%e)
def open(self,path,mode):
if mode=="wb":
return FTPWriter(self.ftp,path)
elif mode=="rb":
return FTPReader(self.ftp,path)
raise NotImplementedError(mode)
def exists(self,path):
path = utf8_fix(path)
p,n=posixpath.split(path)
lst = set(self.listdir(p))
return n in lst
def isdir(self,path):
path = utf8_fix(path)
try:
return self.ftp.size(path) is None
except error_perm:
# TODO: to think about more later,
# under my use-case, I'm only asking if a path is a directory
# if I Already think it exists. Under the current FTP impl
# ftp.size() fails for various reasons unless the file exists
            # and is an accessible file. I can infer that a failure to
# determine the size means that the path is a directory,
# but this does not hold true under other use cases.
# I can't cache listdir calls, but if I could, then I could
# use that to determine if the file exists
return True#self.exists( path )
def mkdir(self,path):
# this is a really ugly quick and dirty solution
path = utf8_fix(path)
if not self.exists(path):
p = self.parent( path )
try:
if not self.exists(p):
self.ftp.mkd( p )
self.ftp.mkd(path)
except Exception as e:
print("ftp mkd error: %s"%e)
def split(self,path):
return posixpath.split(path)
def splitext(self,path):
return posixpath.splitext(path)
def stat(self,path):
try:
size = self.ftp.size(path)
except error_perm:
size = None
result = {
"isDir" : size is None,
"isLink": False,
"mtime" : 0,
"ctime" : 0,
"size" : size or 0,
"name" : self.split(path)[1],
"mode" : 0
}
return result
def stat_fast(self,path):
# not fast for thus file system :(
try:
size = self.ftp.size(path)
except error_perm:
size = None
result = {
"name" : self.split(path)[1],
"size" : size or 0,
"isDir" : size is None,
"isLink" : False,
}
return result
def chmod(self,path,mode):
print("chmod not implemented")
def getExportPath(self,path):
return self.hostname+path
| [((10, 8, 10, 83), 're.compile', 're.compile', ({(10, 19, 10, 82): '"""(ssh|ftp)\\\\:\\\\/\\\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\\\/(.*)"""'}, {}), "('(ssh|ftp)\\\\:\\\\/\\\\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\\\\/(.*)')", False, 'import re\n'), ((42, 20, 42, 29), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO, SEEK_SET\n'), ((71, 20, 71, 29), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO, SEEK_SET\n'), ((117, 19, 117, 24), 'ftplib.FTP', 'FTP', ({}, {}), '()', False, 'from ftplib import FTP, error_perm, all_errors\n'), ((140, 15, 140, 36), 'posixpath.join', 'posixpath.join', ({(140, 30, 140, 35): '*args'}, {}), '(*args)', False, 'import posixpath\n'), ((146, 15, 146, 43), 'posixpath.relpath', 'posixpath.relpath', ({(146, 33, 146, 37): 'path', (146, 38, 146, 42): 'base'}, {}), '(path, base)', False, 'import posixpath\n'), ((151, 15, 151, 41), 'posixpath.normpath', 'posixpath.normpath', ({(151, 35, 151, 39): 'path'}, {}), '(path)', False, 'import posixpath\n'), ((159, 14, 159, 35), 'posixpath.split', 'posixpath.split', ({(159, 30, 159, 34): 'path'}, {}), '(path)', False, 'import posixpath\n'), ((189, 12, 189, 33), 'posixpath.split', 'posixpath.split', ({(189, 28, 189, 32): 'path'}, {}), '(path)', False, 'import posixpath\n'), ((223, 15, 223, 36), 'posixpath.split', 'posixpath.split', ({(223, 31, 223, 35): 'path'}, {}), '(path)', False, 'import posixpath\n'), ((226, 15, 226, 39), 'posixpath.splitext', 'posixpath.splitext', ({(226, 34, 226, 38): 'path'}, {}), '(path)', False, 'import posixpath\n'), ((150, 19, 150, 44), 'posixpath.join', 'posixpath.join', ({(150, 34, 150, 38): 'root', (150, 39, 150, 43): 'path'}, {}), '(root, path)', False, 'import posixpath\n'), ((131, 12, 131, 62), 'sys.stderr.write', 'sys.stderr.write', ({(131, 29, 131, 61): '"""Error Closing FTP connection\n"""'}, {}), "('Error Closing FTP connection\\n')", False, 'import sys\n'), ((132, 12, 132, 38), 'sys.stderr.write', 'sys.stderr.write', ({(132, 29, 132, 37): "('%s\\n' % e)"}, {}), "('%s\\n' % e)", False, 'import sys\n')] |
roshanmaskey/plaso | tests/engine/knowledge_base.py | 637856f578eb4bc81f62b97d7f483f69314e7f47 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the knowledge base."""
import unittest
from plaso.containers import artifacts
from plaso.engine import knowledge_base
from tests import test_lib as shared_test_lib
class KnowledgeBaseTest(shared_test_lib.BaseTestCase):
"""Tests for the knowledge base."""
# pylint: disable=protected-access
_MACOS_PATHS = [
'/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions',
('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/'
'apdfllckaahabafndbhieahigkjlhalf'),
'/private/var/log/system.log',
'/Users/frank/Library/Application Data/Google/Chrome/Default',
'/Users/hans/Library/Application Data/Google/Chrome/Default',
('/Users/frank/Library/Application Data/Google/Chrome/Default/'
'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'),
'/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions']
_MACOS_USERS = [
{'name': 'root', 'path': '/var/root', 'sid': '0'},
{'name': 'frank', 'path': '/Users/frank', 'sid': '4052'},
{'name': 'hans', 'path': '/Users/hans', 'sid': '4352'},
{'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}]
_WINDOWS_PATHS = [
'C:\\Users\\Dude\\SomeFolder\\Chrome\\Default\\Extensions',
('C:\\Users\\Dude\\SomeNoneStandardFolder\\Chrome\\Default\\Extensions\\'
'hmjkmjkepdijhoojdojkdfohbdgmmhki'),
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'blpcfgokakmgnkcojhhkbfbldkacnbeo'),
'C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions',
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'icppfcnhkcmnfdhfhphakoifcfokfdhg'),
'C:\\Windows\\System32',
'C:\\Stuff/with path separator\\Folder']
_WINDOWS_USERS = [
{'name': 'dude', 'path': 'C:\\Users\\dude', 'sid': 'S-1'},
{'name': 'frank', 'path': 'C:\\Users\\frank', 'sid': 'S-2'}]
def _SetUserAccounts(self, knowledge_base_object, users):
"""Sets the user accounts in the knowledge base.
Args:
knowledge_base_object (KnowledgeBase): knowledge base.
      users (list[dict[str,str]]): users.
"""
for user in users:
identifier = user.get('sid', user.get('uid', None))
if not identifier:
continue
user_account = artifacts.UserAccountArtifact(
identifier=identifier, user_directory=user.get('path', None),
username=user.get('name', None))
knowledge_base_object.AddUserAccount(user_account)
def testCodepageProperty(self):
"""Tests the codepage property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.codepage, 'cp1252')
def testHostnameProperty(self):
"""Tests the hostname property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.hostname, '')
def testOperatingSystemProperty(self):
"""Tests the operating_system property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertIsNone(operating_system)
knowledge_base_object.SetValue('operating_system', 'Windows')
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertEqual(operating_system, 'Windows')
def testTimezoneProperty(self):
"""Tests the timezone property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.timezone.zone, 'UTC')
def testUserAccountsProperty(self):
"""Tests the user accounts property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(len(knowledge_base_object.user_accounts), 0)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertEqual(len(knowledge_base_object.user_accounts), 1)
def testYearProperty(self):
"""Tests the year property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.year, 0)
def testAddUserAccount(self):
"""Tests the AddUserAccount function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
with self.assertRaises(KeyError):
knowledge_base_object.AddUserAccount(user_account)
def testAddEnvironmentVariable(self):
"""Tests the AddEnvironmentVariable function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
with self.assertRaises(KeyError):
knowledge_base_object.AddEnvironmentVariable(environment_variable)
def testGetEnvironmentVariable(self):
"""Tests the GetEnvironmentVariable functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'SystemRoot')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'sYsTeMrOoT')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'Bogus')
self.assertIsNone(test_environment_variable)
def testGetEnvironmentVariables(self):
"""Tests the GetEnvironmentVariables function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='WinDir', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variables = knowledge_base_object.GetEnvironmentVariables()
self.assertEqual(len(environment_variables), 2)
def testGetHostname(self):
"""Tests the GetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, '')
# TODO: add tests for GetMountPoint.
def testGetSourceConfigurationArtifacts(self):
"""Tests the GetSourceConfigurationArtifacts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
source_configurations = (
knowledge_base_object.GetSourceConfigurationArtifacts())
self.assertEqual(len(source_configurations), 1)
self.assertIsNotNone(source_configurations[0])
system_configuration = source_configurations[0].system_configuration
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
def testGetSystemConfigurationArtifact(self):
"""Tests the _GetSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
system_configuration = (
knowledge_base_object._GetSystemConfigurationArtifact())
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
# TODO: add tests for GetTextPrepend.
def testGetUsernameByIdentifier(self):
"""Tests the GetUsernameByIdentifier function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
    username = knowledge_base_object.GetUsernameByIdentifier('1000')
    self.assertEqual(username, 'testuser')
    username = knowledge_base_object.GetUsernameByIdentifier(1000)
    self.assertEqual(username, '')
    username = knowledge_base_object.GetUsernameByIdentifier('1001')
    self.assertEqual(username, '')
def testGetUsernameForPath(self):
"""Tests the GetUsernameForPath function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[4])
self.assertEqual(username, 'hans')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertIsNone(username)
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[2])
self.assertEqual(username, 'frank')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[2])
self.assertIsNone(username)
def testGetSetValue(self):
"""Tests the Get and SetValue functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
expected_value = 'test value'
knowledge_base_object.SetValue('Test', expected_value)
value = knowledge_base_object.GetValue('Test')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('tEsT')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('Bogus')
self.assertIsNone(value)
def testHasUserAccounts(self):
"""Tests the HasUserAccounts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertFalse(knowledge_base_object.HasUserAccounts())
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertTrue(knowledge_base_object.HasUserAccounts())
def testReadSystemConfigurationArtifact(self):
"""Tests the ReadSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.hostname = artifacts.HostnameArtifact(
name='myhost.mydomain')
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
system_configuration.user_accounts.append(user_account)
knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration)
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, 'myhost.mydomain')
def testSetActiveSession(self):
"""Tests the SetActiveSession function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a')
self.assertEqual(
knowledge_base_object._active_session,
'ddda05bedf324cbd99fa8c24b8a0037a')
knowledge_base_object.SetActiveSession(
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
self.assertEqual(
knowledge_base_object._active_session,
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
def testSetCodepage(self):
"""Tests the SetCodepage function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetCodepage('cp1252')
with self.assertRaises(ValueError):
knowledge_base_object.SetCodepage('bogus')
def testSetHostname(self):
"""Tests the SetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
# TODO: add tests for SetMountPoint.
# TODO: add tests for SetTextPrepend.
def testSetTimeZone(self):
"""Tests the SetTimeZone function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
time_zone_artifact = artifacts.TimeZoneArtifact(
localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112',
name='Eastern Standard Time')
knowledge_base_object.AddAvailableTimeZone(time_zone_artifact)
# Set an IANA time zone name.
knowledge_base_object.SetTimeZone('Europe/Zurich')
self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich')
# Set a Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern Standard Time')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a localized Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a MUI form Windows time zone name.
knowledge_base_object.SetTimeZone('@tzres.dll,-112')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
with self.assertRaises(ValueError):
knowledge_base_object.SetTimeZone('Bogus')
if __name__ == '__main__':
unittest.main()
| [((390, 2, 390, 17), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((71, 28, 71, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((77, 28, 77, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((83, 28, 83, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((95, 28, 95, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((101, 28, 101, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((105, 19, 107, 28), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((114, 28, 114, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((120, 28, 120, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((122, 19, 124, 28), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((132, 28, 132, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((134, 27, 135, 69), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((144, 28, 144, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((146, 27, 147, 69), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((164, 28, 164, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((166, 27, 167, 69), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((170, 27, 171, 65), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((179, 28, 179, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((188, 28, 188, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((190, 24, 190, 74), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((193, 19, 195, 28), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((210, 28, 210, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 
'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((212, 24, 212, 74), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((215, 19, 217, 28), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((230, 28, 230, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((232, 19, 234, 28), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((248, 28, 248, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((263, 28, 263, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((280, 28, 280, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((296, 28, 296, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((300, 19, 302, 28), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((309, 28, 309, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((311, 27, 311, 66), 'plaso.containers.artifacts.SystemConfigurationArtifact', 'artifacts.SystemConfigurationArtifact', ({}, {}), '()', False, 'from plaso.containers import artifacts\n'), ((312, 36, 313, 31), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((315, 19, 317, 28), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((327, 28, 327, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((342, 28, 342, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((351, 28, 351, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((353, 24, 353, 74), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', (), '', False, 'from plaso.containers import artifacts\n'), ((361, 28, 361, 58), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ({}, {}), '()', False, 'from plaso.engine import knowledge_base\n'), ((363, 25, 365, 37), 'plaso.containers.artifacts.TimeZoneArtifact', 'artifacts.TimeZoneArtifact', (), '', False, 'from plaso.containers import artifacts\n')] |
BYJRK/LeetCode-Solutions | Problems/Dynamic Programming/140. Word Break II.py | 008467e1717309066a519acb8623d2f84071b64a | # https://leetcode.com/problems/word-break-ii/
from typing import List
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> List[str]:
        # Quick sanity check: if s contains a letter that none of the words contain, bail out early
set1 = set(s)
set2 = set(''.join(wordDict))
if not set1.issubset(set2):
return []
        # dp[i] records the ways in which the prefix s[:i] can be segmented
        # [[]] marks the starting point (the empty prefix)
        # None means the prefix has not been reached yet, or cannot be segmented
        # e.g. [['a', 'b'], ['ab']] means two ways to build this prefix have been found so far
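        # Illustrative walk-through: for s = 'ab' and wordDict = ['a', 'b', 'ab']
        # the table ends up as dp = [[[]], [['a']], [['ab'], ['a', 'b']]],
        # which joins into the answers 'ab' and 'a b'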
dp = [None] * (len(s) + 1)
dp[0] = [[]]
for i in range(len(s) + 1):
            # Skip if this prefix cannot be segmented
if dp[i] is None:
continue
tmp = s[i:]
for w in wordDict:
idx = len(w) + i
if idx > len(s):
continue
if tmp.startswith(w):
if dp[idx] is None:
dp[idx] = []
                    # Extend every existing segmentation to the new position, appending the current word
for dic in dp[i]:
dp[idx].append(dic + [w])
if dp[-1] is None:
return []
return [' '.join(res) for res in dp[-1]]
def wordBreak_dfs(self, s: str, wordDict: List[str]) -> List[str]:
def dfs(s: str, memo={}):
if s in memo:
return memo[s]
if len(s) == 0:
return [[]]
res = []
for w in wordDict:
if s.startswith(w):
tmp = s[len(w):]
combos = dfs(tmp, memo)
for combo in combos:
res.append([w] + combo)
memo[s] = res
return res
return dfs(s)
s = Solution()
print(s.wordBreak_dfs('catsanddog', ["cat", "cats", "and", "sand", "dog"]))
print(s.wordBreak_dfs('pineapplepenapple', [
"apple", "pen", "applepen", "pine", "pineapple"]))
# text = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
# words = ["a", "aa", "aaa", "aaaa", "aaaaa", "aaaaaa",
# "aaaaaaa", "aaaaaaaa", "aaaaaaaaa", "aaaaaaaaaa"]
# print(s.wordBreak(text, words))
| [] |
banhr/neutron | neutron/tests/unit/db/test_migration.py | 4b3e73648327ce9f4d3437986a8663372f577f1b | # Copyright 2012 New Dream Network, LLC (DreamHost)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
import re
import sys
import textwrap
from alembic.autogenerate import api as alembic_ag_api
from alembic import config as alembic_config
from alembic.operations import ops as alembic_ops
from alembic import script as alembic_script
import fixtures
import mock
from neutron_lib.utils import helpers
from oslo_utils import fileutils
import pkg_resources
import sqlalchemy as sa
from testtools import matchers
from neutron.conf.db import migration_cli
from neutron.db import migration
from neutron.db.migration import autogen
from neutron.db.migration import cli
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit import testlib_api
class FakeConfig(object):
service = ''
class FakeRevision(object):
path = 'fakepath'
def __init__(self, labels=None, down_revision=None, is_branch_point=False):
if not labels:
labels = set()
self.branch_labels = labels
self.down_revision = down_revision
self.is_branch_point = is_branch_point
self.revision = helpers.get_random_string(10)
self.module = mock.MagicMock()
class MigrationEntrypointsMemento(fixtures.Fixture):
'''Create a copy of the migration entrypoints map so it can be restored
during test cleanup.
'''
def _setUp(self):
self.ep_backup = {}
for proj, ep in migration_cli.migration_entrypoints.items():
self.ep_backup[proj] = copy.copy(ep)
self.addCleanup(self.restore)
def restore(self):
migration_cli.migration_entrypoints = self.ep_backup
class TestDbMigration(base.BaseTestCase):
def setUp(self):
super(TestDbMigration, self).setUp()
mock.patch('alembic.op.get_bind').start()
self.mock_alembic_is_offline = mock.patch(
'alembic.context.is_offline_mode', return_value=False).start()
self.mock_alembic_is_offline.return_value = False
self.mock_sa_inspector = mock.patch(
'sqlalchemy.engine.reflection.Inspector').start()
def _prepare_mocked_sqlalchemy_inspector(self):
mock_inspector = mock.MagicMock()
mock_inspector.get_table_names.return_value = ['foo', 'bar']
mock_inspector.get_columns.return_value = [{'name': 'foo_column'},
{'name': 'bar_column'}]
self.mock_sa_inspector.from_engine.return_value = mock_inspector
def test_schema_has_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_table('foo'))
def test_schema_has_table_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_table, 'foo')
def test_schema_has_column_missing_table(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column('meh', 'meh'))
def test_schema_has_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertTrue(migration.schema_has_column('foo', 'foo_column'))
def test_schema_has_column_raises_if_offline(self):
self.mock_alembic_is_offline.return_value = True
self.assertRaises(RuntimeError, migration.schema_has_column,
'foo', 'foo_col')
def test_schema_has_column_missing_column(self):
self._prepare_mocked_sqlalchemy_inspector()
self.assertFalse(migration.schema_has_column(
'foo', column_name='meh'))
class TestCli(base.BaseTestCase):
def setUp(self):
super(TestCli, self).setUp()
self.do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command')
self.do_alembic_cmd = self.do_alembic_cmd_p.start()
self.mock_alembic_err = mock.patch('alembic.util.err').start()
self.mock_alembic_warn = mock.patch('alembic.util.warn').start()
self.mock_alembic_err.side_effect = SystemExit
def mocked_root_dir(cfg):
return os.path.join('/fake/dir', cli._get_project_base(cfg))
mock_root = mock.patch.object(cli, '_get_package_root_dir').start()
mock_root.side_effect = mocked_root_dir
# Avoid creating fake directories
mock.patch('oslo_utils.fileutils.ensure_tree').start()
# Set up some configs and entrypoints for tests to chew on
self.configs = []
self.projects = ('neutron', 'networking-foo', 'neutron-fwaas')
ini = os.path.join(os.path.dirname(cli.__file__), 'alembic.ini')
self.useFixture(MigrationEntrypointsMemento())
migration_cli.migration_entrypoints = {}
for project in self.projects:
config = alembic_config.Config(ini)
config.set_main_option('neutron_project', project)
module_name = project.replace('-', '_') + '.db.migration'
attrs = ('alembic_migrations',)
script_location = ':'.join([module_name, attrs[0]])
config.set_main_option('script_location', script_location)
self.configs.append(config)
entrypoint = pkg_resources.EntryPoint(project,
module_name,
attrs=attrs)
migration_cli.migration_entrypoints[project] = entrypoint
def _main_test_helper(self, argv, func_name, exp_kwargs=[{}]):
with mock.patch.object(sys, 'argv', argv),\
mock.patch.object(cli, 'run_sanity_checks'),\
mock.patch.object(cli, 'validate_revisions'):
cli.main()
def _append_version_path(args):
args = copy.copy(args)
if 'autogenerate' in args and not args['autogenerate']:
args['version_path'] = mock.ANY
return args
self.do_alembic_cmd.assert_has_calls(
[mock.call(mock.ANY, func_name, **_append_version_path(kwargs))
for kwargs in exp_kwargs]
)
def test_stamp(self):
self._main_test_helper(
['prog', 'stamp', 'foo'],
'stamp',
[{'revision': 'foo', 'sql': False}]
)
self._main_test_helper(
['prog', 'stamp', 'foo', '--sql'],
'stamp',
[{'revision': 'foo', 'sql': True}]
)
def _validate_cmd(self, cmd):
self._main_test_helper(
['prog', cmd],
cmd,
[{'verbose': False}])
self._main_test_helper(
['prog', cmd, '--verbose'],
cmd,
[{'verbose': True}])
def test_branches(self):
self._validate_cmd('branches')
def test_current(self):
self._validate_cmd('current')
def test_history(self):
self._validate_cmd('history')
def test_heads(self):
self._validate_cmd('heads')
def test_check_migration(self):
with mock.patch.object(cli, 'validate_head_files') as validate:
self._main_test_helper(['prog', 'check_migration'], 'branches')
self.assertEqual(len(self.projects), validate.call_count)
def _test_database_sync_revision(self, separate_branches=True):
with mock.patch.object(cli, 'update_head_files') as update:
if separate_branches:
mock.patch('os.path.exists').start()
expected_kwargs = [{
'message': 'message', 'sql': False, 'autogenerate': True,
}]
self._main_test_helper(
['prog', 'revision', '--autogenerate', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': True,
'autogenerate': False,
'head': cli._get_branch_head(branch)
} for branch in cli.MIGRATION_BRANCHES]
for kwarg in expected_kwargs:
kwarg['autogenerate'] = False
kwarg['sql'] = True
self._main_test_helper(
['prog', 'revision', '--sql', '-m', 'message'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
expected_kwargs = [{
'message': 'message',
'sql': False,
'autogenerate': False,
'head': 'expand@head'
}]
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--expand'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
update.reset_mock()
for kwarg in expected_kwargs:
kwarg['head'] = 'contract@head'
self._main_test_helper(
['prog', 'revision', '-m', 'message', '--contract'],
'revision',
expected_kwargs
)
self.assertEqual(len(self.projects), update.call_count)
def test_database_sync_revision(self):
self._test_database_sync_revision()
def test_database_sync_revision_no_branches(self):
# Test that old branchless approach is still supported
self._test_database_sync_revision(separate_branches=False)
def test_upgrade_revision(self):
self._main_test_helper(
['prog', 'upgrade', '--sql', 'head'],
'upgrade',
[{'desc': None, 'revision': 'heads', 'sql': True}]
)
def test_upgrade_delta(self):
self._main_test_helper(
['prog', 'upgrade', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': '+3', 'sql': False}]
)
def test_upgrade_revision_delta(self):
self._main_test_helper(
['prog', 'upgrade', 'kilo', '--delta', '3'],
'upgrade',
[{'desc': None, 'revision': 'kilo+3', 'sql': False}]
)
def test_upgrade_expand(self):
self._main_test_helper(
['prog', 'upgrade', '--expand'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': 'expand@head',
'sql': False}]
)
def test_upgrade_expand_contract_are_mutually_exclusive(self):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--expand --contract'], 'upgrade')
def _test_upgrade_conflicts_with_revision(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s revision1' % mode], 'upgrade')
def _test_upgrade_conflicts_with_delta(self, mode):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'upgrade', '--%s +3' % mode], 'upgrade')
def _test_revision_autogenerate_conflicts_with_branch(self, branch):
with testlib_api.ExpectedException(SystemExit):
self._main_test_helper(
['prog', 'revision', '--autogenerate', '--%s' % branch],
'revision')
def test_revision_autogenerate_conflicts_with_expand(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.EXPAND_BRANCH)
def test_revision_autogenerate_conflicts_with_contract(self):
self._test_revision_autogenerate_conflicts_with_branch(
cli.CONTRACT_BRANCH)
def test_upgrade_expand_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('expand')
def test_upgrade_contract_conflicts_with_revision(self):
self._test_upgrade_conflicts_with_revision('contract')
def test_upgrade_expand_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('expand')
def test_upgrade_contract_conflicts_with_delta(self):
self._test_upgrade_conflicts_with_delta('contract')
def test_upgrade_contract(self):
self._main_test_helper(
['prog', 'upgrade', '--contract'],
'upgrade',
[{'desc': cli.CONTRACT_BRANCH,
'revision': 'contract@head',
'sql': False}]
)
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_upgrade_milestone_expand_before_contract(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
self._main_test_helper(
['prog', '--subproject', 'neutron', 'upgrade', 'liberty'],
'upgrade',
[{'desc': cli.EXPAND_BRANCH,
'revision': e_revs[3].revision,
'sql': False},
{'desc': cli.CONTRACT_BRANCH,
'revision': c_revs[1].revision,
'sql': False}]
)
def assert_command_fails(self, command):
# Avoid cluttering stdout with argparse error messages
mock.patch('argparse.ArgumentParser._print_message').start()
with mock.patch.object(sys, 'argv', command), mock.patch.object(
cli, 'run_sanity_checks'):
self.assertRaises(SystemExit, cli.main)
def test_downgrade_fails(self):
self.assert_command_fails(['prog', 'downgrade', '--sql', 'juno'])
def test_upgrade_negative_relative_revision_fails(self):
self.assert_command_fails(['prog', 'upgrade', '-2'])
def test_upgrade_negative_delta_fails(self):
self.assert_command_fails(['prog', 'upgrade', '--delta', '-2'])
def test_upgrade_rejects_delta_with_relative_revision(self):
self.assert_command_fails(['prog', 'upgrade', '+2', '--delta', '3'])
def _test_validate_head_files_helper(self, heads, contract_head='',
expand_head=''):
fake_config = self.configs[0]
head_files_not_exist = (contract_head == expand_head == '')
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc,\
mock.patch('os.path.exists') as os_mock:
if head_files_not_exist:
os_mock.return_value = False
else:
os_mock.return_value = True
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
fake_config), contract_head + '\n')).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
fake_config), expand_head + '\n')).mock_open
if contract_head in heads and expand_head in heads:
cli.validate_head_files(fake_config)
elif head_files_not_exist:
cli.validate_head_files(fake_config)
self.assertTrue(self.mock_alembic_warn.called)
else:
self.assertRaises(
SystemExit,
cli.validate_head_files,
fake_config
)
self.assertTrue(self.mock_alembic_err.called)
if contract_head in heads and expand_head in heads:
mock_open_ex.assert_called_with(
cli._get_expand_head_file_path(fake_config))
mock_open_con.assert_called_with(
cli._get_contract_head_file_path(fake_config))
if not head_files_not_exist:
fc.assert_called_once_with(fake_config)
def test_validate_head_files_success(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='a',
expand_head='b')
def test_validate_head_files_missing_file(self):
self._test_validate_head_files_helper(['a', 'b'])
def test_validate_head_files_wrong_contents(self):
self._test_validate_head_files_helper(['a', 'b'], contract_head='c',
expand_head='d')
@mock.patch.object(fileutils, 'delete_if_exists')
def test_update_head_files_success(self, *mocks):
heads = ['a', 'b']
mock_open_con = self.useFixture(
tools.OpenFixture(cli._get_contract_head_file_path(
self.configs[0]))).mock_open
mock_open_ex = self.useFixture(
tools.OpenFixture(cli._get_expand_head_file_path(
self.configs[0]))).mock_open
with mock.patch('alembic.script.ScriptDirectory.from_config') as fc:
fc.return_value.get_heads.return_value = heads
revs = {heads[0]: FakeRevision(labels=cli.CONTRACT_BRANCH),
heads[1]: FakeRevision(labels=cli.EXPAND_BRANCH)}
fc.return_value.get_revision.side_effect = revs.__getitem__
cli.update_head_files(self.configs[0])
mock_open_con.return_value.write.assert_called_with(
heads[0] + '\n')
mock_open_ex.return_value.write.assert_called_with(heads[1] + '\n')
old_head_file = cli._get_head_file_path(
self.configs[0])
old_heads_file = cli._get_heads_file_path(
self.configs[0])
delete_if_exists = mocks[0]
self.assertIn(mock.call(old_head_file),
delete_if_exists.call_args_list)
self.assertIn(mock.call(old_heads_file),
delete_if_exists.call_args_list)
def test_get_project_base(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
proj_base = cli._get_project_base(config)
self.assertEqual('a', proj_base)
def test_get_root_versions_dir(self):
config = alembic_config.Config()
config.set_main_option('script_location', 'a.b.c:d')
versions_dir = cli._get_root_versions_dir(config)
self.assertEqual('/fake/dir/a/a/b/c/d/versions', versions_dir)
def test_get_subproject_script_location(self):
foo_ep = cli._get_subproject_script_location('networking-foo')
expected = 'networking_foo.db.migration:alembic_migrations'
self.assertEqual(expected, foo_ep)
def test_get_subproject_script_location_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_script_location, 'not-installed')
def test_get_subproject_base_not_installed(self):
self.assertRaises(
SystemExit, cli._get_subproject_base, 'not-installed')
def test__compare_labels_ok(self):
labels = {'label1', 'label2'}
fake_revision = FakeRevision(labels)
cli._compare_labels(fake_revision, {'label1', 'label2'})
def test__compare_labels_fail_unexpected_labels(self):
labels = {'label1', 'label2', 'label3'}
fake_revision = FakeRevision(labels)
self.assertRaises(
SystemExit,
cli._compare_labels, fake_revision, {'label1', 'label2'})
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branchless_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(script_dir, fake_revision,
label=None)
expected_labels = set()
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_compare_labels')
def test__validate_single_revision_labels_branches_fail_different_labels(
self, compare_mock):
fake_down_revision = FakeRevision()
fake_revision = FakeRevision(down_revision=fake_down_revision)
script_dir = mock.Mock()
script_dir.get_revision.return_value = fake_down_revision
cli._validate_single_revision_labels(
script_dir, fake_revision, label='fakebranch')
expected_labels = {'fakebranch'}
compare_mock.assert_has_calls(
[mock.call(revision, expected_labels)
for revision in (fake_revision, fake_down_revision)]
)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branches(self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
branch = cli.MIGRATION_BRANCHES[0]
fake_revision.path = os.path.join('/fake/path', branch)
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(
script_dir, fake_revision, label=branch)
@mock.patch.object(cli, '_validate_single_revision_labels')
def test__validate_revision_validates_branchless_migrations(
self, validate_mock):
script_dir = mock.Mock()
fake_revision = FakeRevision()
cli._validate_revision(script_dir, fake_revision)
validate_mock.assert_called_with(script_dir, fake_revision)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_walks_thru_all_revisions(
self, walk_mock, validate_mock):
revisions = [FakeRevision() for i in range(10)]
walk_mock.return_value = revisions
cli.validate_revisions(self.configs[0])
validate_mock.assert_has_calls(
[mock.call(mock.ANY, revision) for revision in revisions]
)
@mock.patch.object(cli, '_validate_revision')
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test_validate_revisions_fails_on_multiple_branch_points(
self, walk_mock, validate_mock):
revisions = [FakeRevision(is_branch_point=True) for i in range(2)]
walk_mock.return_value = revisions
self.assertRaises(
SystemExit, cli.validate_revisions, self.configs[0])
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__get_branch_points(self, walk_mock):
revisions = [FakeRevision(is_branch_point=tools.get_random_boolean)
for i in range(50)]
walk_mock.return_value = revisions
script_dir = alembic_script.ScriptDirectory.from_config(
self.configs[0])
self.assertEqual(set(rev for rev in revisions if rev.is_branch_point),
set(cli._get_branch_points(script_dir)))
@mock.patch.object(cli, '_get_version_branch_path')
def test_autogen_process_directives(self, get_version_branch_path):
get_version_branch_path.side_effect = lambda cfg, release, branch: (
"/foo/expand" if branch == 'expand' else "/foo/contract")
migration_script = alembic_ops.MigrationScript(
'eced083f5df',
# these directives will be split into separate
# expand/contract scripts
alembic_ops.UpgradeOps(
ops=[
alembic_ops.CreateTableOp(
'organization',
[
sa.Column('id', sa.Integer(), primary_key=True),
sa.Column('name', sa.String(50), nullable=False)
]
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.AddColumnOp(
'user',
sa.Column('organization_id', sa.Integer())
),
alembic_ops.CreateForeignKeyOp(
'org_fk', 'user', 'organization',
['organization_id'], ['id']
),
alembic_ops.DropConstraintOp(
'user', 'uq_user_org'
),
alembic_ops.DropColumnOp(
'user', 'organization_name'
)
]
)
]
),
# these will be discarded
alembic_ops.DowngradeOps(
ops=[
alembic_ops.AddColumnOp(
'user', sa.Column(
'organization_name', sa.String(50), nullable=True)
),
alembic_ops.CreateUniqueConstraintOp(
'uq_user_org', 'user',
['user_name', 'organization_name']
),
alembic_ops.ModifyTableOps(
'user',
ops=[
alembic_ops.DropConstraintOp('org_fk', 'user'),
alembic_ops.DropColumnOp('user', 'organization_id')
]
),
alembic_ops.DropTableOp('organization')
]
),
message='create the organization table and '
'replace user.organization_name'
)
directives = [migration_script]
autogen.process_revision_directives(
mock.Mock(), mock.Mock(), directives
)
expand = directives[0]
contract = directives[1]
self.assertEqual("/foo/expand", expand.version_path)
self.assertEqual("/foo/contract", contract.version_path)
self.assertTrue(expand.downgrade_ops.is_empty())
self.assertTrue(contract.downgrade_ops.is_empty())
def _get_regex(s):
s = textwrap.dedent(s)
s = re.escape(s)
# alembic 0.8.9 added additional leading '# ' before comments
return s.replace('\\#\\#\\#\\ ', '(# )?### ')
expected_regex = ("""\
### commands auto generated by Alembic - please adjust! ###
op.create_table('organization',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.add_column('user', """
"""sa.Column('organization_id', sa.Integer(), nullable=True))
op.create_foreign_key('org_fk', 'user', """
"""'organization', ['organization_id'], ['id'])
### end Alembic commands ###""")
self.assertThat(
alembic_ag_api.render_python_code(expand.upgrade_ops),
matchers.MatchesRegex(_get_regex(expected_regex)))
expected_regex = ("""\
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('user', 'uq_user_org', type_=None)
op.drop_column('user', 'organization_name')
### end Alembic commands ###""")
self.assertThat(
alembic_ag_api.render_python_code(contract.upgrade_ops),
matchers.MatchesRegex(_get_regex(expected_regex)))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_one_branch(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.CONTRACT_BRANCH)
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'liberty',
cli.EXPAND_BRANCH)
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_two_branches(self, walk_mock):
c_revs = [FakeRevision(labels={cli.CONTRACT_BRANCH}) for r in range(5)]
c_revs[1].module.neutron_milestone = [migration.LIBERTY]
e_revs = [FakeRevision(labels={cli.EXPAND_BRANCH}) for r in range(5)]
e_revs[3].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = c_revs + e_revs
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(2, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
@mock.patch('alembic.script.ScriptDirectory.walk_revisions')
def test__find_milestone_revisions_branchless(self, walk_mock):
revisions = [FakeRevision() for r in range(5)]
revisions[2].module.neutron_milestone = [migration.LIBERTY]
walk_mock.return_value = revisions
m = cli._find_milestone_revisions(self.configs[0], 'liberty')
self.assertEqual(1, len(m))
m = cli._find_milestone_revisions(self.configs[0], 'mitaka')
self.assertEqual(0, len(m))
class TestSafetyChecks(base.BaseTestCase):
def test_validate_revisions(self, *mocks):
cli.validate_revisions(cli.get_neutron_config())
| [((358, 5, 358, 64), 'mock.patch', 'mock.patch', ({(358, 16, 358, 63): '"""alembic.script.ScriptDirectory.walk_revisions"""'}, {}), "('alembic.script.ScriptDirectory.walk_revisions')", False, 'import mock\n'), ((451, 5, 451, 53), 'mock.patch.object', 'mock.patch.object', ({(451, 23, 451, 32): 'fileutils', (451, 34, 451, 52): '"""delete_if_exists"""'}, {}), "(fileutils, 'delete_if_exists')", False, 'import mock\n'), ((517, 5, 517, 46), 'mock.patch.object', 'mock.patch.object', ({(517, 23, 517, 26): 'cli', (517, 28, 517, 45): '"""_compare_labels"""'}, {}), "(cli, '_compare_labels')", False, 'import mock\n'), ((535, 5, 535, 46), 'mock.patch.object', 'mock.patch.object', ({(535, 23, 535, 26): 'cli', (535, 28, 535, 45): '"""_compare_labels"""'}, {}), "(cli, '_compare_labels')", False, 'import mock\n'), ((553, 5, 553, 63), 'mock.patch.object', 'mock.patch.object', ({(553, 23, 553, 26): 'cli', (553, 28, 553, 62): '"""_validate_single_revision_labels"""'}, {}), "(cli, '_validate_single_revision_labels')", False, 'import mock\n'), ((563, 5, 563, 63), 'mock.patch.object', 'mock.patch.object', ({(563, 23, 563, 26): 'cli', (563, 28, 563, 62): '"""_validate_single_revision_labels"""'}, {}), "(cli, '_validate_single_revision_labels')", False, 'import mock\n'), ((572, 5, 572, 49), 'mock.patch.object', 'mock.patch.object', ({(572, 23, 572, 26): 'cli', (572, 28, 572, 48): '"""_validate_revision"""'}, {}), "(cli, '_validate_revision')", False, 'import mock\n'), ((573, 5, 573, 64), 'mock.patch', 'mock.patch', ({(573, 16, 573, 63): '"""alembic.script.ScriptDirectory.walk_revisions"""'}, {}), "('alembic.script.ScriptDirectory.walk_revisions')", False, 'import mock\n'), ((584, 5, 584, 49), 'mock.patch.object', 'mock.patch.object', ({(584, 23, 584, 26): 'cli', (584, 28, 584, 48): '"""_validate_revision"""'}, {}), "(cli, '_validate_revision')", False, 'import mock\n'), ((585, 5, 585, 64), 'mock.patch', 'mock.patch', ({(585, 16, 585, 63): '"""alembic.script.ScriptDirectory.walk_revisions"""'}, {}), "('alembic.script.ScriptDirectory.walk_revisions')", False, 'import mock\n'), ((594, 5, 594, 64), 'mock.patch', 'mock.patch', ({(594, 16, 594, 63): '"""alembic.script.ScriptDirectory.walk_revisions"""'}, {}), "('alembic.script.ScriptDirectory.walk_revisions')", False, 'import mock\n'), ((604, 5, 604, 55), 'mock.patch.object', 'mock.patch.object', ({(604, 23, 604, 26): 'cli', (604, 28, 604, 54): '"""_get_version_branch_path"""'}, {}), "(cli, '_get_version_branch_path')", False, 'import mock\n'), ((712, 5, 712, 64), 'mock.patch', 'mock.patch', ({(712, 16, 712, 63): '"""alembic.script.ScriptDirectory.walk_revisions"""'}, {}), "('alembic.script.ScriptDirectory.walk_revisions')", False, 'import mock\n'), ((725, 5, 725, 64), 'mock.patch', 'mock.patch', ({(725, 16, 725, 63): '"""alembic.script.ScriptDirectory.walk_revisions"""'}, {}), "('alembic.script.ScriptDirectory.walk_revisions')", False, 'import mock\n'), ((739, 5, 739, 64), 'mock.patch', 'mock.patch', ({(739, 16, 739, 63): '"""alembic.script.ScriptDirectory.walk_revisions"""'}, {}), "('alembic.script.ScriptDirectory.walk_revisions')", False, 'import mock\n'), ((56, 24, 56, 53), 'neutron_lib.utils.helpers.get_random_string', 'helpers.get_random_string', ({(56, 50, 56, 52): '10'}, {}), '(10)', False, 'from neutron_lib.utils import helpers\n'), ((57, 22, 57, 38), 'mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'import mock\n'), ((67, 24, 67, 67), 'neutron.conf.db.migration_cli.migration_entrypoints.items', 'migration_cli.migration_entrypoints.items', ({}, {}), 
'()', False, 'from neutron.conf.db import migration_cli\n'), ((87, 25, 87, 41), 'mock.MagicMock', 'mock.MagicMock', ({}, {}), '()', False, 'import mock\n'), ((123, 32, 123, 76), 'mock.patch.object', 'mock.patch.object', ({(123, 50, 123, 53): 'cli', (123, 55, 123, 75): '"""do_alembic_command"""'}, {}), "(cli, 'do_alembic_command')", False, 'import mock\n'), ((481, 17, 481, 40), 'alembic.config.Config', 'alembic_config.Config', ({}, {}), '()', True, 'from alembic import config as alembic_config\n'), ((483, 20, 483, 49), 'neutron.db.migration.cli._get_project_base', 'cli._get_project_base', ({(483, 42, 483, 48): 'config'}, {}), '(config)', False, 'from neutron.db.migration import cli\n'), ((487, 17, 487, 40), 'alembic.config.Config', 'alembic_config.Config', ({}, {}), '()', True, 'from alembic import config as alembic_config\n'), ((489, 23, 489, 57), 'neutron.db.migration.cli._get_root_versions_dir', 'cli._get_root_versions_dir', ({(489, 50, 489, 56): 'config'}, {}), '(config)', False, 'from neutron.db.migration import cli\n'), ((493, 17, 493, 70), 'neutron.db.migration.cli._get_subproject_script_location', 'cli._get_subproject_script_location', ({(493, 53, 493, 69): '"""networking-foo"""'}, {}), "('networking-foo')", False, 'from neutron.db.migration import cli\n'), ((508, 8, 508, 64), 'neutron.db.migration.cli._compare_labels', 'cli._compare_labels', ({(508, 28, 508, 41): 'fake_revision', (508, 43, 508, 63): "{'label1', 'label2'}"}, {}), "(fake_revision, {'label1', 'label2'})", False, 'from neutron.db.migration import cli\n'), ((524, 21, 524, 32), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((526, 8, 527, 56), 'neutron.db.migration.cli._validate_single_revision_labels', 'cli._validate_single_revision_labels', (), '', False, 'from neutron.db.migration import cli\n'), ((542, 21, 542, 32), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((544, 8, 545, 58), 'neutron.db.migration.cli._validate_single_revision_labels', 'cli._validate_single_revision_labels', (), '', False, 'from neutron.db.migration import cli\n'), ((555, 21, 555, 32), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((558, 29, 558, 63), 'os.path.join', 'os.path.join', ({(558, 42, 558, 54): '"""/fake/path"""', (558, 56, 558, 62): 'branch'}, {}), "('/fake/path', branch)", False, 'import os\n'), ((559, 8, 559, 57), 'neutron.db.migration.cli._validate_revision', 'cli._validate_revision', ({(559, 31, 559, 41): 'script_dir', (559, 43, 559, 56): 'fake_revision'}, {}), '(script_dir, fake_revision)', False, 'from neutron.db.migration import cli\n'), ((567, 21, 567, 32), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((569, 8, 569, 57), 'neutron.db.migration.cli._validate_revision', 'cli._validate_revision', ({(569, 31, 569, 41): 'script_dir', (569, 43, 569, 56): 'fake_revision'}, {}), '(script_dir, fake_revision)', False, 'from neutron.db.migration import cli\n'), ((579, 8, 579, 47), 'neutron.db.migration.cli.validate_revisions', 'cli.validate_revisions', ({(579, 31, 579, 46): 'self.configs[0]'}, {}), '(self.configs[0])', False, 'from neutron.db.migration import cli\n'), ((599, 21, 600, 28), 'alembic.script.ScriptDirectory.from_config', 'alembic_script.ScriptDirectory.from_config', ({(600, 12, 600, 27): 'self.configs[0]'}, {}), '(self.configs[0])', True, 'from alembic import script as alembic_script\n'), ((718, 12, 719, 62), 'neutron.db.migration.cli._find_milestone_revisions', 'cli._find_milestone_revisions', ({(718, 42, 718, 57): 'self.configs[0]', (718, 
59, 718, 68): '"""liberty"""', (719, 42, 719, 61): 'cli.CONTRACT_BRANCH'}, {}), "(self.configs[0], 'liberty', cli.CONTRACT_BRANCH)", False, 'from neutron.db.migration import cli\n'), ((721, 12, 722, 60), 'neutron.db.migration.cli._find_milestone_revisions', 'cli._find_milestone_revisions', ({(721, 42, 721, 57): 'self.configs[0]', (721, 59, 721, 68): '"""liberty"""', (722, 42, 722, 59): 'cli.EXPAND_BRANCH'}, {}), "(self.configs[0], 'liberty', cli.EXPAND_BRANCH)", False, 'from neutron.db.migration import cli\n'), ((733, 12, 733, 69), 'neutron.db.migration.cli._find_milestone_revisions', 'cli._find_milestone_revisions', ({(733, 42, 733, 57): 'self.configs[0]', (733, 59, 733, 68): '"""liberty"""'}, {}), "(self.configs[0], 'liberty')", False, 'from neutron.db.migration import cli\n'), ((736, 12, 736, 68), 'neutron.db.migration.cli._find_milestone_revisions', 'cli._find_milestone_revisions', ({(736, 42, 736, 57): 'self.configs[0]', (736, 59, 736, 67): '"""mitaka"""'}, {}), "(self.configs[0], 'mitaka')", False, 'from neutron.db.migration import cli\n'), ((745, 12, 745, 69), 'neutron.db.migration.cli._find_milestone_revisions', 'cli._find_milestone_revisions', ({(745, 42, 745, 57): 'self.configs[0]', (745, 59, 745, 68): '"""liberty"""'}, {}), "(self.configs[0], 'liberty')", False, 'from neutron.db.migration import cli\n'), ((748, 12, 748, 68), 'neutron.db.migration.cli._find_milestone_revisions', 'cli._find_milestone_revisions', ({(748, 42, 748, 57): 'self.configs[0]', (748, 59, 748, 67): '"""mitaka"""'}, {}), "(self.configs[0], 'mitaka')", False, 'from neutron.db.migration import cli\n'), ((68, 35, 68, 48), 'copy.copy', 'copy.copy', ({(68, 45, 68, 47): 'ep'}, {}), '(ep)', False, 'import copy\n'), ((95, 24, 95, 57), 'neutron.db.migration.schema_has_table', 'migration.schema_has_table', ({(95, 51, 95, 56): '"""foo"""'}, {}), "('foo')", False, 'from neutron.db import migration\n'), ((103, 25, 103, 66), 'neutron.db.migration.schema_has_column', 'migration.schema_has_column', ({(103, 53, 103, 58): '"""meh"""', (103, 60, 103, 65): '"""meh"""'}, {}), "('meh', 'meh')", False, 'from neutron.db import migration\n'), ((107, 24, 107, 72), 'neutron.db.migration.schema_has_column', 'migration.schema_has_column', ({(107, 52, 107, 57): '"""foo"""', (107, 59, 107, 71): '"""foo_column"""'}, {}), "('foo', 'foo_column')", False, 'from neutron.db import migration\n'), ((116, 25, 117, 37), 'neutron.db.migration.schema_has_column', 'migration.schema_has_column', (), '', False, 'from neutron.db import migration\n'), ((139, 27, 139, 56), 'os.path.dirname', 'os.path.dirname', ({(139, 43, 139, 55): 'cli.__file__'}, {}), '(cli.__file__)', False, 'import os\n'), ((143, 21, 143, 47), 'alembic.config.Config', 'alembic_config.Config', ({(143, 43, 143, 46): 'ini'}, {}), '(ini)', True, 'from alembic import config as alembic_config\n'), ((150, 25, 152, 62), 'pkg_resources.EntryPoint', 'pkg_resources.EntryPoint', (), '', False, 'import pkg_resources\n'), ((156, 13, 156, 49), 'mock.patch.object', 'mock.patch.object', ({(156, 31, 156, 34): 'sys', (156, 36, 156, 42): '"""argv"""', (156, 44, 156, 48): 'argv'}, {}), "(sys, 'argv', argv)", False, 'import mock\n'), ((157, 16, 157, 59), 'mock.patch.object', 'mock.patch.object', ({(157, 34, 157, 37): 'cli', (157, 39, 157, 58): '"""run_sanity_checks"""'}, {}), "(cli, 'run_sanity_checks')", False, 'import mock\n'), ((158, 16, 158, 60), 'mock.patch.object', 'mock.patch.object', ({(158, 34, 158, 37): 'cli', (158, 39, 158, 59): '"""validate_revisions"""'}, {}), "(cli, 'validate_revisions')", 
False, 'import mock\n'), ((160, 12, 160, 22), 'neutron.db.migration.cli.main', 'cli.main', ({}, {}), '()', False, 'from neutron.db.migration import cli\n'), ((210, 13, 210, 58), 'mock.patch.object', 'mock.patch.object', ({(210, 31, 210, 34): 'cli', (210, 36, 210, 57): '"""validate_head_files"""'}, {}), "(cli, 'validate_head_files')", False, 'import mock\n'), ((215, 13, 215, 56), 'mock.patch.object', 'mock.patch.object', ({(215, 31, 215, 34): 'cli', (215, 36, 215, 55): '"""update_head_files"""'}, {}), "(cli, 'update_head_files')", False, 'import mock\n'), ((309, 13, 309, 54), 'neutron.tests.unit.testlib_api.ExpectedException', 'testlib_api.ExpectedException', ({(309, 43, 309, 53): 'SystemExit'}, {}), '(SystemExit)', False, 'from neutron.tests.unit import testlib_api\n'), ((314, 13, 314, 54), 'neutron.tests.unit.testlib_api.ExpectedException', 'testlib_api.ExpectedException', ({(314, 43, 314, 53): 'SystemExit'}, {}), '(SystemExit)', False, 'from neutron.tests.unit import testlib_api\n'), ((319, 13, 319, 54), 'neutron.tests.unit.testlib_api.ExpectedException', 'testlib_api.ExpectedException', ({(319, 43, 319, 53): 'SystemExit'}, {}), '(SystemExit)', False, 'from neutron.tests.unit import testlib_api\n'), ((324, 13, 324, 54), 'neutron.tests.unit.testlib_api.ExpectedException', 'testlib_api.ExpectedException', ({(324, 43, 324, 53): 'SystemExit'}, {}), '(SystemExit)', False, 'from neutron.tests.unit import testlib_api\n'), ((379, 13, 379, 52), 'mock.patch.object', 'mock.patch.object', ({(379, 31, 379, 34): 'sys', (379, 36, 379, 42): '"""argv"""', (379, 44, 379, 51): 'command'}, {}), "(sys, 'argv', command)", False, 'import mock\n'), ((379, 54, 380, 41), 'mock.patch.object', 'mock.patch.object', ({(380, 16, 380, 19): 'cli', (380, 21, 380, 40): '"""run_sanity_checks"""'}, {}), "(cli, 'run_sanity_checks')", False, 'import mock\n'), ((399, 13, 399, 69), 'mock.patch', 'mock.patch', ({(399, 24, 399, 68): '"""alembic.script.ScriptDirectory.from_config"""'}, {}), "('alembic.script.ScriptDirectory.from_config')", False, 'import mock\n'), ((400, 16, 400, 44), 'mock.patch', 'mock.patch', ({(400, 27, 400, 43): '"""os.path.exists"""'}, {}), "('os.path.exists')", False, 'import mock\n'), ((460, 13, 460, 69), 'mock.patch', 'mock.patch', ({(460, 24, 460, 68): '"""alembic.script.ScriptDirectory.from_config"""'}, {}), "('alembic.script.ScriptDirectory.from_config')", False, 'import mock\n'), ((465, 12, 465, 50), 'neutron.db.migration.cli.update_head_files', 'cli.update_head_files', ({(465, 34, 465, 49): 'self.configs[0]'}, {}), '(self.configs[0])', False, 'from neutron.db.migration import cli\n'), ((470, 28, 471, 32), 'neutron.db.migration.cli._get_head_file_path', 'cli._get_head_file_path', ({(471, 16, 471, 31): 'self.configs[0]'}, {}), '(self.configs[0])', False, 'from neutron.db.migration import cli\n'), ((472, 29, 473, 32), 'neutron.db.migration.cli._get_heads_file_path', 'cli._get_heads_file_path', ({(473, 16, 473, 31): 'self.configs[0]'}, {}), '(self.configs[0])', False, 'from neutron.db.migration import cli\n'), ((671, 12, 671, 23), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((671, 25, 671, 36), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((682, 16, 682, 34), 'textwrap.dedent', 'textwrap.dedent', ({(682, 32, 682, 33): 's'}, {}), '(s)', False, 'import textwrap\n'), ((683, 16, 683, 28), 're.escape', 're.escape', ({(683, 26, 683, 27): 's'}, {}), '(s)', False, 'import re\n'), ((700, 12, 700, 65), 'alembic.autogenerate.api.render_python_code', 
'alembic_ag_api.render_python_code', ({(700, 46, 700, 64): 'expand.upgrade_ops'}, {}), '(expand.upgrade_ops)', True, 'from alembic.autogenerate import api as alembic_ag_api\n'), ((709, 12, 709, 67), 'alembic.autogenerate.api.render_python_code', 'alembic_ag_api.render_python_code', ({(709, 46, 709, 66): 'contract.upgrade_ops'}, {}), '(contract.upgrade_ops)', True, 'from alembic.autogenerate import api as alembic_ag_api\n'), ((755, 31, 755, 55), 'neutron.db.migration.cli.get_neutron_config', 'cli.get_neutron_config', ({}, {}), '()', False, 'from neutron.db.migration import cli\n'), ((79, 8, 79, 41), 'mock.patch', 'mock.patch', ({(79, 19, 79, 40): '"""alembic.op.get_bind"""'}, {}), "('alembic.op.get_bind')", False, 'import mock\n'), ((80, 39, 81, 66), 'mock.patch', 'mock.patch', (), '', False, 'import mock\n'), ((83, 33, 84, 53), 'mock.patch', 'mock.patch', ({(84, 12, 84, 52): '"""sqlalchemy.engine.reflection.Inspector"""'}, {}), "('sqlalchemy.engine.reflection.Inspector')", False, 'import mock\n'), ((125, 32, 125, 62), 'mock.patch', 'mock.patch', ({(125, 43, 125, 61): '"""alembic.util.err"""'}, {}), "('alembic.util.err')", False, 'import mock\n'), ((126, 33, 126, 64), 'mock.patch', 'mock.patch', ({(126, 44, 126, 63): '"""alembic.util.warn"""'}, {}), "('alembic.util.warn')", False, 'import mock\n'), ((130, 45, 130, 71), 'neutron.db.migration.cli._get_project_base', 'cli._get_project_base', ({(130, 67, 130, 70): 'cfg'}, {}), '(cfg)', False, 'from neutron.db.migration import cli\n'), ((131, 20, 131, 67), 'mock.patch.object', 'mock.patch.object', ({(131, 38, 131, 41): 'cli', (131, 43, 131, 66): '"""_get_package_root_dir"""'}, {}), "(cli, '_get_package_root_dir')", False, 'import mock\n'), ((134, 8, 134, 54), 'mock.patch', 'mock.patch', ({(134, 19, 134, 53): '"""oslo_utils.fileutils.ensure_tree"""'}, {}), "('oslo_utils.fileutils.ensure_tree')", False, 'import mock\n'), ((163, 23, 163, 38), 'copy.copy', 'copy.copy', ({(163, 33, 163, 37): 'args'}, {}), '(args)', False, 'import copy\n'), ((378, 8, 378, 60), 'mock.patch', 'mock.patch', ({(378, 19, 378, 59): '"""argparse.ArgumentParser._print_message"""'}, {}), "('argparse.ArgumentParser._print_message')", False, 'import mock\n'), ((419, 16, 419, 52), 'neutron.db.migration.cli.validate_head_files', 'cli.validate_head_files', ({(419, 40, 419, 51): 'fake_config'}, {}), '(fake_config)', False, 'from neutron.db.migration import cli\n'), ((475, 26, 475, 50), 'mock.call', 'mock.call', ({(475, 36, 475, 49): 'old_head_file'}, {}), '(old_head_file)', False, 'import mock\n'), ((477, 26, 477, 51), 'mock.call', 'mock.call', ({(477, 36, 477, 50): 'old_heads_file'}, {}), '(old_heads_file)', False, 'import mock\n'), ((531, 13, 531, 49), 'mock.call', 'mock.call', ({(531, 23, 531, 31): 'revision', (531, 33, 531, 48): 'expected_labels'}, {}), '(revision, expected_labels)', False, 'import mock\n'), ((549, 13, 549, 49), 'mock.call', 'mock.call', ({(549, 23, 549, 31): 'revision', (549, 33, 549, 48): 'expected_labels'}, {}), '(revision, expected_labels)', False, 'import mock\n'), ((581, 13, 581, 42), 'mock.call', 'mock.call', ({(581, 23, 581, 31): 'mock.ANY', (581, 33, 581, 41): 'revision'}, {}), '(mock.ANY, revision)', False, 'import mock\n'), ((602, 29, 602, 63), 'neutron.db.migration.cli._get_branch_points', 'cli._get_branch_points', ({(602, 52, 602, 62): 'script_dir'}, {}), '(script_dir)', False, 'from neutron.db.migration import cli\n'), ((233, 24, 233, 52), 'neutron.db.migration.cli._get_branch_head', 'cli._get_branch_head', ({(233, 45, 233, 51): 'branch'}, {}), 
'(branch)', False, 'from neutron.db.migration import cli\n'), ((421, 16, 421, 52), 'neutron.db.migration.cli.validate_head_files', 'cli.validate_head_files', ({(421, 40, 421, 51): 'fake_config'}, {}), '(fake_config)', False, 'from neutron.db.migration import cli\n'), ((433, 20, 433, 63), 'neutron.db.migration.cli._get_expand_head_file_path', 'cli._get_expand_head_file_path', ({(433, 51, 433, 62): 'fake_config'}, {}), '(fake_config)', False, 'from neutron.db.migration import cli\n'), ((435, 20, 435, 65), 'neutron.db.migration.cli._get_contract_head_file_path', 'cli._get_contract_head_file_path', ({(435, 53, 435, 64): 'fake_config'}, {}), '(fake_config)', False, 'from neutron.db.migration import cli\n'), ((455, 38, 456, 40), 'neutron.db.migration.cli._get_contract_head_file_path', 'cli._get_contract_head_file_path', ({(456, 24, 456, 39): 'self.configs[0]'}, {}), '(self.configs[0])', False, 'from neutron.db.migration import cli\n'), ((458, 30, 459, 32), 'neutron.db.migration.cli._get_expand_head_file_path', 'cli._get_expand_head_file_path', ({(459, 16, 459, 31): 'self.configs[0]'}, {}), '(self.configs[0])', False, 'from neutron.db.migration import cli\n'), ((217, 16, 217, 44), 'mock.patch', 'mock.patch', ({(217, 27, 217, 43): '"""os.path.exists"""'}, {}), "('os.path.exists')", False, 'import mock\n'), ((412, 34, 413, 32), 'neutron.db.migration.cli._get_contract_head_file_path', 'cli._get_contract_head_file_path', ({(413, 20, 413, 31): 'fake_config'}, {}), '(fake_config)', False, 'from neutron.db.migration import cli\n'), ((415, 34, 416, 32), 'neutron.db.migration.cli._get_expand_head_file_path', 'cli._get_expand_head_file_path', ({(416, 20, 416, 31): 'fake_config'}, {}), '(fake_config)', False, 'from neutron.db.migration import cli\n'), ((651, 20, 654, 21), 'alembic.operations.ops.CreateUniqueConstraintOp', 'alembic_ops.CreateUniqueConstraintOp', ({(652, 24, 652, 37): '"""uq_user_org"""', (652, 39, 652, 45): '"""user"""', (653, 24, 653, 58): "['user_name', 'organization_name']"}, {}), "('uq_user_org', 'user', ['user_name',\n 'organization_name'])", True, 'from alembic.operations import ops as alembic_ops\n'), ((662, 20, 662, 59), 'alembic.operations.ops.DropTableOp', 'alembic_ops.DropTableOp', ({(662, 44, 662, 58): '"""organization"""'}, {}), "('organization')", True, 'from alembic.operations import ops as alembic_ops\n'), ((649, 49, 649, 62), 'sqlalchemy.String', 'sa.String', ({(649, 59, 649, 61): '50'}, {}), '(50)', True, 'import sqlalchemy as sa\n'), ((619, 44, 619, 56), 'sqlalchemy.Integer', 'sa.Integer', ({}, {}), '()', True, 'import sqlalchemy as sa\n'), ((620, 46, 620, 59), 'sqlalchemy.String', 'sa.String', ({(620, 56, 620, 58): '50'}, {}), '(50)', True, 'import sqlalchemy as sa\n'), ((630, 28, 633, 29), 'alembic.operations.ops.CreateForeignKeyOp', 'alembic_ops.CreateForeignKeyOp', ({(631, 32, 631, 40): '"""org_fk"""', (631, 42, 631, 48): '"""user"""', (631, 50, 631, 64): '"""organization"""', (632, 32, 632, 51): "['organization_id']", (632, 53, 632, 59): "['id']"}, {}), "('org_fk', 'user', 'organization', [\n 'organization_id'], ['id'])", True, 'from alembic.operations import ops as alembic_ops\n'), ((634, 28, 636, 29), 'alembic.operations.ops.DropConstraintOp', 'alembic_ops.DropConstraintOp', ({(635, 32, 635, 38): '"""user"""', (635, 40, 635, 53): '"""uq_user_org"""'}, {}), "('user', 'uq_user_org')", True, 'from alembic.operations import ops as alembic_ops\n'), ((637, 28, 639, 29), 'alembic.operations.ops.DropColumnOp', 'alembic_ops.DropColumnOp', ({(638, 32, 638, 38): '"""user"""', 
(638, 40, 638, 59): '"""organization_name"""'}, {}), "('user', 'organization_name')", True, 'from alembic.operations import ops as alembic_ops\n'), ((658, 28, 658, 74), 'alembic.operations.ops.DropConstraintOp', 'alembic_ops.DropConstraintOp', ({(658, 57, 658, 65): '"""org_fk"""', (658, 67, 658, 73): '"""user"""'}, {}), "('org_fk', 'user')", True, 'from alembic.operations import ops as alembic_ops\n'), ((659, 28, 659, 79), 'alembic.operations.ops.DropColumnOp', 'alembic_ops.DropColumnOp', ({(659, 53, 659, 59): '"""user"""', (659, 61, 659, 78): '"""organization_id"""'}, {}), "('user', 'organization_id')", True, 'from alembic.operations import ops as alembic_ops\n'), ((628, 61, 628, 73), 'sqlalchemy.Integer', 'sa.Integer', ({}, {}), '()', True, 'import sqlalchemy as sa\n')] |
hoostus/prime-harvesting | withdrawal/floor_ceiling.py | 6606b94ea7859fbf217dbea4ace856e3fa4d154e | from decimal import Decimal
from .abc import WithdrawalStrategy
# Bengen's Floor-to-Ceiling, as described in McClung's Living Off Your Money
class FloorCeiling(WithdrawalStrategy):
def __init__(self, portfolio, harvest_strategy, rate=.05, floor=.9, ceiling=1.25):
super().__init__(portfolio, harvest_strategy)
self.floor = Decimal(floor)
self.ceiling = Decimal(ceiling)
self.rate = Decimal(rate)
def start(self):
amount = self.rate * self.portfolio.value
self.initial_amount = amount
return amount
def next(self):
amount = self.rate * self.portfolio.value
initial_amount_inflation_adjusted = self.initial_amount * self.cumulative_inflation
floor = initial_amount_inflation_adjusted * self.floor
ceiling = initial_amount_inflation_adjusted * self.ceiling
amount = max(amount, floor)
amount = min(amount, ceiling)
return amount
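# --- Illustrative sketch (hypothetical helper, not part of the original strategy) ---
# Shows the floor/ceiling clamp from next() in isolation: the withdrawal follows the
# current rate-based amount but is kept within floor..ceiling of the inflation-adjusted
# first withdrawal.
def _clamp_withdrawal_example(current, initial, cumulative_inflation,
                              floor=Decimal('0.9'), ceiling=Decimal('1.25')):
    base = initial * cumulative_inflation
    return min(max(current, base * floor), base * ceiling)
# e.g. _clamp_withdrawal_example(Decimal('40000'), Decimal('50000'), Decimal('1.1'))
# -> Decimal('49500.00'), because 40000 falls below the floor of 0.9 * 55000.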
| [((10, 21, 10, 35), 'decimal.Decimal', 'Decimal', ({(10, 29, 10, 34): 'floor'}, {}), '(floor)', False, 'from decimal import Decimal\n'), ((11, 23, 11, 39), 'decimal.Decimal', 'Decimal', ({(11, 31, 11, 38): 'ceiling'}, {}), '(ceiling)', False, 'from decimal import Decimal\n'), ((12, 20, 12, 33), 'decimal.Decimal', 'Decimal', ({(12, 28, 12, 32): 'rate'}, {}), '(rate)', False, 'from decimal import Decimal\n')] |
rcolistete/MicroPython_MiniCurso_ProjOrientado | 20190426/6_BME280_WiFi/bme280.py | c82affe833587141c4c05ee08ea84b095bfe845f | """
MicroPython driver for Bosh BME280 temperature, pressure and humidity I2C sensor:
https://www.bosch-sensortec.com/bst/products/all_products/bme280
Authors: Nelio Goncalves Godoi, Roberto Colistete Jr
Version: 3.1.2 @ 2018/04
License: MIT License (https://opensource.org/licenses/MIT)
"""
import time
from ustruct import unpack, unpack_from
from array import array
# BME280 default address
BME280_I2CADDR = 0x76
# BME280_I2CADDR = 0x77
OSAMPLE_0 = 0
OSAMPLE_1 = 1
OSAMPLE_2 = 2
OSAMPLE_4 = 3
OSAMPLE_8 = 4
OSAMPLE_16 = 5
BME280_REGISTER_STATUS = 0xF3
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
BME280_REGISTER_CONTROL_IIR = 0xF5
FILTER_OFF = 0
FILTER_2 = 1
FILTER_4 = 2
FILTER_8 = 3
FILTER_16 = 4
CELSIUS = 'C'
FAHRENHEIT = 'F'
KELVIN = 'K'
class BME280(object):
def __init__(self,
temperature_mode=OSAMPLE_2,
pressure_mode=OSAMPLE_16,
humidity_mode=OSAMPLE_1,
temperature_scale=CELSIUS,
iir=FILTER_16,
address=BME280_I2CADDR,
i2c=None):
osamples = [
OSAMPLE_0,
OSAMPLE_1,
OSAMPLE_2,
OSAMPLE_4,
OSAMPLE_8,
OSAMPLE_16]
        msg_error = 'Unexpected {} operating mode value {}.'
if temperature_mode not in osamples:
raise ValueError(msg_error.format("temperature", temperature_mode))
self.temperature_mode = temperature_mode
if pressure_mode not in osamples:
raise ValueError(msg_error.format("pressure", pressure_mode))
self.pressure_mode = pressure_mode
if humidity_mode not in osamples:
raise ValueError(msg_error.format("humidity", humidity_mode))
self.humidity_mode = humidity_mode
msg_error = 'Unexpected low pass IIR filter setting value {0}.'
if iir not in [FILTER_OFF, FILTER_2, FILTER_4, FILTER_8, FILTER_16]:
raise ValueError(msg_error.format(iir))
self.iir = iir
msg_error = 'Unexpected temperature scale value {0}.'
if temperature_scale not in [CELSIUS, FAHRENHEIT, KELVIN]:
raise ValueError(msg_error.format(temperature_scale))
self.temperature_scale = temperature_scale
del msg_error
self.address = address
if i2c is None:
raise ValueError('An I2C object is required.')
self.i2c = i2c
dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \
self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \
self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \
_, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
self.dig_H2, self.dig_H3 = unpack("<hB", dig_e1_e7)
e4_sign = unpack_from("<b", dig_e1_e7, 3)[0]
self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF)
e6_sign = unpack_from("<b", dig_e1_e7, 5)[0]
self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >> 4)
self.dig_H6 = unpack_from("<b", dig_e1_e7, 6)[0]
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL,
bytearray([0x24]))
time.sleep(0.002)
self.t_fine = 0
self._l1_barray = bytearray(1)
self._l8_barray = bytearray(8)
self._l3_resultarray = array("i", [0, 0, 0])
self._l1_barray[0] = self.iir << 2
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL_IIR,
self._l1_barray)
time.sleep(0.002)
self._l1_barray[0] = self.humidity_mode
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL_HUM,
self._l1_barray)
def read_raw_data(self, result):
self._l1_barray[0] = (
self.pressure_mode << 5 |
self.temperature_mode << 2 | 1)
self.i2c.writeto_mem(
self.address,
BME280_REGISTER_CONTROL,
self._l1_barray)
osamples_1_16 = [
OSAMPLE_1,
OSAMPLE_2,
OSAMPLE_4,
OSAMPLE_8,
OSAMPLE_16]
sleep_time = 1250
if self.temperature_mode in osamples_1_16:
sleep_time += 2300*(1 << self.temperature_mode)
if self.pressure_mode in osamples_1_16:
sleep_time += 575 + (2300*(1 << self.pressure_mode))
if self.humidity_mode in osamples_1_16:
sleep_time += 575 + (2300*(1 << self.humidity_mode))
time.sleep_us(sleep_time)
while (unpack('<H',
self.i2c.readfrom_mem(
self.address,
BME280_REGISTER_STATUS, 2))[0] & 0x08):
time.sleep(0.001)
self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
readout = self._l8_barray
raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
raw_hum = (readout[6] << 8) | readout[7]
result[0] = raw_temp
result[1] = raw_press
result[2] = raw_hum
def read_compensated_data(self, result=None):
""" Get raw data and compensa the same """
self.read_raw_data(self._l3_resultarray)
raw_temp, raw_press, raw_hum = self._l3_resultarray
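        # Integer compensation below follows the Bosch BME280 datasheet formulas:
        # temperature first (which also sets t_fine), then pressure and humidity.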
var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)
var2 = (raw_temp >> 4) - self.dig_T1
var2 = var2 * ((raw_temp >> 4) - self.dig_T1)
var2 = ((var2 >> 12) * self.dig_T3) >> 14
self.t_fine = var1 + var2
temp = (self.t_fine * 5 + 128) >> 8
var1 = self.t_fine - 128000
var2 = var1 * var1 * self.dig_P6
var2 = var2 + ((var1 * self.dig_P5) << 17)
var2 = var2 + (self.dig_P4 << 35)
var1 = (((var1 * var1 * self.dig_P3) >> 8) +
((var1 * self.dig_P2) << 12))
var1 = (((1 << 47) + var1) * self.dig_P1) >> 33
if var1 == 0:
pressure = 0
else:
p = 1048576 - raw_press
p = (((p << 31) - var2) * 3125) // var1
var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25
var2 = (self.dig_P8 * p) >> 19
pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)
h = self.t_fine - 76800
h = (((((raw_hum << 14) - (self.dig_H4 << 20) -
(self.dig_H5 * h)) + 16384)
>> 15) * (((((((h * self.dig_H6) >> 10) *
(((h * self.dig_H3) >> 11) + 32768)) >> 10) +
2097152) * self.dig_H2 + 8192) >> 14))
h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4)
h = 0 if h < 0 else h
h = 419430400 if h > 419430400 else h
humidity = h >> 12
if result:
result[0] = temp
result[1] = pressure
result[2] = humidity
return result
return array("i", (temp, pressure, humidity))
@property
def values(self):
temp, pres, humi = self.read_compensated_data()
temp = temp/100
if self.temperature_scale == 'F':
temp = 32 + (temp*1.8)
elif self.temperature_scale == 'K':
temp = temp + 273.15
pres = pres/256
humi = humi/1024
return (temp, pres, humi)
@property
def formated_values(self):
t, p, h = self.values
temp = "{} "+self.temperature_scale
return (temp.format(t), "{} Pa".format(p), "{} %".format(h))
@property
def temperature(self):
t, _, _ = self.values
return t
@property
def pressure(self):
_, p, _ = self.values
return p
@property
def pressure_precision(self):
_, p, _ = self.read_compensated_data()
pi = float(p // 256)
pd = (p % 256)/256
return (pi, pd)
@property
def humidity(self):
_, _, h = self.values
return h
def altitude(self, pressure_sea_level=1013.25):
pi, pd = self.pressure_precision()
return 44330*(1-((float(pi+pd)/100)/pressure_sea_level)**(1/5.255))
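# --- Hypothetical usage sketch (comments only, not part of this driver) ---
# Pin numbers below are placeholders for an ESP32-style board; adjust to your wiring.
# from machine import I2C, Pin
# i2c = I2C(scl=Pin(22), sda=Pin(21))
# sensor = BME280(i2c=i2c)        # defaults: address 0x76, 2x/16x/1x oversampling
# print(sensor.formated_values)   # e.g. ('24.31 C', '101325.0 Pa', '45.2 %')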
| [((86, 29, 86, 65), 'ustruct.unpack', 'unpack', ({(86, 36, 86, 53): '"""<HhhHhhhhhhhhBB"""', (86, 55, 86, 64): 'dig_88_a1'}, {}), "('<HhhHhhhhhhhhBB', dig_88_a1)", False, 'from ustruct import unpack, unpack_from\n'), ((87, 35, 87, 59), 'ustruct.unpack', 'unpack', ({(87, 42, 87, 47): '"""<hB"""', (87, 49, 87, 58): 'dig_e1_e7'}, {}), "('<hB', dig_e1_e7)", False, 'from ustruct import unpack, unpack_from\n'), ((97, 8, 97, 25), 'time.sleep', 'time.sleep', ({(97, 19, 97, 24): '(0.002)'}, {}), '(0.002)', False, 'import time\n'), ((101, 31, 101, 52), 'array.array', 'array', ({(101, 37, 101, 40): '"""i"""', (101, 42, 101, 51): '[0, 0, 0]'}, {}), "('i', [0, 0, 0])", False, 'from array import array\n'), ((107, 8, 107, 25), 'time.sleep', 'time.sleep', ({(107, 19, 107, 24): '(0.002)'}, {}), '(0.002)', False, 'import time\n'), ((138, 8, 138, 33), 'time.sleep_us', 'time.sleep_us', ({(138, 22, 138, 32): 'sleep_time'}, {}), '(sleep_time)', False, 'import time\n'), ((200, 15, 200, 53), 'array.array', 'array', ({(200, 21, 200, 24): '"""i"""', (200, 26, 200, 52): '(temp, pressure, humidity)'}, {}), "('i', (temp, pressure, humidity))", False, 'from array import array\n'), ((88, 18, 88, 49), 'ustruct.unpack_from', 'unpack_from', ({(88, 30, 88, 34): '"""<b"""', (88, 36, 88, 45): 'dig_e1_e7', (88, 47, 88, 48): '(3)'}, {}), "('<b', dig_e1_e7, 3)", False, 'from ustruct import unpack, unpack_from\n'), ((90, 18, 90, 49), 'ustruct.unpack_from', 'unpack_from', ({(90, 30, 90, 34): '"""<b"""', (90, 36, 90, 45): 'dig_e1_e7', (90, 47, 90, 48): '(5)'}, {}), "('<b', dig_e1_e7, 5)", False, 'from ustruct import unpack, unpack_from\n'), ((92, 22, 92, 53), 'ustruct.unpack_from', 'unpack_from', ({(92, 34, 92, 38): '"""<b"""', (92, 40, 92, 49): 'dig_e1_e7', (92, 51, 92, 52): '(6)'}, {}), "('<b', dig_e1_e7, 6)", False, 'from ustruct import unpack, unpack_from\n'), ((143, 12, 143, 29), 'time.sleep', 'time.sleep', ({(143, 23, 143, 28): '(0.001)'}, {}), '(0.001)', False, 'import time\n')] |
colpal/airfloss | airflow/contrib/secrets/hashicorp_vault.py | 1857cf309b69d4c2d60e9bb67f731eb01d0ecda1 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Objects relating to sourcing connections & variables from Hashicorp Vault
"""
from typing import Optional
import hvac
from cached_property import cached_property
from hvac.exceptions import InvalidPath, VaultError
from airflow.exceptions import AirflowException
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
class VaultBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Connections and Variables from Hashicorp Vault
Configurable via ``airflow.cfg`` as follows:
.. code-block:: ini
[secrets]
backend = airflow.contrib.secrets.hashicorp_vault.VaultBackend
backend_kwargs = {
"connections_path": "connections",
"url": "http://127.0.0.1:8200",
"mount_point": "airflow"
}
For example, if your keys are under ``connections`` path in ``airflow`` mount_point, this
would be accessible if you provide ``{"connections_path": "connections"}`` and request
conn_id ``smtp_default``.
:param connections_path: Specifies the path of the secret to read to get Connections.
(default: 'connections')
:type connections_path: str
:param variables_path: Specifies the path of the secret to read to get Variables.
(default: 'variables')
:type variables_path: str
:param config_path: Specifies the path of the secret to read Airflow Configurations
(default: 'configs').
:type config_path: str
:param url: Base URL for the Vault instance being addressed.
:type url: str
:param auth_type: Authentication Type for Vault (one of 'token', 'ldap', 'userpass', 'approle',
'github', 'gcp', 'kubernetes'). Default is ``token``.
:type auth_type: str
:param mount_point: The "path" the secret engine was mounted on. (Default: ``secret``)
:type mount_point: str
:param token: Authentication token to include in requests sent to Vault.
(for ``token`` and ``github`` auth_type)
:type token: str
:param kv_engine_version: Select the version of the engine to run (``1`` or ``2``, default: ``2``)
:type kv_engine_version: int
:param username: Username for Authentication (for ``ldap`` and ``userpass`` auth_type)
:type username: str
:param password: Password for Authentication (for ``ldap`` and ``userpass`` auth_type)
:type password: str
:param role_id: Role ID for Authentication (for ``approle`` auth_type)
:type role_id: str
:param kubernetes_role: Role for Authentication (for ``kubernetes`` auth_type)
:type kubernetes_role: str
    :param kubernetes_jwt_path: Path for kubernetes jwt token (for ``kubernetes`` auth_type, default:
``/var/run/secrets/kubernetes.io/serviceaccount/token``)
:type kubernetes_jwt_path: str
:param secret_id: Secret ID for Authentication (for ``approle`` auth_type)
:type secret_id: str
:param gcp_key_path: Path to GCP Credential JSON file (for ``gcp`` auth_type)
:type gcp_key_path: str
:param gcp_scopes: Comma-separated string containing GCP scopes (for ``gcp`` auth_type)
:type gcp_scopes: str
"""
def __init__( # pylint: disable=too-many-arguments
self,
connections_path='connections', # type: str
variables_path='variables', # type: str
config_path='config', # type: str
url=None, # type: Optional[str]
auth_type='token', # type: str
mount_point='secret', # type: str
kv_engine_version=2, # type: int
token=None, # type: Optional[str]
username=None, # type: Optional[str]
password=None, # type: Optional[str]
role_id=None, # type: Optional[str]
kubernetes_role=None, # type: Optional[str]
kubernetes_jwt_path='/var/run/secrets/kubernetes.io/serviceaccount/token', # type: str
secret_id=None, # type: Optional[str]
gcp_key_path=None, # type: Optional[str]
gcp_scopes=None, # type: Optional[str]
**kwargs
):
super(VaultBackend, self).__init__()
self.connections_path = connections_path.rstrip('/')
        if variables_path is not None:
self.variables_path = variables_path.rstrip('/')
else:
self.variables_path = variables_path
self.config_path = config_path.rstrip('/')
self.url = url
self.auth_type = auth_type
self.kwargs = kwargs
self.token = token
self.username = username
self.password = password
self.role_id = role_id
self.kubernetes_role = kubernetes_role
self.kubernetes_jwt_path = kubernetes_jwt_path
self.secret_id = secret_id
self.mount_point = mount_point
self.kv_engine_version = kv_engine_version
self.gcp_key_path = gcp_key_path
self.gcp_scopes = gcp_scopes
@cached_property
def client(self):
# type: () -> hvac.Client
"""
Return an authenticated Hashicorp Vault client
"""
_client = hvac.Client(url=self.url, **self.kwargs)
if self.auth_type == "token":
if not self.token:
raise VaultError("token cannot be None for auth_type='token'")
_client.token = self.token
elif self.auth_type == "ldap":
_client.auth.ldap.login(
username=self.username, password=self.password)
elif self.auth_type == "userpass":
_client.auth_userpass(username=self.username, password=self.password)
elif self.auth_type == "approle":
_client.auth_approle(role_id=self.role_id, secret_id=self.secret_id)
elif self.auth_type == "kubernetes":
if not self.kubernetes_role:
raise VaultError("kubernetes_role cannot be None for auth_type='kubernetes'")
with open(self.kubernetes_jwt_path) as f:
jwt = f.read()
_client.auth_kubernetes(role=self.kubernetes_role, jwt=jwt)
elif self.auth_type == "github":
_client.auth.github.login(token=self.token)
elif self.auth_type == "gcp":
from airflow.contrib.utils.gcp_credentials_provider import (
get_credentials_and_project_id,
_get_scopes
)
scopes = _get_scopes(self.gcp_scopes)
credentials, _ = get_credentials_and_project_id(key_path=self.gcp_key_path, scopes=scopes)
_client.auth.gcp.configure(credentials=credentials)
else:
raise AirflowException("Authentication type '{}' not supported".format(self.auth_type))
if _client.is_authenticated():
return _client
else:
raise VaultError("Vault Authentication Error!")
def get_conn_uri(self, conn_id):
# type: (str) -> Optional[str]
"""
Get secret value from Vault. Store the secret in the form of URI
:param conn_id: connection id
:type conn_id: str
"""
response = self._get_secret(self.connections_path, conn_id)
return response.get("conn_uri") if response else None
def get_variable(self, key):
# type: (str) -> Optional[str]
"""
Get Airflow Variable
:param key: Variable Key
:return: Variable Value
"""
        if self.variables_path is None:
return None
else:
response = self._get_secret(self.variables_path, key)
return response.get("value") if response else None
def _get_secret(self, path_prefix, secret_id):
# type: (str, str) -> Optional[dict]
"""
Get secret value from Vault.
:param path_prefix: Prefix for the Path to get Secret
:type path_prefix: str
:param secret_id: Secret Key
:type secret_id: str
"""
secret_path = self.build_path(path_prefix, secret_id)
try:
if self.kv_engine_version == 1:
response = self.client.secrets.kv.v1.read_secret(
path=secret_path, mount_point=self.mount_point
)
else:
response = self.client.secrets.kv.v2.read_secret_version(
path=secret_path, mount_point=self.mount_point)
except InvalidPath:
self.log.info("Secret %s not found in Path: %s", secret_id, secret_path)
return None
return_data = response["data"] if self.kv_engine_version == 1 else response["data"]["data"]
return return_data
def get_config(self, key):
# type: (str) -> Optional[str]
"""
Get Airflow Configuration
:param key: Configuration Option Key
:type key: str
:rtype: str
:return: Configuration Option Value retrieved from the vault
"""
response = self._get_secret(self.config_path, key)
return response.get("value") if response else None
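# --- Hypothetical usage sketch (comments only, not part of this module) ---
# Values are placeholders; a reachable Vault server and valid credentials are required.
# backend = VaultBackend(
#     connections_path='connections',
#     mount_point='airflow',
#     url='http://127.0.0.1:8200',
#     token='<vault-token>',
# )
# backend.get_conn_uri('smtp_default')  # reads the secret at airflow/connections/smtp_default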
| [((140, 18, 140, 58), 'hvac.Client', 'hvac.Client', (), '', False, 'import hvac\n'), ((174, 18, 174, 59), 'hvac.exceptions.VaultError', 'VaultError', ({(174, 29, 174, 58): '"""Vault Authentication Error!"""'}, {}), "('Vault Authentication Error!')", False, 'from hvac.exceptions import InvalidPath, VaultError\n'), ((143, 22, 143, 78), 'hvac.exceptions.VaultError', 'VaultError', ({(143, 33, 143, 77): '"""token cannot be None for auth_type=\'token\'"""'}, {}), '("token cannot be None for auth_type=\'token\'")', False, 'from hvac.exceptions import InvalidPath, VaultError\n'), ((154, 22, 154, 93), 'hvac.exceptions.VaultError', 'VaultError', ({(154, 33, 154, 92): '"""kubernetes_role cannot be None for auth_type=\'kubernetes\'"""'}, {}), '("kubernetes_role cannot be None for auth_type=\'kubernetes\'")', False, 'from hvac.exceptions import InvalidPath, VaultError\n'), ((165, 21, 165, 49), 'airflow.contrib.utils.gcp_credentials_provider._get_scopes', '_get_scopes', ({(165, 33, 165, 48): 'self.gcp_scopes'}, {}), '(self.gcp_scopes)', False, 'from airflow.contrib.utils.gcp_credentials_provider import get_credentials_and_project_id, _get_scopes\n'), ((166, 29, 166, 102), 'airflow.contrib.utils.gcp_credentials_provider.get_credentials_and_project_id', 'get_credentials_and_project_id', (), '', False, 'from airflow.contrib.utils.gcp_credentials_provider import get_credentials_and_project_id, _get_scopes\n')] |
AdamCoscia/eve-trajectory-mining | Trajectory_Mining/Bag_of_Words/Comp_Corr_KD_CosDist/comp_dist_partialKD.py | 134f142a5665f66fbf92aada8dd6252fab64ddff | # -*- coding: utf-8 -*-
"""Computes distance between killmails by text similarity.
Edit Distance Metrics
- Levenshtein Distance
- Damerau-Levenshtein Distance
- Jaro Distance
- Jaro-Winkler Distance
- Match Rating Approach Comparison
- Hamming Distance
Vector Distance Metrics
- Jaccard Similarity
- Cosine Distance
Written By: Adam Coscia
Updated On: 11/09/2019
"""
# Start timing
import time
start = time.time()
total = 0
def lap(msg):
"""Records time elapsed."""
global start, total
elapsed = (time.time() - start) - total
total = time.time() - start
if elapsed > 3600:
print(f'(+{elapsed/3600:.2f}h|t:{total/3600:.2f}h) {msg}')
elif elapsed > 60:
if total > 3600:
print(f'(+{elapsed/60:.2f}m|t:{total/3600:.2f}h) {msg}')
else:
print(f'(+{elapsed/60:.2f}m|t:{total/60:.2f}m) {msg}')
else:
if total > 3600:
print(f'(+{elapsed:.3f}s|t:{total/3600:.2f}h) {msg}')
elif total > 60:
print(f'(+{elapsed:.3f}s|t:{total/60:.2f}m) {msg}')
else:
print(f'(+{elapsed:.3f}s|t:{total:.3f}s) {msg}')
lap("Importing modules...")
from ast import literal_eval
from functools import reduce
import os
import sys
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
def get_long_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of long text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
        cosine distance as a value between 0 and 1, with 1 being identical.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[0] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
def get_short_text_cosine_distance(los1, los2):
"""Calculates cosine distance between two killmails' item lists.
1. Converts collection of short text items to raw document representation.
2. Converts the collection of raw documents to a matrix of TF-IDF features
using TfidfVectorizer (combines vector counting and TF-IDF calculator).
3. Computes cosine similarity between feature vectors. Uses linear kernel
since TF-IDF matrix will be normalized already.
Arguments:
los1: First document, a list of raw strings.
los2: Second document, a list of raw strings.
Returns:
        cosine distance as a value between 0 and 1, with 1 being identical and 0
        being completely different.
"""
if type(los1) == float or type(los2) == float:
return 0
if len(los1) == 0 or len(los2) == 0:
return 0
doc1 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los1]) # Create bag of words
doc2 = reduce(lambda x, y: f'{x} {y}', [x[1] for x in los2]) # Create bag of words
tfidf = TfidfVectorizer().fit_transform([doc1, doc2]) # Vectorize the bag of words
cos_dist = linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0] # Compute cosine distance
return cos_dist
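# --- Hypothetical illustration (comments only, not executed by this script) ---
# doc_a = "shield extender armor plate"
# doc_b = "shield extender shield booster"
# tfidf = TfidfVectorizer().fit_transform([doc_a, doc_b])
# linear_kernel(tfidf[0:1], tfidf[1:2]).flatten()[0]  # in [0, 1]; 1.0 for identical documents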
# Load CSV from local file
lap("Loading CSV data from local file...")
df = pd.read_csv(f'data/all_victims_complete_partialKD.csv', encoding='utf-8')
df = df.drop(columns=['HighSlotISK', 'MidSlotISK', 'LowSlotISK', 'type', 'fill'])
df = df.dropna()
# Convert items column to correct data type
lap("Converting 'item' column value types...")
df['items'] = df['items'].apply(literal_eval)
# Group DataFrame by character_id and compute distance series for each group
lap("Computing cosine distances and change in kd by grouping character_id's...")
groupby = df.groupby('character_id') # group dataframe by character_id
num_groups = len(groupby) # get number of groups
count = 0 # current group number out of number of groups
groups = [] # list to append modified group dataframes to
for name, gp in groupby:
# Order the observations and prepare the dataframe
gp = (gp.sort_values(by=['killmail_id'])
.reset_index()
.drop('index', axis=1))
# Generate change in kills over change in deaths and change in kd ratio
kills1 = gp['k_count']
kills2 = gp['k_count'].shift()
deaths1 = gp['d_count']
deaths2 = gp['d_count'].shift()
idx = len(gp.columns)
gp.insert(idx, 'del_kdratio', (kills2 - kills1) / (deaths2 - deaths1))
gp.insert(idx+1, 'kd_ratio_diff', gp['kd_ratio']-gp['kd_ratio'].shift())
# Generate pairs of observations sequentially to compare
pairs = []
items1 = gp['items']
items2 = gp['items'].shift()
for i in range(1, len(gp)): # Start from 1 to avoid adding nan pair
los1 = items1.iloc[i]
los2 = items2.iloc[i]
pairs.append((los2, los1))
# Generate distance series using pairs list and different metrics
# start distance series with nan due to starting range at 1
cos_dist_lt = [np.nan] # cosine distance b/w long text BoW
cos_dist_st = [np.nan] # cosine distance b/w short text BoW
for pair in pairs:
cos_dist_lt.append(get_long_text_cosine_distance(pair[0], pair[1]))
cos_dist_st.append(get_short_text_cosine_distance(pair[0], pair[1]))
idx = len(gp.columns)
gp.insert(idx, 'cos_dist_lt', cos_dist_lt)
gp.insert(idx, 'cos_dist_st', cos_dist_st)
groups.append(gp)
# Record progress
count += 1
print(f"Progress {count/num_groups:2.1%}", end="\r")
lap("Concatenating resulting groups and writing to file...")
df_res = pd.concat(groups)
df_res.to_csv(f'data/useable_victims_distancesAndKD.csv')
lap("Exit")
| [((22, 8, 22, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((114, 5, 114, 78), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((165, 9, 165, 26), 'pandas.concat', 'pd.concat', ({(165, 19, 165, 25): 'groups'}, {}), '(groups)', True, 'import pandas as pd\n'), ((78, 11, 78, 64), 'functools.reduce', 'reduce', ({(78, 18, 78, 41): "lambda x, y: f'{x} {y}'", (78, 43, 78, 63): '[x[0] for x in los1]'}, {}), "(lambda x, y: f'{x} {y}', [x[0] for x in los1])", False, 'from functools import reduce\n'), ((79, 11, 79, 64), 'functools.reduce', 'reduce', ({(79, 18, 79, 41): "lambda x, y: f'{x} {y}'", (79, 43, 79, 63): '[x[0] for x in los2]'}, {}), "(lambda x, y: f'{x} {y}', [x[0] for x in los2])", False, 'from functools import reduce\n'), ((105, 11, 105, 64), 'functools.reduce', 'reduce', ({(105, 18, 105, 41): "lambda x, y: f'{x} {y}'", (105, 43, 105, 63): '[x[1] for x in los1]'}, {}), "(lambda x, y: f'{x} {y}', [x[1] for x in los1])", False, 'from functools import reduce\n'), ((106, 11, 106, 64), 'functools.reduce', 'reduce', ({(106, 18, 106, 41): "lambda x, y: f'{x} {y}'", (106, 43, 106, 63): '[x[1] for x in los2]'}, {}), "(lambda x, y: f'{x} {y}', [x[1] for x in los2])", False, 'from functools import reduce\n'), ((29, 12, 29, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((28, 15, 28, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((80, 12, 80, 29), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ({}, {}), '()', False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((107, 12, 107, 29), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ({}, {}), '()', False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((81, 15, 81, 52), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', ({(81, 29, 81, 39): 'tfidf[0:1]', (81, 41, 81, 51): 'tfidf[1:2]'}, {}), '(tfidf[0:1], tfidf[1:2])', False, 'from sklearn.metrics.pairwise import linear_kernel\n'), ((108, 15, 108, 52), 'sklearn.metrics.pairwise.linear_kernel', 'linear_kernel', ({(108, 29, 108, 39): 'tfidf[0:1]', (108, 41, 108, 51): 'tfidf[1:2]'}, {}), '(tfidf[0:1], tfidf[1:2])', False, 'from sklearn.metrics.pairwise import linear_kernel\n')] |
Dalkio/custom-alphazero | src/chess/utils.py | e24ee8c646a37bf9509b99ca6c96d3f6e69ee4db | import numpy as np
from itertools import product
from typing import List
from src.config import ConfigChess
from src.chess.board import Board
from src.chess.move import Move
def get_all_possible_moves() -> List[Move]:
all_possible_moves = set()
array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype("int8")
for i, j, piece in product(
range(ConfigChess.board_size), range(ConfigChess.board_size), ["Q", "N"]
):
array[i][j] = Board.piece_symbol_to_int(piece)
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[i][j] = 0
# underpromotion moves
array[1, :] = Board.piece_symbol_to_int("P")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[0, :] = Board.piece_symbol_to_int("p")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
# no need to add castling moves: they have already be added with queen moves under UCI notation
return sorted(list(all_possible_moves))
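# --- Hypothetical usage sketch (comments only, not part of the original module) ---
# moves = get_all_possible_moves()
# len(moves)  # size of the fixed move vocabulary, e.g. for a policy-head action space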
| [((22, 18, 22, 48), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', ({(22, 44, 22, 47): '"""P"""'}, {}), "('P')", False, 'from src.chess.board import Board\n'), ((26, 18, 26, 48), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', ({(26, 44, 26, 47): '"""p"""'}, {}), "('p')", False, 'from src.chess.board import Board\n'), ((16, 22, 16, 54), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', ({(16, 48, 16, 53): 'piece'}, {}), '(piece)', False, 'from src.chess.board import Board\n'), ((12, 12, 12, 70), 'numpy.zeros', 'np.zeros', ({(12, 21, 12, 69): '(ConfigChess.board_size, ConfigChess.board_size)'}, {}), '((ConfigChess.board_size, ConfigChess.board_size))', True, 'import numpy as np\n'), ((24, 51, 24, 69), 'src.chess.board.Board', 'Board', (), '', False, 'from src.chess.board import Board\n'), ((28, 51, 28, 69), 'src.chess.board.Board', 'Board', (), '', False, 'from src.chess.board import Board\n'), ((18, 55, 18, 73), 'src.chess.board.Board', 'Board', (), '', False, 'from src.chess.board import Board\n')] |
christymarc/mfac | multirotor.py | 29449a0c79e618059fa6f67ae7ab76711543c513 | from random import gauss
class MultiRotor:
"""Simple vertical dynamics for a multirotor vehicle."""
GRAVITY = -9.81
def __init__(
self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1
):
"""
Args:
altitude (float): initial altitude of the vehicle
velocity (float): initial velocity of the vehicle
mass (float): mass of the vehicle
emc (float): electromechanical constant for the vehicle
dt (float): simulation time step
noise (float): standard deviation of normally distributed simulation noise
"""
self.y0 = altitude
self.y1 = velocity
self.mass = mass
self.emc = emc
self.dt = dt
self.noise = noise
def step(self, effort):
"""Advance the multirotor simulation and apply motor forces.
Args:
effort (float): related to the upward thrust of the vehicle,
it must be >= 0
Return:
The current state (altitude, velocity) of the vehicle.
"""
effort = max(0, effort)
scaled_effort = self.emc / self.mass * effort
net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort
# Don't let the vehcicle fall through the ground
if self.y0 <= 0 and net_acceleration < 0:
y0dot = 0
y1dot = 0
else:
y0dot = self.y1
y1dot = net_acceleration
self.y0 += y0dot * self.dt
self.y1 += y1dot * self.dt
self.y0 += gauss(0, self.noise)
return self.y0, self.y1
def get_altitude(self):
"""Return the current altitude."""
return self.y0
def get_delta_time(self):
"""Return the simulation time step."""
return self.dt
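# --- Hypothetical usage sketch (not part of the original module) ---
# Drives the simulation with a constant effort; 1.51 ~= mass * 9.81 / emc, i.e.
# roughly enough thrust to cancel gravity with the default parameters.
if __name__ == "__main__":
    vehicle = MultiRotor(noise=0.0)
    for _ in range(200):
        altitude, velocity = vehicle.step(effort=1.51)
    print("altitude: %.2f m, velocity: %.2f m/s" % (altitude, velocity))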
| [((57, 19, 57, 39), 'random.gauss', 'gauss', ({(57, 25, 57, 26): '(0)', (57, 28, 57, 38): 'self.noise'}, {}), '(0, self.noise)', False, 'from random import gauss\n')] |
cuenca-mx/stpmex-python | stpmex/client.py | 93f630cd05cea927b32f5aeb5f9b958c4ee91af9 | import re
from typing import Any, ClassVar, Dict, List, NoReturn, Union
from cryptography.exceptions import UnsupportedAlgorithm
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from requests import Response, Session
from .exc import (
AccountDoesNotExist,
BankCodeClabeMismatch,
ClaveRastreoAlreadyInUse,
DuplicatedAccount,
InvalidAccountType,
InvalidAmount,
InvalidField,
InvalidInstitution,
InvalidPassphrase,
InvalidRfcOrCurp,
InvalidTrackingKey,
MandatoryField,
NoOrdenesEncontradas,
NoServiceResponse,
PldRejected,
SameAccount,
SignatureValidationError,
StpmexException,
)
from .resources import CuentaFisica, Orden, Resource, Saldo
from .version import __version__ as client_version
DEMO_HOST = 'https://demo.stpmex.com:7024'
PROD_HOST = 'https://prod.stpmex.com'
class Client:
base_url: str
soap_url: str
session: Session
# resources
cuentas: ClassVar = CuentaFisica
ordenes: ClassVar = Orden
saldos: ClassVar = Saldo
def __init__(
self,
empresa: str,
priv_key: str,
priv_key_passphrase: str,
demo: bool = False,
base_url: str = None,
soap_url: str = None,
timeout: tuple = None,
):
self.timeout = timeout
self.session = Session()
self.session.headers['User-Agent'] = f'stpmex-python/{client_version}'
if demo:
host_url = DEMO_HOST
self.session.verify = False
else:
host_url = PROD_HOST
self.session.verify = True
self.base_url = base_url or f'{host_url}/speiws/rest'
self.soap_url = (
soap_url or f'{host_url}/spei/webservices/SpeiConsultaServices'
)
try:
self.pkey = serialization.load_pem_private_key(
priv_key.encode('utf-8'),
priv_key_passphrase.encode('ascii'),
default_backend(),
)
except (ValueError, TypeError, UnsupportedAlgorithm):
raise InvalidPassphrase
Resource.empresa = empresa
Resource._client = self
def post(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('post', endpoint, data)
def put(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('put', endpoint, data)
def delete(
self, endpoint: str, data: Dict[str, Any]
) -> Union[Dict[str, Any], List[Any]]:
return self.request('delete', endpoint, data)
def request(
self, method: str, endpoint: str, data: Dict[str, Any], **kwargs: Any
) -> Union[Dict[str, Any], List[Any]]:
url = self.base_url + endpoint
response = self.session.request(
method,
url,
json=data,
timeout=self.timeout,
**kwargs,
)
self._check_response(response)
resultado = response.json()
if 'resultado' in resultado: # Some responses are enveloped
resultado = resultado['resultado']
return resultado
@staticmethod
def _check_response(response: Response) -> None:
if not response.ok:
response.raise_for_status()
resp = response.json()
if isinstance(resp, dict):
try:
_raise_description_error_exc(resp)
except KeyError:
...
try:
assert resp['descripcion']
_raise_description_exc(resp)
except (AssertionError, KeyError):
...
response.raise_for_status()
def _raise_description_error_exc(resp: Dict) -> NoReturn:
id = resp['resultado']['id']
error = resp['resultado']['descripcionError']
if id == 0 and error == 'No se recibió respuesta del servicio':
raise NoServiceResponse(**resp['resultado'])
elif id == 0 and error == 'Error validando la firma':
raise SignatureValidationError(**resp['resultado'])
elif id == 0 and re.match(r'El campo .+ es obligatorio', error):
raise MandatoryField(**resp['resultado'])
elif id == -1 and re.match(
r'La clave de rastreo .+ ya fue utilizada', error
):
raise ClaveRastreoAlreadyInUse(**resp['resultado'])
elif id == -7 and re.match(r'La cuenta .+ no existe', error):
raise AccountDoesNotExist(**resp['resultado'])
elif id == -9 and re.match(r'La Institucion \d+ no es valida', error):
raise InvalidInstitution(**resp['resultado'])
elif id == -11 and re.match(r'El tipo de cuenta \d+ es invalido', error):
raise InvalidAccountType(**resp['resultado'])
elif id == -20 and re.match(r'El monto {.+} no es válido', error):
raise InvalidAmount(**resp['resultado'])
elif id == -22 and 'no coincide para la institucion operante' in error:
raise BankCodeClabeMismatch(**resp['resultado'])
elif id == -24 and re.match(r'Cuenta {\d+} - {MISMA_CUENTA}', error):
raise SameAccount(**resp['resultado'])
elif id == -34 and 'Clave rastreo invalida' in error:
raise InvalidTrackingKey(**resp['resultado'])
elif id == -100 and error.startswith('No se encontr'):
raise NoOrdenesEncontradas
elif id == -200 and 'Se rechaza por PLD' in error:
raise PldRejected(**resp['resultado'])
else:
raise StpmexException(**resp['resultado'])
def _raise_description_exc(resp: Dict) -> NoReturn:
id = resp['id']
desc = resp['descripcion']
if id == 0 and 'Cuenta en revisión' in desc:
        # STP returns this response when an account is registered.
        # No exception is raised because every
        # account goes through this status.
...
elif id == 1 and desc == 'rfc/curp invalido':
raise InvalidRfcOrCurp(**resp)
elif id == 1 and re.match(r'El campo \w+ es invalido', desc):
raise InvalidField(**resp)
elif id == 3 and desc == 'Cuenta Duplicada':
raise DuplicatedAccount(**resp)
elif id == 5 and re.match(r'El campo .* obligatorio \w+', desc):
raise MandatoryField(**resp)
else:
raise StpmexException(**resp)
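# --- Hypothetical usage sketch (comments only, not part of this module) ---
# Placeholders only; real STP credentials and a PEM private key are required.
# client = Client(
#     empresa='MI_EMPRESA',
#     priv_key=open('stp_key.pem').read(),
#     priv_key_passphrase='secret',
#     demo=True,
# )
# client.cuentas, client.ordenes and client.saldos expose the registered resources.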
| [((57, 23, 57, 32), 'requests.Session', 'Session', ({}, {}), '()', False, 'from requests import Response, Session\n'), ((74, 16, 74, 33), 'cryptography.hazmat.backends.default_backend', 'default_backend', ({}, {}), '()', False, 'from cryptography.hazmat.backends import default_backend\n'), ((139, 21, 139, 67), 're.match', 're.match', ({(139, 30, 139, 59): '"""El campo .+ es obligatorio"""', (139, 61, 139, 66): 'error'}, {}), "('El campo .+ es obligatorio', error)", False, 'import re\n'), ((178, 21, 178, 64), 're.match', 're.match', ({(178, 30, 178, 57): '"""El campo \\\\w+ es invalido"""', (178, 59, 178, 63): 'desc'}, {}), "('El campo \\\\w+ es invalido', desc)", False, 'import re\n'), ((141, 22, 143, 5), 're.match', 're.match', ({(142, 8, 142, 50): '"""La clave de rastreo .+ ya fue utilizada"""', (142, 52, 142, 57): 'error'}, {}), "('La clave de rastreo .+ ya fue utilizada', error)", False, 'import re\n'), ((145, 22, 145, 64), 're.match', 're.match', ({(145, 31, 145, 56): '"""La cuenta .+ no existe"""', (145, 58, 145, 63): 'error'}, {}), "('La cuenta .+ no existe', error)", False, 'import re\n'), ((182, 21, 182, 67), 're.match', 're.match', ({(182, 30, 182, 60): '"""El campo .* obligatorio \\\\w+"""', (182, 62, 182, 66): 'desc'}, {}), "('El campo .* obligatorio \\\\w+', desc)", False, 'import re\n'), ((147, 22, 147, 73), 're.match', 're.match', ({(147, 31, 147, 65): '"""La Institucion \\\\d+ no es valida"""', (147, 67, 147, 72): 'error'}, {}), "('La Institucion \\\\d+ no es valida', error)", False, 'import re\n'), ((149, 23, 149, 76), 're.match', 're.match', ({(149, 32, 149, 68): '"""El tipo de cuenta \\\\d+ es invalido"""', (149, 70, 149, 75): 'error'}, {}), "('El tipo de cuenta \\\\d+ es invalido', error)", False, 'import re\n'), ((151, 23, 151, 70), 're.match', 're.match', ({(151, 32, 151, 62): '"""El monto {.+} no es válido"""', (151, 64, 151, 69): 'error'}, {}), "('El monto {.+} no es válido', error)", False, 'import re\n'), ((155, 23, 155, 72), 're.match', 're.match', ({(155, 32, 155, 64): '"""Cuenta {\\\\d+} - {MISMA_CUENTA}"""', (155, 66, 155, 71): 'error'}, {}), "('Cuenta {\\\\d+} - {MISMA_CUENTA}', error)", False, 'import re\n')] |
menify/sandbox | aql/tests/types/aql_test_list_types.py | 32166c71044f0d5b414335b2b6559adc571f568c | import sys
import os.path
import timeit
sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') ))
from aql_tests import skip, AqlTestCase, runLocalTests
from aql.util_types import UniqueList, SplitListType, List, ValueListType
#//===========================================================================//
class TestListTypes( AqlTestCase ):
def test_unique_list(self):
ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest()
self.assertEqual( ul, [2,3,1])
self.assertEqual( list(ul), [1,2,3])
ul = UniqueList()
ul.append( 1 ); ul.selfTest()
ul.append( 3 ); ul.selfTest()
ul.append( 1 ); ul.selfTest()
ul.append( 2 ); ul.selfTest()
ul.append( 3 ); ul.selfTest()
ul.append( 1 ); ul.selfTest()
self.assertEqual( list(ul), [1,3,2])
ul.append_front( 2 ); ul.selfTest()
self.assertEqual( list(ul), [2,1,3])
ul.extend( [4,1,2,2,5] ); ul.selfTest()
self.assertEqual( list(ul), [2,1,3,4,5])
ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest()
self.assertEqual( list(ul), [1,2,3,5,4])
self.assertEqual( list(ul), [1,2,3,5,4])
ul.remove( 1 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,5,4])
ul.remove( 5 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,4])
ul.remove( 55 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,4])
self.assertEqual( ul.pop(), 4 ); ul.selfTest()
self.assertEqual( ul.pop_front(), 2 ); ul.selfTest()
self.assertEqual( ul.pop_front(), 3 ); ul.selfTest()
ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest()
self.assertEqual( list(ul), [1,2,3,4,5])
ul -= [2,2,2,4,33]; ul.selfTest()
self.assertEqual( list(ul), [1,3,5])
self.assertEqual( ul[0], 1)
self.assertEqual( ul[2], 5)
self.assertEqual( ul[1], 3)
self.assertIn( 1, ul)
self.assertEqual( list(reversed(ul)), [5,3,1])
ul.reverse(); ul.selfTest()
self.assertEqual( ul, [5,3,1] )
ul.reverse(); ul.selfTest()
self.assertEqual( str(ul), "[1, 3, 5]" )
self.assertEqual( ul, UniqueList([1, 3, 5]) )
self.assertEqual( ul, UniqueList(ul) )
self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) )
self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] )
#//===========================================================================//
def test_splitlist(self):
l = SplitListType( List, ", \t\n\r" )("1,2, 3,,, \n\r\t4")
self.assertEqual( l, ['1','2','3','4'] )
self.assertEqual( l, "1,2,3,4" )
self.assertEqual( l, "1 2 3 4" )
self.assertEqual( str(l), "1,2,3,4" )
l += "7, 8"
self.assertEqual( l, ['1','2','3','4','7','8'] )
l -= "2, 3"
self.assertEqual( l, ['1','4','7','8'] )
l -= "5"
self.assertEqual( l, ['1','4','7','8'] )
l.extend_front( "10,12" )
self.assertEqual( l, ['10','12','1','4','7','8'] )
l.extend( "0,-1" )
self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] )
#//===========================================================================//
def test_valuelist(self):
l = SplitListType( ValueListType( List, int ), ", \t\n\r" )("1,2, 3,,, \n\r\t4")
self.assertEqual( l, [1,2,3,4] )
self.assertEqual( l, "1,2,3,4" )
self.assertEqual( l, "1 2 3 4" )
self.assertEqual( str(l), "1,2,3,4" )
l += [7, 8]
self.assertEqual( l, ['1','2','3','4','7','8'] )
l += 78
self.assertEqual( l, ['1','2','3','4','7','8', 78] )
l -= 78
self.assertEqual( l, ['1','2','3','4','7','8'] )
l -= "2, 3"
self.assertEqual( l, ['1','4','7','8'] )
l -= "5"
self.assertEqual( l, ['1','4','7','8'] )
l.extend_front( "10,12" )
self.assertEqual( l, ['10','12','1','4','7','8'] )
l.extend( "0,-1" )
self.assertEqual( l, [10,12,1,4,7,8,0,-1] )
l[0] = "5"
self.assertEqual( l, [5,12,1,4,7,8,0,-1] )
#//===========================================================================//
def test_list(self):
l = List([1,2,3,4])
self.assertEqual( l, [1,2,3,4] )
l += [7, 8]
self.assertEqual( l, [1,2,3,4,7,8] )
l += 78
self.assertEqual( l, [1,2,3,4,7,8,78] )
l -= 78
self.assertEqual( l, [1,2,3,4,7,8] )
l -= [2, 3]
self.assertEqual( l, [1,4,7,8] )
l -= 5
self.assertEqual( l, [1,4,7,8] )
l.extend_front( [10,12] )
self.assertEqual( l, [10,12,1,4,7,8] )
l.extend( [0,-1] )
self.assertEqual( l, [10,12,1,4,7,8, 0, -1] )
#//===========================================================================//
if __name__ == "__main__":
runLocalTests()
| [((173, 2, 173, 17), 'aql_tests.runLocalTests', 'runLocalTests', ({}, {}), '()', False, 'from aql_tests import skip, AqlTestCase, runLocalTests\n'), ((16, 9, 16, 36), 'aql.util_types.UniqueList', 'UniqueList', ({(16, 21, 16, 34): '[1, 2, 3, 2, 1, 3]'}, {}), '([1, 2, 3, 2, 1, 3])', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((21, 9, 21, 21), 'aql.util_types.UniqueList', 'UniqueList', ({}, {}), '()', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((146, 8, 146, 23), 'aql.util_types.List', 'List', ({(146, 13, 146, 22): '[1, 2, 3, 4]'}, {}), '([1, 2, 3, 4])', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((78, 26, 78, 47), 'aql.util_types.UniqueList', 'UniqueList', ({(78, 37, 78, 46): '[1, 3, 5]'}, {}), '([1, 3, 5])', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((79, 26, 79, 40), 'aql.util_types.UniqueList', 'UniqueList', ({(79, 37, 79, 39): 'ul'}, {}), '(ul)', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((80, 21, 80, 44), 'aql.util_types.UniqueList', 'UniqueList', ({(80, 32, 80, 43): '[1, 2, 2, 2, 3]'}, {}), '([1, 2, 2, 2, 3])', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((80, 46, 80, 71), 'aql.util_types.UniqueList', 'UniqueList', ({(80, 57, 80, 70): '[1, 2, 1, 1, 1, 4]'}, {}), '([1, 2, 1, 1, 1, 4])', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((81, 21, 81, 44), 'aql.util_types.UniqueList', 'UniqueList', ({(81, 32, 81, 43): '[1, 2, 2, 2, 3]'}, {}), '([1, 2, 2, 2, 3])', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((87, 8, 87, 41), 'aql.util_types.SplitListType', 'SplitListType', ({(87, 23, 87, 27): 'List', (87, 29, 87, 39): "', \\t\\n\\r'"}, {}), "(List, ', \\t\\n\\r')", False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((112, 23, 112, 49), 'aql.util_types.ValueListType', 'ValueListType', ({(112, 38, 112, 42): 'List', (112, 44, 112, 47): 'int'}, {}), '(List, int)', False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n')] |
swatishayna/OnlineEDAAutomation | logger_application/logger.py | a1bfe8b1dee51a4872529a98f6e1136922329e3e | from datetime import datetime
from src.utils import uploaded_file
import os
class App_Logger:
def __init__(self):
pass
def log(self, file_object, email, log_message, log_writer_id):
self.now = datetime.now()
self.date = self.now.date()
self.current_time = self.now.strftime("%H:%M:%S")
file_object.write(
email+ "_eda_" + log_writer_id + "\t\t" +str(self.date) + "/" + str(self.current_time) + "\t\t" +email+ "\t\t" +log_message +"\n")
| [((10, 19, 10, 33), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n')] |
dstarikov/metavault | metasync/params.py | 1933cc6cd828ee9c594a45a78238a9a319de0143 | # config params
KB = 1024
MB = 1024*KB
GB = 1024*MB
# name of meta root dir
META_DIR = ".metasync"
# batching time for daemon
SYNC_WAIT = 3
# blob size
BLOB_UNIT = 32*MB
# Increase of Paxos proposal number
PAXOS_PNUM_INC = 10
# authentication directory
import os
AUTH_DIR = os.path.join(os.path.expanduser("~"), ".metasync")
| [((21, 24, 21, 47), 'os.path.expanduser', 'os.path.expanduser', ({(21, 43, 21, 46): '"""~"""'}, {}), "('~')", False, 'import os\n')] |
Dragonway/LeetCode | py/tests/test_valid_parentheses.py | 53ed9e9bcc1ed6955b013e0d37d2a684c2ec7135 | import unittest
from py.tests.utils import test
from py import valid_parentheses as vp
class TestValidParentheses(unittest.TestCase):
@test(vp.Solution.is_valid)
def test_valid_parentheses(self) -> None:
test("()", result=True)
test("()[]{}", result=True)
test("(]", result=False)
test("([)]", result=False)
test("{[]}", result=True)
test("", result=True)
test(")()", result=False)
test("(())((())))", result=False)
| [((8, 5, 8, 31), 'py.tests.utils.test', 'test', ({(8, 10, 8, 30): 'vp.Solution.is_valid'}, {}), '(vp.Solution.is_valid)', False, 'from py.tests.utils import test\n'), ((10, 8, 10, 40), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n'), ((11, 8, 11, 40), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n'), ((12, 8, 12, 41), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n'), ((13, 8, 13, 41), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n'), ((14, 8, 14, 40), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n'), ((15, 8, 15, 40), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n'), ((16, 8, 16, 41), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n'), ((17, 8, 17, 41), 'py.tests.utils.test', 'test', (), '', False, 'from py.tests.utils import test\n')] |
AchintyaSrivastava/HITNET-Stereo-Depth-estimation | hitnet/hitnet.py | 90654dafc8c8bdf5c17079d3cb8bf7ad6d3da166 | import tensorflow as tf
import numpy as np
import time
import cv2
from hitnet.utils_hitnet import *
drivingStereo_config = CameraConfig(0.546, 1000)
class HitNet():
def __init__(self, model_path, model_type=ModelType.eth3d, camera_config=drivingStereo_config):
self.fps = 0
self.timeLastPrediction = time.time()
self.frameCounter = 0
self.camera_config = camera_config
# Initialize model
self.model = self.initialize_model(model_path, model_type)
def __call__(self, left_img, right_img):
return self.estimate_disparity(left_img, right_img)
def initialize_model(self, model_path, model_type):
self.model_type = model_type
with tf.io.gfile.GFile(model_path, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
loaded = graph_def.ParseFromString(f.read())
# Wrap frozen graph to ConcreteFunctions
if self.model_type == ModelType.flyingthings:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs=["reference_output_disparity:0","secondary_output_disparity:0"])
else:
model = wrap_frozen_graph(graph_def=graph_def,
inputs="input:0",
outputs="reference_output_disparity:0")
return model
def estimate_disparity(self, left_img, right_img):
input_tensor = self.prepare_input(left_img, right_img)
# Perform inference on the image
if self.model_type == ModelType.flyingthings:
left_disparity, right_disparity = self.inference(input_tensor)
self.disparity_map = left_disparity
else:
self.disparity_map = self.inference(input_tensor)
return self.disparity_map
def get_depth(self):
return self.camera_config.f*self.camera_config.baseline/self.disparity_map
def prepare_input(self, left_img, right_img):
if (self.model_type == ModelType.eth3d):
# Shape (1, None, None, 2)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)
left_img = np.expand_dims(left_img,2)
right_img = np.expand_dims(right_img,2)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
else:
# Shape (1, None, None, 6)
left_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2RGB)
right_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2RGB)
combined_img = np.concatenate((left_img, right_img), axis=-1) / 255.0
return tf.convert_to_tensor(np.expand_dims(combined_img, 0), dtype=tf.float32)
def inference(self, input_tensor):
output = self.model(input_tensor)
return np.squeeze(output)
| [((15, 28, 15, 39), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((86, 9, 86, 27), 'numpy.squeeze', 'np.squeeze', ({(86, 20, 86, 26): 'output'}, {}), '(output)', True, 'import numpy as np\n'), ((30, 7, 30, 42), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', ({(30, 25, 30, 35): 'model_path', (30, 37, 30, 41): '"""rb"""'}, {}), "(model_path, 'rb')", True, 'import tensorflow as tf\n'), ((31, 15, 31, 38), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((68, 14, 68, 56), 'cv2.cvtColor', 'cv2.cvtColor', ({(68, 27, 68, 35): 'left_img', (68, 37, 68, 55): 'cv2.COLOR_BGR2GRAY'}, {}), '(left_img, cv2.COLOR_BGR2GRAY)', False, 'import cv2\n'), ((69, 15, 69, 58), 'cv2.cvtColor', 'cv2.cvtColor', ({(69, 28, 69, 37): 'right_img', (69, 39, 69, 57): 'cv2.COLOR_BGR2GRAY'}, {}), '(right_img, cv2.COLOR_BGR2GRAY)', False, 'import cv2\n'), ((71, 14, 71, 40), 'numpy.expand_dims', 'np.expand_dims', ({(71, 29, 71, 37): 'left_img', (71, 38, 71, 39): '2'}, {}), '(left_img, 2)', True, 'import numpy as np\n'), ((72, 15, 72, 42), 'numpy.expand_dims', 'np.expand_dims', ({(72, 30, 72, 39): 'right_img', (72, 40, 72, 41): '2'}, {}), '(right_img, 2)', True, 'import numpy as np\n'), ((76, 14, 76, 55), 'cv2.cvtColor', 'cv2.cvtColor', ({(76, 27, 76, 35): 'left_img', (76, 37, 76, 54): 'cv2.COLOR_BGR2RGB'}, {}), '(left_img, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((77, 15, 77, 57), 'cv2.cvtColor', 'cv2.cvtColor', ({(77, 28, 77, 37): 'right_img', (77, 39, 77, 56): 'cv2.COLOR_BGR2RGB'}, {}), '(right_img, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((81, 30, 81, 61), 'numpy.expand_dims', 'np.expand_dims', ({(81, 45, 81, 57): 'combined_img', (81, 59, 81, 60): '(0)'}, {}), '(combined_img, 0)', True, 'import numpy as np\n'), ((73, 18, 73, 64), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((78, 18, 78, 64), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n')] |
datamade/just-spaces | fobi_custom/plugins/form_elements/fields/intercept/household_tenure/fobi_form_elements.py | cc2b7d1518e5da65a403413d39a309fa3e2ac122 | from django import forms
from fobi.base import FormFieldPlugin, form_element_plugin_registry
from .forms import HouseholdTenureForm
class HouseholdTenurePlugin(FormFieldPlugin):
"""HouseholdTenurePlugin."""
uid = "household_tenure"
name = "What year did you move into your current address?"
form = HouseholdTenureForm
group = "Intercept" # Group to which the plugin belongs to
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
field_kwargs = {
'required': self.data.required,
'label': self.data.label,
'widget': forms.widgets.NumberInput(attrs={}),
}
return [(self.data.name, forms.IntegerField, field_kwargs)]
form_element_plugin_registry.register(HouseholdTenurePlugin)
| [((28, 0, 28, 60), 'fobi.base.form_element_plugin_registry.register', 'form_element_plugin_registry.register', ({(28, 38, 28, 59): 'HouseholdTenurePlugin'}, {}), '(HouseholdTenurePlugin)', False, 'from fobi.base import FormFieldPlugin, form_element_plugin_registry\n'), ((22, 22, 22, 57), 'django.forms.widgets.NumberInput', 'forms.widgets.NumberInput', (), '', False, 'from django import forms\n')] |
fullscreennl/monkeyswipe | utils/scripts/OOOlevelGen/src/sprites/__init__.py | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | __all__ = ['EnemyBucketWithStar',
'Nut',
'Beam',
'Enemy',
'Friend',
'Hero',
'Launcher',
'Rotor',
'SpikeyBuddy',
'Star',
'Wizard',
'EnemyEquipedRotor',
'CyclingEnemyObject',
'Joints',
'Bomb',
'Contacts']
| [] |
mazzaAnt/StackGAN-v2 | code/trainer.py | dcf696f34bc8e360179eec9e7f2e9e66eec8b9a0 | from __future__ import print_function
from six.moves import range
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision.utils as vutils
import numpy as np
import json  # needed below to load the word map with json.load
import os
import time
from PIL import Image, ImageFont, ImageDraw
from copy import deepcopy
from miscc.config import cfg
from miscc.utils import mkdir_p
from CaptionDatasets import *
from tensorboard import summary
from tensorboard import FileWriter
from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3
# ################## Shared functions ###################
def compute_mean_covariance(img):
batch_size = img.size(0)
channel_num = img.size(1)
height = img.size(2)
width = img.size(3)
num_pixels = height * width
# batch_size * channel_num * 1 * 1
mu = img.mean(2, keepdim=True).mean(3, keepdim=True)
# batch_size * channel_num * num_pixels
img_hat = img - mu.expand_as(img)
img_hat = img_hat.view(batch_size, channel_num, num_pixels)
# batch_size * num_pixels * channel_num
img_hat_transpose = img_hat.transpose(1, 2)
# batch_size * channel_num * channel_num
covariance = torch.bmm(img_hat, img_hat_transpose)
covariance = covariance / num_pixels
return mu, covariance
def KL_loss(mu, logvar):
# -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)
KLD = torch.mean(KLD_element).mul_(-0.5)
return KLD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
nn.init.orthogonal(m.weight.data, 1.0)
if m.bias is not None:
m.bias.data.fill_(0.0)
def load_params(model, new_param):
for p, new_p in zip(model.parameters(), new_param):
p.data.copy_(new_p)
def copy_G_params(model):
flatten = deepcopy(list(p.data for p in model.parameters()))
return flatten
def compute_inception_score(predictions, num_splits=1):
# print('predictions', predictions.shape)
scores = []
for i in range(num_splits):
istart = i * predictions.shape[0] // num_splits
iend = (i + 1) * predictions.shape[0] // num_splits
part = predictions[istart:iend, :]
kl = part * \
(np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
def negative_log_posterior_probability(predictions, num_splits=1):
# print('predictions', predictions.shape)
scores = []
for i in range(num_splits):
istart = i * predictions.shape[0] // num_splits
iend = (i + 1) * predictions.shape[0] // num_splits
part = predictions[istart:iend, :]
result = -1. * np.log(np.max(part, 1))
result = np.mean(result)
scores.append(result)
return np.mean(scores), np.std(scores)
def load_network(gpus):
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=gpus)
print(netG)
netsD = []
if cfg.TREE.BRANCH_NUM > 0:
netsD.append(D_NET64())
if cfg.TREE.BRANCH_NUM > 1:
netsD.append(D_NET128())
if cfg.TREE.BRANCH_NUM > 2:
netsD.append(D_NET256())
if cfg.TREE.BRANCH_NUM > 3:
netsD.append(D_NET512())
if cfg.TREE.BRANCH_NUM > 4:
netsD.append(D_NET1024())
# TODO: if cfg.TREE.BRANCH_NUM > 5:
for i in range(len(netsD)):
netsD[i].apply(weights_init)
netsD[i] = torch.nn.DataParallel(netsD[i], device_ids=gpus)
# print(netsD[i])
print('# of netsD', len(netsD))
count = 0
if cfg.TRAIN.NET_G != '':
state_dict = torch.load(cfg.TRAIN.NET_G)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
istart = cfg.TRAIN.NET_G.rfind('_') + 1
iend = cfg.TRAIN.NET_G.rfind('.')
count = cfg.TRAIN.NET_G[istart:iend]
count = int(count) + 1
if cfg.TRAIN.NET_D != '':
for i in range(len(netsD)):
print('Load %s_%d.pth' % (cfg.TRAIN.NET_D, i))
state_dict = torch.load('%s%d.pth' % (cfg.TRAIN.NET_D, i))
netsD[i].load_state_dict(state_dict)
inception_model = INCEPTION_V3()
if cfg.CUDA:
netG.cuda()
for i in range(len(netsD)):
netsD[i].cuda()
inception_model = inception_model.cuda()
inception_model.eval()
return netG, netsD, len(netsD), inception_model, count
def define_optimizers(netG, netsD):
optimizersD = []
num_Ds = len(netsD)
for i in range(num_Ds):
opt = optim.Adam(netsD[i].parameters(),
lr=cfg.TRAIN.DISCRIMINATOR_LR,
betas=(0.5, 0.999))
optimizersD.append(opt)
# G_opt_paras = []
# for p in netG.parameters():
# if p.requires_grad:
# G_opt_paras.append(p)
optimizerG = optim.Adam(netG.parameters(),
lr=cfg.TRAIN.GENERATOR_LR,
betas=(0.5, 0.999))
return optimizerG, optimizersD
def save_model(netG, avg_param_G, netsD, epoch, model_dir):
load_params(netG, avg_param_G)
torch.save(
netG.state_dict(),
'%s/netG_%d.pth' % (model_dir, epoch))
for i in range(len(netsD)):
netD = netsD[i]
torch.save(
netD.state_dict(),
'%s/netD%d.pth' % (model_dir, i))
print('Save G/Ds models.')
def save_real(imgs_tcpu, image_dir):
num = cfg.TRAIN.VIS_COUNT
# The range of real_img (i.e., self.imgs_tcpu[i][0:num])
# is changed to [0, 1] by function vutils.save_image
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples.png' % (image_dir),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
sup_real_img = summary.image('real_img', real_img_set)
def save_img_results(imgs_tcpu, fake_imgs, num_imgs,
count, image_dir, summary_writer):
num = cfg.TRAIN.VIS_COUNT
# The range of real_img (i.e., self.imgs_tcpu[i][0:num])
# is changed to [0, 1] by function vutils.save_image
real_img = imgs_tcpu[-1][0:num]
vutils.save_image(
real_img, '%s/real_samples.png' % (image_dir),
normalize=True)
real_img_set = vutils.make_grid(real_img).numpy()
real_img_set = np.transpose(real_img_set, (1, 2, 0))
real_img_set = real_img_set * 255
real_img_set = real_img_set.astype(np.uint8)
sup_real_img = summary.image('real_img', real_img_set)
summary_writer.add_summary(sup_real_img, count)
for i in range(num_imgs):
fake_img = fake_imgs[i][0:num]
# The range of fake_img.data (i.e., self.fake_imgs[i][0:num])
# is still [-1. 1]...
vutils.save_image(
fake_img.data, '%s/count_%09d_fake_samples_%d.png' %
(image_dir, count, i), normalize=True)
fake_img_set = vutils.make_grid(fake_img.data).cpu().numpy()
fake_img_set = np.transpose(fake_img_set, (1, 2, 0))
fake_img_set = (fake_img_set + 1) * 255 / 2
fake_img_set = fake_img_set.astype(np.uint8)
sup_fake_img = summary.image('fake_img%d' % i, fake_img_set)
summary_writer.add_summary(sup_fake_img, count)
summary_writer.flush()
# ################# Text to image task############################ #
class condGANTrainer(object):
def __init__(self, output_dir, data_loader, imsize):
if cfg.TRAIN.FLAG:
self.model_dir = os.path.join(output_dir, 'Model')
self.image_dir = os.path.join(output_dir, 'Image')
self.log_dir = os.path.join(output_dir, 'Log')
mkdir_p(self.model_dir)
mkdir_p(self.image_dir)
mkdir_p(self.log_dir)
self.summary_writer = FileWriter(self.log_dir)
s_gpus = cfg.GPU_ID.split(',')
self.gpus = [int(ix) for ix in s_gpus]
self.num_gpus = len(self.gpus)
torch.cuda.set_device(self.gpus[0])
cudnn.benchmark = True
self.batch_size = cfg.TRAIN.BATCH_SIZE * self.num_gpus
self.max_epoch = cfg.TRAIN.MAX_EPOCH
self.snapshot_interval = cfg.TRAIN.SNAPSHOT_INTERVAL
self.data_loader = data_loader
self.num_batches = len(self.data_loader)
def prepare_data(self, data):
imgs, w_imgs, t_embedding, _ = data
real_vimgs, wrong_vimgs = [], []
if cfg.CUDA:
vembedding = Variable(t_embedding).cuda()
else:
vembedding = Variable(t_embedding)
for i in range(self.num_Ds):
if cfg.CUDA:
real_vimgs.append(Variable(imgs[i]).cuda())
wrong_vimgs.append(Variable(w_imgs[i]).cuda())
else:
real_vimgs.append(Variable(imgs[i]))
wrong_vimgs.append(Variable(w_imgs[i]))
return imgs, real_vimgs, wrong_vimgs, vembedding
def train_Dnet(self, idx, count):
flag = count % 100
batch_size = self.real_imgs[0].size(0)
criterion, mu = self.criterion, self.mu
netD, optD = self.netsD[idx], self.optimizersD[idx]
real_imgs = self.real_imgs[idx]
wrong_imgs = self.wrong_imgs[idx]
fake_imgs = self.fake_imgs[idx]
#
netD.zero_grad()
# Forward
real_labels = self.real_labels[:batch_size]
fake_labels = self.fake_labels[:batch_size]
# for real
real_logits = netD(real_imgs, mu.detach())
wrong_logits = netD(wrong_imgs, mu.detach())
fake_logits = netD(fake_imgs.detach(), mu.detach())
#
errD_real = criterion(real_logits[0], real_labels)
errD_wrong = criterion(wrong_logits[0], fake_labels)
errD_fake = criterion(fake_logits[0], fake_labels)
if len(real_logits) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
errD_real_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(real_logits[1], real_labels)
errD_wrong_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(wrong_logits[1], real_labels)
errD_fake_uncond = cfg.TRAIN.COEFF.UNCOND_LOSS * \
criterion(fake_logits[1], fake_labels)
#
errD_real = errD_real + errD_real_uncond
errD_wrong = errD_wrong + errD_wrong_uncond
errD_fake = errD_fake + errD_fake_uncond
#
errD = errD_real + errD_wrong + errD_fake
else:
errD = errD_real + 0.5 * (errD_wrong + errD_fake)
# backward
errD.backward()
# update parameters
optD.step()
# log
if flag == 0:
summary_D = summary.scalar('D_loss%d' % idx, errD.item())
self.summary_writer.add_summary(summary_D, count)
return errD
def train_Gnet(self, count):
self.netG.zero_grad()
errG_total = 0
flag = count % 100
batch_size = self.real_imgs[0].size(0)
criterion, mu, logvar = self.criterion, self.mu, self.logvar
real_labels = self.real_labels[:batch_size]
for i in range(self.num_Ds):
outputs = self.netsD[i](self.fake_imgs[i], mu)
errG = criterion(outputs[0], real_labels)
if len(outputs) > 1 and cfg.TRAIN.COEFF.UNCOND_LOSS > 0:
errG_patch = cfg.TRAIN.COEFF.UNCOND_LOSS *\
criterion(outputs[1], real_labels)
errG = errG + errG_patch
errG_total = errG_total + errG
if flag == 0:
summary_D = summary.scalar('G_loss%d' % i, errG.item())
self.summary_writer.add_summary(summary_D, count)
# Compute color consistency losses
if cfg.TRAIN.COEFF.COLOR_LOSS > 0:
if self.num_Ds > 1:
mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-1])
mu2, covariance2 = \
compute_mean_covariance(self.fake_imgs[-2].detach())
like_mu2 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
like_cov2 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
nn.MSELoss()(covariance1, covariance2)
errG_total = errG_total + like_mu2 + like_cov2
if flag == 0:
sum_mu = summary.scalar('G_like_mu2', like_mu2.item())
self.summary_writer.add_summary(sum_mu, count)
sum_cov = summary.scalar('G_like_cov2', like_cov2.item())
self.summary_writer.add_summary(sum_cov, count)
if self.num_Ds > 2:
mu1, covariance1 = compute_mean_covariance(self.fake_imgs[-2])
mu2, covariance2 = \
compute_mean_covariance(self.fake_imgs[-3].detach())
like_mu1 = cfg.TRAIN.COEFF.COLOR_LOSS * nn.MSELoss()(mu1, mu2)
like_cov1 = cfg.TRAIN.COEFF.COLOR_LOSS * 5 * \
nn.MSELoss()(covariance1, covariance2)
errG_total = errG_total + like_mu1 + like_cov1
if flag == 0:
sum_mu = summary.scalar('G_like_mu1', like_mu1.item())
self.summary_writer.add_summary(sum_mu, count)
sum_cov = summary.scalar('G_like_cov1', like_cov1.item())
self.summary_writer.add_summary(sum_cov, count)
kl_loss = KL_loss(mu, logvar) * cfg.TRAIN.COEFF.KL
errG_total = errG_total + kl_loss
# Postpone the backward propagation
# errG_total.backward()
# self.optimizerG.step()
return kl_loss, errG_total
def train(self):
self.netG, self.netsD, self.num_Ds,\
self.inception_model, start_count = load_network(self.gpus)
avg_param_G = copy_G_params(self.netG)
self.optimizerG, self.optimizersD = \
define_optimizers(self.netG, self.netsD)
self.criterion = nn.BCELoss()
self.SATcriterion = nn.CrossEntropyLoss()
self.real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1))
self.fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0))
self.gradient_one = torch.FloatTensor([1.0])
self.gradient_half = torch.FloatTensor([0.5])
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(self.batch_size, nz))
fixed_noise = Variable(torch.FloatTensor(self.batch_size, nz).normal_(0, 1))
# Data parameters
data_folder = 'birds_output' # folder with data files saved by create_input_files.py
data_name = 'CUB_5_cap_per_img_5_min_word_freq' # base name shared by data files
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
# Show, Attend, and Tell Dataloader
train_loader = torch.utils.data.DataLoader(
CaptionDataset(data_folder, data_name, 'TRAIN', transform=transforms.Compose([normalize])),
batch_size=self.batch_size, shuffle=True, num_workers=int(cfg.WORKERS), pin_memory=True)
if cfg.CUDA:
self.criterion.cuda()
self.SATcriterion.cuda() # Compute SATloss
self.real_labels = self.real_labels.cuda()
self.fake_labels = self.fake_labels.cuda()
self.gradient_one = self.gradient_one.cuda()
self.gradient_half = self.gradient_half.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
predictions = []
count = start_count
start_epoch = start_count // (self.num_batches)
for epoch in range(start_epoch, self.max_epoch):
start_t = time.time()
# for step, data in enumerate(self.data_loader, 0):
for step, data in enumerate(zip(self.data_loader, train_loader), 0):
data_1 = data[0]
_, caps, caplens = data[1]
data = data_1
#######################################################
# (0) Prepare training data
######################################################
self.imgs_tcpu, self.real_imgs, self.wrong_imgs, \
self.txt_embedding = self.prepare_data(data)
# Testing line for real samples
if epoch == start_epoch and step == 0:
print ('Checking real samples at first...')
save_real(self.imgs_tcpu, self.image_dir)
#######################################################
# (1) Generate fake images
######################################################
noise.data.normal_(0, 1)
self.fake_imgs, self.mu, self.logvar = \
self.netG(noise, self.txt_embedding)
# len(self.fake_imgs) = NUM_BRANCHES
# self.fake_imgs[0].shape = [batch_size, 3, 64, 64]
# self.fake_imgs[1].shape = [batch_size, 3, 128, 128]
# self.fake_imgs[2].shape = [batch_size, 3, 256, 256]
#######################################################
# (*) Forward fake images to SAT
######################################################
from SATmodels import Encoder, DecoderWithAttention
from torch.nn.utils.rnn import pack_padded_sequence
fine_tune_encoder = False
# Read word map
word_map_file = os.path.join(data_folder, 'WORDMAP_' + data_name + '.json')
with open(word_map_file, 'r') as j:
word_map = json.load(j)
# Define the encoder/decoder structure for SAT model
decoder = DecoderWithAttention(attention_dim=512,
embed_dim=512,
decoder_dim=512,
vocab_size=len(word_map),
dropout=0.5).cuda()
decoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, decoder.parameters()),
lr=4e-4)
encoder = Encoder().cuda()
encoder.fine_tune(fine_tune_encoder)
encoder_optimizer = torch.optim.Adam(params=filter(lambda p: p.requires_grad, encoder.parameters()),
lr=1e-4) if fine_tune_encoder else None
SATloss = 0
# Compute the SAT loss after forwarding the SAT model
for idx in range(len(self.fake_imgs)):
img = encoder(self.fake_imgs[idx])
scores, caps_sorted, decode_lengths, alphas, sort_ind = decoder(img, caps, caplens)
targets = caps_sorted[:, 1:]
scores, _ = pack_padded_sequence(scores, decode_lengths, batch_first=True).cuda()
targets, _ = pack_padded_sequence(targets, decode_lengths, batch_first=True).cuda()
SATloss += self.SATcriterion(scores, targets) + 1 * ((1. - alphas.sum(dim=1)) ** 2).mean()
# Set zero_grad for encoder/decoder
decoder_optimizer.zero_grad()
if encoder_optimizer is not None:
encoder_optimizer.zero_grad()
#######################################################
# (2) Update D network
######################################################
errD_total = 0
for i in range(self.num_Ds):
errD = self.train_Dnet(i, count)
errD_total += errD
#######################################################
# (3) Update G network: maximize log(D(G(z)))
######################################################
kl_loss, errG_total = self.train_Gnet(count)
for p, avg_p in zip(self.netG.parameters(), avg_param_G):
avg_p.mul_(0.999).add_(0.001, p.data)
# Combine with G and SAT first, then back propagation
errG_total += SATloss
errG_total.backward()
self.optimizerG.step()
#######################################################
# (*) Update SAT network:
######################################################
# Update weights
decoder_optimizer.step()
if encoder_optimizer is not None:
encoder_optimizer.step()
#######################################################
# (*) Prediction and Inception score:
######################################################
pred = self.inception_model(self.fake_imgs[-1].detach())
predictions.append(pred.data.cpu().numpy())
if count % 100 == 0:
summary_D = summary.scalar('D_loss', errD_total.item())
summary_G = summary.scalar('G_loss', errG_total.item())
summary_KL = summary.scalar('KL_loss', kl_loss.item())
self.summary_writer.add_summary(summary_D, count)
self.summary_writer.add_summary(summary_G, count)
self.summary_writer.add_summary(summary_KL, count)
count += 1
#######################################################
# (*) Save Images/Log/Model per SNAPSHOT_INTERVAL:
######################################################
if count % cfg.TRAIN.SNAPSHOT_INTERVAL == 0:
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
# Save images
backup_para = copy_G_params(self.netG)
load_params(self.netG, avg_param_G)
#
self.fake_imgs, _, _ = self.netG(fixed_noise, self.txt_embedding)
save_img_results(self.imgs_tcpu, self.fake_imgs, self.num_Ds,
count, self.image_dir, self.summary_writer)
#
load_params(self.netG, backup_para)
# Compute inception score
if len(predictions) > 500:
predictions = np.concatenate(predictions, 0)
mean, std = compute_inception_score(predictions, 10)
# print('mean:', mean, 'std', std)
m_incep = summary.scalar('Inception_mean', mean)
self.summary_writer.add_summary(m_incep, count)
#
mean_nlpp, std_nlpp = negative_log_posterior_probability(predictions, 10)
m_nlpp = summary.scalar('NLPP_mean', mean_nlpp)
self.summary_writer.add_summary(m_nlpp, count)
#
predictions = []
end_t = time.time()
print('''[%d/%d][%d]
Loss_D: %.2f Loss_G: %.2f Loss_KL: %.2f Time: %.2fs
''' # D(real): %.4f D(wrong):%.4f D(fake) %.4f
% (epoch, self.max_epoch, self.num_batches,
errD_total.item(), errG_total.item(),
kl_loss.item(), end_t - start_t))
save_model(self.netG, avg_param_G, self.netsD, count, self.model_dir)
self.summary_writer.close()
def save_superimages(self, images_list, filenames,
save_dir, split_dir, imsize):
batch_size = images_list[0].size(0)
num_sentences = len(images_list)
for i in range(batch_size):
s_tmp = '%s/super/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
#
savename = '%s_%d.png' % (s_tmp, imsize)
super_img = []
for j in range(num_sentences):
img = images_list[j][i]
# print(img.size())
img = img.view(1, 3, imsize, imsize)
# print(img.size())
super_img.append(img)
# break
super_img = torch.cat(super_img, 0)
vutils.save_image(super_img, savename, nrow=10, normalize=True)
def save_singleimages(self, images, filenames,
save_dir, split_dir, sentenceID, imsize):
for i in range(images.size(0)):
s_tmp = '%s/single_samples/%s/%s' %\
(save_dir, split_dir, filenames[i])
folder = s_tmp[:s_tmp.rfind('/')]
if not os.path.isdir(folder):
print('Make a new folder: ', folder)
mkdir_p(folder)
fullpath = '%s_%d_sentence%d.png' % (s_tmp, imsize, sentenceID)
# range from [-1, 1] to [0, 255]
img = images[i].add(1).div(2).mul(255).clamp(0, 255).byte()
ndarr = img.permute(1, 2, 0).data.cpu().numpy()
im = Image.fromarray(ndarr)
im.save(fullpath)
def evaluate(self, split_dir):
if cfg.TRAIN.NET_G == '':
print('Error: the path for morels is not found!')
else:
# Build and load the generator
if split_dir == 'test':
split_dir = 'valid'
netG = G_NET()
netG.apply(weights_init)
netG = torch.nn.DataParallel(netG, device_ids=self.gpus)
print(netG)
# state_dict = torch.load(cfg.TRAIN.NET_G)
state_dict = \
torch.load(cfg.TRAIN.NET_G,
map_location=lambda storage, loc: storage)
netG.load_state_dict(state_dict)
print('Load ', cfg.TRAIN.NET_G)
# the path to save generated images
s_tmp = cfg.TRAIN.NET_G
istart = s_tmp.rfind('_') + 1
iend = s_tmp.rfind('.')
iteration = int(s_tmp[istart:iend])
s_tmp = s_tmp[:s_tmp.rfind('/')]
save_dir = '%s/iteration%d' % (s_tmp, iteration)
nz = cfg.GAN.Z_DIM
noise = Variable(torch.FloatTensor(self.batch_size, nz))
if cfg.CUDA:
netG.cuda()
noise = noise.cuda()
# switch to evaluate mode
netG.eval()
for step, data in enumerate(self.data_loader, 0):
imgs, t_embeddings, filenames = data
if cfg.CUDA:
t_embeddings = Variable(t_embeddings).cuda()
else:
t_embeddings = Variable(t_embeddings)
# print(t_embeddings[:, 0, :], t_embeddings.size(1))
embedding_dim = t_embeddings.size(1)
batch_size = imgs[0].size(0)
noise.data.resize_(batch_size, nz)
noise.data.normal_(0, 1)
fake_img_list = []
for i in range(embedding_dim):
fake_imgs, _, _ = netG(noise, t_embeddings[:, i, :])
if cfg.TEST.B_EXAMPLE:
# fake_img_list.append(fake_imgs[0].data.cpu())
# fake_img_list.append(fake_imgs[1].data.cpu())
fake_img_list.append(fake_imgs[2].data.cpu())
else:
self.save_singleimages(fake_imgs[-1], filenames,
save_dir, split_dir, i, 256)
# self.save_singleimages(fake_imgs[-2], filenames,
# save_dir, split_dir, i, 128)
# self.save_singleimages(fake_imgs[-3], filenames,
# save_dir, split_dir, i, 64)
# break
if cfg.TEST.B_EXAMPLE:
# self.save_superimages(fake_img_list, filenames,
# save_dir, split_dir, 64)
# self.save_superimages(fake_img_list, filenames,
# save_dir, split_dir, 128)
self.save_superimages(fake_img_list, filenames,
save_dir, split_dir, 256)
| [((46, 17, 46, 54), 'torch.bmm', 'torch.bmm', ({(46, 27, 46, 34): 'img_hat', (46, 36, 46, 53): 'img_hat_transpose'}, {}), '(img_hat, img_hat_transpose)', False, 'import torch\n'), ((85, 13, 85, 30), 'six.moves.range', 'range', ({(85, 19, 85, 29): 'num_splits'}, {}), '(num_splits)', False, 'from six.moves import range\n'), ((99, 13, 99, 30), 'six.moves.range', 'range', ({(99, 19, 99, 29): 'num_splits'}, {}), '(num_splits)', False, 'from six.moves import range\n'), ((110, 11, 110, 18), 'model.G_NET', 'G_NET', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((112, 11, 112, 55), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (), '', False, 'import torch\n'), ((151, 22, 151, 36), 'model.INCEPTION_V3', 'INCEPTION_V3', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((166, 13, 166, 26), 'six.moves.range', 'range', ({(166, 19, 166, 25): 'num_Ds'}, {}), '(num_Ds)', False, 'from six.moves import range\n'), ((200, 4, 202, 23), 'torchvision.utils.save_image', 'vutils.save_image', (), '', True, 'import torchvision.utils as vutils\n'), ((204, 19, 204, 56), 'numpy.transpose', 'np.transpose', ({(204, 32, 204, 44): 'real_img_set', (204, 46, 204, 55): '(1, 2, 0)'}, {}), '(real_img_set, (1, 2, 0))', True, 'import numpy as np\n'), ((207, 19, 207, 58), 'tensorboard.summary.image', 'summary.image', ({(207, 33, 207, 43): '"""real_img"""', (207, 45, 207, 57): 'real_img_set'}, {}), "('real_img', real_img_set)", False, 'from tensorboard import summary\n'), ((217, 4, 219, 23), 'torchvision.utils.save_image', 'vutils.save_image', (), '', True, 'import torchvision.utils as vutils\n'), ((221, 19, 221, 56), 'numpy.transpose', 'np.transpose', ({(221, 32, 221, 44): 'real_img_set', (221, 46, 221, 55): '(1, 2, 0)'}, {}), '(real_img_set, (1, 2, 0))', True, 'import numpy as np\n'), ((224, 19, 224, 58), 'tensorboard.summary.image', 'summary.image', ({(224, 33, 224, 43): '"""real_img"""', (224, 45, 224, 57): 'real_img_set'}, {}), "('real_img', real_img_set)", False, 'from tensorboard import summary\n'), ((227, 13, 227, 28), 'six.moves.range', 'range', ({(227, 19, 227, 27): 'num_imgs'}, {}), '(num_imgs)', False, 'from six.moves import range\n'), ((62, 8, 62, 46), 'torch.nn.init.orthogonal', 'nn.init.orthogonal', ({(62, 27, 62, 40): 'm.weight.data', (62, 42, 62, 45): '(1.0)'}, {}), '(m.weight.data, 1.0)', True, 'import torch.nn as nn\n'), ((93, 11, 93, 26), 'numpy.mean', 'np.mean', ({(93, 19, 93, 25): 'scores'}, {}), '(scores)', True, 'import numpy as np\n'), ((93, 28, 93, 42), 'numpy.std', 'np.std', ({(93, 35, 93, 41): 'scores'}, {}), '(scores)', True, 'import numpy as np\n'), ((104, 17, 104, 32), 'numpy.mean', 'np.mean', ({(104, 25, 104, 31): 'result'}, {}), '(result)', True, 'import numpy as np\n'), ((106, 11, 106, 26), 'numpy.mean', 'np.mean', ({(106, 19, 106, 25): 'scores'}, {}), '(scores)', True, 'import numpy as np\n'), ((106, 28, 106, 42), 'numpy.std', 'np.std', ({(106, 35, 106, 41): 'scores'}, {}), '(scores)', True, 'import numpy as np\n'), ((130, 19, 130, 67), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (), '', False, 'import torch\n'), ((136, 21, 136, 48), 'torch.load', 'torch.load', ({(136, 32, 136, 47): 'cfg.TRAIN.NET_G'}, {}), '(cfg.TRAIN.NET_G)', False, 'import torch\n'), ((141, 15, 141, 41), 'miscc.config.cfg.TRAIN.NET_G.rfind', 'cfg.TRAIN.NET_G.rfind', ({(141, 37, 141, 40): '"""."""'}, {}), "('.')", False, 'from miscc.config import cfg\n'), ((231, 8, 233, 50), 
'torchvision.utils.save_image', 'vutils.save_image', (), '', True, 'import torchvision.utils as vutils\n'), ((237, 23, 237, 60), 'numpy.transpose', 'np.transpose', ({(237, 36, 237, 48): 'fake_img_set', (237, 50, 237, 59): '(1, 2, 0)'}, {}), '(fake_img_set, (1, 2, 0))', True, 'import numpy as np\n'), ((241, 23, 241, 68), 'tensorboard.summary.image', 'summary.image', ({(241, 37, 241, 53): "'fake_img%d' % i", (241, 55, 241, 67): 'fake_img_set'}, {}), "('fake_img%d' % i, fake_img_set)", False, 'from tensorboard import summary\n'), ((258, 17, 258, 38), 'miscc.config.cfg.GPU_ID.split', 'cfg.GPU_ID.split', ({(258, 34, 258, 37): '""","""'}, {}), "(',')", False, 'from miscc.config import cfg\n'), ((261, 8, 261, 43), 'torch.cuda.set_device', 'torch.cuda.set_device', ({(261, 30, 261, 42): 'self.gpus[0]'}, {}), '(self.gpus[0])', False, 'import torch\n'), ((279, 17, 279, 35), 'six.moves.range', 'range', ({(279, 23, 279, 34): 'self.num_Ds'}, {}), '(self.num_Ds)', False, 'from six.moves import range\n'), ((342, 17, 342, 35), 'six.moves.range', 'range', ({(342, 23, 342, 34): 'self.num_Ds'}, {}), '(self.num_Ds)', False, 'from six.moves import range\n'), ((398, 25, 398, 37), 'torch.nn.BCELoss', 'nn.BCELoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((399, 28, 399, 49), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((404, 28, 404, 52), 'torch.FloatTensor', 'torch.FloatTensor', ({(404, 46, 404, 51): '[1.0]'}, {}), '([1.0])', False, 'import torch\n'), ((405, 29, 405, 53), 'torch.FloatTensor', 'torch.FloatTensor', ({(405, 47, 405, 52): '[0.5]'}, {}), '([0.5])', False, 'import torch\n'), ((415, 20, 415, 95), 'torchvision.transforms.Normalize', 'transforms.Normalize', (), '', True, 'import torchvision.transforms as transforms\n'), ((434, 21, 434, 55), 'six.moves.range', 'range', ({(434, 27, 434, 38): 'start_epoch', (434, 40, 434, 54): 'self.max_epoch'}, {}), '(start_epoch, self.max_epoch)', False, 'from six.moves import range\n'), ((599, 17, 599, 34), 'six.moves.range', 'range', ({(599, 23, 599, 33): 'batch_size'}, {}), '(batch_size)', False, 'from six.moves import range\n'), ((55, 10, 55, 33), 'torch.mean', 'torch.mean', ({(55, 21, 55, 32): 'KLD_element'}, {}), '(KLD_element)', False, 'import torch\n'), ((91, 21, 91, 34), 'numpy.sum', 'np.sum', ({(91, 28, 91, 30): 'kl', (91, 32, 91, 33): '1'}, {}), '(kl, 1)', True, 'import numpy as np\n'), ((92, 22, 92, 32), 'numpy.exp', 'np.exp', ({(92, 29, 92, 31): 'kl'}, {}), '(kl)', True, 'import numpy as np\n'), ((117, 21, 117, 30), 'model.D_NET64', 'D_NET64', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((119, 21, 119, 31), 'model.D_NET128', 'D_NET128', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((121, 21, 121, 31), 'model.D_NET256', 'D_NET256', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((123, 21, 123, 31), 'model.D_NET512', 'D_NET512', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((125, 21, 125, 32), 'model.D_NET1024', 'D_NET1024', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((140, 17, 140, 43), 'miscc.config.cfg.TRAIN.NET_G.rfind', 'cfg.TRAIN.NET_G.rfind', ({(140, 39, 140, 42): '"""_"""'}, {}), "('_')", False, 'from miscc.config import cfg\n'), 
((148, 25, 148, 70), 'torch.load', 'torch.load', ({(148, 36, 148, 69): "'%s%d.pth' % (cfg.TRAIN.NET_D, i)"}, {}), "('%s%d.pth' % (cfg.TRAIN.NET_D, i))", False, 'import torch\n'), ((203, 19, 203, 45), 'torchvision.utils.make_grid', 'vutils.make_grid', ({(203, 36, 203, 44): 'real_img'}, {}), '(real_img)', True, 'import torchvision.utils as vutils\n'), ((220, 19, 220, 45), 'torchvision.utils.make_grid', 'vutils.make_grid', ({(220, 36, 220, 44): 'real_img'}, {}), '(real_img)', True, 'import torchvision.utils as vutils\n'), ((250, 29, 250, 62), 'os.path.join', 'os.path.join', ({(250, 42, 250, 52): 'output_dir', (250, 54, 250, 61): '"""Model"""'}, {}), "(output_dir, 'Model')", False, 'import os\n'), ((251, 29, 251, 62), 'os.path.join', 'os.path.join', ({(251, 42, 251, 52): 'output_dir', (251, 54, 251, 61): '"""Image"""'}, {}), "(output_dir, 'Image')", False, 'import os\n'), ((252, 27, 252, 58), 'os.path.join', 'os.path.join', ({(252, 40, 252, 50): 'output_dir', (252, 52, 252, 57): '"""Log"""'}, {}), "(output_dir, 'Log')", False, 'import os\n'), ((253, 12, 253, 35), 'miscc.utils.mkdir_p', 'mkdir_p', ({(253, 20, 253, 34): 'self.model_dir'}, {}), '(self.model_dir)', False, 'from miscc.utils import mkdir_p\n'), ((254, 12, 254, 35), 'miscc.utils.mkdir_p', 'mkdir_p', ({(254, 20, 254, 34): 'self.image_dir'}, {}), '(self.image_dir)', False, 'from miscc.utils import mkdir_p\n'), ((255, 12, 255, 33), 'miscc.utils.mkdir_p', 'mkdir_p', ({(255, 20, 255, 32): 'self.log_dir'}, {}), '(self.log_dir)', False, 'from miscc.utils import mkdir_p\n'), ((256, 34, 256, 58), 'tensorboard.FileWriter', 'FileWriter', ({(256, 45, 256, 57): 'self.log_dir'}, {}), '(self.log_dir)', False, 'from tensorboard import FileWriter\n'), ((278, 25, 278, 46), 'torch.autograd.Variable', 'Variable', ({(278, 34, 278, 45): 't_embedding'}, {}), '(t_embedding)', False, 'from torch.autograd import Variable\n'), ((408, 25, 408, 63), 'torch.FloatTensor', 'torch.FloatTensor', ({(408, 43, 408, 58): 'self.batch_size', (408, 60, 408, 62): 'nz'}, {}), '(self.batch_size, nz)', False, 'import torch\n'), ((435, 22, 435, 33), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((584, 20, 584, 31), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((609, 21, 609, 41), 'six.moves.range', 'range', ({(609, 27, 609, 40): 'num_sentences'}, {}), '(num_sentences)', False, 'from six.moves import range\n'), ((616, 24, 616, 47), 'torch.cat', 'torch.cat', ({(616, 34, 616, 43): 'super_img', (616, 45, 616, 46): '0'}, {}), '(super_img, 0)', False, 'import torch\n'), ((617, 12, 617, 75), 'torchvision.utils.save_image', 'vutils.save_image', (), '', True, 'import torchvision.utils as vutils\n'), ((633, 17, 633, 39), 'PIL.Image.fromarray', 'Image.fromarray', ({(633, 33, 633, 38): 'ndarr'}, {}), '(ndarr)', False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((643, 19, 643, 26), 'model.G_NET', 'G_NET', ({}, {}), '()', False, 'from model import G_NET, D_NET64, D_NET128, D_NET256, D_NET512, D_NET1024, INCEPTION_V3\n'), ((645, 19, 645, 68), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (), '', False, 'import torch\n'), ((649, 16, 650, 69), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((67, 8, 67, 46), 'torch.nn.init.orthogonal', 'nn.init.orthogonal', ({(67, 27, 67, 40): 'm.weight.data', (67, 42, 67, 45): '(1.0)'}, {}), '(m.weight.data, 1.0)', True, 'import torch.nn as nn\n'), ((90, 13, 90, 25), 'numpy.log', 'np.log', ({(90, 20, 90, 24): 'part'}, {}), '(part)', True, 'import numpy as np\n'), ((103, 30, 103, 45), 
'numpy.max', 'np.max', ({(103, 37, 103, 41): 'part', (103, 43, 103, 44): '(1)'}, {}), '(part, 1)', True, 'import numpy as np\n'), ((476, 32, 476, 91), 'os.path.join', 'os.path.join', ({(476, 45, 476, 56): 'data_folder', (476, 58, 476, 90): "'WORDMAP_' + data_name + '.json'"}, {}), "(data_folder, 'WORDMAP_' + data_name + '.json')", False, 'import os\n'), ((515, 25, 515, 43), 'six.moves.range', 'range', ({(515, 31, 515, 42): 'self.num_Ds'}, {}), '(self.num_Ds)', False, 'from six.moves import range\n'), ((603, 19, 603, 40), 'os.path.isdir', 'os.path.isdir', ({(603, 33, 603, 39): 'folder'}, {}), '(folder)', False, 'import os\n'), ((605, 16, 605, 31), 'miscc.utils.mkdir_p', 'mkdir_p', ({(605, 24, 605, 30): 'folder'}, {}), '(folder)', False, 'from miscc.utils import mkdir_p\n'), ((625, 19, 625, 40), 'os.path.isdir', 'os.path.isdir', ({(625, 33, 625, 39): 'folder'}, {}), '(folder)', False, 'import os\n'), ((627, 16, 627, 31), 'miscc.utils.mkdir_p', 'mkdir_p', ({(627, 24, 627, 30): 'folder'}, {}), '(folder)', False, 'from miscc.utils import mkdir_p\n'), ((663, 29, 663, 67), 'torch.FloatTensor', 'torch.FloatTensor', ({(663, 47, 663, 62): 'self.batch_size', (663, 64, 663, 66): 'nz'}, {}), '(self.batch_size, nz)', False, 'import torch\n'), ((684, 25, 684, 45), 'six.moves.range', 'range', ({(684, 31, 684, 44): 'embedding_dim'}, {}), '(embedding_dim)', False, 'from six.moves import range\n'), ((276, 25, 276, 46), 'torch.autograd.Variable', 'Variable', ({(276, 34, 276, 45): 't_embedding'}, {}), '(t_embedding)', False, 'from torch.autograd import Variable\n'), ((284, 34, 284, 51), 'torch.autograd.Variable', 'Variable', ({(284, 43, 284, 50): 'imgs[i]'}, {}), '(imgs[i])', False, 'from torch.autograd import Variable\n'), ((285, 35, 285, 54), 'torch.autograd.Variable', 'Variable', ({(285, 44, 285, 53): 'w_imgs[i]'}, {}), '(w_imgs[i])', False, 'from torch.autograd import Variable\n'), ((401, 36, 401, 70), 'torch.FloatTensor', 'torch.FloatTensor', ({(401, 54, 401, 69): 'self.batch_size'}, {}), '(self.batch_size)', False, 'import torch\n'), ((402, 36, 402, 70), 'torch.FloatTensor', 'torch.FloatTensor', ({(402, 54, 402, 69): 'self.batch_size'}, {}), '(self.batch_size)', False, 'import torch\n'), ((409, 31, 409, 69), 'torch.FloatTensor', 'torch.FloatTensor', ({(409, 49, 409, 64): 'self.batch_size', (409, 66, 409, 68): 'nz'}, {}), '(self.batch_size, nz)', False, 'import torch\n'), ((419, 78, 419, 109), 'torchvision.transforms.Compose', 'transforms.Compose', ({(419, 97, 419, 108): '[normalize]'}, {}), '([normalize])', True, 'import torchvision.transforms as transforms\n'), ((675, 35, 675, 57), 'torch.autograd.Variable', 'Variable', ({(675, 44, 675, 56): 't_embeddings'}, {}), '(t_embeddings)', False, 'from torch.autograd import Variable\n'), ((90, 50, 90, 66), 'numpy.mean', 'np.mean', ({(90, 58, 90, 62): 'part', (90, 64, 90, 65): '(0)'}, {}), '(part, 0)', True, 'import numpy as np\n'), ((235, 23, 235, 54), 'torchvision.utils.make_grid', 'vutils.make_grid', ({(235, 40, 235, 53): 'fake_img.data'}, {}), '(fake_img.data)', True, 'import torchvision.utils as vutils\n'), ((360, 56, 360, 68), 'torch.nn.MSELoss', 'nn.MSELoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((362, 20, 362, 32), 'torch.nn.MSELoss', 'nn.MSELoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((373, 56, 373, 68), 'torch.nn.MSELoss', 'nn.MSELoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((375, 20, 375, 32), 'torch.nn.MSELoss', 'nn.MSELoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((489, 26, 489, 35), 
'SATmodels.Encoder', 'Encoder', ({}, {}), '()', False, 'from SATmodels import Encoder, DecoderWithAttention\n'), ((572, 38, 572, 68), 'numpy.concatenate', 'np.concatenate', ({(572, 53, 572, 64): 'predictions', (572, 66, 572, 67): '0'}, {}), '(predictions, 0)', True, 'import numpy as np\n'), ((575, 34, 575, 72), 'tensorboard.summary.scalar', 'summary.scalar', ({(575, 49, 575, 65): '"""Inception_mean"""', (575, 67, 575, 71): 'mean'}, {}), "('Inception_mean', mean)", False, 'from tensorboard import summary\n'), ((579, 33, 579, 71), 'tensorboard.summary.scalar', 'summary.scalar', ({(579, 48, 579, 59): '"""NLPP_mean"""', (579, 61, 579, 70): 'mean_nlpp'}, {}), "('NLPP_mean', mean_nlpp)", False, 'from tensorboard import summary\n'), ((281, 34, 281, 51), 'torch.autograd.Variable', 'Variable', ({(281, 43, 281, 50): 'imgs[i]'}, {}), '(imgs[i])', False, 'from torch.autograd import Variable\n'), ((282, 35, 282, 54), 'torch.autograd.Variable', 'Variable', ({(282, 44, 282, 53): 'w_imgs[i]'}, {}), '(w_imgs[i])', False, 'from torch.autograd import Variable\n'), ((501, 32, 501, 94), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (), '', False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((502, 33, 502, 96), 'torch.nn.utils.rnn.pack_padded_sequence', 'pack_padded_sequence', (), '', False, 'from torch.nn.utils.rnn import pack_padded_sequence\n'), ((673, 35, 673, 57), 'torch.autograd.Variable', 'Variable', ({(673, 44, 673, 56): 't_embeddings'}, {}), '(t_embeddings)', False, 'from torch.autograd import Variable\n')] |
LeaHolc/recepcija | spletni_vmesnik.py | bff9f804e795e45c2da214432042c0ae067783b0 | from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file
import bottle
import controller
from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna
import datetime as dt
@bottle.get('/')
def root():
redirect('/domov')
@bottle.get('/domov')
def index():
parcele = dobi_parcele_za_prikaz(dt.date.today())
return template("domov", parcele=parcele, hide_header_back=True)
@bottle.get("/parcela/<id_parcele>")
def parcela(id_parcele):
    'Check the state of the parcel'
rez, gostje = dobi_info_parcele(id_parcele, dt.date.today())
if rez is not None:
stanje = "Parcela je trenutno zasedena"
else:
stanje = "Parcela je trenutno na voljo"
return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje)
@bottle.get("/naredi-rezervacijo/<id_parcele>")
def nova_rezervacija(id_parcele=None):
print(id_parcele)
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow)
@bottle.post("/naredi-rezervacijo")
def naredi_novo_rezervacijo():
    "Create a new reservation in the model and add the first guest to it"
    # Read the values from the form
ime = request.forms.ime#get("")
priimek = request.forms.priimek#get("")
emso = request.forms.emso#get("")
drzava = request.forms.drzava#get("")
id_parcele = request.forms.id_parcele#get("")
od = request.forms.zacetek#get("")
do = request.forms.konec#get("")
print(ime, priimek)
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
        print("Error while converting the dates")
        return redirect(f"/naredi-rezervacijo/{id_parcele}")
rezervacija = naredi_rezervacijo(id_parcele)
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
}, datum_od, datum_do)
return redirect(f"/parcela/{id_parcele}")
@bottle.get("/dodaj-gosta/<id_rezervacije>")
def get_dodaj_gosta_na_rezervacijo(id_rezervacije):
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
return template("dodajanje_gosta", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow)
@bottle.post("/dodaj-gosta-na-rezervacijo")
def post_dodaj_gosta_na_rezervacijo():
    "Add a guest to an existing reservation in the model"
    # Read the values from the form
ime = request.forms.ime
priimek = request.forms.priimek
emso = request.forms.emso#get("")
drzava = request.forms.drzava#get("")
id_rezervacije = request.forms.rez#get("")
od = request.forms.zacetek#get("")
do = request.forms.konec#get("")
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
        print("Error while converting the dates")
        return redirect(f"/dodaj-gosta/{id_rezervacije}")
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
},datum_od,datum_do)
print(id_rezervacije)
return redirect(f"/parcela/{rezervacija.id_parcele}")
@bottle.get("/predracun/<id_rezervacije>")
def predracun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = dobi_postavke_racuna(rezervacija)
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.get("/zakljuci/<id_rezervacije>")
def racun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today())
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.error(404)
def napaka404(a):
return template("error", sporocilo="Stran ne obstaja!", naslov="404")
@bottle.error(500)
def napaka500(a):
return template("error", sporocilo="Napaka streznika!", naslov="500")
bottle.run(reloader=True, debug=True)
| [((7, 1, 7, 16), 'bottle.get', 'bottle.get', ({(7, 12, 7, 15): '"""/"""'}, {}), "('/')", False, 'import bottle\n'), ((11, 1, 11, 21), 'bottle.get', 'bottle.get', ({(11, 12, 11, 20): '"""/domov"""'}, {}), "('/domov')", False, 'import bottle\n'), ((16, 1, 16, 36), 'bottle.get', 'bottle.get', ({(16, 12, 16, 35): '"""/parcela/<id_parcele>"""'}, {}), "('/parcela/<id_parcele>')", False, 'import bottle\n'), ((27, 1, 27, 47), 'bottle.get', 'bottle.get', ({(27, 12, 27, 46): '"""/naredi-rezervacijo/<id_parcele>"""'}, {}), "('/naredi-rezervacijo/<id_parcele>')", False, 'import bottle\n'), ((34, 1, 34, 35), 'bottle.post', 'bottle.post', ({(34, 13, 34, 34): '"""/naredi-rezervacijo"""'}, {}), "('/naredi-rezervacijo')", False, 'import bottle\n'), ((66, 1, 66, 44), 'bottle.get', 'bottle.get', ({(66, 12, 66, 43): '"""/dodaj-gosta/<id_rezervacije>"""'}, {}), "('/dodaj-gosta/<id_rezervacije>')", False, 'import bottle\n'), ((77, 1, 77, 43), 'bottle.post', 'bottle.post', ({(77, 13, 77, 42): '"""/dodaj-gosta-na-rezervacijo"""'}, {}), "('/dodaj-gosta-na-rezervacijo')", False, 'import bottle\n'), ((111, 1, 111, 42), 'bottle.get', 'bottle.get', ({(111, 12, 111, 41): '"""/predracun/<id_rezervacije>"""'}, {}), "('/predracun/<id_rezervacije>')", False, 'import bottle\n'), ((126, 1, 126, 41), 'bottle.get', 'bottle.get', ({(126, 12, 126, 40): '"""/zakljuci/<id_rezervacije>"""'}, {}), "('/zakljuci/<id_rezervacije>')", False, 'import bottle\n'), ((141, 1, 141, 18), 'bottle.error', 'bottle.error', ({(141, 14, 141, 17): '(404)'}, {}), '(404)', False, 'import bottle\n'), ((145, 1, 145, 18), 'bottle.error', 'bottle.error', ({(145, 14, 145, 17): '(500)'}, {}), '(500)', False, 'import bottle\n'), ((149, 0, 149, 37), 'bottle.run', 'bottle.run', (), '', False, 'import bottle\n'), ((9, 4, 9, 22), 'bottle.redirect', 'redirect', ({(9, 13, 9, 21): '"""/domov"""'}, {}), "('/domov')", False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((14, 11, 14, 68), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((24, 11, 24, 100), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((30, 12, 30, 27), 'datetime.date.today', 'dt.date.today', ({}, {}), '()', True, 'import datetime as dt\n'), ((32, 11, 32, 94), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((57, 18, 57, 48), 'controller.naredi_rezervacijo', 'naredi_rezervacijo', ({(57, 37, 57, 47): 'id_parcele'}, {}), '(id_parcele)', False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((58, 4, 63, 26), 'controller.dodaj_gosta_na_rezervacijo', 'dodaj_gosta_na_rezervacijo', ({(58, 31, 58, 57): 'rezervacija.id_rezervacije', (58, 59, 63, 5): "{'EMSO': emso, 'ime': ime, 'priimek': priimek, 'drzava': drzava}", (63, 7, 63, 15): 'datum_od', (63, 17, 63, 25): 'datum_do'}, {}), "(rezervacija.id_rezervacije, {'EMSO': emso, 'ime':\n ime, 'priimek': priimek, 'drzava': drzava}, datum_od, datum_do)", False, 'from controller import 
dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((64, 11, 64, 45), 'bottle.redirect', 'redirect', ({(64, 20, 64, 44): 'f"""/parcela/{id_parcele}"""'}, {}), "(f'/parcela/{id_parcele}')", False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((68, 12, 68, 27), 'datetime.date.today', 'dt.date.today', ({}, {}), '()', True, 'import datetime as dt\n'), ((71, 18, 71, 56), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', ({(71, 41, 71, 55): 'id_rezervacije'}, {}), '(id_rezervacije)', False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((75, 11, 75, 101), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((99, 18, 99, 56), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', ({(99, 41, 99, 55): 'id_rezervacije'}, {}), '(id_rezervacije)', False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((102, 4, 107, 24), 'controller.dodaj_gosta_na_rezervacijo', 'dodaj_gosta_na_rezervacijo', ({(102, 31, 102, 57): 'rezervacija.id_rezervacije', (102, 59, 107, 5): "{'EMSO': emso, 'ime': ime, 'priimek': priimek, 'drzava': drzava}", (107, 6, 107, 14): 'datum_od', (107, 15, 107, 23): 'datum_do'}, {}), "(rezervacija.id_rezervacije, {'EMSO': emso, 'ime':\n ime, 'priimek': priimek, 'drzava': drzava}, datum_od, datum_do)", False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((109, 11, 109, 57), 'bottle.redirect', 'redirect', ({(109, 20, 109, 56): 'f"""/parcela/{rezervacija.id_parcele}"""'}, {}), "(f'/parcela/{rezervacija.id_parcele}')", False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((113, 18, 113, 56), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', ({(113, 41, 113, 55): 'id_rezervacije'}, {}), '(id_rezervacije)', False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((116, 12, 116, 27), 'datetime.date.today', 'dt.date.today', ({}, {}), '()', True, 'import datetime as dt\n'), ((118, 25, 118, 58), 'controller.dobi_postavke_racuna', 'dobi_postavke_racuna', ({(118, 46, 118, 57): 'rezervacija'}, {}), '(rezervacija)', False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((128, 18, 128, 56), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', ({(128, 41, 128, 55): 'id_rezervacije'}, {}), '(id_rezervacije)', False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, 
dobi_postavke_racuna\n'), ((131, 12, 131, 27), 'datetime.date.today', 'dt.date.today', ({}, {}), '()', True, 'import datetime as dt\n'), ((143, 11, 143, 73), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((147, 11, 147, 73), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((13, 37, 13, 52), 'datetime.date.today', 'dt.date.today', ({}, {}), '()', True, 'import datetime as dt\n'), ((19, 48, 19, 63), 'datetime.date.today', 'dt.date.today', ({}, {}), '()', True, 'import datetime as dt\n'), ((31, 23, 31, 43), 'datetime.timedelta', 'dt.timedelta', (), '', True, 'import datetime as dt\n'), ((69, 23, 69, 43), 'datetime.timedelta', 'dt.timedelta', (), '', True, 'import datetime as dt\n'), ((73, 15, 73, 86), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((101, 15, 101, 86), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((115, 15, 115, 86), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((130, 15, 130, 86), 'bottle.template', 'template', (), '', False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((133, 66, 133, 81), 'datetime.date.today', 'dt.date.today', ({}, {}), '()', True, 'import datetime as dt\n'), ((55, 15, 55, 46), 'bottle.redirect', 'redirect', ({(55, 24, 55, 45): '"""/naredi-rezervacijo"""'}, {}), "('/naredi-rezervacijo')", False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((96, 15, 96, 39), 'bottle.redirect', 'redirect', ({(96, 24, 96, 38): '"""/dodaj-gosta"""'}, {}), "('/dodaj-gosta')", False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((50, 19, 50, 48), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', ({(50, 45, 50, 47): 'od'}, {}), '(od)', True, 'import datetime as dt\n'), ((51, 19, 51, 48), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', ({(51, 45, 51, 47): 'do'}, {}), '(do)', True, 'import datetime as dt\n'), ((91, 19, 91, 48), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', ({(91, 45, 91, 47): 'od'}, {}), '(od)', True, 'import datetime as dt\n'), ((92, 19, 92, 48), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', ({(92, 45, 92, 47): 'do'}, {}), '(do)', True, 'import datetime as dt\n')] |
magictron/espnet | espnet/nets/pytorch_backend/transducer/initializer.py | 075cee8d586957241be3e54c47846fbb12a32310 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Parameter initialization for transducer RNN/Transformer parts."""
import six
from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters
from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one
from espnet.nets.pytorch_backend.transformer.initializer import initialize
def initializer(model, args):
"""Initialize transducer model.
Args:
model (torch.nn.Module): transducer instance
args (Namespace): argument Namespace containing options
"""
if args.dtype != "transformer":
if args.etype == "transformer":
initialize(model.encoder, args.transformer_init)
lecun_normal_init_parameters(model.dec)
else:
lecun_normal_init_parameters(model)
model.dec.embed.weight.data.normal_(0, 1)
for l in six.moves.range(len(model.dec.decoder)):
set_forget_bias_to_one(model.dec.decoder[l].bias_ih)
else:
if args.etype == "transformer":
initialize(model, args.transformer_init)
else:
lecun_normal_init_parameters(model.encoder)
initialize(model.decoder, args.transformer_init)
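# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes a transducer model exposing the `encoder` and `dec`/`decoder`
# attributes used above and an argparse Namespace carrying the fields the
# function reads; the concrete option values below are assumptions.
#
#   from argparse import Namespace
#
#   args = Namespace(dtype="transformer", etype="transformer",
#                    transformer_init="pytorch")
#   initializer(model, args)  # `model` is an already-built transducer instance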
| [((24, 12, 24, 60), 'espnet.nets.pytorch_backend.transformer.initializer.initialize', 'initialize', ({(24, 23, 24, 36): 'model.encoder', (24, 38, 24, 59): 'args.transformer_init'}, {}), '(model.encoder, args.transformer_init)', False, 'from espnet.nets.pytorch_backend.transformer.initializer import initialize\n'), ((25, 12, 25, 51), 'espnet.nets.pytorch_backend.initialization.lecun_normal_init_parameters', 'lecun_normal_init_parameters', ({(25, 41, 25, 50): 'model.dec'}, {}), '(model.dec)', False, 'from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters\n'), ((27, 12, 27, 47), 'espnet.nets.pytorch_backend.initialization.lecun_normal_init_parameters', 'lecun_normal_init_parameters', ({(27, 41, 27, 46): 'model'}, {}), '(model)', False, 'from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters\n'), ((32, 12, 32, 64), 'espnet.nets.pytorch_backend.initialization.set_forget_bias_to_one', 'set_forget_bias_to_one', ({(32, 35, 32, 63): 'model.dec.decoder[l].bias_ih'}, {}), '(model.dec.decoder[l].bias_ih)', False, 'from espnet.nets.pytorch_backend.initialization import set_forget_bias_to_one\n'), ((35, 12, 35, 52), 'espnet.nets.pytorch_backend.transformer.initializer.initialize', 'initialize', ({(35, 23, 35, 28): 'model', (35, 30, 35, 51): 'args.transformer_init'}, {}), '(model, args.transformer_init)', False, 'from espnet.nets.pytorch_backend.transformer.initializer import initialize\n'), ((37, 12, 37, 55), 'espnet.nets.pytorch_backend.initialization.lecun_normal_init_parameters', 'lecun_normal_init_parameters', ({(37, 41, 37, 54): 'model.encoder'}, {}), '(model.encoder)', False, 'from espnet.nets.pytorch_backend.initialization import lecun_normal_init_parameters\n'), ((38, 12, 38, 60), 'espnet.nets.pytorch_backend.transformer.initializer.initialize', 'initialize', ({(38, 23, 38, 36): 'model.decoder', (38, 38, 38, 59): 'args.transformer_init'}, {}), '(model.decoder, args.transformer_init)', False, 'from espnet.nets.pytorch_backend.transformer.initializer import initialize\n')] |
adelmassimo/EM-Algorithm-for-MMPP | evaluate.py | 23ae031076a464bfba5286cf6b5a1fa5e1cc66b1 | import model
import numpy as np
import datasetReader as df
import main
# Number of traces loaded T
T = 1
# Generate traces
traces_factory = df.DatasetFactory()
traces_factory.createDataset(T)
traces = traces_factory.traces
P0 = np.matrix("[ .02 0;"
"0 0 0.5;"
"0 0 0]")
P1 = np.matrix("[0.1 0 0;"
"0 0.5 0;"
"0 0 0.9]")
M = np.matrix("[0.25 0 0;"
"0 0.23 0;"
"0 0 0.85]")
def backward_likelihood(i, trace):
N = model.N
M = len( trace )
likelihoods = np.ones((N, 1))
if i < M:
P = main.randomization(P0, model.uniformization_rate, trace[i][0])
# P = stored_p_values[i, :, :]
likelihoods = np.multiply(
P.dot( model.P1 ).dot( backward_likelihood(i+1, trace) ),
model.M[:, trace[i][1]] )
if likelihoods.sum() != 0:
likelihoods = likelihoods / likelihoods.sum()
return likelihoods | [((9, 17, 9, 36), 'datasetReader.DatasetFactory', 'df.DatasetFactory', ({}, {}), '()', True, 'import datasetReader as df\n'), ((13, 5, 15, 25), 'numpy.matrix', 'np.matrix', ({(13, 15, 15, 24): '"""[ .02 0;0 0 0.5;0 0 0]"""'}, {}), "('[ .02 0;0 0 0.5;0 0 0]')", True, 'import numpy as np\n'), ((17, 5, 19, 27), 'numpy.matrix', 'np.matrix', ({(17, 15, 19, 26): '"""[0.1 0 0;0 0.5 0;0 0 0.9]"""'}, {}), "('[0.1 0 0;0 0.5 0;0 0 0.9]')", True, 'import numpy as np\n'), ((21, 4, 23, 28), 'numpy.matrix', 'np.matrix', ({(21, 14, 23, 27): '"""[0.25 0 0;0 0.23 0;0 0 0.85]"""'}, {}), "('[0.25 0 0;0 0.23 0;0 0 0.85]')", True, 'import numpy as np\n'), ((30, 18, 30, 33), 'numpy.ones', 'np.ones', ({(30, 26, 30, 32): '(N, 1)'}, {}), '((N, 1))', True, 'import numpy as np\n'), ((33, 12, 33, 74), 'main.randomization', 'main.randomization', ({(33, 31, 33, 33): 'P0', (33, 35, 33, 60): 'model.uniformization_rate', (33, 62, 33, 73): 'trace[i][0]'}, {}), '(P0, model.uniformization_rate, trace[i][0])', False, 'import main\n')] |
forestGzh/VTK | Imaging/Core/Testing/Python/TestHSVToRGB.py | bc98327275bd5cfa95c5825f80a2755a458b6da8 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Use the painter to draw using colors.
# This is not a pipeline object. It will support pipeline objects.
# Please do not use this object directly.
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetNumberOfScalarComponents(3)
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,320,0,320,0,0)
imageCanvas.SetDrawColor(0,0,0)
imageCanvas.FillBox(0,511,0,511)
# r, g, b
imageCanvas.SetDrawColor(255,0,0)
imageCanvas.FillBox(0,50,0,100)
imageCanvas.SetDrawColor(128,128,0)
imageCanvas.FillBox(50,100,0,100)
imageCanvas.SetDrawColor(0,255,0)
imageCanvas.FillBox(100,150,0,100)
imageCanvas.SetDrawColor(0,128,128)
imageCanvas.FillBox(150,200,0,100)
imageCanvas.SetDrawColor(0,0,255)
imageCanvas.FillBox(200,250,0,100)
imageCanvas.SetDrawColor(128,0,128)
imageCanvas.FillBox(250,300,0,100)
# intensity scale
imageCanvas.SetDrawColor(5,5,5)
imageCanvas.FillBox(0,50,110,210)
imageCanvas.SetDrawColor(55,55,55)
imageCanvas.FillBox(50,100,110,210)
imageCanvas.SetDrawColor(105,105,105)
imageCanvas.FillBox(100,150,110,210)
imageCanvas.SetDrawColor(155,155,155)
imageCanvas.FillBox(150,200,110,210)
imageCanvas.SetDrawColor(205,205,205)
imageCanvas.FillBox(200,250,110,210)
imageCanvas.SetDrawColor(255,255,255)
imageCanvas.FillBox(250,300,110,210)
# saturation scale
imageCanvas.SetDrawColor(245,0,0)
imageCanvas.FillBox(0,50,220,320)
imageCanvas.SetDrawColor(213,16,16)
imageCanvas.FillBox(50,100,220,320)
imageCanvas.SetDrawColor(181,32,32)
imageCanvas.FillBox(100,150,220,320)
imageCanvas.SetDrawColor(149,48,48)
imageCanvas.FillBox(150,200,220,320)
imageCanvas.SetDrawColor(117,64,64)
imageCanvas.FillBox(200,250,220,320)
imageCanvas.SetDrawColor(85,80,80)
imageCanvas.FillBox(250,300,220,320)
convert = vtk.vtkImageRGBToHSV()
convert.SetInputConnection(imageCanvas.GetOutputPort())
convertBack = vtk.vtkImageHSVToRGB()
convertBack.SetInputConnection(convert.GetOutputPort())
cast = vtk.vtkImageCast()
cast.SetInputConnection(convertBack.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
cast.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(convertBack.GetOutputPort())
#viewer SetInputConnection [imageCanvas GetOutputPort]
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
viewer.SetSize(320,320)
viewer.Render()
# --- end of script --
| [((4, 16, 4, 32), 'vtk.util.misc.vtkGetDataRoot', 'vtkGetDataRoot', ({}, {}), '()', False, 'from vtk.util.misc import vtkGetDataRoot\n'), ((9, 14, 9, 42), 'vtk.vtkImageCanvasSource2D', 'vtk.vtkImageCanvasSource2D', ({}, {}), '()', False, 'import vtk\n'), ((54, 10, 54, 32), 'vtk.vtkImageRGBToHSV', 'vtk.vtkImageRGBToHSV', ({}, {}), '()', False, 'import vtk\n'), ((56, 14, 56, 36), 'vtk.vtkImageHSVToRGB', 'vtk.vtkImageHSVToRGB', ({}, {}), '()', False, 'import vtk\n'), ((58, 7, 58, 25), 'vtk.vtkImageCast', 'vtk.vtkImageCast', ({}, {}), '()', False, 'import vtk\n'), ((62, 9, 62, 29), 'vtk.vtkImageViewer', 'vtk.vtkImageViewer', ({}, {}), '()', False, 'import vtk\n')] |
barizraihan/belajarpython | kelas_2b/echa.py | 57df4c939600dd34a519599d6c78178bfb55063b | import csv
class echa:
def werehousing(self):
with open('kelas_2b/echa.csv', 'r') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',')
for row in csv_reader:
print("menampilkan data barang:", row[0], row[1], row[2], row[3], row[4])
| [((6, 25, 6, 59), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n')] |
dyollb/MONAI | tests/test_handler_surface_distance.py | 9084c452c48095c82c71d4391b3684006e5a3c56 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance
def create_spherical_seg_3d(
radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
"""
Return a 3D image with a sphere inside. Voxel values will be
1 inside the sphere, and 0 elsewhere.
Args:
radius: radius of sphere (in terms of number of voxels, can be partial)
centre: location of sphere centre.
im_shape: shape of image to create
See also:
:py:meth:`~create_test_image_3d`
"""
# Create image
image = np.zeros(im_shape, dtype=np.int32)
spy, spx, spz = np.ogrid[
-centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
]
circle = (spx * spx + spy * spy + spz * spz) <= radius * radius
image[circle] = 1
image[~circle] = 0
return image
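# Illustrative check (added; not part of the original test): the helper returns a
# binary int32 volume, so a quick sanity check could look like this (sizes are
# arbitrary example values).
#
#   seg = create_spherical_seg_3d(radius=10, centre=(20, 20, 20), im_shape=(40, 40, 40))
#   assert seg.shape == (40, 40, 40) and set(np.unique(seg)) <= {0, 1}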
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)
TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
class TestHandlerSurfaceDistance(unittest.TestCase):
# TODO test multi node Surface Distance
def test_compute(self):
sur_metric = SurfaceDistance(include_background=True)
def _val_func(engine, batch):
pass
engine = Engine(_val_func)
sur_metric.attach(engine, "surface_distance")
y_pred, y = TEST_SAMPLE_1
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4)
y_pred, y = TEST_SAMPLE_2
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4)
y_pred, y = TEST_SAMPLE_3
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
y_pred, y = TEST_SAMPLE_4
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
def test_shape_mismatch(self):
sur_metric = SurfaceDistance(include_background=True)
with self.assertRaises((AssertionError, ValueError)):
y_pred = TEST_SAMPLE_1[0]
y = torch.ones((1, 1, 10, 10, 10))
sur_metric.update([y_pred, y])
if __name__ == "__main__":
unittest.main()
| [((52, 23, 52, 55), 'torch.zeros_like', 'torch.zeros_like', ({(52, 40, 52, 54): 'sampler_sphere'}, {}), '(sampler_sphere)', False, 'import torch\n'), ((38, 12, 38, 46), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((94, 4, 94, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((64, 21, 64, 61), 'monai.handlers.SurfaceDistance', 'SurfaceDistance', (), '', False, 'from monai.handlers import SurfaceDistance\n'), ((69, 17, 69, 34), 'ignite.engine.Engine', 'Engine', ({(69, 24, 69, 33): '_val_func'}, {}), '(_val_func)', False, 'from ignite.engine import Engine\n'), ((86, 21, 86, 61), 'monai.handlers.SurfaceDistance', 'SurfaceDistance', (), '', False, 'from monai.handlers import SurfaceDistance\n'), ((89, 16, 89, 46), 'torch.ones', 'torch.ones', ({(89, 27, 89, 45): '(1, 1, 10, 10, 10)'}, {}), '((1, 1, 10, 10, 10))', False, 'import torch\n')] |
rom1mouret/anoflows | benchmarks/eval.py | 42381c06b8897e4510e73cda87ea97ea3f4a5579 | #!/usr/bin/env python3
import sys
import logging
import yaml
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.impute import SimpleImputer
from anoflows.hpo import find_best_flows
from data_loading import load_data
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) == 1:
logging.error("YAML data specification missing from the command line arguments")
exit(1)
spec_file = sys.argv[1]
df, spec = load_data(spec_file)
max_rows = min(len(df), spec.get("max_rows", 40000))
novelty_detection = spec.get("novelty", True)
normal_classes = spec["normal_classes"]
precision = defaultdict(list)
for rounds in range(spec.get("rounds", 1)):
# random sampling
df = df.sample(n=max_rows, replace=False)
label_col = spec["label_column"]
y = df[label_col].values
other = df.drop(label_col, inplace=False, axis=1)
X = other.values
# imputing
X = SimpleImputer(copy=False).fit_transform(X)
# train/test split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, shuffle=False, test_size=0.5)
if novelty_detection:
keep = np.where(np.isin(y_train, normal_classes))[0]
X_train = X_train[keep, :]
y_train = y_train[keep]
# training
#flows, loss = find_best_flows(X_train, device='cpu', n_trials=1)
from anoflows.anoflow_bagging import AnoFlowBagging
flows = AnoFlowBagging()
flows.fit(X_train)
iforest = IsolationForest().fit(X_train)
# prediction
pred = {
"anoflows": flows.likelihood(X_test),
"iforest": iforest.decision_function(X_test)
}
# evaluation
y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0]
ref = np.zeros(len(y_test))
ref[y_true] = 1
k = len(y_true)
for name, y_pred in pred.items():
anomaly_indices = y_pred.argsort()[:k]
prec = ref[anomaly_indices].sum() / k
logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test)))
precision[name].append(prec)
logging.info("* SUMMARY %s", spec_file)
for name, prec in precision.items():
prec = 100 * np.array(prec)
mean = np.mean(prec)
std = np.std(prec)
logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
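# Hedged invocation example (added for illustration): the script takes a single
# YAML data-specification path on the command line; the file name below is an
# assumption.
#
#   python eval.py specs/shuttle.yaml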
| [((25, 11, 25, 31), 'data_loading.load_data', 'load_data', ({(25, 21, 25, 30): 'spec_file'}, {}), '(spec_file)', False, 'from data_loading import load_data\n'), ((30, 12, 30, 29), 'collections.defaultdict', 'defaultdict', ({(30, 24, 30, 28): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((75, 0, 75, 39), 'logging.info', 'logging.info', ({(75, 13, 75, 27): '"""* SUMMARY %s"""', (75, 29, 75, 38): 'spec_file'}, {}), "('* SUMMARY %s', spec_file)", False, 'import logging\n'), ((21, 4, 21, 84), 'logging.error', 'logging.error', ({(21, 18, 21, 83): '"""YAML data specification missing from the command line arguments"""'}, {}), "('YAML data specification missing from the command line arguments'\n )", False, 'import logging\n'), ((45, 8, 45, 60), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((54, 12, 54, 28), 'anoflows.anoflow_bagging.AnoFlowBagging', 'AnoFlowBagging', ({}, {}), '()', False, 'from anoflows.anoflow_bagging import AnoFlowBagging\n'), ((78, 11, 78, 24), 'numpy.mean', 'np.mean', ({(78, 19, 78, 23): 'prec'}, {}), '(prec)', True, 'import numpy as np\n'), ((79, 10, 79, 22), 'numpy.std', 'np.std', ({(79, 17, 79, 21): 'prec'}, {}), '(prec)', True, 'import numpy as np\n'), ((80, 4, 80, 66), 'logging.info', 'logging.info', ({(80, 17, 80, 65): "('%s; mean=%.1f%% std=%.1f%%' % (name, mean, std))"}, {}), "('%s; mean=%.1f%% std=%.1f%%' % (name, mean, std))", False, 'import logging\n'), ((18, 0, 18, 19), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((77, 17, 77, 31), 'numpy.array', 'np.array', ({(77, 26, 77, 30): 'prec'}, {}), '(prec)', True, 'import numpy as np\n'), ((41, 8, 41, 33), 'sklearn.impute.SimpleImputer', 'SimpleImputer', (), '', False, 'from sklearn.impute import SimpleImputer\n'), ((56, 14, 56, 31), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ({}, {}), '()', False, 'from sklearn.ensemble import IsolationForest\n'), ((65, 22, 65, 62), 'numpy.isin', 'np.isin', ({(65, 30, 65, 36): 'y_test', (65, 38, 65, 61): "spec['anomaly_classes']"}, {}), "(y_test, spec['anomaly_classes'])", True, 'import numpy as np\n'), ((47, 24, 47, 56), 'numpy.isin', 'np.isin', ({(47, 32, 47, 39): 'y_train', (47, 41, 47, 55): 'normal_classes'}, {}), '(y_train, normal_classes)', True, 'import numpy as np\n')] |
jamescurtin/pydantic | pydantic/version.py | 4f8f9396906a094626b770fb7cc8eecf03770ffe | __all__ = ['VERSION', 'version_info']
VERSION = '1.4a1'
def version_info() -> str:
import platform
import sys
from importlib import import_module
from pathlib import Path
from .main import compiled
optional_deps = []
for p in ('typing-extensions', 'email-validator', 'devtools'):
try:
import_module(p.replace('-', '_'))
except ImportError:
continue
optional_deps.append(p)
info = {
'pydantic version': VERSION,
'pydantic compiled': compiled,
'install path': Path(__file__).resolve().parent,
'python version': sys.version,
'platform': platform.platform(),
'optional deps. installed': optional_deps,
}
return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items())
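# Illustrative output sketch (added; not part of the original file): version_info()
# renders one right-aligned "key: value" line per entry, e.g. a line such as
#   "             pydantic version: 1.4a1"
# followed by the compile flag, install path, Python/platform details and the
# detected optional dependencies.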
| [((27, 20, 27, 39), 'platform.platform', 'platform.platform', ({}, {}), '()', False, 'import platform\n'), ((25, 24, 25, 38), 'pathlib.Path', 'Path', ({(25, 29, 25, 37): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n')] |
siq/spire | spire/core/registry.py | 6365590277e9a6bfb6e4e0df5b2b47dba0f71711 | from scheme import Structure
__all__ = ('Configurable', 'Registry')
class Configurable(object):
"""A sentry class which indicates that subclasses can establish a configuration chain."""
class Registry(object):
"""The unit registry."""
dependencies = {}
schemas = {}
units = {}
@classmethod
def is_configurable(cls, obj):
return (obj is not Configurable and issubclass(obj, Configurable) and
Configurable not in obj.__bases__)
@classmethod
def purge(cls):
cls.schemas = {}
cls.units = {}
@classmethod
def register_dependency(cls, dependency):
token = dependency.token
if not token:
return
if token not in cls.dependencies:
cls.dependencies[token] = type(dependency)
if not dependency.configurable:
return
configuration = dependency.unit.configuration
if token in cls.schemas:
structure = cls.schemas[token]
if configuration.required and not dependency.optional and not structure.required:
structure.required = True
else:
schema = dependency.construct_schema(generic=True, name=token)
if dependency.optional:
schema = schema.clone(required=False)
cls.schemas[token] = schema
@classmethod
def register_unit(cls, unit):
cls.units[unit.identity] = unit
if cls.is_configurable(unit):
queue = [(unit, [unit.identity], None)]
while queue:
subject, tokens, dependency = queue.pop(0)
if subject.configuration:
token = '/'.join(tokens)
if dependency:
structure = dependency.construct_schema(name=token)
if dependency.token and structure.required:
structure = structure.clone(required=False)
else:
structure = subject.configuration.schema.clone(required=False,
name=token)
cls.schemas[token] = structure
for attr, subdependency in subject.dependencies.iteritems():
queue.append((subdependency.unit, tokens + [attr], subdependency))
| [] |
berrange/oslo.devsupport | oslo_devsupport/model/__init__.py | 463c5842e95c5f8a7009ab1041f290e3a1050a06 |
from .command import *
from .database import *
from .entrypoint import *
from .group import *
from .http import *
from .messaging import *
from .method import *
from .operation import *
from .stack import *
from .threads import *
| [] |
nng555/fairseq | scripts/extract.py | c9730a125825a85f33042e1b9fd1959b8ca829e5 | #!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
from sacrebleu import extract_ngrams
def get_phrase(words, index, length):
assert index < len(words) - length + 1
phr = " ".join(words[index : index + length])
for i in range(index, index + length):
words.pop(index)
return phr
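# Illustrative example (added for clarity; not in the original script): note that
# get_phrase removes the extracted tokens from `words` in place.
#
#   words = ["a", "b", "c", "d"]
#   get_phrase(words, 1, 2)   # returns "b c"; words is now ["a", "d"]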
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if "\t" in line:
source, target = line.split("\t")
if args.add_sos:
target = f"<s> {target}"
if args.add_eos:
target = f"{target} </s>"
if len(target.split()) >= args.len:
words = [target]
num = args.number
choices = {}
for i in range(num):
if len(words) == 0:
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = " ".join(
tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
)
for j in range(
phrase_index, min(len(tokens), phrase_index + args.len)
):
tokens.pop(phrase_index)
if phrase_index > 0:
words.append(" ".join(tokens[0:phrase_index]))
if phrase_index + 1 < len(tokens):
words.append(" ".join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
# mask out with spaces
target = target.replace(choice, " " * len(choice), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
print(source, *constraints, sep="\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
    main(args)
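# Hedged CLI sketch (added; not part of the original script): the script reads
# "target" or "source<TAB>target" lines from stdin and writes
# "source<TAB>constraint..." lines to stdout; the file names are assumptions.
#
#   cat train.de-en.tsv | python extract.py -n 2 -l 3 --seed 1 > constraints.tsv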
| [((80, 13, 80, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((28, 8, 28, 30), 'random.seed', 'random.seed', ({(28, 20, 28, 29): 'args.seed'}, {}), '(args.seed)', False, 'import random\n')] |
gouchi/appimage-builder | AppImageBuilder/commands/file.py | 40e9851c573179e066af116fb906e9cad8099b59 | # Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
from .command import Command
class FileError(RuntimeError):
pass
class File(Command):
def __init__(self):
super().__init__('file')
self.log_stdout = False
self.log_command = False
def query(self, path):
self._run(['file', '-b', '--exclude', 'ascii', path])
if self.return_code != 0:
raise FileError('\n'.join(self.stderr))
return '\n'.join(self.stdout)
def is_executable_elf(self, path):
output = self.query(path)
result = ('ELF' in output) and ('executable' in output)
return result
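# Hedged usage sketch (added for illustration; not part of the original module).
# The path below is only an example.
#
#   f = File()
#   if f.is_executable_elf("/usr/bin/env"):
#       print("ELF executable")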
| [] |
dainst/chronoi-corpus-processing | text_selection/analyse_zenon_scrape.py | 7f508a7572e1022c4c88d1477db029e6619a1f0c | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import furl
import json
import re
import sys
from collections import defaultdict
def filter_records_without_url(records: []) -> []:
return [r for r in records if any(r.get("urls"))]
def build_furl(url: str) -> furl.furl:
try:
furl_obj = furl.furl(url)
if not furl_obj.host:
furl_obj = furl.furl("http://" + url)
return furl_obj
except ValueError:
return furl.furl("https://invalid-url.xyz")
def determine_host(url: str) -> str:
furl_obj = build_furl(url)
return re.sub(r"^www[0-9]*\.", "", furl_obj.host)
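# Illustrative behaviour (added; not in the original script): "www"/"www2"-style
# prefixes are stripped, and bare hosts are given a scheme first by build_furl.
#
#   determine_host("https://www2.example.org/item/1")  # -> "example.org"
#   determine_host("example.org/item/1")               # -> "example.org"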
def build_hosts_to_urls(records: []) -> {str: {str}}:
result = defaultdict(set)
for record in records:
for url in record.get("urls"):
host = determine_host(url.get("url"))
result[host].add(url.get("url"))
return result
def print_most_common_url_hosts(hosts_to_urls: {}, n: int):
hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n]
hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h]))
for host in hosts:
print("% 6d\t%s" % (len(hosts_to_urls[host]), host))
def print_urls_for_host(hosts_to_urls: {}, host: str):
urls = hosts_to_urls.get(host, [])
for url in urls:
print(url)
if not any(urls):
print(f"No urls for host: '{host}'", file=sys.stderr)
def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str):
# It should be ok, to only pattern match the hosts here...
ids1 = {r.get("id") for r in records if record_has_matching_url(r, pattern1)}
ids2 = {r.get("id") for r in records if record_has_matching_url(r, pattern2)}
ids_both = ids1.intersection(ids2)
for host, number in {pattern1: len(ids1), pattern2: len(ids2), "both": len(ids_both)}.items():
print(f"{host}: {number}")
def record_has_matching_url(record: {}, pattern: str) -> bool:
return any(record_get_urls_matching(record, pattern))
def record_get_urls_matching(record: {}, pattern: str) -> [{}]:
result = []
for url in record.get("urls"):
if any(re.findall(pattern, url.get("url"))):
result.append(url)
return result
def record_remove_urls_not_matching(record: {}, pattern: str):
record["urls"] = record_get_urls_matching(record, pattern)
def earliest_year(year_strings: [str]) -> str:
years = []
for year_s in year_strings:
try:
years.append(int(year_s))
except ValueError:
print(f"Not a string that is a year: '{year_s}'", file=sys.stderr)
continue
return str(sorted(years)[0]) if any(years) else ""
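# Illustrative behaviour (added; not in the original script): non-numeric entries
# are skipped with a warning and the earliest parsable year is returned as a string.
#
#   earliest_year(["1999", "n.d.", "1987"])  # -> "1987"
#   earliest_year(["n.d."])                  # -> ""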
def main(args: argparse.Namespace):
with open(args.scrape_file, "r") as file:
records = json.load(file)
records = filter_records_without_url(records)
# filter urls by the user-provided filter list
if args.desc_filters:
with open(args.desc_filters, "r") as file:
filters = file.read().splitlines()
for record in records:
record["urls"] = [url for url in record.get("urls") if url.get("desc") not in filters]
records = filter_records_without_url(records)
# print unique hosts or urls, then exit
if args.print_host_urls or args.print_common_hosts >= 0:
hosts_to_urls = build_hosts_to_urls(records)
if args.print_common_hosts >= 0:
print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts)
elif args.print_host_urls:
print_urls_for_host(hosts_to_urls, host=args.print_host_urls)
exit(0)
# check in how many records the two given hosts co-occur, then exit
if args.patterns_cooccur:
host1, host2 = args.patterns_cooccur.split(",")
print_how_often_url_patterns_cooccur(records, host1, host2)
exit(0)
# do some selection based on a url pattern, remove all non-matching urls from the record
if args.select_by_url:
pattern = args.select_by_url
records = [r for r in records if record_has_matching_url(r, pattern)]
for record in records:
record_remove_urls_not_matching(record, pattern)
# sort the records by id, to be extra sure, that we get the same order every time this is called
# print each line as a csv column
records = sorted(records, key=lambda r: r.get("id"))
writer = csv.writer(sys.stdout, delimiter=",", quoting=csv.QUOTE_ALL)
for record in records:
to_print = []
if args.print_id:
to_print.append(record.get("id", ""))
if args.print_url:
to_print.append(record.get("urls")[0].get("url") if any(record.get("urls")) else "")
if args.print_pub_date:
to_print.append(earliest_year(record.get("publicationDates", [])))
if args.print_languages:
to_print.append("|".join(record.get("languages", [])))
writer.writerow(to_print)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Process a file with zenon json records and print some information about them.")
parser.add_argument("scrape_file", type=str, help="The file that contains the zenon dumps as json.")
parser.add_argument("--desc-filters", type=str, help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.")
# these are arguments to print some specific information
parser.add_argument("--print-common-hosts", type=int, default=-1, help="Print hosts that appear more than n times in the records urls, then exit.")
parser.add_argument("--print-host-urls", type=str, help="Print all urls for the host, then exit.")
parser.add_argument("--patterns-cooccur", type=str, help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.")
# these are meant to work together select by a url pattern then print information about the records
parser.add_argument("--select-by-url", type=str, help="Give a pattern for a url to select records by.")
parser.add_argument("--print-url", action="store_true", help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)")
parser.add_argument("--print-pub-date", action="store_true", help="Print the earliest publication year for each of the selected records.")
parser.add_argument("--print-id", action="store_true", help="Print the selected records' ids")
parser.add_argument("--print-languages", action="store_true", help="Print the selected records' languages")
main(parser.parse_args())
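# Hedged CLI sketch (added for illustration): typical invocations given the
# arguments defined above; the file name and URL pattern are assumptions.
#
#   ./analyse_zenon_scrape.py scrape.json --print-common-hosts 50
#   ./analyse_zenon_scrape.py scrape.json --select-by-url "jstor" --print-id --print-url --print-pub-date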
| [((30, 11, 30, 53), 're.sub', 're.sub', ({(30, 18, 30, 33): '"""^www[0-9]*\\\\."""', (30, 35, 30, 37): '""""""', (30, 39, 30, 52): 'furl_obj.host'}, {}), "('^www[0-9]*\\\\.', '', furl_obj.host)", False, 'import re\n'), ((34, 13, 34, 29), 'collections.defaultdict', 'defaultdict', ({(34, 25, 34, 28): 'set'}, {}), '(set)', False, 'from collections import defaultdict\n'), ((132, 13, 132, 73), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n'), ((146, 13, 147, 100), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((20, 19, 20, 33), 'furl.furl', 'furl.furl', ({(20, 29, 20, 32): 'url'}, {}), '(url)', False, 'import furl\n'), ((95, 18, 95, 33), 'json.load', 'json.load', ({(95, 28, 95, 32): 'file'}, {}), '(file)', False, 'import json\n'), ((22, 23, 22, 49), 'furl.furl', 'furl.furl', ({(22, 33, 22, 48): "'http://' + url"}, {}), "('http://' + url)", False, 'import furl\n'), ((25, 15, 25, 51), 'furl.furl', 'furl.furl', ({(25, 25, 25, 50): '"""https://invalid-url.xyz"""'}, {}), "('https://invalid-url.xyz')", False, 'import furl\n')] |
wfarner/commons | src/python/twitter/pants/targets/java_antlr_library.py | 42988a7a49f012665174538cca53604c7846ee86 | # ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'Brian Larson'
from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary
class JavaAntlrLibrary(ExportableJvmLibrary):
"""Defines a target that builds java stubs from an Antlr grammar file."""
def __init__(self,
name,
sources,
provides = None,
dependencies = None,
excludes = None,
compiler = 'antlr3'):
"""name: The name of this module target, addressable via pants via the portion of the spec
following the colon
sources: A list of paths containing the Antlr source files this module's jar is compiled from
provides: An optional Dependency object indicating the The ivy artifact to export
dependencies: An optional list of Dependency objects specifying the binary (jar) dependencies of
this module.
excludes: An optional list of dependency exclude patterns to filter all of this module's
transitive dependencies against.
compiler: The name of the compiler used to compile the ANTLR files.
Currently only supports 'antlr3' and 'antlr4'"""
ExportableJvmLibrary.__init__(self,
name,
sources,
provides,
dependencies,
excludes)
self.add_labels('codegen')
if compiler not in ['antlr3', 'antlr4']:
raise ValueError("Illegal value for 'compiler': {}".format(compiler))
self.compiler = compiler
def _as_jar_dependency(self):
return ExportableJvmLibrary._as_jar_dependency(self).with_sources()
| [((44, 4, 49, 43), 'twitter.pants.targets.exportable_jvm_library.ExportableJvmLibrary.__init__', 'ExportableJvmLibrary.__init__', ({(44, 34, 44, 38): 'self', (45, 34, 45, 38): 'name', (46, 34, 46, 41): 'sources', (47, 34, 47, 42): 'provides', (48, 34, 48, 46): 'dependencies', (49, 34, 49, 42): 'excludes'}, {}), '(self, name, sources, provides, dependencies,\n excludes)', False, 'from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary\n'), ((57, 11, 57, 56), 'twitter.pants.targets.exportable_jvm_library.ExportableJvmLibrary._as_jar_dependency', 'ExportableJvmLibrary._as_jar_dependency', ({(57, 51, 57, 55): 'self'}, {}), '(self)', False, 'from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary\n')] |
devs-cloud/python_ml | bigml/tests/create_pca_steps_bck.py | 05d90f5ce1862a5d2d8ff99d2e46446dc1d5af3c | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from world import world
from nose.tools import eq_, assert_less
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from read_pca_steps import i_get_the_pca
#@step(r'the pca name is "(.*)"')
def i_check_pca_name(step, name):
pca_name = world.pca['name']
eq_(name, pca_name)
#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_from_dataset(step):
dataset = world.dataset.get('resource')
resource = world.api.create_pca(dataset, {'name': 'new PCA'})
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.pca = resource['object']
world.pcas.append(resource['resource'])
#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_with_params(step, params):
params = json.loads(params)
dataset = world.dataset.get('resource')
resource = world.api.create_pca(dataset, params)
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.pca = resource['object']
world.pcas.append(resource['resource'])
def i_create_a_pca(step):
i_create_a_pca_from_dataset(step)
#@step(r'I update the PCA name to "(.*)"$')
def i_update_pca_name(step, name):
resource = world.api.update_pca(world.pca['resource'],
{'name': name})
world.status = resource['code']
eq_(world.status, HTTP_ACCEPTED)
world.location = resource['location']
world.pca = resource['object']
#@step(r'I wait until the PCA status code is either (\d) or (-\d) less than (\d+)')
def wait_until_pca_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
delta = int(secs) * world.delta
pca_id = world.pca['resource']
i_get_the_pca(step, pca_id)
status = get_status(world.pca)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert_less(datetime.utcnow() - start, timedelta(seconds=delta))
i_get_the_pca(step, pca_id)
status = get_status(world.pca)
eq_(status['code'], int(code1))
#@step(r'I wait until the PCA is ready less than (\d+)')
def the_pca_is_finished_in_less_than(step, secs):
wait_until_pca_status_code_is(step, FINISHED, FAULTY, secs)
| [((37, 4, 37, 23), 'nose.tools.eq_', 'eq_', ({(37, 8, 37, 12): 'name', (37, 14, 37, 22): 'pca_name'}, {}), '(name, pca_name)', False, 'from nose.tools import eq_, assert_less\n'), ((41, 14, 41, 43), 'world.world.dataset.get', 'world.dataset.get', ({(41, 32, 41, 42): '"""resource"""'}, {}), "('resource')", False, 'from world import world\n'), ((42, 15, 42, 65), 'world.world.api.create_pca', 'world.api.create_pca', ({(42, 36, 42, 43): 'dataset', (42, 45, 42, 64): "{'name': 'new PCA'}"}, {}), "(dataset, {'name': 'new PCA'})", False, 'from world import world\n'), ((44, 4, 44, 35), 'nose.tools.eq_', 'eq_', ({(44, 8, 44, 20): 'world.status', (44, 22, 44, 34): 'HTTP_CREATED'}, {}), '(world.status, HTTP_CREATED)', False, 'from nose.tools import eq_, assert_less\n'), ((47, 4, 47, 43), 'world.world.pcas.append', 'world.pcas.append', ({(47, 22, 47, 42): "resource['resource']"}, {}), "(resource['resource'])", False, 'from world import world\n'), ((52, 13, 52, 31), 'json.loads', 'json.loads', ({(52, 24, 52, 30): 'params'}, {}), '(params)', False, 'import json\n'), ((53, 14, 53, 43), 'world.world.dataset.get', 'world.dataset.get', ({(53, 32, 53, 42): '"""resource"""'}, {}), "('resource')", False, 'from world import world\n'), ((54, 15, 54, 52), 'world.world.api.create_pca', 'world.api.create_pca', ({(54, 36, 54, 43): 'dataset', (54, 45, 54, 51): 'params'}, {}), '(dataset, params)', False, 'from world import world\n'), ((56, 4, 56, 35), 'nose.tools.eq_', 'eq_', ({(56, 8, 56, 20): 'world.status', (56, 22, 56, 34): 'HTTP_CREATED'}, {}), '(world.status, HTTP_CREATED)', False, 'from nose.tools import eq_, assert_less\n'), ((59, 4, 59, 43), 'world.world.pcas.append', 'world.pcas.append', ({(59, 22, 59, 42): "resource['resource']"}, {}), "(resource['resource'])", False, 'from world import world\n'), ((67, 15, 68, 51), 'world.world.api.update_pca', 'world.api.update_pca', ({(67, 36, 67, 57): "world.pca['resource']", (68, 36, 68, 50): "{'name': name}"}, {}), "(world.pca['resource'], {'name': name})", False, 'from world import world\n'), ((70, 4, 70, 36), 'nose.tools.eq_', 'eq_', ({(70, 8, 70, 20): 'world.status', (70, 22, 70, 35): 'HTTP_ACCEPTED'}, {}), '(world.status, HTTP_ACCEPTED)', False, 'from nose.tools import eq_, assert_less\n'), ((77, 12, 77, 29), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((80, 4, 80, 31), 'read_pca_steps.i_get_the_pca', 'i_get_the_pca', ({(80, 18, 80, 22): 'step', (80, 24, 80, 30): 'pca_id'}, {}), '(step, pca_id)', False, 'from read_pca_steps import i_get_the_pca\n'), ((81, 13, 81, 34), 'bigml.api.get_status', 'get_status', ({(81, 24, 81, 33): 'world.pca'}, {}), '(world.pca)', False, 'from bigml.api import get_status\n'), ((84, 11, 84, 24), 'time.sleep', 'time.sleep', ({(84, 22, 84, 23): '(3)'}, {}), '(3)', False, 'import time\n'), ((86, 11, 86, 38), 'read_pca_steps.i_get_the_pca', 'i_get_the_pca', ({(86, 25, 86, 29): 'step', (86, 31, 86, 37): 'pca_id'}, {}), '(step, pca_id)', False, 'from read_pca_steps import i_get_the_pca\n'), ((87, 20, 87, 41), 'bigml.api.get_status', 'get_status', ({(87, 31, 87, 40): 'world.pca'}, {}), '(world.pca)', False, 'from bigml.api import get_status\n'), ((85, 50, 85, 74), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((85, 23, 85, 40), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n')] |
Pasmikh/quiz_please_bot | config.py | 2b619b359d8021be57b404525013c53403d6cde1 | days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday']
operation = ''
options = ['Info', 'Check-in/Out', 'Edit games', 'Back']
admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname']
avail_days = []
TOKEN = 'bot_token'
group_id = id_of_group_chat | [] |
ostin-r/automate-boring-stuff-solutions | Chapter 8/sandwich-maker.py | 78f0a2981e6520ff2907285e666168a0f35eba02 | '''
Austin Richards 2/20/21
sandwich-maker.py uses pyinputplus to validate user input for sandwich preferences
'''
import pyinputplus as ip
def get_cost(food_name):
'''gets the cost of items in sandwich_builder'''
food_dict = {
'sourdough':1.75,
'rye':2.0,
'wheat':1.50,
'white':1.25,
'chicken':2.0,
'turkey':1.50,
'ham':2.0,
'tofu':1.25,
'cheddar':2.0,
'swiss':2.5,
'mozzarella':2.5,
'yes':0.25, # toppings return 'yes' in sandwich_builder(), so I made them all cost 0.25
'no':0 # saying no to a topping costs nothing
}
return food_dict[food_name]
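# Illustrative example (added for clarity; not in the original script): toppings
# are looked up via their 'yes'/'no' answers, so each chosen topping adds 0.25.
#
#   get_cost('rye') + get_cost('tofu') + get_cost('yes')   # -> 3.5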
def sandwich_builder():
print('Enter your sandwich preferences below:\n')
bread_prompt = 'What bread type would you like? (sourdough, rye, wheat, or white)\n'
bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt)
protein_prompt = 'What type of protein would you like? (chicken, turkey, ham, or tofu)\n'
protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt)
mayo = ip.inputYesNo(prompt='Would you like mayo?\n')
mustard = ip.inputYesNo(prompt='Would you like mustard?\n')
tomato = ip.inputYesNo(prompt='Would you like tomato?\n')
lettuce = ip.inputYesNo(prompt='Would you like lettuce?\n')
like_cheese = ip.inputYesNo(prompt='Do you like cheese on your sandwich?\n')
    if like_cheese == 'yes':
cheese_prompt = 'What kind of cheese would you like? (cheddar, swiss, mozzarella)\n'
cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt)
sandwich = []
cost = 0
sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce])
for item in sandwich:
cost += get_cost(item)
else:
sandwich = []
cost = 0
sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce])
for item in sandwich:
cost += get_cost(item)
how_many_prompt = 'How many sandwiches would you like?\n'
how_many = ip.inputInt(min=1, prompt=how_many_prompt)
print('\nFinal cost: ${}'.format(round(cost * how_many * 1.06, 2)))
sandwich_builder() | [((33, 19, 33, 94), 'pyinputplus.inputChoice', 'ip.inputChoice', (), '', True, 'import pyinputplus as ip\n'), ((36, 21, 36, 96), 'pyinputplus.inputChoice', 'ip.inputChoice', (), '', True, 'import pyinputplus as ip\n'), ((38, 14, 38, 60), 'pyinputplus.inputYesNo', 'ip.inputYesNo', (), '', True, 'import pyinputplus as ip\n'), ((39, 14, 39, 63), 'pyinputplus.inputYesNo', 'ip.inputYesNo', (), '', True, 'import pyinputplus as ip\n'), ((40, 14, 40, 62), 'pyinputplus.inputYesNo', 'ip.inputYesNo', (), '', True, 'import pyinputplus as ip\n'), ((41, 14, 41, 63), 'pyinputplus.inputYesNo', 'ip.inputYesNo', (), '', True, 'import pyinputplus as ip\n'), ((43, 18, 43, 80), 'pyinputplus.inputYesNo', 'ip.inputYesNo', (), '', True, 'import pyinputplus as ip\n'), ((66, 15, 66, 57), 'pyinputplus.inputInt', 'ip.inputInt', (), '', True, 'import pyinputplus as ip\n'), ((47, 24, 47, 96), 'pyinputplus.inputChoice', 'ip.inputChoice', (), '', True, 'import pyinputplus as ip\n')] |
My-Novel-Management/storybuilderunite | tests/core/test_headerupdater.py | c003d3451e237f574c54a87ea7d4fd8da8e833be | # -*- coding: utf-8 -*-
'''
HeaderUpdater class test
========================
'''
import unittest
from tests.testutils import print_testtitle, validate_with_fail
from builder.commands.scode import SCode, SCmd
from builder.containers.chapter import Chapter
from builder.containers.episode import Episode
from builder.containers.scene import Scene
from builder.containers.story import Story
from builder.core import headerupdater as hd
class HeaderUpdaterTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print_testtitle(hd.__name__, 'HeaderUpdater class')
def test_instance(self):
tmp = hd.HeaderUpdater()
self.assertIsInstance(tmp, hd.HeaderUpdater)
def test_title_of(self):
data = [
# (src, expect, exp_opt)
(True, Story('test',), ('test',), 1),
]
def checker(src, expect, exp_opt):
tmp = hd.HeaderUpdater()._title_of(src)
self.assertIsInstance(tmp, SCode)
self.assertEqual(tmp.cmd, SCmd.TAG_TITLE)
self.assertEqual(tmp.script, expect)
self.assertEqual(tmp.option, exp_opt)
validate_with_fail(self, 'title_of', checker, data)
def test_outline_of(self):
data = [
# (src, expect)
(True, Story('test',outline='apple'), ('apple',)),
]
def checker(src, expect):
tmp = hd.HeaderUpdater()._outline_of(src)
self.assertIsInstance(tmp, SCode)
self.assertEqual(tmp.cmd, SCmd.TAG_COMMENT)
self.assertEqual(tmp.script, expect)
validate_with_fail(self, 'outline_of', checker, data)
def test_end_of(self):
data = [
# (src, expect)
(True, Chapter('test',), SCmd.END_CHAPTER),
]
validate_with_fail(self, 'end_of',
lambda src, expect: self.assertEqual(
hd.HeaderUpdater()._end_of(src).cmd, expect),
data)
| [((21, 8, 21, 59), 'tests.testutils.print_testtitle', 'print_testtitle', ({(21, 24, 21, 35): 'hd.__name__', (21, 37, 21, 58): '"""HeaderUpdater class"""'}, {}), "(hd.__name__, 'HeaderUpdater class')", False, 'from tests.testutils import print_testtitle, validate_with_fail\n'), ((24, 14, 24, 32), 'builder.core.headerupdater.HeaderUpdater', 'hd.HeaderUpdater', ({}, {}), '()', True, 'from builder.core import headerupdater as hd\n'), ((38, 8, 38, 59), 'tests.testutils.validate_with_fail', 'validate_with_fail', ({(38, 27, 38, 31): 'self', (38, 33, 38, 43): '"""title_of"""', (38, 45, 38, 52): 'checker', (38, 54, 38, 58): 'data'}, {}), "(self, 'title_of', checker, data)", False, 'from tests.testutils import print_testtitle, validate_with_fail\n'), ((50, 8, 50, 61), 'tests.testutils.validate_with_fail', 'validate_with_fail', ({(50, 27, 50, 31): 'self', (50, 33, 50, 45): '"""outline_of"""', (50, 47, 50, 54): 'checker', (50, 56, 50, 60): 'data'}, {}), "(self, 'outline_of', checker, data)", False, 'from tests.testutils import print_testtitle, validate_with_fail\n'), ((30, 23, 30, 37), 'builder.containers.story.Story', 'Story', ({(30, 29, 30, 35): '"""test"""'}, {}), "('test')", False, 'from builder.containers.story import Story\n'), ((43, 23, 43, 52), 'builder.containers.story.Story', 'Story', (), '', False, 'from builder.containers.story import Story\n'), ((55, 23, 55, 39), 'builder.containers.chapter.Chapter', 'Chapter', ({(55, 31, 55, 37): '"""test"""'}, {}), "('test')", False, 'from builder.containers.chapter import Chapter\n'), ((33, 18, 33, 36), 'builder.core.headerupdater.HeaderUpdater', 'hd.HeaderUpdater', ({}, {}), '()', True, 'from builder.core import headerupdater as hd\n'), ((46, 18, 46, 36), 'builder.core.headerupdater.HeaderUpdater', 'hd.HeaderUpdater', ({}, {}), '()', True, 'from builder.core import headerupdater as hd\n'), ((59, 20, 59, 38), 'builder.core.headerupdater.HeaderUpdater', 'hd.HeaderUpdater', ({}, {}), '()', True, 'from builder.core import headerupdater as hd\n')] |
aernesto/Lab_DotsDB_Utilities | dotsDB/test_vlen_datasets.py | d8458b4126d80daeb5084234889fc6674158ea0f | import numpy as np
import h5py
filename = "test_vlen_datasets_np_bool.h5"
rows = [np.array([np.True_, np.False_]),
np.array([np.True_, np.True_, np.False_])]
f = h5py.File(filename, 'x') # create file, fails if exists
vlen_data_type = h5py.special_dtype(vlen=np.bool_)
dset = f.create_dataset("vlen_matrix", (2,),
compression="gzip",
compression_opts=9,
fletcher32=True,
dtype=vlen_data_type)
for r in range(len(rows)):
dset[r] = rows[r]
f.flush()
f.close()
f = h5py.File(filename, 'r')
dsetr = f["vlen_matrix"]
for r in range(dsetr.shape[0]):
print(dsetr[r])
| [((9, 4, 9, 28), 'h5py.File', 'h5py.File', ({(9, 14, 9, 22): 'filename', (9, 24, 9, 27): '"""x"""'}, {}), "(filename, 'x')", False, 'import h5py\n'), ((11, 17, 11, 50), 'h5py.special_dtype', 'h5py.special_dtype', (), '', False, 'import h5py\n'), ((25, 4, 25, 28), 'h5py.File', 'h5py.File', ({(25, 14, 25, 22): 'filename', (25, 24, 25, 27): '"""r"""'}, {}), "(filename, 'r')", False, 'import h5py\n'), ((6, 8, 6, 39), 'numpy.array', 'np.array', ({(6, 17, 6, 38): '[np.True_, np.False_]'}, {}), '([np.True_, np.False_])', True, 'import numpy as np\n'), ((7, 8, 7, 49), 'numpy.array', 'np.array', ({(7, 17, 7, 48): '[np.True_, np.True_, np.False_]'}, {}), '([np.True_, np.True_, np.False_])', True, 'import numpy as np\n')] |
g4idrijs/CardiacUltrasoundPhaseEstimation | utils.py | 6bd2e157240133b6e306a7ca931d3d3b96647b88 | import os, time
import numpy as np
import scipy.signal
import scipy.misc
import scipy.ndimage.filters
import matplotlib.pyplot as plt
import PIL
from PIL import ImageDraw
import angles
import cv2
import SimpleITK as sitk
def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),
resizeAmount=None):
if resizeAmount is not None:
imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount,
fy=resizeAmount)
imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB)
if len(strAnnotation) > 0:
cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,
2.0, textColor, thickness=2)
cv2.imshow(strName, imDisp)
def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),
resizeAmount=None):
if resizeAmount is not None:
imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount,
fy=resizeAmount)
if len(strAnnotation) > 0:
cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,
2.0, textColor, thickness=2)
cv2.imshow(strName, imDisp)
def mplotShowImage(imInput):
plt.imshow(imInput, cmap=plt.cm.gray)
plt.grid(False)
plt.xticks(())
plt.yticks(())
def normalizeArray(a):
return np.single(0.0 + a - a.min()) / (a.max() - a.min())
def AddTextOnImage(imInput, strText, loc=(2, 2), color=255):
imInputPIL = PIL.Image.fromarray(imInput)
d = ImageDraw.Draw(imInputPIL)
d.text(loc, strText, fill=color)
return np.asarray(imInputPIL)
def AddTextOnVideo(imVideo, strText, loc=(2, 2)):
imVideoOut = np.zeros_like(imVideo)
for i in range(imVideo.shape[2]):
imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i], strText, loc)
return imVideoOut
def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None):
if not isinstance(imVideo, list):
imVideo = [imVideo]
strWindowName = [strWindowName]
# find max number of frames
maxFrames = 0
for vid in range(len(imVideo)):
if imVideo[vid].shape[-1] > maxFrames:
maxFrames = imVideo[vid].shape[2]
# display video
blnLoop = True
fid = 0
while True:
for vid in range(len(imVideo)):
curVideoFid = fid % imVideo[vid].shape[2]
imCur = imVideo[vid][:, :, curVideoFid]
# resize image if requested
if resizeAmount:
imCur = scipy.misc.imresize(imCur, resizeAmount)
# show image
cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1))
# look for "esc" key
k = cv2.waitKey(waitTime) & 0xff
if blnLoop:
if k == 27:
break
elif k == ord(' '):
blnLoop = False
else:
fid = (fid + 1) % maxFrames
else:
if k == 27: # escape
break
elif k == ord(' '): # space
blnLoop = True
elif k == 81: # left arrow
fid = (fid - 1) % maxFrames
elif k == 83: # right arrow
fid = (fid + 1) % maxFrames
for vid in range(len(imVideo)):
cv2.destroyWindow(strWindowName[vid])
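# Summary of the key handling above (descriptive comment added for clarity):
# Esc exits the viewer, Space toggles pause/play, and while paused the
# Left/Right arrow keys (OpenCV key codes 81/83) step one frame back/forward.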
def normalizeArray(a, bounds=None):
if bounds is None:
return (0.0 + a - a.min()) / (a.max() - a.min())
else:
b = (0.0 + a - bounds[0]) / (bounds[1] - bounds[0])
b[b < 0] = bounds[0]
b[b > bounds[1]] = bounds[1]
return b
def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None):
vidseq = cv2.VideoCapture(dataFilePath)
print vidseq, vidseq.isOpened()
# print metadata
metadata = {}
numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT)
print '\tFRAME_COUNT = ', numFrames
metadata['FRAME_COUNT'] = numFrames
frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT)
if frameHeight > 0:
print '\tFRAME HEIGHT = ', frameHeight
metadata['FRAME_HEIGHT'] = frameHeight
frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH)
if frameWidth > 0:
print '\tFRAME WIDTH = ', frameWidth
metadata['FRAME_WIDTH'] = frameWidth
fps = vidseq.get(cv2.CAP_PROP_FPS)
if fps > 0:
print '\tFPS = ', fps
metadata['FPS'] = fps
fmt = vidseq.get(cv2.CAP_PROP_FORMAT)
if fmt > 0:
        print '\tFORMAT = ', fmt
metadata['FORMAT'] = fmt
vmode = vidseq.get(cv2.CAP_PROP_MODE)
if vmode > 0:
        print '\tMODE = ', vmode
        metadata['MODE'] = vmode
# smooth if wanted
if sigmaSmooth:
wSmooth = 4 * sigmaSmooth + 1
print metadata
# read video frames
imInput = []
fid = 0
prevPercent = 0
print '\n'
while True:
valid_object, frame = vidseq.read()
if not valid_object:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if resizeAmount:
frame = scipy.misc.imresize(frame, resizeAmount)
if sigmaSmooth:
frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0)
imInput.append(frame)
# update progress
fid += 1
curPercent = np.floor(100.0 * fid / numFrames)
if curPercent > prevPercent:
prevPercent = curPercent
print '%.2d%%' % curPercent,
print '\n'
imInput = np.dstack(imInput)
vidseq.release()
return (imInput, metadata)
def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False):
# start timer
tStart = time.time()
# write video
# fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4
fourcc = cv2.VideoWriter_fourcc(*list(codec))
height, width = imVideo.shape[:2]
writer = cv2.VideoWriter(filename, fourcc, fps=fps,
frameSize=(width, height), isColor=isColor)
print writer.isOpened()
numFrames = imVideo.shape[-1]
for fid in range(numFrames):
if isColor:
writer.write(imVideo[:, :, :, fid].astype('uint8'))
else:
writer.write(imVideo[:, :, fid].astype('uint8'))
# end timer
tEnd = time.time()
print 'Writing video {} took {} seconds'.format(filename, tEnd - tStart)
# release
writer.release()
def writeVideoAsTiffStack(imVideo, strFilePrefix):
# start timer
tStart = time.time()
for fid in range(imVideo.shape[2]):
plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1), imVideo[:, :, fid])
# end timer
tEnd = time.time()
print 'Writing video {} took {} seconds'.format(strFilePrefix,
tEnd - tStart)
def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None):
plt.imshow(im.max(axis))
if title:
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
def convertFromRFtoBMode(imInputRF):
return np.abs(scipy.signal.hilbert(imInputRF, axis=0))
def normalizeAngles(angleList, angle_range):
return np.array(
[angles.normalize(i, angle_range[0], angle_range[1]) for i in
angleList])
def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs):
for ext in saveext:
plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs)
def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)):
for ext in saveext:
plt.imsave(os.path.join(saveDir, fileName + ext), im)
def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame,
maxFrame, splineOrder):
tZoom = np.float(numOutFrames) / (maxFrame - minFrame + 1)
return scipy.ndimage.interpolation.zoom(
imInput[:, :, minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder)
def ncorr(imA, imB):
imA = (imA - imA.mean()) / imA.std()
imB = (imB - imB.mean()) / imB.std()
return np.mean(imA * imB)
def vis_checkerboard(im1, im2):
im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1),
sitk.GetImageFromArray(im2))
return sitk.GetArrayFromImage(im_chk)
def fig2data(fig):
"""
@brief Convert a Matplotlib figure to a 4D numpy array with
RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
# canvas.tostring_argb give pixmap in ARGB mode.
# Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf | [] |
akash143143/weasyl | weasyl/emailer.py | be42a2313e657e97c4a48432379e37b6a3d4a4af | from __future__ import absolute_import
import re
from email.mime.text import MIMEText
from smtplib import SMTP
from weasyl import define, macro
EMAIL_ADDRESS = re.compile(r"^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+\Z")
def normalize_address(address):
"""
Converts an e-mail address to a consistent representation.
Returns None if the given address is not considered valid.
"""
address = address.strip()
if not EMAIL_ADDRESS.match(address):
return None
local, domain = address.split("@", 1)
return "%s@%s" % (local, domain.lower())
def send(mailto, subject, content):
"""Send an e-mail.
`mailto` must be a normalized e-mail address to send this e-mail to. The
system email will be designated as the sender.
"""
message = MIMEText(content.strip())
message["To"] = mailto
message["From"] = macro.MACRO_EMAIL_ADDRESS
message["Subject"] = subject
# smtp.sendmail() only converts CR and LF (produced by MIMEText and our templates) to CRLF in Python 3. In Python 2, we need this:
msg_crlf = re.sub(r"\r\n|[\r\n]", "\r\n", message.as_string())
smtp = SMTP(define.config_read_setting('host', "localhost", section='smtp'))
try:
smtp.sendmail(
from_addr=macro.MACRO_EMAIL_ADDRESS,
to_addrs=[mailto],
msg=msg_crlf,
)
finally:
smtp.quit()
define.metric('increment', 'emails')
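# Minimal usage sketch (illustrative only; the address and strings below are
# placeholder values, and the SMTP host comes from the "smtp" config section):
#   mailto = normalize_address("[email protected]")
#   if mailto is not None:
#       send(mailto, "Example subject", "Example body text.")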
| [((10, 16, 10, 80), 're.compile', 're.compile', ({(10, 27, 10, 79): '"""^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+\\\\Z"""'}, {}), "('^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\\\.[a-zA-Z0-9-.]+\\\\Z')", False, 'import re\n'), ((53, 4, 53, 40), 'weasyl.define.metric', 'define.metric', ({(53, 18, 53, 29): '"""increment"""', (53, 31, 53, 39): '"""emails"""'}, {}), "('increment', 'emails')", False, 'from weasyl import define, macro\n'), ((42, 16, 42, 79), 'weasyl.define.config_read_setting', 'define.config_read_setting', (), '', False, 'from weasyl import define, macro\n')] |
ricklupton/sphinx_probs_rdf | tests/test_missing_process.py | bcae27a37162c1a4c4b329af6759a0b5b52cab7a | import pytest
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDF, RDFS
from sphinx_probs_rdf.directives import PROBS
SYS = Namespace("http://example.org/system/")
@pytest.mark.sphinx(
'probs_rdf', testroot='missing',
confoverrides={'probs_rdf_system_prefix': str(SYS)})
def test_builder_reports_warning_for_missing_process(app, status, warning):
app.builder.build_all()
assert "build succeeded" not in status.getvalue()
warnings = warning.getvalue().strip()
assert 'WARNING: Requested child "http://example.org/system/Missing" of "http://example.org/system/ErrorMissingProcess" is not a Process' in warnings
| [((7, 6, 7, 45), 'rdflib.Namespace', 'Namespace', ({(7, 16, 7, 44): '"""http://example.org/system/"""'}, {}), "('http://example.org/system/')", False, 'from rdflib import Graph, Namespace, Literal\n')] |
thekushalpokhrel/Python_Programs_SoftDev_DataAnalysis | analysis_functionarcademix.py | e56e0e853aca4367ebf99ae18e920b80f39bd133 | #analysis function for three level game
import statistics
import numpy as np
import matplotlib.pyplot as plt
def stat_analysis(c1,c2,c3):
#ask question for viewing analysis of game
analysis=input('\nDo you want to see your game analysis? (Yes/No) ')
if analysis=='Yes':
levels=['Level 1','Level 2','Level 3']
#calculating the score of levels
l1_score= c1*10
l2_score= c2*10
l3_score= c3*10
level_score=[l1_score,l2_score,l3_score]
#plot bar chart
plt.bar(levels,level_score,color='blue',edgecolor='black')
plt.title('Levelwise Scores',fontsize=16)#add title
plt.xlabel('Levels')#set x-axis label
plt.ylabel('Scores')#set y-axis label
plt.show()
print('\nDescriptive Statistics of Scores:')
#find mean value
print('\nMean: ',statistics.mean(level_score))
#find median value
        print('\nMedian: ',statistics.median(level_score))
#Mode calculation
        #convert the level scores to a NumPy array for mode calculation
arr_val = np.array(level_score)
#find unique values in array along with their counts
vals, uni_val_counts = np.unique(arr_val, return_counts=True)
#find mode
        mode_value = np.argwhere(uni_val_counts == np.max(uni_val_counts))
print('\nMode: ',vals[mode_value].flatten().tolist())
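        # Worked illustration (hypothetical values, added for clarity):
        # if level_score were [20, 20, 30], then vals = [20, 30],
        # uni_val_counts = [2, 1], and the printed mode list would be [20].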
#find variance
print('\nVariance: ',np.var(level_score))
#find standard deviation
print('\nStandard Deviation: ',statistics.stdev(level_score))
print('\nGood Bye.See you later!!!')
elif analysis=='No':
print('\nGood Bye.See you later!!!')
else:
        print('Invalid value entered')
stat_analysis(c1,c2,c3)
| [] |
TechnoTanuki/Python_BMP | Hello_Cone.py | d6f7e7a4b74f7d6e8761d618c156d37c97726038 | notice = """
Cone Demo
-----------------------------------
| Copyright 2022 by Joel C. Alcarez |
| [[email protected]] |
|-----------------------------------|
| We make absolutely no warranty |
| of any kind, expressed or implied |
|-----------------------------------|
| This graphics library outputs |
| to a bitmap file. |
-----------------------------------
"""
from Python_BMP.BITMAPlib import(
newBMP,
centercoord,
plot3Dsolid,
getRGBfactors,
rotvec3D,
conevertandsurface,
saveBMP
)
import subprocess as proc
from os import path
def main():
print(notice)
imgedt = 'mspaint' # replace with another editor if Unix
rootdir = path.dirname(__file__) # get path of this script
mx = my = 250 # x=y square bmp
file = 'HelloCone.bmp' # some random file name as string
bmp = newBMP(mx, my, 24) # RGB bmp
cenpt = centercoord(bmp) # helper method to get center of a bitmap
cf = getRGBfactors() # color info with presets
d, translationvector = 400, [0, 0, 200] # be careful with these variables or object goes offscreen
isSolid = True # toggle solid or outline
showoutline = False # can show outline even if solid
cf = getRGBfactors() # color list
color = cf['brightyellow'] # color of solid
outlinecolor = 0 # outline color
rotation = rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees
vcen = (1,0,0) # x y z coords
r = 40 # radius of cone
zlen = 40 # height of cone
deganglestep = 5 # how finely we tile flat surfaces around the cone
obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A solid is defined by vertices and surfaces
plot3Dsolid(bmp, obj3D, isSolid, color,
showoutline, outlinecolor,
rotation, translationvector, d, cenpt)
saveBMP(file, bmp) # save file
print('Saved to %s in %s\nAll done close %s to finish' % \
(file, rootdir, imgedt))
ret = proc.call([imgedt, file])
if __name__=="__main__":
main()
| [((30, 18, 30, 40), 'os.path.dirname', 'path.dirname', ({(30, 31, 30, 39): '__file__'}, {}), '(__file__)', False, 'from os import path\n'), ((33, 14, 33, 32), 'Python_BMP.BITMAPlib.newBMP', 'newBMP', ({(33, 21, 33, 23): 'mx', (33, 25, 33, 27): 'my', (33, 29, 33, 31): '24'}, {}), '(mx, my, 24)', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((34, 16, 34, 32), 'Python_BMP.BITMAPlib.centercoord', 'centercoord', ({(34, 28, 34, 31): 'bmp'}, {}), '(bmp)', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((35, 13, 35, 28), 'Python_BMP.BITMAPlib.getRGBfactors', 'getRGBfactors', ({}, {}), '()', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((39, 13, 39, 28), 'Python_BMP.BITMAPlib.getRGBfactors', 'getRGBfactors', ({}, {}), '()', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((42, 19, 42, 38), 'Python_BMP.BITMAPlib.rotvec3D', 'rotvec3D', ({(42, 28, 42, 30): '25', (42, 31, 42, 34): '240', (42, 35, 42, 37): '70'}, {}), '(25, 240, 70)', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((47, 16, 47, 63), 'Python_BMP.BITMAPlib.conevertandsurface', 'conevertandsurface', ({(47, 35, 47, 39): 'vcen', (47, 41, 47, 42): 'r', (47, 44, 47, 48): 'zlen', (47, 50, 47, 62): 'deganglestep'}, {}), '(vcen, r, zlen, deganglestep)', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((48, 8, 50, 54), 'Python_BMP.BITMAPlib.plot3Dsolid', 'plot3Dsolid', ({(48, 20, 48, 23): 'bmp', (48, 25, 48, 30): 'obj3D', (48, 32, 48, 39): 'isSolid', (48, 41, 48, 46): 'color', (49, 16, 49, 27): 'showoutline', (49, 29, 49, 41): 'outlinecolor', (50, 16, 50, 24): 'rotation', (50, 26, 50, 43): 'translationvector', (50, 45, 50, 46): 'd', (50, 48, 50, 53): 'cenpt'}, {}), '(bmp, obj3D, isSolid, color, showoutline, outlinecolor, rotation,\n translationvector, d, cenpt)', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((51, 8, 51, 26), 'Python_BMP.BITMAPlib.saveBMP', 'saveBMP', ({(51, 16, 51, 20): 'file', (51, 22, 51, 25): 'bmp'}, {}), '(file, bmp)', False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((54, 14, 54, 39), 'subprocess.call', 'proc.call', ({(54, 24, 54, 38): '[imgedt, file]'}, {}), '([imgedt, file])', True, 'import subprocess as proc\n')] |
AndrewKirby2/data_synthesis | analysis/training_curve_6D.py | 656858137a348fd5dcb57bcd04bdfece2b9eac1b | """ Plot a training curve for the 6D data simulator of CT*
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern
from sklearn.metrics import mean_squared_error
from sklearn.pipeline import Pipeline
import sys
sys.path.append(r'/home/andrewkirby72/phd_work/data_synthesis')
from GP_machine_learning.GP_machine_learning_functions import *
from regular_array_sampling.functions import regular_array_monte_carlo
# create array to store results for plotting
rmse = np.ones((25, 2))
noise = 0.01
# create array of sampled regular array layouts
#cand_points = regular_array_monte_carlo(10000)
# create testing points
X_test, y_test = create_testing_points_regular(noise)
n = 0
n_target = 0
n_train = 0
while n_train < 200:
n_target = 100 +100*n
# create training points
X_train, y_train, n_train = \
create_training_points_irregular(n_target, noise)
# fit GP regression and calculate rmse
kernel = 1.0 ** 2 * RBF(length_scale=[1., 1., 1., 1., 1., 1.]) \
+ WhiteKernel(noise_level=1e-5, noise_level_bounds=[1e-10, 1])
pipe = Pipeline([('scaler', StandardScaler()),
('gp', GaussianProcessRegressor(kernel=kernel,
n_restarts_optimizer=20))])
pipe.fit(X_train, y_train)
y_predict = pipe.predict(X_test)
mse = mean_squared_error(y_test, y_predict)
# report rmse
print(n_train, np.sqrt(mse))
rmse[n, 0] = n_train
rmse[n, 1] = np.sqrt(mse)
n += 1
plt.scatter(rmse[:, 0], rmse[:, 1])
plt.yscale('log')
plt.ylim([1e-3, 1e-1])
plt.xlim([0, 200])
plt.title('Training curve RBF - 6D 1% noise - irregular array training - max change halved')
plt.ylabel('RMSE')
plt.xlabel('Training points')
plt.savefig('analysis/GP_machine_learning_plots/\
gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png')
| [((12, 0, 12, 63), 'sys.path.append', 'sys.path.append', ({(12, 16, 12, 62): '"""/home/andrewkirby72/phd_work/data_synthesis"""'}, {}), "('/home/andrewkirby72/phd_work/data_synthesis')", False, 'import sys\n'), ((17, 7, 17, 23), 'numpy.ones', 'np.ones', ({(17, 15, 17, 22): '(25, 2)'}, {}), '((25, 2))', True, 'import numpy as np\n'), ((49, 0, 49, 35), 'matplotlib.pyplot.scatter', 'plt.scatter', ({(49, 12, 49, 22): 'rmse[:, (0)]', (49, 24, 49, 34): 'rmse[:, (1)]'}, {}), '(rmse[:, (0)], rmse[:, (1)])', True, 'import matplotlib.pyplot as plt\n'), ((50, 0, 50, 17), 'matplotlib.pyplot.yscale', 'plt.yscale', ({(50, 11, 50, 16): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((51, 0, 51, 22), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(51, 9, 51, 21): '[0.001, 0.1]'}, {}), '([0.001, 0.1])', True, 'import matplotlib.pyplot as plt\n'), ((52, 0, 52, 18), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(52, 9, 52, 17): '[0, 200]'}, {}), '([0, 200])', True, 'import matplotlib.pyplot as plt\n'), ((53, 0, 53, 92), 'matplotlib.pyplot.title', 'plt.title', ({(53, 10, 53, 91): '"""Training curve RBF - 6D 1% noise - irregular array training - max change halved"""'}, {}), "(\n 'Training curve RBF - 6D 1% noise - irregular array training - max change halved'\n )", True, 'import matplotlib.pyplot as plt\n'), ((54, 0, 54, 18), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(54, 11, 54, 17): '"""RMSE"""'}, {}), "('RMSE')", True, 'import matplotlib.pyplot as plt\n'), ((55, 0, 55, 29), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(55, 11, 55, 28): '"""Training points"""'}, {}), "('Training points')", True, 'import matplotlib.pyplot as plt\n'), ((56, 0, 57, 78), 'matplotlib.pyplot.savefig', 'plt.savefig', ({(56, 12, 57, 77): '"""analysis/GP_machine_learning_plots/gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png"""'}, {}), "(\n 'analysis/GP_machine_learning_plots/gp_training_curve_RBF_irregular_training_maxchangehalved_regular_testing.png'\n )", True, 'import matplotlib.pyplot as plt\n'), ((42, 10, 42, 47), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', ({(42, 29, 42, 35): 'y_test', (42, 37, 42, 46): 'y_predict'}, {}), '(y_test, y_predict)', False, 'from sklearn.metrics import mean_squared_error\n'), ((46, 17, 46, 29), 'numpy.sqrt', 'np.sqrt', ({(46, 25, 46, 28): 'mse'}, {}), '(mse)', True, 'import numpy as np\n'), ((36, 10, 36, 70), 'sklearn.gaussian_process.kernels.WhiteKernel', 'WhiteKernel', (), '', False, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern\n'), ((44, 19, 44, 31), 'numpy.sqrt', 'np.sqrt', ({(44, 27, 44, 30): 'mse'}, {}), '(mse)', True, 'import numpy as np\n'), ((35, 24, 35, 66), 'sklearn.gaussian_process.kernels.RBF', 'RBF', (), '', False, 'from sklearn.gaussian_process.kernels import RBF, WhiteKernel, Matern\n'), ((37, 32, 37, 48), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ({}, {}), '()', False, 'from sklearn.preprocessing import StandardScaler\n'), ((38, 27, 39, 45), 'sklearn.gaussian_process.GaussianProcessRegressor', 'GaussianProcessRegressor', (), '', False, 'from sklearn.gaussian_process import GaussianProcessRegressor\n')] |