index (int64, 0-10k) | blob_id (string, 40 chars) | step-1 (string, 13-984k chars) | step-2 (string, 6-1.23M chars, nullable) | step-3 (string, 15-1.34M chars, nullable) | step-4 (string, 30-1.34M chars, nullable) | step-5 (string, 64-1.2M chars, nullable) | step-ids (sequence, 1-5 items)
---|---|---|---|---|---|---|---|
1,300 | 753bdbf080e7a8652c39e40beeae51f74382d606 | <mask token>
| <mask token>
def test_detector():
detector = Detector(n_jobs=1)
assert detector['n_jobs'] == 1
assert type(detector) == Detector
inputFname = os.path.join(get_test_data_path(), 'input.jpg')
out = detector.detect_image(inputFname=inputFname)
assert type(out) == Fex
assert len(out) == 1
assert out.happiness.values[0] > 0
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values[0] > 0
inputFname = os.path.join(get_test_data_path(), 'input.mp4')
out = detector.detect_video(inputFname=inputFname)
assert len(out) == 72
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values.max() > 0
| <mask token>
from feat.detector import Detector
from feat.data import Fex
from feat.utils import get_resource_path
from .utils import get_test_data_path
import pandas as pd
import feat
import os
import wget
def test_detector():
detector = Detector(n_jobs=1)
assert detector['n_jobs'] == 1
assert type(detector) == Detector
inputFname = os.path.join(get_test_data_path(), 'input.jpg')
out = detector.detect_image(inputFname=inputFname)
assert type(out) == Fex
assert len(out) == 1
assert out.happiness.values[0] > 0
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values[0] > 0
inputFname = os.path.join(get_test_data_path(), 'input.mp4')
out = detector.detect_video(inputFname=inputFname)
assert len(out) == 72
outputFname = os.path.join(get_test_data_path(), 'output.csv')
out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values.max() > 0
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `feat` package."""
from feat.detector import Detector
from feat.data import Fex
from feat.utils import get_resource_path
from .utils import get_test_data_path
import pandas as pd
import feat
import os
import wget
# def test_models():
# print("Downloading FEX emotion model.")
# fex_emotion_model = "https://github.com/cosanlab/feat/releases/download/v0.1/fer_aug_model.h5"
# wget.download(fex_emotion_model, get_resource_path())
# if os.path.exists(os.path.join(get_resource_path(), "fer_aug_model.h5")):
# print("\nFEX emotion model downloaded successfully.\n")
# else:
# print("Something went wrong. Model not found in directory.")
# print("Downloading landmark detection model.")
# lbfmodel = "https://github.com/cosanlab/feat/releases/download/v0.1/lbfmodel.yaml"
# wget.download(lbfmodel, get_resource_path())
# if os.path.exists(os.path.join(get_resource_path(), "lbfmodel.yaml")):
# print("\nLandmark detection model downloaded successfully.\n")
# else:
# print("Something went wrong. Model not found in directory.")
# emotion_model = "fer_aug_model.h5"
# emotion_model_path = os.path.join(get_resource_path(), emotion_model)
# print("PATH TO EMOTION MODEL",emotion_model_path)
# assert os.path.exists(emotion_model_path)==True
# landmark_model = "lbfmodel.yaml"
# landmark_model_path = os.path.join(get_resource_path(), landmark_model)
# assert os.path.exists(landmark_model_path)==True
def test_detector():
detector = Detector(n_jobs=1)
assert detector['n_jobs']==1
assert type(detector)==Detector
# Test detect image
inputFname = os.path.join(get_test_data_path(), "input.jpg")
out = detector.detect_image(inputFname = inputFname)
assert type(out) == Fex
assert len(out) == 1
assert out.happiness.values[0] > 0
outputFname = os.path.join(get_test_data_path(), "output.csv")
out = detector.detect_image(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
assert out.happiness.values[0] > 0
# Test detect video
inputFname = os.path.join(get_test_data_path(), "input.mp4")
out = detector.detect_video(inputFname=inputFname)
assert len(out)==72
outputFname = os.path.join(get_test_data_path(), "output.csv")
out = detector.detect_video(inputFname=inputFname, outputFname=outputFname)
assert out
assert os.path.exists(outputFname)
out = pd.read_csv(outputFname)
    assert out.happiness.values.max() > 0 | null | [0, 1, 2, 3] |
1,301 | 1e24952006afebb7bf10a83077fc4effd5cc9c58 | <mask token>
| print('Different Code!!!')
| #Sample Python Code
print("Different Code!!!")
#print("Hello World!")
 | null | null | [0, 1, 2] |
1,302 | fc26574ac8628d7e2896e3e6d055ac61264c7db0 | <mask token>
| <mask token>
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
| <mask token>
script_name = pathlib.Path(sys.argv[0]).stem
FIGURES_DIR = pathlib.Path(__file__).parents[2
] / 'figures' / 'simulations' / script_name
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
| import sys
import pathlib
from matplotlib import pyplot as plt
import matplotlib as mpl
script_name = pathlib.Path(sys.argv[0]).stem
FIGURES_DIR = pathlib.Path(__file__).parents[2
] / 'figures' / 'simulations' / script_name
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
| import sys
import pathlib
from matplotlib import pyplot as plt
import matplotlib as mpl
script_name = pathlib.Path(sys.argv[0]).stem
FIGURES_DIR = pathlib.Path(
__file__).parents[2] / "figures" / "simulations" / script_name
FIGURES_DIR.mkdir(exist_ok=True, parents=True)
# mpl.rc("text", usetex=True)
# mpl.rc("font", family="serif")
# mpl.rc(
# "text.latex",
# preamble=r"\usepackage{mathpazo} \usepackage{eulervm} \usepackage{amssymb}"
# r"\usepackage{amsmath} \usepackage{bm} \usepackage{DejaVuSans}",
# )
 | [0, 1, 2, 3, 4] |
1,303 | 8ddb7abb480ea8ee674c59719c0946f133ef0a4b | <mask token>
class addItemThread(QThread):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
mthread = addItemThread()
mthread.update_qvix.connect(self.update_qvix)
mthread.update_north.connect(self.update_north)
mthread.update_vol.connect(self.update_volume)
mthread.update_month.connect(self.update_month)
mthread.update_iv.connect(self.update_iv)
mthread.update_greek.connect(self.update_greek)
mthread.start()
self.initUI()
def initUI(self):
self.setGeometry(400, 400, 1200, 620)
self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')
self.gridLayout = QGridLayout(self)
self.plot()
"""
buttom
"""
self.label_greek = QLabel('label_greek')
self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')
self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)
"""
right
"""
def plot(self):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pw_iv50 = pg.PlotWidget(title='50-IV')
self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(pw_iv50, 0, 0)
plt300 = pg.PlotWidget(title='300-IV')
self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_5 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(plt300, 0, 1)
pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')
pw_month.showGrid(x=False, y=True)
pw_month.addLegend(offset=(30, 100))
self.plt_month50 = pw_month.plot(name='50')
self.plt_month300 = pw_month.plot(name='300')
self.gridLayout.addWidget(pw_month, 0, 2)
pw_qvix = pg.PlotWidget(title='QVIX')
pw_qvix.showGrid(x=True, y=True)
pw_qvix.addLegend()
self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')
self.gridLayout.addWidget(pw_qvix, 1, 0)
pw_north = pg.PlotWidget(title='NORTH')
pw_north.showGrid(x=False, y=True)
pw_north.addLegend()
self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name
='hgt')
self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name
='sgt')
self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name
='all')
self.gridLayout.addWidget(pw_north, 1, 1)
pw_volume = pg.PlotWidget(title='VOLUME')
pw_volume.showGrid(x=False, y=True)
self.plt_volume = pw_volume.plot(name='volume')
self.stock_50 = pw_volume.plot(name='stock_50')
self.gridLayout.addWidget(pw_volume, 1, 2)
def update_qvix(self, df):
df = df.drop(['Pre', 'max', 'min'], axis=1)
self.plt_qvix.setData(df.index.values, df['QVIX'])
def update_north(self, df):
self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)
self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)
self.plt_north_all.setData(df['all'].astype(float) / 10000)
def update_volume(self, data, ser):
self.plt_volume.setPen(pg.mkPen('b', width=3))
self.plt_volume.setData(data.values)
self.stock_50.setData(ser)
def update_month(self, data):
data.columns = ['data', '50iv', 'data2', '300iv']
self.plt_month50.setData(data['50iv'])
self.plt_month50.setPen(pg.mkPen('r', width=2))
self.plt_month300.setData(data['300iv'])
self.plt_month300.setPen(pg.mkPen('b', width=1))
def update_iv(self, data50, data300):
data50.sort_index(inplace=True)
data50 = data50.astype(float)
data50[data50 < 1] = np.nan
self.plt_iv50_1.setData(data50.iloc[:, 0])
self.plt_iv50_2.setData(data50.iloc[:, 5])
self.plt_iv50_3.setData(data50.iloc[:, 1])
self.plt_iv50_4.setData(data50.iloc[:, 6])
self.plt_iv50_5.setData(data50.iloc[:, 2])
self.plt_iv50_6.setData(data50.iloc[:, 7])
self.plt_iv50_7.setData(data50.iloc[:, 3])
self.plt_iv50_8.setData(data50.iloc[:, 8])
data300.sort_index(inplace=True)
data300 = data300.astype(float)
data300[data300 < 1] = np.nan
self.plt_iv300_1.setData(data300.iloc[:, 0])
self.plt_iv300_2.setData(data300.iloc[:, 5])
self.plt_iv300_3.setData(data300.iloc[:, 1])
self.plt_iv300_4.setData(data300.iloc[:, 6])
self.plt_iv300_5.setData(data300.iloc[:, 2])
self.plt_iv300_6.setData(data300.iloc[:, 7])
self.plt_iv300_7.setData(data300.iloc[:, 3])
self.plt_iv300_8.setData(data300.iloc[:, 8])
def update_greek(self, gk):
text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],
gk[3])
self.label_greek.setText(text)
<mask token>
| <mask token>
class addItemThread(QThread):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, *args, **kwargs):
super(addItemThread, self).__init__(*args, **kwargs)
self.data_model = DataModel()
self.num = 0
<mask token>
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
mthread = addItemThread()
mthread.update_qvix.connect(self.update_qvix)
mthread.update_north.connect(self.update_north)
mthread.update_vol.connect(self.update_volume)
mthread.update_month.connect(self.update_month)
mthread.update_iv.connect(self.update_iv)
mthread.update_greek.connect(self.update_greek)
mthread.start()
self.initUI()
def initUI(self):
self.setGeometry(400, 400, 1200, 620)
self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')
self.gridLayout = QGridLayout(self)
self.plot()
"""
buttom
"""
self.label_greek = QLabel('label_greek')
self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')
self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)
"""
right
"""
def plot(self):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pw_iv50 = pg.PlotWidget(title='50-IV')
self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(pw_iv50, 0, 0)
plt300 = pg.PlotWidget(title='300-IV')
self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_5 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(plt300, 0, 1)
pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')
pw_month.showGrid(x=False, y=True)
pw_month.addLegend(offset=(30, 100))
self.plt_month50 = pw_month.plot(name='50')
self.plt_month300 = pw_month.plot(name='300')
self.gridLayout.addWidget(pw_month, 0, 2)
pw_qvix = pg.PlotWidget(title='QVIX')
pw_qvix.showGrid(x=True, y=True)
pw_qvix.addLegend()
self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')
self.gridLayout.addWidget(pw_qvix, 1, 0)
pw_north = pg.PlotWidget(title='NORTH')
pw_north.showGrid(x=False, y=True)
pw_north.addLegend()
self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name
='hgt')
self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name
='sgt')
self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name
='all')
self.gridLayout.addWidget(pw_north, 1, 1)
pw_volume = pg.PlotWidget(title='VOLUME')
pw_volume.showGrid(x=False, y=True)
self.plt_volume = pw_volume.plot(name='volume')
self.stock_50 = pw_volume.plot(name='stock_50')
self.gridLayout.addWidget(pw_volume, 1, 2)
def update_qvix(self, df):
df = df.drop(['Pre', 'max', 'min'], axis=1)
self.plt_qvix.setData(df.index.values, df['QVIX'])
def update_north(self, df):
self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)
self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)
self.plt_north_all.setData(df['all'].astype(float) / 10000)
def update_volume(self, data, ser):
self.plt_volume.setPen(pg.mkPen('b', width=3))
self.plt_volume.setData(data.values)
self.stock_50.setData(ser)
def update_month(self, data):
data.columns = ['data', '50iv', 'data2', '300iv']
self.plt_month50.setData(data['50iv'])
self.plt_month50.setPen(pg.mkPen('r', width=2))
self.plt_month300.setData(data['300iv'])
self.plt_month300.setPen(pg.mkPen('b', width=1))
def update_iv(self, data50, data300):
data50.sort_index(inplace=True)
data50 = data50.astype(float)
data50[data50 < 1] = np.nan
self.plt_iv50_1.setData(data50.iloc[:, 0])
self.plt_iv50_2.setData(data50.iloc[:, 5])
self.plt_iv50_3.setData(data50.iloc[:, 1])
self.plt_iv50_4.setData(data50.iloc[:, 6])
self.plt_iv50_5.setData(data50.iloc[:, 2])
self.plt_iv50_6.setData(data50.iloc[:, 7])
self.plt_iv50_7.setData(data50.iloc[:, 3])
self.plt_iv50_8.setData(data50.iloc[:, 8])
data300.sort_index(inplace=True)
data300 = data300.astype(float)
data300[data300 < 1] = np.nan
self.plt_iv300_1.setData(data300.iloc[:, 0])
self.plt_iv300_2.setData(data300.iloc[:, 5])
self.plt_iv300_3.setData(data300.iloc[:, 1])
self.plt_iv300_4.setData(data300.iloc[:, 6])
self.plt_iv300_5.setData(data300.iloc[:, 2])
self.plt_iv300_6.setData(data300.iloc[:, 7])
self.plt_iv300_7.setData(data300.iloc[:, 3])
self.plt_iv300_8.setData(data300.iloc[:, 8])
def update_greek(self, gk):
text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],
gk[3])
self.label_greek.setText(text)
<mask token>
| <mask token>
class addItemThread(QThread):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def __init__(self, *args, **kwargs):
super(addItemThread, self).__init__(*args, **kwargs)
self.data_model = DataModel()
self.num = 0
def run(self, *args, **kwargs):
while True:
df = LoadNet().get_QVIX()
self.update_qvix.emit(df)
df_north = LoadNet().get_north()
self.update_north.emit(df_north)
df_vol, cha = Volume().update()
data, last = LoadNet().get_50_163()
ser = (data['current'] - last) / last
self.update_vol.emit(df_vol, ser)
if not self.data_model.df_op.empty:
df_month = self.data_model.iv_month_50300()
self.update_month.emit(df_month)
df_iv50, df_iv300 = self.data_model.get_iv()
self.update_iv.emit(df_iv50, df_iv300)
hp = HoldPositions()
greek = hp.update(self.data_model.df_op)
self.update_greek.emit(greek)
time.sleep(3)
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
mthread = addItemThread()
mthread.update_qvix.connect(self.update_qvix)
mthread.update_north.connect(self.update_north)
mthread.update_vol.connect(self.update_volume)
mthread.update_month.connect(self.update_month)
mthread.update_iv.connect(self.update_iv)
mthread.update_greek.connect(self.update_greek)
mthread.start()
self.initUI()
def initUI(self):
self.setGeometry(400, 400, 1200, 620)
self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')
self.gridLayout = QGridLayout(self)
self.plot()
"""
buttom
"""
self.label_greek = QLabel('label_greek')
self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')
self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)
"""
right
"""
def plot(self):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pw_iv50 = pg.PlotWidget(title='50-IV')
self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(pw_iv50, 0, 0)
plt300 = pg.PlotWidget(title='300-IV')
self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_5 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(plt300, 0, 1)
pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')
pw_month.showGrid(x=False, y=True)
pw_month.addLegend(offset=(30, 100))
self.plt_month50 = pw_month.plot(name='50')
self.plt_month300 = pw_month.plot(name='300')
self.gridLayout.addWidget(pw_month, 0, 2)
pw_qvix = pg.PlotWidget(title='QVIX')
pw_qvix.showGrid(x=True, y=True)
pw_qvix.addLegend()
self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')
self.gridLayout.addWidget(pw_qvix, 1, 0)
pw_north = pg.PlotWidget(title='NORTH')
pw_north.showGrid(x=False, y=True)
pw_north.addLegend()
self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name
='hgt')
self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name
='sgt')
self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name
='all')
self.gridLayout.addWidget(pw_north, 1, 1)
pw_volume = pg.PlotWidget(title='VOLUME')
pw_volume.showGrid(x=False, y=True)
self.plt_volume = pw_volume.plot(name='volume')
self.stock_50 = pw_volume.plot(name='stock_50')
self.gridLayout.addWidget(pw_volume, 1, 2)
def update_qvix(self, df):
df = df.drop(['Pre', 'max', 'min'], axis=1)
self.plt_qvix.setData(df.index.values, df['QVIX'])
def update_north(self, df):
self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)
self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)
self.plt_north_all.setData(df['all'].astype(float) / 10000)
def update_volume(self, data, ser):
self.plt_volume.setPen(pg.mkPen('b', width=3))
self.plt_volume.setData(data.values)
self.stock_50.setData(ser)
def update_month(self, data):
data.columns = ['data', '50iv', 'data2', '300iv']
self.plt_month50.setData(data['50iv'])
self.plt_month50.setPen(pg.mkPen('r', width=2))
self.plt_month300.setData(data['300iv'])
self.plt_month300.setPen(pg.mkPen('b', width=1))
def update_iv(self, data50, data300):
data50.sort_index(inplace=True)
data50 = data50.astype(float)
data50[data50 < 1] = np.nan
self.plt_iv50_1.setData(data50.iloc[:, 0])
self.plt_iv50_2.setData(data50.iloc[:, 5])
self.plt_iv50_3.setData(data50.iloc[:, 1])
self.plt_iv50_4.setData(data50.iloc[:, 6])
self.plt_iv50_5.setData(data50.iloc[:, 2])
self.plt_iv50_6.setData(data50.iloc[:, 7])
self.plt_iv50_7.setData(data50.iloc[:, 3])
self.plt_iv50_8.setData(data50.iloc[:, 8])
data300.sort_index(inplace=True)
data300 = data300.astype(float)
data300[data300 < 1] = np.nan
self.plt_iv300_1.setData(data300.iloc[:, 0])
self.plt_iv300_2.setData(data300.iloc[:, 5])
self.plt_iv300_3.setData(data300.iloc[:, 1])
self.plt_iv300_4.setData(data300.iloc[:, 6])
self.plt_iv300_5.setData(data300.iloc[:, 2])
self.plt_iv300_6.setData(data300.iloc[:, 7])
self.plt_iv300_7.setData(data300.iloc[:, 3])
self.plt_iv300_8.setData(data300.iloc[:, 8])
def update_greek(self, gk):
text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],
gk[3])
self.label_greek.setText(text)
<mask token>
| <mask token>
class addItemThread(QThread):
update_qvix = pyqtSignal(pd.DataFrame)
update_north = pyqtSignal(pd.DataFrame)
update_vol = pyqtSignal(pd.Series, pd.Series)
update_month = pyqtSignal(pd.DataFrame)
update_iv = pyqtSignal(pd.DataFrame, pd.DataFrame)
update_greek = pyqtSignal(list)
def __init__(self, *args, **kwargs):
super(addItemThread, self).__init__(*args, **kwargs)
self.data_model = DataModel()
self.num = 0
def run(self, *args, **kwargs):
while True:
df = LoadNet().get_QVIX()
self.update_qvix.emit(df)
df_north = LoadNet().get_north()
self.update_north.emit(df_north)
df_vol, cha = Volume().update()
data, last = LoadNet().get_50_163()
ser = (data['current'] - last) / last
self.update_vol.emit(df_vol, ser)
if not self.data_model.df_op.empty:
df_month = self.data_model.iv_month_50300()
self.update_month.emit(df_month)
df_iv50, df_iv300 = self.data_model.get_iv()
self.update_iv.emit(df_iv50, df_iv300)
hp = HoldPositions()
greek = hp.update(self.data_model.df_op)
self.update_greek.emit(greek)
time.sleep(3)
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
mthread = addItemThread()
mthread.update_qvix.connect(self.update_qvix)
mthread.update_north.connect(self.update_north)
mthread.update_vol.connect(self.update_volume)
mthread.update_month.connect(self.update_month)
mthread.update_iv.connect(self.update_iv)
mthread.update_greek.connect(self.update_greek)
mthread.start()
self.initUI()
def initUI(self):
self.setGeometry(400, 400, 1200, 620)
self.setWindowTitle('不被仓位左右思想,没找到弱点不要重仓')
self.gridLayout = QGridLayout(self)
self.plot()
"""
buttom
"""
self.label_greek = QLabel('label_greek')
self.label_greek.setStyleSheet('background-color:rgb(250,250,250)')
self.gridLayout.addWidget(self.label_greek, 2, 0, 1, 3)
"""
right
"""
def plot(self):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pw_iv50 = pg.PlotWidget(title='50-IV')
self.plt_iv50_1 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_2 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv50_3 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_4 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv50_5 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_6 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv50_7 = pw_iv50.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv50_8 = pw_iv50.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(pw_iv50, 0, 0)
plt300 = pg.PlotWidget(title='300-IV')
self.plt_iv300_1 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_2 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=12, symbolBrush=(0, 255, 0))
self.plt_iv300_3 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_4 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=10, symbolBrush=(0, 170, 0))
self.plt_iv300_5 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_6 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=8, symbolBrush=(0, 85, 0))
self.plt_iv300_7 = plt300.plot(symbol='o', pen=pg.mkPen('r', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.plt_iv300_8 = plt300.plot(symbol='o', pen=pg.mkPen('g', width=
1), symbolSize=6, symbolBrush=(0, 0, 0))
self.gridLayout.addWidget(plt300, 0, 1)
pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')
pw_month.showGrid(x=False, y=True)
pw_month.addLegend(offset=(30, 100))
self.plt_month50 = pw_month.plot(name='50')
self.plt_month300 = pw_month.plot(name='300')
self.gridLayout.addWidget(pw_month, 0, 2)
pw_qvix = pg.PlotWidget(title='QVIX')
pw_qvix.showGrid(x=True, y=True)
pw_qvix.addLegend()
self.plt_qvix = pw_qvix.plot(pen=pg.mkPen('d', width=4), name='iv')
self.gridLayout.addWidget(pw_qvix, 1, 0)
pw_north = pg.PlotWidget(title='NORTH')
pw_north.showGrid(x=False, y=True)
pw_north.addLegend()
self.plt_north_hgt = pw_north.plot(pen=pg.mkPen('b', width=2), name
='hgt')
self.plt_north_sgt = pw_north.plot(pen=pg.mkPen('g', width=1), name
='sgt')
self.plt_north_all = pw_north.plot(pen=pg.mkPen('d', width=1), name
='all')
self.gridLayout.addWidget(pw_north, 1, 1)
pw_volume = pg.PlotWidget(title='VOLUME')
pw_volume.showGrid(x=False, y=True)
self.plt_volume = pw_volume.plot(name='volume')
self.stock_50 = pw_volume.plot(name='stock_50')
self.gridLayout.addWidget(pw_volume, 1, 2)
def update_qvix(self, df):
df = df.drop(['Pre', 'max', 'min'], axis=1)
self.plt_qvix.setData(df.index.values, df['QVIX'])
def update_north(self, df):
self.plt_north_hgt.setData(df['hgt'].astype(float) / 10000)
self.plt_north_sgt.setData(df['sgt'].astype(float) / 10000)
self.plt_north_all.setData(df['all'].astype(float) / 10000)
def update_volume(self, data, ser):
self.plt_volume.setPen(pg.mkPen('b', width=3))
self.plt_volume.setData(data.values)
self.stock_50.setData(ser)
def update_month(self, data):
data.columns = ['data', '50iv', 'data2', '300iv']
self.plt_month50.setData(data['50iv'])
self.plt_month50.setPen(pg.mkPen('r', width=2))
self.plt_month300.setData(data['300iv'])
self.plt_month300.setPen(pg.mkPen('b', width=1))
def update_iv(self, data50, data300):
data50.sort_index(inplace=True)
data50 = data50.astype(float)
data50[data50 < 1] = np.nan
self.plt_iv50_1.setData(data50.iloc[:, 0])
self.plt_iv50_2.setData(data50.iloc[:, 5])
self.plt_iv50_3.setData(data50.iloc[:, 1])
self.plt_iv50_4.setData(data50.iloc[:, 6])
self.plt_iv50_5.setData(data50.iloc[:, 2])
self.plt_iv50_6.setData(data50.iloc[:, 7])
self.plt_iv50_7.setData(data50.iloc[:, 3])
self.plt_iv50_8.setData(data50.iloc[:, 8])
data300.sort_index(inplace=True)
data300 = data300.astype(float)
data300[data300 < 1] = np.nan
self.plt_iv300_1.setData(data300.iloc[:, 0])
self.plt_iv300_2.setData(data300.iloc[:, 5])
self.plt_iv300_3.setData(data300.iloc[:, 1])
self.plt_iv300_4.setData(data300.iloc[:, 6])
self.plt_iv300_5.setData(data300.iloc[:, 2])
self.plt_iv300_6.setData(data300.iloc[:, 7])
self.plt_iv300_7.setData(data300.iloc[:, 3])
self.plt_iv300_8.setData(data300.iloc[:, 8])
def update_greek(self, gk):
text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0], gk[1], gk[2],
gk[3])
self.label_greek.setText(text)
<mask token>
| from PyQt5.QtWidgets import QPushButton,QWidget,QApplication,QGridLayout,QListWidget,QLineEdit,QVBoxLayout,QLabel
import pyqtgraph as pg
import sys
import numpy as np
from tools import DataModel,HoldPositions
from load_sina import LoadNet
import time
from get_day_histroy import history
import pandas as pd
from volume import Volume
from PyQt5.QtCore import QThread, pyqtSignal, QDateTime
class addItemThread(QThread):
update_qvix = pyqtSignal(pd.DataFrame)
update_north = pyqtSignal(pd.DataFrame)
update_vol = pyqtSignal(pd.Series,pd.Series)
update_month = pyqtSignal(pd.DataFrame)
update_iv =pyqtSignal(pd.DataFrame,pd.DataFrame)
update_greek = pyqtSignal(list)
def __init__(self,*args, **kwargs):
super(addItemThread, self).__init__(*args, **kwargs)
self.data_model =DataModel()
self.num = 0
def run(self, *args, **kwargs):
while True:
df =LoadNet().get_QVIX()
self.update_qvix.emit(df)
df_north =LoadNet().get_north()
self.update_north.emit(df_north)
df_vol ,cha= Volume().update()
data ,last = LoadNet().get_50_163()
ser = (data['current']-last)/last
self.update_vol.emit(df_vol,ser)
if not self.data_model.df_op.empty:
df_month = self.data_model.iv_month_50300()
self.update_month.emit(df_month)
df_iv50,df_iv300 = self.data_model.get_iv()
self.update_iv.emit(df_iv50,df_iv300)
hp = HoldPositions()
greek = hp.update(self.data_model.df_op)
self.update_greek.emit(greek)
time.sleep(3)
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
mthread = addItemThread()
mthread.update_qvix.connect(self.update_qvix)
mthread.update_north.connect(self.update_north)
mthread.update_vol.connect(self.update_volume)
mthread.update_month.connect(self.update_month)
mthread.update_iv.connect(self.update_iv)
mthread.update_greek.connect(self.update_greek)
mthread.start()
self.initUI()
def initUI(self):
self.setGeometry(400,400,1200,620)
self.setWindowTitle("不被仓位左右思想,没找到弱点不要重仓")
self.gridLayout = QGridLayout(self)
self.plot()
'''
buttom
'''
self.label_greek = QLabel('label_greek')
self.label_greek.setStyleSheet("background-color:rgb(250,250,250)")
self.gridLayout.addWidget(self.label_greek, 2, 0,1,3)
'''
right
'''
# wight_r = QWidget(self)
# layout_r = QVBoxLayout()
# wight_r.setLayout(layout_r)
# btn_calculated = QPushButton('计算收益')
# layout_r.addWidget(btn_calculated)
# self.gridLayout.addWidget(wight_r, 0, 3,2,1)
def plot(self):
pg.setConfigOption('background', 'w')
pg.setConfigOption('foreground', 'k')
pw_iv50 = pg.PlotWidget(title='50-IV')
self.plt_iv50_1 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv50_2 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv50_3 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv50_4 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv50_5 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv50_6 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv50_7 = pw_iv50.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.plt_iv50_8 = pw_iv50.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.gridLayout.addWidget(pw_iv50, 0, 0)
plt300 = pg.PlotWidget(title='300-IV')
self.plt_iv300_1 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv300_2 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=12,symbolBrush=(0,255,0))
self.plt_iv300_3 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv300_4 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=10,symbolBrush=(0,170,0))
self.plt_iv300_5 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv300_6 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=8,symbolBrush=(0,85,0))
self.plt_iv300_7 = plt300.plot(symbol="o",pen=pg.mkPen("r",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.plt_iv300_8 = plt300.plot(symbol="o",pen=pg.mkPen("g",width=1),symbolSize=6,symbolBrush=(0,0,0))
self.gridLayout.addWidget(plt300, 0, 1)
pw_month = pg.PlotWidget(title='MONTH-50-300-MONTH')
pw_month.showGrid(x=False,y=True)
pw_month.addLegend(offset=(30, 100))
self.plt_month50 = pw_month.plot(name="50")
self.plt_month300 = pw_month.plot(name="300")
self.gridLayout.addWidget(pw_month, 0, 2)
pw_qvix = pg.PlotWidget( title='QVIX')
pw_qvix.showGrid(x=True,y=True)
pw_qvix.addLegend()
self.plt_qvix = pw_qvix.plot(pen=pg.mkPen("d",width=4),name="iv")
self.gridLayout.addWidget(pw_qvix, 1, 0)
pw_north = pg.PlotWidget( title='NORTH')
pw_north.showGrid(x=False,y=True)
pw_north.addLegend()
self.plt_north_hgt =pw_north.plot(pen=pg.mkPen("b",width=2),name="hgt")
self.plt_north_sgt =pw_north.plot(pen=pg.mkPen("g",width=1),name="sgt")
self.plt_north_all =pw_north.plot(pen=pg.mkPen("d",width=1),name="all")
self.gridLayout.addWidget(pw_north, 1, 1)
pw_volume = pg.PlotWidget( title='VOLUME')
pw_volume.showGrid(x=False,y=True)
self.plt_volume =pw_volume.plot(name="volume")
self.stock_50 =pw_volume.plot(name="stock_50")
self.gridLayout.addWidget(pw_volume, 1, 2)
def update_qvix(self,df):
df = df.drop(['Pre','max','min'],axis=1)
self.plt_qvix.setData(df.index.values, df['QVIX'])
def update_north(self,df):
self.plt_north_hgt.setData( df['hgt'].astype(float)/10000)
self.plt_north_sgt.setData( df['sgt'].astype(float)/10000)
self.plt_north_all.setData(df['all'].astype(float)/10000)
def update_volume(self,data,ser):
self.plt_volume.setPen(pg.mkPen("b",width=3))
self.plt_volume.setData(data.values)
self.stock_50.setData(ser)
def update_month(self,data):
data.columns=['data','50iv','data2','300iv']
self.plt_month50.setData(data['50iv'])
self.plt_month50.setPen(pg.mkPen("r",width=2))
self.plt_month300.setData(data['300iv'])
self.plt_month300.setPen(pg.mkPen("b",width=1))
def update_iv(self,data50,data300):
data50.sort_index(inplace=True)
data50 = data50.astype(float)
data50[data50<1]=np.nan
self.plt_iv50_1.setData(data50.iloc[:,0])
self.plt_iv50_2.setData(data50.iloc[:,5])
self.plt_iv50_3.setData(data50.iloc[:,1])
self.plt_iv50_4.setData(data50.iloc[:,6])
self.plt_iv50_5.setData(data50.iloc[:,2])
self.plt_iv50_6.setData(data50.iloc[:,7])
self.plt_iv50_7.setData(data50.iloc[:,3])
self.plt_iv50_8.setData(data50.iloc[:,8])
data300.sort_index(inplace=True)
data300 = data300.astype(float)
data300[data300<1]=np.nan
self.plt_iv300_1.setData(data300.iloc[:,0])
self.plt_iv300_2.setData(data300.iloc[:,5])
self.plt_iv300_3.setData(data300.iloc[:,1])
self.plt_iv300_4.setData(data300.iloc[:,6])
self.plt_iv300_5.setData(data300.iloc[:,2])
self.plt_iv300_6.setData(data300.iloc[:,7])
self.plt_iv300_7.setData(data300.iloc[:,3])
self.plt_iv300_8.setData(data300.iloc[:,8])
def update_greek(self,gk):
text = 'DELTA:{}GAMMA:{}VEGA:{}THETA:{}'.format(gk[0],gk[1],gk[2],gk[3])
self.label_greek.setText(text)
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
ex.show()
    sys.exit(app.exec_()) | [11, 12, 13, 14, 17] |
1,304 | c3de6cd76ca7180a1a4d236bb2a6a18f7594f38b | <mask token>
| <mask token>
for i in range(3):
numList[i] = int(sys.stdin.readline())
<mask token>
for i in intList:
print(resultList.count(str(i)))
| <mask token>
numList = list(range(3))
for i in range(3):
numList[i] = int(sys.stdin.readline())
result = numList[0] * numList[1] * numList[2]
resultList = list(str(result))
intList = list(range(10))
for i in intList:
print(resultList.count(str(i)))
| import sys
numList = list(range(3))
for i in range(3):
numList[i] = int(sys.stdin.readline())
result = numList[0] * numList[1] * numList[2]
resultList = list(str(result))
intList = list(range(10))
for i in intList:
print(resultList.count(str(i)))
 | null | [0, 1, 2, 3] |
1,305 | 8d8ea6ad7a3ed1a1e6e96ab75260ecf6e8211d32 | <mask token>
| <mask token>
st.merge()
st.detrend(type='demean')
st.remove_response()
st.filter('bandpass', freqmin=F1, freqmax=F2, corners=4)
st.trim(t1, t2)
<mask token>
plt.suptitle(LABEL)
<mask token>
ax.plot(st[0].times(reftime=orig_time), st[0].data * 1000, linewidth=0.2,
color='darkred')
<mask token>
for phase in PHASES:
phase = [phase]
tt = model.get_travel_times(source_depth_in_km=EVT_Z,
distance_in_degree=dist, phase_list=phase)
ax.vlines(tt[0].time, ymin, ymax, color='blue', linewidth=1.2, zorder=3,
linestyle='--', alpha=0.5)
ax.text(tt[0].time * 1.02, ymax, phase[0], fontsize=12,
horizontalalignment='left', verticalalignment='top')
ax.set_xlabel('Time after earthquake (s)')
ax.set_title("""{:}.{:}.{:}.{:}
Bandpass filter: {:}-{:} Hz""".format(st[0]
.stats.network, st[0].stats.station, st[0].stats.location, st[0].stats.
channel, F1, F2))
ax.set_ylabel('Ground velocity (mm/s)')
<mask token>
ax3.set_title('Epicentral distance: {:3.1f}$^\\circ$'.format(dist))
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig('traces.png')
plt.show()
| <mask token>
NETWORK = 'AM'
STATION = 'RAEBE'
CHANNEL = 'EHZ'
EQ_TIME = '2020-01-07T08:24:26'
T_START = 0
T_END = 1250
PHASES = ['P', 'S']
EVT_LAT = 17.916
EVT_LON = -66.813
EVT_Z = 10
STA_LAT = 51.33
STA_LON = -0.49
F1 = 0.3
F2 = 0.7
LABEL = 'M 6.4 Puerto Rico'
MODEL = 'iasp91'
client = Client('http://fdsnws.raspberryshakedata.com')
orig_time = UTCDateTime(EQ_TIME)
t1 = orig_time - T_START
t2 = orig_time + T_END
st = client.get_waveforms(NETWORK, STATION, '00', CHANNEL, starttime=t1,
endtime=t2, attach_response=True)
st.merge()
st.detrend(type='demean')
st.remove_response()
st.filter('bandpass', freqmin=F1, freqmax=F2, corners=4)
st.trim(t1, t2)
fig = plt.figure(figsize=(12, 8))
plt.suptitle(LABEL)
ax = plt.subplot(121)
dist = locations2degrees(EVT_LAT, EVT_LON, STA_LAT, STA_LON)
model = TauPyModel(model=MODEL)
ax.plot(st[0].times(reftime=orig_time), st[0].data * 1000, linewidth=0.2,
color='darkred')
ymin, ymax = ax.get_ylim()
for phase in PHASES:
phase = [phase]
tt = model.get_travel_times(source_depth_in_km=EVT_Z,
distance_in_degree=dist, phase_list=phase)
ax.vlines(tt[0].time, ymin, ymax, color='blue', linewidth=1.2, zorder=3,
linestyle='--', alpha=0.5)
ax.text(tt[0].time * 1.02, ymax, phase[0], fontsize=12,
horizontalalignment='left', verticalalignment='top')
ax.set_xlabel('Time after earthquake (s)')
ax.set_title("""{:}.{:}.{:}.{:}
Bandpass filter: {:}-{:} Hz""".format(st[0]
.stats.network, st[0].stats.station, st[0].stats.location, st[0].stats.
channel, F1, F2))
ax.set_ylabel('Ground velocity (mm/s)')
ax2 = plt.subplot(122, projection='polar')
arrivals = model.get_ray_paths(source_depth_in_km=EVT_Z, distance_in_degree
=dist, phase_list=PHASES)
ax3 = arrivals.plot_rays(phase, legend=False, ax=ax2, show=False,
label_arrivals=True)
ax3.set_title('Epicentral distance: {:3.1f}$^\\circ$'.format(dist))
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig('traces.png')
plt.show()
| <mask token>
from obspy.clients.fdsn import Client
from obspy import UTCDateTime
from obspy.taup import TauPyModel
from obspy.geodetics.base import locations2degrees
import matplotlib.pyplot as plt
NETWORK = 'AM'
STATION = 'RAEBE'
CHANNEL = 'EHZ'
EQ_TIME = '2020-01-07T08:24:26'
T_START = 0
T_END = 1250
PHASES = ['P', 'S']
EVT_LAT = 17.916
EVT_LON = -66.813
EVT_Z = 10
STA_LAT = 51.33
STA_LON = -0.49
F1 = 0.3
F2 = 0.7
LABEL = 'M 6.4 Puerto Rico'
MODEL = 'iasp91'
client = Client('http://fdsnws.raspberryshakedata.com')
orig_time = UTCDateTime(EQ_TIME)
t1 = orig_time - T_START
t2 = orig_time + T_END
st = client.get_waveforms(NETWORK, STATION, '00', CHANNEL, starttime=t1,
endtime=t2, attach_response=True)
st.merge()
st.detrend(type='demean')
st.remove_response()
st.filter('bandpass', freqmin=F1, freqmax=F2, corners=4)
st.trim(t1, t2)
fig = plt.figure(figsize=(12, 8))
plt.suptitle(LABEL)
ax = plt.subplot(121)
dist = locations2degrees(EVT_LAT, EVT_LON, STA_LAT, STA_LON)
model = TauPyModel(model=MODEL)
ax.plot(st[0].times(reftime=orig_time), st[0].data * 1000, linewidth=0.2,
color='darkred')
ymin, ymax = ax.get_ylim()
for phase in PHASES:
phase = [phase]
tt = model.get_travel_times(source_depth_in_km=EVT_Z,
distance_in_degree=dist, phase_list=phase)
ax.vlines(tt[0].time, ymin, ymax, color='blue', linewidth=1.2, zorder=3,
linestyle='--', alpha=0.5)
ax.text(tt[0].time * 1.02, ymax, phase[0], fontsize=12,
horizontalalignment='left', verticalalignment='top')
ax.set_xlabel('Time after earthquake (s)')
ax.set_title("""{:}.{:}.{:}.{:}
Bandpass filter: {:}-{:} Hz""".format(st[0]
.stats.network, st[0].stats.station, st[0].stats.location, st[0].stats.
channel, F1, F2))
ax.set_ylabel('Ground velocity (mm/s)')
ax2 = plt.subplot(122, projection='polar')
arrivals = model.get_ray_paths(source_depth_in_km=EVT_Z, distance_in_degree
=dist, phase_list=PHASES)
ax3 = arrivals.plot_rays(phase, legend=False, ax=ax2, show=False,
label_arrivals=True)
ax3.set_title('Epicentral distance: {:3.1f}$^\\circ$'.format(dist))
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig('traces.png')
plt.show()
| #!/usr/bin/env python
"""
Script to download and plot RaspberryShake station data
Also computes and plots theoretical phase arrival times and raypaths.
See https://docs.obspy.org/packages/obspy.taup.html for more info on
Earth models and phase-nmaing nomenclature.
Stephen Hicks
Imperial College London
Feb 2020
"""
from obspy.clients.fdsn import Client
from obspy import UTCDateTime
from obspy.taup import TauPyModel
from obspy.geodetics.base import locations2degrees
import matplotlib.pyplot as plt
# Start of parameters to define
NETWORK = "AM" # AM = RaspberryShake network
STATION = "RAEBE" # Station code of station to get data for
CHANNEL = "EHZ" # channel to grab data for (e.g. EHZ, SHZ, EHE, EHN)
EQ_TIME = "2020-01-07T08:24:26" # origin time of earthquake
T_START = 0 # Length in seconds of data to plot before origin time
T_END = 1250 # Length in seconds of data to plot after origin time
PHASES = ["P", "S"] # list of phases to compute theoretical times for
EVT_LAT = 17.916 # Latitude of event
EVT_LON = -66.813 # Longitude of event
EVT_Z = 10 # Depth of event
STA_LAT = 51.33 # Latitude of station
STA_LON = -0.49 # Longitude of station
F1 = 0.3 # High-pass filter corner
F2 = 0.7 # Low-pass filter corner
LABEL = "M 6.4 Puerto Rico" # Title to plot on figure
MODEL = 'iasp91' # Velocity model to predict travel-times through
# End of parameters to define
# Define fdsn client to get data from
client = Client('http://fdsnws.raspberryshakedata.com')
# Define start and end time
orig_time = UTCDateTime(EQ_TIME)
t1 = orig_time - T_START
t2 = orig_time + T_END
# Download and filter data
st = client.get_waveforms(NETWORK, STATION, "00", CHANNEL,
starttime=t1, endtime=t2, attach_response=True)
st.merge()
st.detrend(type="demean")
st.remove_response()
st.filter("bandpass", freqmin=F1, freqmax=F2, corners=4)
st.trim(t1, t2)
# Set-up figure
fig = plt.figure(figsize=(12, 8))
plt.suptitle(LABEL)
ax = plt.subplot(121)
# Set-up taup travel-time model
dist = locations2degrees(EVT_LAT, EVT_LON, STA_LAT, STA_LON)
model = TauPyModel(model=MODEL)
# Now plot the waveform data
ax.plot(st[0].times(reftime=orig_time), st[0].data*1000, linewidth=0.2,
color="darkred")
ymin, ymax = ax.get_ylim()
# Now plot the theoretical arrival times
for phase in PHASES:
phase = [phase]
tt = model.get_travel_times(source_depth_in_km=EVT_Z,
distance_in_degree=dist,
phase_list=phase)
ax.vlines(tt[0].time, ymin, ymax, color="blue",
linewidth=1.2, zorder=3, linestyle="--", alpha=0.5)
ax.text(tt[0].time*1.02, ymax, phase[0], fontsize=12,
horizontalalignment="left", verticalalignment="top")
ax.set_xlabel("Time after earthquake (s)")
ax.set_title("{:}.{:}.{:}.{:}\nBandpass filter: {:}-{:} Hz".format(
st[0].stats.network, st[0].stats.station, st[0].stats.location,
st[0].stats.channel, F1, F2))
ax.set_ylabel("Ground velocity (mm/s)")
# Now plot the raypaths through the Earth
ax2 = plt.subplot(122, projection='polar')
arrivals = model.get_ray_paths(
source_depth_in_km=EVT_Z, distance_in_degree=dist,
phase_list=PHASES)
ax3 = arrivals.plot_rays(phase, legend=False, ax=ax2, show=False,
label_arrivals=True)
ax3.set_title("Epicentral distance: {:3.1f}$^\circ$".format(dist))
# Save and plot the figure
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig("traces.png")
plt.show()
 | [0, 1, 2, 3, 4] |
1,306 | 85c2a4163a3132794186b95b4068f6c6e1104828 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('cms', '0020_old_tree_cleanup'), ('styleguide',
'0002_flexcontainer')]
operations = [migrations.CreateModel(name='ContentSection', fields=[(
'cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=
django.db.models.deletion.CASCADE, parent_link=True, primary_key=
True, related_name='styleguide_contentsection', serialize=False, to
='cms.CMSPlugin')), ('background_color', models.CharField(choices=[
('navy', '#1c2532'), ('light', '#f3f4f5'), ('white', '#ffffff')],
default='white', max_length=20))], options={'abstract': False},
bases=('cms.cmsplugin',)), migrations.AlterField(model_name=
'flexcontainer', name='spacing', field=models.CharField(choices=[(
'flex-start', 'flex-start'), ('flex-end', 'flex-end'), ('center',
'center'), ('space-between', 'space-between'), ('space-around',
'space-around')], default='flex-start', max_length=13))]
| from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('cms', '0020_old_tree_cleanup'), ('styleguide',
'0002_flexcontainer')]
operations = [migrations.CreateModel(name='ContentSection', fields=[(
'cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=
django.db.models.deletion.CASCADE, parent_link=True, primary_key=
True, related_name='styleguide_contentsection', serialize=False, to
='cms.CMSPlugin')), ('background_color', models.CharField(choices=[
('navy', '#1c2532'), ('light', '#f3f4f5'), ('white', '#ffffff')],
default='white', max_length=20))], options={'abstract': False},
bases=('cms.cmsplugin',)), migrations.AlterField(model_name=
'flexcontainer', name='spacing', field=models.CharField(choices=[(
'flex-start', 'flex-start'), ('flex-end', 'flex-end'), ('center',
'center'), ('space-between', 'space-between'), ('space-around',
'space-around')], default='flex-start', max_length=13))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-27 21:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('cms', '0020_old_tree_cleanup'),
('styleguide', '0002_flexcontainer'),
]
operations = [
migrations.CreateModel(
name='ContentSection',
fields=[
('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='styleguide_contentsection', serialize=False, to='cms.CMSPlugin')),
('background_color', models.CharField(choices=[('navy', '#1c2532'), ('light', '#f3f4f5'), ('white', '#ffffff')], default='white', max_length=20)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
migrations.AlterField(
model_name='flexcontainer',
name='spacing',
field=models.CharField(choices=[('flex-start', 'flex-start'), ('flex-end', 'flex-end'), ('center', 'center'), ('space-between', 'space-between'), ('space-around', 'space-around')], default='flex-start', max_length=13),
),
]
 | [0, 1, 2, 3, 4] |
1,307 | 38f9cddfde4787ead2314fc70c1f4d91a3da9687 | <mask token>
class TemplateParser:
<mask token>
<mask token>
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
<mask token>
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
| <mask token>
class TemplateParser:
<mask token>
<mask token>
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
| <mask token>
class TemplateParser:
"""Parser for templates, using jinja2 and Faker"""
fake = None
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
| <mask token>
from datetime import datetime
from jinja2 import Template
from faker import Faker
from faker.providers.internet import Provider as InternetProvider
from ..providers.file_data_source_provider import FileDataSourceProvider
from ..providers.numbers_provider import NumbersProvider
class TemplateParser:
"""Parser for templates, using jinja2 and Faker"""
fake = None
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = (TemplateParser.null_date_generator if
date_generator is None else date_generator)
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator, next=next, **self.providers, **
kwargs)
| # -*- coding: utf-8 -*-
"""Template parser for Faker"""
from datetime import datetime
from jinja2 import Template
from faker import Faker
from faker.providers.internet import Provider as InternetProvider
from ..providers.file_data_source_provider import FileDataSourceProvider
from ..providers.numbers_provider import NumbersProvider
class TemplateParser:
"""Parser for templates, using jinja2 and Faker"""
fake = None
def __init__(self, template=None, providers=None, date_generator=None):
self.fake = Faker()
self.fake.add_provider(FileDataSourceProvider)
self.fake.add_provider(NumbersProvider)
# Ips networks emails etc..
self.fake.add_provider(InternetProvider)
self.template = template
self.providers = {} if providers is None else providers
self.date_generator = TemplateParser.null_date_generator \
if date_generator is None else date_generator
@staticmethod
def null_date_generator():
"""Generate now date"""
return str(datetime.now())
def process(self, date_generator=None, **kwargs):
"""Procces template, parsing it"""
template = Template(self.template)
if date_generator is None:
date_generator = self.date_generator
# Only the passed objects will be accessible from the template
# the next built-in needs to be passed for next(date_generator) to work
return template.render(fake=self.fake, datetime=datetime,
date_generator=date_generator,
next=next, **self.providers, **kwargs)
 | [3, 4, 6, 7, 8] |
1,308 | 13da16ba89e4743b12d9b8e24929864747f8bbf2 | <mask token>
| <mask token>
class ModD(Soppa):
<mask token>
<mask token>
| <mask token>
class ModD(Soppa):
needs = ['test_project.modf']
something = 1
| from soppa.contrib import *
class ModD(Soppa):
needs = ['test_project.modf']
something = 1
 | null | [0, 1, 2, 3] |
1,309 | 3eb071fa826c838d847e3f97abe3b706760a1336 | '''
Faraday Penetration Test IDE
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
"""
This module contains some useful functions to embedd an IPython shell.
This allows to interactively test things.
TODO: create a QT Widget capable of running the IPython shell without
blocking the entire app. Kind of the http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
"""
import traceback
import model.api
IPYTHON_BANNER = "\n".join(["-"*45,
"Starting embedded IPython Shell...",
"Press CTRL + D to exit.",
"-"*45])
IPYTHON_EXIT_MSG = "\n".join(["-"*45,
"Exiting IPython Shell...",
"Returning normal execution.",
"-"*45])
__ipython_active = False
def embedd_ipython011(local_ns={}, global_ns={}):
from IPython.config.loader import Config
from IPython.frontend.terminal.embed import InteractiveShellEmbed
cfg = Config()
ipshell = InteractiveShellEmbed(config=cfg,
banner1 = IPYTHON_BANNER,
exit_msg = IPYTHON_EXIT_MSG)
ipshell(local_ns=local_ns, global_ns=global_ns)
def embedd_ipython010(local_ns={}, global_ns={}):
from IPython.Shell import IPShellEmbed
ipshell = IPShellEmbed( [""],
banner = IPYTHON_BANNER,
exit_msg = IPYTHON_EXIT_MSG
)
ipshell(local_ns=local_ns, global_ns=global_ns)
def embedd(local_ns={}, global_ns={}):
global __ipython_active
if __ipython_active:
return
__ipython_active = True
try:
import IPython
version = IPython.__version__.split(".")[1]
if int(version) > 10:
embedd_ipython011(local_ns, global_ns)
else:
embedd_ipython010(local_ns, global_ns)
except Exception, e:
msg = "An error ocurred while trying to embedd the IPython Shell\n%s"
model.api.log(msg % str(e), "ERROR")
model.api.devlog(msg % traceback.format_exc())
finally:
__ipython_active = False
def embeddQT(local_ns={}, global_ns={}):
global __ipython_active
if __ipython_active:
return
__ipython_active = True
try:
from IPython.Shell import IPShellQt
ipshell = IPShellQt( [""],
user_ns=local_ns,
user_global_ns=global_ns
)
ipshell.run()
except Exception:
model.api.devlog("An error ocurred while trying to embedd the IPython Shell\n%s" % traceback.format_exc())
finally:
__ipython_active = False
| null | null | null | null | [
0
] |
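A tiny call-site sketch for the embedding helpers in the row above; it assumes embedd is imported from this module and follows the code's Python 2 style. The debug_here name and the workspace variable are made up for illustration.

# Hypothetical: pause execution and drop into IPython with a chosen namespace.
def debug_here(workspace):
    embedd(local_ns={"workspace": workspace}, global_ns=globals())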
1,310 | 723d8819b5341f1397163533f59c17ba1a74b77d | """
get poly data (face center, face id, etc.), select faces, create objects from face data
setPosition for vertex (random)
import sys
module_path = '/home/shrimo/Desktop/course/git/vfx_dev/maya/general_lesson'
if module_path not in sys.path:
sys.path.append(module_path)
import lesson_v01
reload(lesson_v01)
lesson_v01.start()
"""
import maya.cmds as cmds
import maya.api.OpenMaya as om2
import random
class Face():
def __init__(self, shape, face_index, vertex, center):
self.face_path = '{shape}.f[{index}]'.format(
shape=shape,
index=face_index)
self.vertex = vertex
self.face_index = face_index
self.face_center = center
def get_shapes():
# get selected object
# print(cmds.ls())
# print(cmds.ls(selection=True))
return cmds.ls(selection=True, shapes=True, dagObjects=True)
def get_faces(shapes):
# cmds.select(clear=True)
# print(shapes)
face_data = []
for shape in shapes:
mSel = om2.MSelectionList()
mSel.add(shape)
mDagPath, mObj = mSel.getComponent(0)
geo = om2.MItMeshPolygon(mDagPath, mObj)
while not geo.isDone():
center = geo.center()
print 'face index: {}'.format(geo.index())
vertices = []
for i in geo.getPoints(om2.MSpace.kWorld):
vertices.append((i[0], i[1], i[2]))
face_in = Face(shape, geo.index(), vertices, center)
face_data.append(face_in)
geo.next(0)
return face_data
def get_vertex(shapes):
vertex_data = []
spc = om2.MSpace.kWorld
for shape in shapes:
mSel = om2.MSelectionList()
mSel.add(shape)
mDagPath, mObj = mSel.getComponent(0)
vtx = om2.MItMeshVertex(mDagPath, mObj)
while not vtx.isDone():
vtx_pos = vtx.position(spc)
print 'vertex index: {}'.format(vtx.index()), vtx_pos
face_in = Face(shape, vtx.index(), vtx_pos, None)
vertex_data.append(face_in)
vtx.next()
return vertex_data
def set_pos_vertex(shapes, up_y):
spc = om2.MSpace.kWorld
for shape in shapes:
mSel = om2.MSelectionList()
mSel.add(shape)
mDagPath, mObj = mSel.getComponent(0)
vtx = om2.MItMeshVertex(mDagPath, mObj)
while not vtx.isDone():
vtx_pos = vtx.position(spc)
print 'vertex:'+str(vtx.index()), vtx_pos.y
if vtx.index() & 1:
vtx_pos.y += up_y
vtx.setPosition(vtx_pos, spc)
vtx.next()
vtx.updateSurface()
def set_random_vertex(shapes, up_y):
spc = om2.MSpace.kWorld
for shape in shapes:
mSel = om2.MSelectionList()
mSel.add(shape)
mDagPath, mObj = mSel.getComponent(0)
vtx = om2.MItMeshVertex(mDagPath, mObj)
while not vtx.isDone():
vtx_pos = vtx.position(spc)
print 'vertex:'+str(vtx.index()), vtx_pos.y
vtx_pos.z += random.uniform(0, up_y)
vtx.setPosition(vtx_pos, spc)
vtx.next()
vtx.updateSurface()
def create_boxes(shapes, group_name, shape_name, on_face):
if on_face:
face_data = get_faces(shapes)
else:
face_data = get_vertex(shapes)
cmds.group(em=True, name=group_name)
for face in face_data:
# print(face.face_index, face.face_path, face.face_center)
if face.face_index & 1:
cmds.select(face.face_path, add=True)
p_name = shape_name + str(face.face_index)
            cmds.polyCube(n=p_name)  # create a polyCube named shape_name + face index
cmds.setAttr(p_name+'.scale', 0.3, 0.3, 0.3)
if on_face:
cmds.setAttr(
p_name+'.translate', face.face_center[0], face.face_center[1], face.face_center[2])
else:
cmds.setAttr(p_name+'.translate', face.vertex.x,
face.vertex.y, face.vertex.z)
cmds.select(all=True)
cmds.parent(p_name, group_name)
# cmds.group(p_name, parent=group_name)
cmds.select(all=True)
def start():
# shapes = cmds.ls(selection=True, shapes=True, dagObjects=True)
# set_pos_vertex(get_shapes(), 1)
# set_random_vertex(get_shapes(), 1)
create_boxes(get_shapes(), 'boxes', 'v_', 0)
| null | null | null | null | [
0
] |
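A small, hypothetical variation on the start() helper in the Maya row above: raise alternating vertices, then place cubes on face centers instead of vertices. The group name 'face_boxes' and prefix 'f_' are arbitrary, and it assumes a polygon object is selected in the scene and that the module path has been added to sys.path as shown in the row's docstring.

# Hypothetical usage inside Maya's script editor (Python 2 era, matching the code).
import lesson_v01
reload(lesson_v01)
shapes = lesson_v01.get_shapes()
lesson_v01.set_pos_vertex(shapes, 0.5)   # lift every other vertex by 0.5
lesson_v01.create_boxes(shapes, 'face_boxes', 'f_', 1)  # on_face=1 -> face centers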
1,311 | 325efe65030ad3488a7fc45c0d4a289eb0b17196 | <mask token>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
<mask token>
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
<mask token>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
<mask token>
<mask token>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
<mask token>
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
<mask token>
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
<mask token>
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
<mask token>
<mask token>
<mask token>
<mask token>
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
<mask token>
<mask token>
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<mask token>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
<mask token>
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
<mask token>
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
| <mask token>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
<mask token>
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
<mask token>
<mask token>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
<mask token>
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
<mask token>
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
<mask token>
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
<mask token>
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
<mask token>
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
def testGetStepLogNotJosonLoadable(self, mocked_log, *_):
self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
<mask token>
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<mask token>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
<mask token>
<mask token>
<mask token>
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
self.assertEqual(None, step_util.GetStepMetadata(123,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
<mask token>
<mask token>
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
<mask token>
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
<mask token>
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
<mask token>
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
| <mask token>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
<mask token>
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
<mask token>
<mask token>
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):
upper_bound_build_number = 4
lower_bound_build_number = 1
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, upper_bound_build_number, 20)
self.assertEqual(20, lower_bound.commit_position)
self.assertEqual(20, upper_bound.commit_position)
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=False)
def testStepNotSupportedByFindit(self, _):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'step', 'm'))
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditGtests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(
None), 'browser_tests', 'm'))
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=
'log_stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.
dumps(wf_testcase.SAMPLE_STEP_METADATA))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
def testLegacyGetStepMetadata(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)
def testLegacyGetStepMetadataStreamNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
def testGetStepLogNotJosonLoadable(self, mocked_log, *_):
self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
@mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)
def testGetStepLogForLuciBuildError(self, _):
self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',
None))
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<mask token>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=
{'canonical_step_name': 'unsupported_step1'})
def testStepIsSupportedForMaster(self, _):
master_name = 'master1'
builder_name = 'b'
build_number = 123
step_name = 'unsupported_step1 on master1'
self.assertFalse(step_util.StepIsSupportedForMaster(master_name,
builder_name, build_number, step_name))
<mask token>
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog')
def testLegacyGetStepMetadataCached(self, mock_fn):
mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]
self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',
'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
self.assertEqual(None, step_util.GetStepMetadata(123,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'canonical_step_name': 'step_name'})
def testLegacyGetCanonicalStep(self, _):
self.assertEqual('step_name', step_util.LegacyGetCanonicalStepName(
'm', 'b', 200, 'step_name on a platform'))
@parameterized.expand([({'canonical_step_name': 'step_name'},
'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepName(self, step_metadata,
expected_canonical_step, mocked_get_step):
mocked_get_step.return_value = step_metadata
self.assertEqual(expected_canonical_step, step_util.
GetCanonicalStepName(123, 'step_name (with patch)'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
<mask token>
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'a': 'b'})
def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
<mask token>
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
| <mask token>
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100,
200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600,
500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(build_id - 1, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(build_id, luci_name,
bucket_name, master_name, builder_name, gitiles_host,
gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(master_name, builder_name,
target_name, requested_commit_position))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertEqual(valid_build_102, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info
):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_101, valid_build_102]
self.assertIsNone(step_util.GetValidBuild(master_name, builder_name,
100, step_name, True, 1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [invalid_build_100,
invalid_build_99, valid_build_98]
self.assertEqual(valid_build_98, step_util.GetValidBuild(
master_name, builder_name, 100, step_name, True, 2))
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(self,
*_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertEqual(upper_bound_build_number, lower_bound.build_number)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_
):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
@mock.patch.object(swarming, 'CanFindSwarmingTaskFromBuildForAStep',
return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):
upper_bound_build_number = 4
lower_bound_build_number = 1
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep('m',
'b', 's', lower_bound_build_number, upper_bound_build_number, 20)
self.assertEqual(20, lower_bound.commit_position)
self.assertEqual(20, upper_bound.commit_position)
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=False)
def testStepNotSupportedByFindit(self, _):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'step', 'm'))
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
@mock.patch.object(waterfall_config, 'StepIsSupportedForMaster',
return_value=True)
def testIsStepSupportedByFinditGtests(self, _):
self.assertTrue(step_util.IsStepSupportedByFindit(GtestTestResults(
None), 'browser_tests', 'm'))
@parameterized.expand([({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': wf_testcase.
SAMPLE_STEP_METADATA, 'expected_step_metadata': wf_testcase.
SAMPLE_STEP_METADATA},), ({'step_log_return': None,
'expected_step_metadata': None},), ({'step_log_return': None,
'expected_step_metadata': None},)])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=
'log_stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=json.
dumps(wf_testcase.SAMPLE_STEP_METADATA))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
def testLegacyGetStepMetadata(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)
def testLegacyGetStepMetadataStreamNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, '_GetAnnotationsProtoForPath',
return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=
'log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual('log1/nlog2', step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None))
@mock.patch.object(build_util, 'DownloadBuildData', return_value=
MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
def testGetStepLogNotJosonLoadable(self, mocked_log, *_):
self.assertIsNone(step_util.GetWaterfallBuildStepLog('m', 'b', 123,
's', None, 'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
@mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)
def testGetStepLogForLuciBuildError(self, _):
self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's',
None))
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build,
mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(step_util.GetStepLogForLuciBuild(build_id, 's',
None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
<mask token>
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log')
)
@parameterized.expand([(True, 'step_name', 'view_url',
'view_url_partial_match'), (False, 'step_name', 'view_url', None)])
def testGetStepLogViewUrlPartialMatching(self, partial_match,
full_step_name, expected_url_in_build1, expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(expected_url_in_build1, step_util.
_GetStepLogViewUrl(mock_build1, full_step_name, 'log',
partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(expected_url_in_build2, step_util.
_GetStepLogViewUrl(mock_build2, full_step_name, 'log',
partial_match=partial_match))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog', return_value=
{'canonical_step_name': 'unsupported_step1'})
def testStepIsSupportedForMaster(self, _):
master_name = 'master1'
builder_name = 'b'
build_number = 123
step_name = 'unsupported_step1 on master1'
self.assertFalse(step_util.StepIsSupportedForMaster(master_name,
builder_name, build_number, step_name))
def testStepIsSupportedForMasterCompile(self):
master_name = 'm'
builder_name = 'b'
build_number = 123
step_name = 'compile'
self.assertTrue(step_util.StepIsSupportedForMaster(master_name,
builder_name, build_number, step_name))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog')
def testLegacyGetStepMetadataCached(self, mock_fn):
mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]
self.assertEqual('invalid', step_util.LegacyGetStepMetadata('m',
'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
LegacyGetStepMetadata('m', 'b', 201, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
self.assertEqual(None, step_util.GetStepMetadata(123,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({'canonical_step_name': 'step_name'}, step_util.
GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'canonical_step_name': 'step_name'})
def testLegacyGetCanonicalStep(self, _):
self.assertEqual('step_name', step_util.LegacyGetCanonicalStepName(
'm', 'b', 200, 'step_name on a platform'))
@parameterized.expand([({'canonical_step_name': 'step_name'},
'step_name'), (None, 'step_name'), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepName(self, step_metadata,
expected_canonical_step, mocked_get_step):
mocked_get_step.return_value = step_metadata
self.assertEqual(expected_canonical_step, step_util.
GetCanonicalStepName(123, 'step_name (with patch)'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'isolate_target_name': 'browser_tests'})
def testLegacyGetIsolateTargetName(self, _):
self.assertEqual('browser_tests', step_util.
LegacyGetIsolateTargetName('m', 'b', 200,
'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value={
'a': 'b'})
def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):
self.assertEqual(None, step_util.LegacyGetIsolateTargetName('m',
'b', 200, 'viz_browser_tests (with patch) on Android'))
@parameterized.expand([({'isolate_target_name': 'isolate_target'},
'isolate_target'), (None, None), ({'a': 'b'}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata,
expected_isolate_target, mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target, step_util.
GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform, step_util.GetOS(123,
'builder_name', 'step_name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOSPartialMatch(self, mock_get_step_metadata):
step_util.GetOS(123, 'builder_name', 'step_name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetOS(123, 'builder_name', 'step_name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(step_util, 'GetStepMetadata', return_value=
wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform', step_util.GetOS(123, 'builder_name',
'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time), step_util.
GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
| # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import logging
import mock
from parameterized import parameterized
from buildbucket_proto import common_pb2
from buildbucket_proto.build_pb2 import Build
from buildbucket_proto.step_pb2 import Step
from common.waterfall import buildbucket_client
from infra_api_clients import logdog_util
from libs.test_results.gtest_test_results import GtestTestResults
from libs.test_results.webkit_layout_test_results import WebkitLayoutTestResults
from model.isolated_target import IsolatedTarget
from model.wf_build import WfBuild
from services import step_util
from services import swarming
from waterfall import build_util
from waterfall import waterfall_config
from waterfall.build_info import BuildInfo
from waterfall.test import wf_testcase
class MockWaterfallBuild(object):
def __init__(self):
self.build_id = None
self.log_location = 'logdog://logs.chromium.org/chromium/buildbucket/path'
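# Fake build_util.GetBuildInfo used by the bounding-build tests below: builds
# 0-4 report INFRA_FAILURE, later builds report SUCCESS, and every build's
# commit position is (build_number + 1) * 10.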
def _MockedGetBuildInfo(master_name, builder_name, build_number):
build = BuildInfo(master_name, builder_name, build_number)
build.commit_position = (build_number + 1) * 10
build.result = (
common_pb2.SUCCESS if build_number > 4 else common_pb2.INFRA_FAILURE)
return build
class StepUtilTest(wf_testcase.WaterfallTestCase):
def testGetLowerBoundBuildNumber(self):
self.assertEqual(5, step_util._GetLowerBoundBuildNumber(5, 100))
self.assertEqual(50, step_util._GetLowerBoundBuildNumber(None, 100, 200))
self.assertEqual(100, step_util._GetLowerBoundBuildNumber(None, 600, 500))
def testGetBoundingIsolatedTargets(self):
lower_bound_commit_position = 1000
upper_bound_commit_position = 1010
requested_commit_position = 1005
build_id = 10000
target_name = 'browser_tests'
master_name = 'm'
builder_name = 'b'
luci_name = 'chromium'
bucket_name = 'ci'
gitiles_host = 'chromium.googlesource.com'
gitiles_project = 'chromium/src'
gitiles_ref = 'refs/heads/master'
gerrit_patch = ''
lower_bound_revision = 'r1000'
upper_bound_revision = 'r1010'
lower_bound_target = IsolatedTarget.Create(
build_id - 1, luci_name, bucket_name, master_name, builder_name,
gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_1', lower_bound_commit_position, lower_bound_revision)
lower_bound_target.put()
upper_bound_target = IsolatedTarget.Create(
build_id, luci_name, bucket_name, master_name, builder_name,
gitiles_host, gitiles_project, gitiles_ref, gerrit_patch, target_name,
'hash_2', upper_bound_commit_position, upper_bound_revision)
upper_bound_target.put()
self.assertEqual((lower_bound_target, upper_bound_target),
step_util.GetBoundingIsolatedTargets(
master_name, builder_name, target_name,
requested_commit_position))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingWithinRange(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_101,
valid_build_102,
]
self.assertEqual(
valid_build_102,
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
2))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchAscendingOutOfRange(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_101 = BuildInfo(master_name, builder_name, 101)
valid_build_102 = BuildInfo(master_name, builder_name, 102)
valid_build_102.commit_position = 1020
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_101,
valid_build_102,
]
self.assertIsNone(
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
1))
@mock.patch.object(build_util, 'GetBuildInfo')
def testGetValidBuildSearchDescending(self, mocked_get_build_info):
master_name = 'm'
builder_name = 'b'
step_name = 's'
invalid_build_100 = BuildInfo(master_name, builder_name, 100)
invalid_build_99 = BuildInfo(master_name, builder_name, 99)
valid_build_98 = BuildInfo(master_name, builder_name, 98)
valid_build_98.commit_position = 980
mocked_get_build_info.side_effect = [
invalid_build_100,
invalid_build_99,
valid_build_98,
]
self.assertEqual(
valid_build_98,
step_util.GetValidBuild(master_name, builder_name, 100, step_name, True,
2))
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepExactMatch(self, *_):
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', 0, 100, 30)
self.assertEqual(1, lower_bound.build_number)
self.assertEqual(2, upper_bound.build_number)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuild(self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertEqual(lower_bound_build_number, upper_bound.build_number)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitBeforeEarliestBuildInValid(
self, *_):
lower_bound_build_number = 3
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, 100, 10)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuild(self, *_):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 10000)
self.assertEqual(upper_bound_build_number, lower_bound.build_number)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=False)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitAfterLatestBuildInvalid(self, *_):
upper_bound_build_number = 5
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 10000)
self.assertIsNone(lower_bound)
self.assertIsNone(upper_bound)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtUpperBound(self, *_):
upper_bound_build_number = 4
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', None, upper_bound_build_number, 50)
self.assertEqual(50, lower_bound.commit_position)
self.assertEqual(50, upper_bound.commit_position)
@mock.patch.object(
swarming, 'CanFindSwarmingTaskFromBuildForAStep', return_value=True)
@mock.patch.object(build_util, 'GetBuildInfo', _MockedGetBuildInfo)
def testGetValidBoundingBuildsForStepCommitRightAtLowerBound(self, *_):
upper_bound_build_number = 4
lower_bound_build_number = 1
lower_bound, upper_bound = step_util.GetValidBoundingBuildsForStep(
'm', 'b', 's', lower_bound_build_number, upper_bound_build_number, 20)
self.assertEqual(20, lower_bound.commit_position)
self.assertEqual(20, upper_bound.commit_position)
def testIsStepSupportedByFinditObjectNone(self):
self.assertFalse(step_util.IsStepSupportedByFindit(None, 'step', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=False)
def testStepNotSupportedByFindit(self, _):
self.assertFalse(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'step', 'm'))
def testIsStepSupportedByFinditOtherIsolatedScriptTest(self):
self.assertFalse(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'telemetry_perf_tests', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=True)
def testIsStepSupportedByFinditWebkitLayoutTests(self, _):
self.assertTrue(
step_util.IsStepSupportedByFindit(
WebkitLayoutTestResults(None), 'webkit_layout_tests', 'm'))
@mock.patch.object(
waterfall_config, 'StepIsSupportedForMaster', return_value=True)
def testIsStepSupportedByFinditGtests(self, _):
self.assertTrue(
step_util.IsStepSupportedByFindit(
GtestTestResults(None), 'browser_tests', 'm'))
@parameterized.expand([
({
'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,
'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA
},),
({
'step_log_return': wf_testcase.SAMPLE_STEP_METADATA,
'expected_step_metadata': wf_testcase.SAMPLE_STEP_METADATA
},),
({
'step_log_return': None,
'expected_step_metadata': None
},),
({
'step_log_return': None,
'expected_step_metadata': None
},),
])
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadata(self, cases, mock_step_log):
mock_step_log.return_value = cases['step_log_return']
step_metadata = step_util.GetStepMetadata(123, 'step')
self.assertEqual(cases['expected_step_metadata'], step_metadata)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataPartialMatch(self, mock_step_log):
step_util.GetStepMetadata(123, 'step', True)
self.assertIn(True, mock_step_log.call_args[0])
step_util.GetStepMetadata(123, 'step', False)
self.assertIn(False, mock_step_log.call_args[0])
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(
logdog_util, '_GetStreamForStep', return_value='log_stream')
@mock.patch.object(
logdog_util,
'GetStepLogLegacy',
return_value=json.dumps(wf_testcase.SAMPLE_STEP_METADATA))
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
def testLegacyGetStepMetadata(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value=':')
def testMalformattedNinjaInfo(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog(
'm', 'b', 123, 's', None, 'json.output[ninja_info]')
self.assertIsNone(step_metadata)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value=None)
def testLegacyGetStepMetadataStepNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value=None)
def testLegacyGetStepMetadataStreamNone(self, *_):
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertIsNone(step_metadata)
@mock.patch.object(
step_util,
'GetStepLogForLuciBuild',
return_value=wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(build_util, 'DownloadBuildData')
def testLegacyGetStepMetadataFromLUCIBuild(self, mock_build, _):
build = WfBuild.Create('m', 'b', 123)
build.build_id = '8948240770002521488'
build.put()
mock_build.return_value = build
step_metadata = step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata')
self.assertEqual(step_metadata, wf_testcase.SAMPLE_STEP_METADATA)
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(
logdog_util, '_GetAnnotationsProtoForPath', return_value='step')
@mock.patch.object(logdog_util, '_GetStreamForStep', return_value='stream')
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log1/nlog2')
def testGetStepLogStdio(self, *_):
self.assertEqual(
'log1/nlog2',
step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None))
@mock.patch.object(
build_util, 'DownloadBuildData', return_value=MockWaterfallBuild())
@mock.patch.object(logdog_util, 'GetStepLogLegacy', return_value='log')
@mock.patch.object(logging, 'error')
def testGetStepLogNotJosonLoadable(self, mocked_log, *_):
self.assertIsNone(
step_util.GetWaterfallBuildStepLog('m', 'b', 123, 's', None,
'step_metadata'))
mocked_log.assert_called_with(
'Failed to json load data for step_metadata. Data is: log.')
@mock.patch.object(buildbucket_client, 'GetV2Build', return_value=None)
def testGetStepLogForLuciBuildError(self, _):
self.assertIsNone(step_util.GetStepLogForLuciBuild('87654321', 's', None))
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
@mock.patch.object(logdog_util, 'GetLogFromViewUrl')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuildNoViewUrl(self, mock_get_build, mock_get_log,
_):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertIsNone(
step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))
self.assertFalse(mock_get_log.called)
@mock.patch.object(
step_util, '_ParseStepLogIfAppropriate', return_value='log')
@mock.patch.object(logdog_util, 'GetLogFromViewUrl', return_value='log')
@mock.patch.object(buildbucket_client, 'GetV2Build')
def testGetStepLogForLuciBuild(self, mock_get_build, mock_get_log, _):
build_id = '8945610992972640896'
mock_log = common_pb2.Log()
mock_log.name = 'step_metadata'
mock_log.view_url = 'view_url'
mock_step = Step()
mock_step.name = 's'
mock_step.logs.extend([mock_log])
mock_build = Build()
mock_build.id = int(build_id)
mock_build.steps.extend([mock_step])
mock_get_build.return_value = mock_build
self.assertEqual(
'log',
step_util.GetStepLogForLuciBuild(build_id, 's', None, 'step_metadata'))
mock_get_log.assert_called_once_with('view_url', None)
@mock.patch.object(buildbucket_client, 'GetV2Build')
@mock.patch.object(step_util, 'GetStepLogFromBuildObject')
def testGetStepLogForLuciBuildPartialMatch(self, mock_log_from_build, _):
step_util.GetStepLogForLuciBuild('87654321', 's', None)
self.assertIn(False, mock_log_from_build.call_args[0])
step_util.GetStepLogForLuciBuild('87654321', 's', None, True)
self.assertIn(True, mock_log_from_build.call_args[0])
@mock.patch.object(step_util, '_GetStepLogViewUrl', return_value=None)
def testGetStepLogFromBuildObjectPartialMatch(self, mock_get_log_url):
step_util.GetStepLogFromBuildObject(Build(), 'full_step_name',
'http_client')
self.assertIn(False, mock_get_log_url.call_args[0])
step_util.GetStepLogFromBuildObject(
Build(), 'full_step_name', 'http_client', partial_match=True)
self.assertIn(True, mock_get_log_url.call_args[0])
def testGetStepLogViewUrlNoMatchingLog(self):
build_id = 8945610992972640896
mock_log = common_pb2.Log()
mock_log.name = 'another_log'
mock_log.view_url = 'view_url'
mock_step1 = Step()
mock_step1.name = 's1'
mock_step1.logs.extend([mock_log])
mock_step2 = Step()
mock_step2.name = 's2'
mock_step2.logs.extend([mock_log])
mock_build = Build()
mock_build.id = build_id
mock_build.steps.extend([mock_step1, mock_step2])
self.assertIsNone(step_util._GetStepLogViewUrl(mock_build, 's2', 'log'))
@parameterized.expand([
(True, 'step_name', 'view_url', 'view_url_partial_match'),
(False, 'step_name', 'view_url', None),
])
def testGetStepLogViewUrlPartialMatching(self, partial_match, full_step_name,
expected_url_in_build1,
expected_url_in_build2):
mock_step1 = Step()
mock_step1.name = 'step_name'
mock_log1 = common_pb2.Log()
mock_log1.name = 'log'
mock_log1.view_url = 'view_url'
mock_step1.logs.extend([mock_log1])
mock_step2 = Step()
mock_step2.name = 'step_name_longer'
mock_log2 = common_pb2.Log()
mock_log2.name = 'log'
mock_log2.view_url = 'view_url_partial_match'
mock_step2.logs.extend([mock_log2])
mock_build1 = Build()
mock_build1.steps.extend([mock_step1, mock_step2])
self.assertEqual(
expected_url_in_build1,
step_util._GetStepLogViewUrl(
mock_build1, full_step_name, 'log', partial_match=partial_match))
mock_build2 = Build()
mock_build2.steps.extend([mock_step2])
self.assertEqual(
expected_url_in_build2,
step_util._GetStepLogViewUrl(
mock_build2, full_step_name, 'log', partial_match=partial_match))
@mock.patch.object(
step_util,
'GetWaterfallBuildStepLog',
return_value={'canonical_step_name': 'unsupported_step1'})
def testStepIsSupportedForMaster(self, _):
master_name = 'master1'
builder_name = 'b'
build_number = 123
step_name = 'unsupported_step1 on master1'
self.assertFalse(
step_util.StepIsSupportedForMaster(master_name, builder_name,
build_number, step_name))
def testStepIsSupportedForMasterCompile(self):
master_name = 'm'
builder_name = 'b'
build_number = 123
step_name = 'compile'
self.assertTrue(
step_util.StepIsSupportedForMaster(master_name, builder_name,
build_number, step_name))
@mock.patch.object(step_util, 'GetWaterfallBuildStepLog')
def testLegacyGetStepMetadataCached(self, mock_fn):
mock_fn.side_effect = ['invalid', {'canonical_step_name': 'step_name'}]
    # Returns the invalid step_metadata but does not cache it.
self.assertEqual(
'invalid',
step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
    # Returns the valid step_metadata and caches it.
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.LegacyGetStepMetadata('m', 'b', 201,
'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(step_util, 'GetStepLogForLuciBuild')
def testGetStepMetadataCached(self, mock_fn, *_):
mock_fn.side_effect = [None, {'canonical_step_name': 'step_name'}]
    # Returns the invalid step_metadata but does not cache it.
self.assertEqual(None,
step_util.GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 1)
    # Returns the valid step_metadata and caches it.
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
self.assertEqual({
'canonical_step_name': 'step_name'
}, step_util.GetStepMetadata(123, 'step_name on a platform'))
self.assertTrue(mock_fn.call_count == 2)
@mock.patch.object(
step_util,
'LegacyGetStepMetadata',
return_value={'canonical_step_name': 'step_name'})
def testLegacyGetCanonicalStep(self, _):
self.assertEqual(
'step_name',
step_util.LegacyGetCanonicalStepName('m', 'b', 200,
'step_name on a platform'))
@parameterized.expand([({
'canonical_step_name': 'step_name'
}, 'step_name'), (None, 'step_name'), ({
'a': 'b'
}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepName(self, step_metadata, expected_canonical_step,
mocked_get_step):
mocked_get_step.return_value = step_metadata
self.assertEqual(
expected_canonical_step,
step_util.GetCanonicalStepName(123, 'step_name (with patch)'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetCanonicalStepNamePartialMatch(self, mock_get_step_metadata):
step_util.GetCanonicalStepName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetCanonicalStepName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(
step_util,
'LegacyGetStepMetadata',
return_value={'isolate_target_name': 'browser_tests'})
def testLegacyGetIsolateTargetName(self, _):
self.assertEqual(
'browser_tests',
step_util.LegacyGetIsolateTargetName(
'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(step_util, 'LegacyGetStepMetadata', return_value=None)
def testLegacyGetIsolateTargetNameStepMetadataIsNone(self, _):
self.assertEqual(
None,
step_util.LegacyGetIsolateTargetName(
'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@mock.patch.object(
step_util, 'LegacyGetStepMetadata', return_value={'a': 'b'})
def testLegacyGetIsolateTargetNameIsolateTargetNameIsMissing(self, _):
self.assertEqual(
None,
step_util.LegacyGetIsolateTargetName(
'm', 'b', 200, 'viz_browser_tests (with patch) on Android'))
@parameterized.expand([({
'isolate_target_name': 'isolate_target'
}, 'isolate_target'), (None, None), ({
'a': 'b'
}, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetName(self, step_metadata, expected_isolate_target,
mocked_get_stepmeta):
mocked_get_stepmeta.return_value = step_metadata
self.assertEqual(expected_isolate_target,
step_util.GetIsolateTargetName(123, 'full step name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetIsolateTargetPartialMatch(self, mock_get_step_metadata):
step_util.GetIsolateTargetName(123, 'full step name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetIsolateTargetName(123, 'full step name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@parameterized.expand([(wf_testcase.SAMPLE_STEP_METADATA, 'platform'),
(None, None)])
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOS(self, mock_fn_return, expected_platform, mock_fn):
mock_fn.return_value = mock_fn_return
self.assertEqual(expected_platform,
step_util.GetOS(123, 'builder_name', 'step_name'))
@mock.patch.object(step_util, 'GetStepMetadata')
def testGetOSPartialMatch(self, mock_get_step_metadata):
step_util.GetOS(123, 'builder_name', 'step_name')
self.assertIn(False, mock_get_step_metadata.call_args[0])
step_util.GetOS(123, 'builder_name', 'step_name', True)
self.assertIn(True, mock_get_step_metadata.call_args[0])
@mock.patch.object(
step_util,
'GetStepMetadata',
return_value=wf_testcase.SAMPLE_STEP_METADATA)
def testGetOSCached(self, mock_fn):
self.assertEqual('platform',
step_util.GetOS(123, 'builder_name', 'step_name'))
self.assertEqual(1, mock_fn.call_count)
self.assertEqual('platform',
step_util.GetOS(123, 'builder_name', 'step_name'))
self.assertEqual(1, mock_fn.call_count)
def testGetStepStartAndEndTime(self):
build_id = '8945610992972640896'
start_time = datetime.datetime(2019, 3, 6)
end_time = datetime.datetime(2019, 3, 6, 0, 0, 10)
step = Step()
step.name = 's'
step.start_time.FromDatetime(start_time)
step.end_time.FromDatetime(end_time)
build = Build()
build.id = int(build_id)
build.steps.extend([step])
self.assertEqual((start_time, end_time),
step_util.GetStepStartAndEndTime(build, 's'))
self.assertEqual((None, None), step_util.GetStepStartAndEndTime(
build, 's2'))
| [ 26, 32, 43, 49, 55 ] |
1,312 | aec311cae7cb6cbe3e3a927a133ec20a2d2afbf5 | <mask token>
| class Solution:
<mask token>
| class Solution:
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
if not digits:
return []
result_set = []
letters = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl', '6':
'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
def permutate(index, result, result_set):
if index == len(digits):
result_set.append(''.join(result))
return
for letter in letters[digits[index]]:
result[index] = letter
permutate(index + 1, result, result_set)
permutate(0, ['' for _ in digits], result_set)
return result_set
| null | null | [
0,
1,
2
] |
1,313 | 2dbb1051b35898288db629fd0c5b3887c429e9b8 | <mask token>
def SetCommon(Common, XmlCommon):
XmlTag = 'Usage'
Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = 'FeatureFlag'
Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)
XmlTag = 'SupArchList'
Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'
Common.HelpText = XmlElement(XmlCommon, XmlTag)
def SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):
XmlParentTag = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParentTag + '/' + NameTag
CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'GuidValue'
CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'Version'
CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)
CommonHeader.FileName = os.path.basename(FileName)
CommonHeader.FullPath = os.path.abspath(FileName)
<mask token>
def AddToSpecificationDict(SpecificationDict, SpecificationString):
"""Abstract specification name, value pair from Specification String"""
for SpecificationMatch in mReSpecification.finditer(SpecificationString):
Specification = SpecificationMatch.group('Specification')
Value = SpecificationMatch.group('Value')
SpecificationDict[Specification] = Value
<mask token>
def LoadClonedRecord(XmlCloned):
ClonedRecord = ClonedRecordClass()
XmlTag = 'Id'
ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))
XmlTag = 'FarGuid'
ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageGuid'
ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageVersion'
ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleGuid'
ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleVersion'
ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)
return ClonedRecord
def LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):
GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()
XmlTag = 'Name'
GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)
if XmlParent == 'Entry':
XmlTag = '%s/C_Name' % XmlParent
elif XmlParent == 'GuidCNames':
XmlTag = '%s/GuidCName' % XmlParent
else:
XmlTag = '%s/%sCName' % (XmlParent, XmlParent)
GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
XmlTag = XmlParent + '/' + 'GuidValue'
GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
if XmlParent.endswith('Notify'):
GuidProtocolPpiCommon.Notify = True
XmlTag = 'GuidTypeList'
GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()
XmlTag = 'SupModuleList'
SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.SupModuleList = SupModules.split()
SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)
return GuidProtocolPpiCommon
def LoadPcd(XmlPcd):
"""Return a new PcdClass object equivalent to XmlPcd"""
Pcd = PcdClass()
XmlTag = 'PcdEntry/C_Name'
Pcd.CName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/Token'
Pcd.Token = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/TokenSpaceGuidCName'
Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DatumType'
Pcd.DatumType = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/MaxDatumSize'
Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DefaultValue'
Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdItemType'
Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/ValidUsage'
Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()
XmlTag = 'SupModuleList'
Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()
SetCommon(Pcd, XmlPcd)
return Pcd
<mask token>
def StoreTextFile(TextFile, Content):
EdkLogger.verbose(Content)
TextFile.write(Content)
def AddToSection(Section, Arch, Item):
SectionArch = Section.get(Arch, [])
if Item not in SectionArch:
SectionArch.append(Item)
Section[Arch] = SectionArch
<mask token>
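# Render a UserExtensionsClass object as an INF/DEC
# "[UserExtensions.<UserID>.<Identifier>]" section string with its content
# indented beneath the section header.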
def GetUserExtensions(UserExtensions):
UserId = UserExtensions.UserID
Identifier = UserExtensions.Identifier
Content = UserExtensions.Content
return '[UserExtensions.%s.%s]\n %s\n\n' % (UserId, Identifier, Content)
<mask token>
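# Parse the given XML file once and return the text of each element named in
# TagTuple, in the same order.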
def GetXmlFileInfo(FileName, TagTuple):
XmlDom = XmlParseFile(FileName)
return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])
def MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):
UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName
Version = '%s Version %.2f' % (ToolName, VersionNumber)
Copyright = 'Copyright (c) 2007, Intel Corporation. All rights reserved.'
Parser = OptionParser(description=Copyright, version=Version, usage=
UsageString)
Parser.add_option('-o', '--output', dest='OutputFile', help=
'The name of the %s file to be created.' % Destinate)
Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',
default=False, help=
'Automatically create the %s file using the name of the %s file and replacing file extension'
% (Source, Destinate))
Parser.add_option('-q', '--quiet', action='store_true', type=None, help
='Disable all messages except FATAL ERRORS.')
Parser.add_option('-v', '--verbose', action='store_true', type=None,
help='Turn on verbose output with informational messages printed.')
Options, Args = Parser.parse_args()
if Options.verbose:
EdkLogger.setLevel(EdkLogger.VERBOSE)
elif Options.quiet:
EdkLogger.setLevel(EdkLogger.QUIET)
else:
EdkLogger.setLevel(EdkLogger.INFO)
if len(Args) == 0:
raise MigrationError(PARAMETER_MISSING, name='Input file', usage=
Parser.get_usage())
if len(Args) > 1:
raise MigrationError(PARAMETER_INVALID, name='Too many input files',
usage=Parser.get_usage())
InputFile = Args[0]
if not os.path.exists(InputFile):
raise MigrationError(FILE_NOT_FOUND, name=InputFile)
if Options.OutputFile:
if Options.AutoWrite:
raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',
usage=Parser.get_usage())
elif Options.AutoWrite:
Options.OutputFile = os.path.splitext(InputFile)[0
] + '.' + Destinate.lower()
else:
raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.
get_usage())
return Options, InputFile
<mask token>
| <mask token>
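# Populate the fields shared by all *Class objects (Usage, FeatureFlag,
# SupArchList and the HelpText child element) from the given XML node.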
def SetCommon(Common, XmlCommon):
XmlTag = 'Usage'
Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = 'FeatureFlag'
Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)
XmlTag = 'SupArchList'
Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'
Common.HelpText = XmlElement(XmlCommon, XmlTag)
def SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):
XmlParentTag = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParentTag + '/' + NameTag
CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'GuidValue'
CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'Version'
CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)
CommonHeader.FileName = os.path.basename(FileName)
CommonHeader.FullPath = os.path.abspath(FileName)
<mask token>
def AddToSpecificationDict(SpecificationDict, SpecificationString):
"""Abstract specification name, value pair from Specification String"""
for SpecificationMatch in mReSpecification.finditer(SpecificationString):
Specification = SpecificationMatch.group('Specification')
Value = SpecificationMatch.group('Value')
SpecificationDict[Specification] = Value
def SetCommonHeader(CommonHeader, XmlCommonHeader):
"""Set all attributes of CommonHeaderClass object from XmlCommonHeader"""
XmlParent = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParent + '/' + 'Abstract'
CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Description'
CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Copyright'
CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'License'
CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Specification'
Specification = XmlElement(XmlCommonHeader, XmlTag)
AddToSpecificationDict(CommonHeader.Specification, Specification)
XmlTag = XmlParent + '/' + 'ModuleType'
CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)
def LoadClonedRecord(XmlCloned):
ClonedRecord = ClonedRecordClass()
XmlTag = 'Id'
ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))
XmlTag = 'FarGuid'
ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageGuid'
ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageVersion'
ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleGuid'
ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleVersion'
ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)
return ClonedRecord
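# Build a GuidProtocolPpiCommonClass object from a Guid/Protocol/Ppi XML node;
# the CName tag is derived from the parent node name, and nodes whose parent
# ends with "Notify" are flagged as notifications.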
def LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):
GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()
XmlTag = 'Name'
GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)
if XmlParent == 'Entry':
XmlTag = '%s/C_Name' % XmlParent
elif XmlParent == 'GuidCNames':
XmlTag = '%s/GuidCName' % XmlParent
else:
XmlTag = '%s/%sCName' % (XmlParent, XmlParent)
GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
XmlTag = XmlParent + '/' + 'GuidValue'
GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
if XmlParent.endswith('Notify'):
GuidProtocolPpiCommon.Notify = True
XmlTag = 'GuidTypeList'
GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()
XmlTag = 'SupModuleList'
SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.SupModuleList = SupModules.split()
SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)
return GuidProtocolPpiCommon
def LoadPcd(XmlPcd):
"""Return a new PcdClass object equivalent to XmlPcd"""
Pcd = PcdClass()
XmlTag = 'PcdEntry/C_Name'
Pcd.CName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/Token'
Pcd.Token = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/TokenSpaceGuidCName'
Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DatumType'
Pcd.DatumType = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/MaxDatumSize'
Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DefaultValue'
Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdItemType'
Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/ValidUsage'
Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()
XmlTag = 'SupModuleList'
Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()
SetCommon(Pcd, XmlPcd)
return Pcd
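# Build a LibraryClassClass object from a <LibraryClass> XML node, falling back
# to the Name attribute when the Keyword element is empty.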
def LoadLibraryClass(XmlLibraryClass):
LibraryClass = LibraryClassClass()
XmlTag = 'LibraryClass/Keyword'
LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)
if LibraryClass.LibraryClass == '':
XmlTag = 'Name'
LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)
XmlTag = 'LibraryClass/IncludeHeader'
LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)
XmlTag = 'RecommendedInstanceVersion'
RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion
XmlTag = 'RecommendedInstanceGuid'
RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid
XmlTag = 'SupModuleList'
SupModules = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.SupModuleList = SupModules.split()
SetCommon(LibraryClass, XmlLibraryClass)
return LibraryClass
def LoadBuildOption(XmlBuildOption):
"""Return a new BuildOptionClass object equivalent to XmlBuildOption"""
BuildOption = BuildOptionClass()
BuildOption.Option = XmlElementData(XmlBuildOption)
XmlTag = 'BuildTargets'
BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()
XmlTag = 'ToolChainFamily'
BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'TagName'
BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'ToolCode'
BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'SupArchList'
BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()
return BuildOption
def LoadUserExtensions(XmlUserExtensions):
UserExtensions = UserExtensionsClass()
XmlTag = 'UserID'
UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)
XmlTag = 'Identifier'
UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)
UserExtensions.Content = XmlElementData(XmlUserExtensions)
return UserExtensions
def StoreTextFile(TextFile, Content):
EdkLogger.verbose(Content)
TextFile.write(Content)
def AddToSection(Section, Arch, Item):
SectionArch = Section.get(Arch, [])
if Item not in SectionArch:
SectionArch.append(Item)
Section[Arch] = SectionArch
<mask token>
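# Write the standard EDK II "#/** @file" comment header (abstract, description,
# copyright and license taken from CommonHeader) to the already opened TextFile.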
def StoreHeader(TextFile, CommonHeader):
CopyRight = CommonHeader.Copyright
Abstract = CommonHeader.Abstract
Description = CommonHeader.Description
License = CommonHeader.License
Header = '#/** @file\n#\n'
Header += '# ' + Abstract + '\n#\n'
Header += '# ' + Description.strip().replace('\n', '\n# ') + '\n'
Header += '# ' + CopyRight + '\n#\n'
Header += '# ' + License.replace('\n', '\n# ').replace(' ', ' ')
Header += '\n#\n#**/\n\n'
StoreTextFile(TextFile, Header)
def StoreDefinesSection(TextFile, DefinesTupleList):
Section = '[Defines]\n'
for DefineItem in DefinesTupleList:
Section += ' %-30s = %s\n' % DefineItem
Section += '\n\n'
StoreTextFile(TextFile, Section)
def GetUserExtensions(UserExtensions):
UserId = UserExtensions.UserID
Identifier = UserExtensions.Identifier
Content = UserExtensions.Content
return '[UserExtensions.%s.%s]\n %s\n\n' % (UserId, Identifier, Content)
<mask token>
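# Scan a text (INF/DEC style) file for "TAG = value" lines, ignoring "#"
# comments, and return the values that match the tags listed in TagTuple.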
def GetTextFileInfo(FileName, TagTuple):
ValueTuple = [''] * len(TagTuple)
try:
for Line in open(FileName):
Line = Line.split('#', 1)[0]
MatchEquation = mReEquation.match(Line)
if MatchEquation:
Tag = MatchEquation.group(1).upper()
Value = MatchEquation.group(2)
for Index in range(len(TagTuple)):
if TagTuple[Index] == Tag:
ValueTuple[Index] = Value
except:
EdkLogger.info('IO Error in reading file %s' % FileName)
return ValueTuple
def GetXmlFileInfo(FileName, TagTuple):
XmlDom = XmlParseFile(FileName)
return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])
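# Shared command-line front end for the migration tools: defines the
# -o/--output, -a/--auto, -q/--quiet and -v/--verbose options, requires exactly
# one existing input file, and returns the parsed options plus that file path.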
def MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):
UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName
Version = '%s Version %.2f' % (ToolName, VersionNumber)
Copyright = 'Copyright (c) 2007, Intel Corporation. All rights reserved.'
Parser = OptionParser(description=Copyright, version=Version, usage=
UsageString)
Parser.add_option('-o', '--output', dest='OutputFile', help=
'The name of the %s file to be created.' % Destinate)
Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',
default=False, help=
'Automatically create the %s file using the name of the %s file and replacing file extension'
% (Source, Destinate))
Parser.add_option('-q', '--quiet', action='store_true', type=None, help
='Disable all messages except FATAL ERRORS.')
Parser.add_option('-v', '--verbose', action='store_true', type=None,
help='Turn on verbose output with informational messages printed.')
Options, Args = Parser.parse_args()
if Options.verbose:
EdkLogger.setLevel(EdkLogger.VERBOSE)
elif Options.quiet:
EdkLogger.setLevel(EdkLogger.QUIET)
else:
EdkLogger.setLevel(EdkLogger.INFO)
if len(Args) == 0:
raise MigrationError(PARAMETER_MISSING, name='Input file', usage=
Parser.get_usage())
if len(Args) > 1:
raise MigrationError(PARAMETER_INVALID, name='Too many input files',
usage=Parser.get_usage())
InputFile = Args[0]
if not os.path.exists(InputFile):
raise MigrationError(FILE_NOT_FOUND, name=InputFile)
if Options.OutputFile:
if Options.AutoWrite:
raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',
usage=Parser.get_usage())
elif Options.AutoWrite:
Options.OutputFile = os.path.splitext(InputFile)[0
] + '.' + Destinate.lower()
else:
raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.
get_usage())
return Options, InputFile
<mask token>
| <mask token>
def SetCommon(Common, XmlCommon):
XmlTag = 'Usage'
Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = 'FeatureFlag'
Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)
XmlTag = 'SupArchList'
Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'
Common.HelpText = XmlElement(XmlCommon, XmlTag)
def SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):
XmlParentTag = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParentTag + '/' + NameTag
CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'GuidValue'
CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'Version'
CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)
CommonHeader.FileName = os.path.basename(FileName)
CommonHeader.FullPath = os.path.abspath(FileName)
<mask token>
def AddToSpecificationDict(SpecificationDict, SpecificationString):
"""Abstract specification name, value pair from Specification String"""
for SpecificationMatch in mReSpecification.finditer(SpecificationString):
Specification = SpecificationMatch.group('Specification')
Value = SpecificationMatch.group('Value')
SpecificationDict[Specification] = Value
def SetCommonHeader(CommonHeader, XmlCommonHeader):
"""Set all attributes of CommonHeaderClass object from XmlCommonHeader"""
XmlParent = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParent + '/' + 'Abstract'
CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Description'
CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Copyright'
CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'License'
CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Specification'
Specification = XmlElement(XmlCommonHeader, XmlTag)
AddToSpecificationDict(CommonHeader.Specification, Specification)
XmlTag = XmlParent + '/' + 'ModuleType'
CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)
def LoadClonedRecord(XmlCloned):
ClonedRecord = ClonedRecordClass()
XmlTag = 'Id'
ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))
XmlTag = 'FarGuid'
ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageGuid'
ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageVersion'
ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleGuid'
ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleVersion'
ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)
return ClonedRecord
def LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):
GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()
XmlTag = 'Name'
GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)
if XmlParent == 'Entry':
XmlTag = '%s/C_Name' % XmlParent
elif XmlParent == 'GuidCNames':
XmlTag = '%s/GuidCName' % XmlParent
else:
XmlTag = '%s/%sCName' % (XmlParent, XmlParent)
GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
XmlTag = XmlParent + '/' + 'GuidValue'
GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
if XmlParent.endswith('Notify'):
GuidProtocolPpiCommon.Notify = True
XmlTag = 'GuidTypeList'
GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()
XmlTag = 'SupModuleList'
SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.SupModuleList = SupModules.split()
SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)
return GuidProtocolPpiCommon
def LoadPcd(XmlPcd):
"""Return a new PcdClass object equivalent to XmlPcd"""
Pcd = PcdClass()
XmlTag = 'PcdEntry/C_Name'
Pcd.CName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/Token'
Pcd.Token = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/TokenSpaceGuidCName'
Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DatumType'
Pcd.DatumType = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/MaxDatumSize'
Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DefaultValue'
Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdItemType'
Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/ValidUsage'
Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()
XmlTag = 'SupModuleList'
Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()
SetCommon(Pcd, XmlPcd)
return Pcd
def LoadLibraryClass(XmlLibraryClass):
LibraryClass = LibraryClassClass()
XmlTag = 'LibraryClass/Keyword'
LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)
if LibraryClass.LibraryClass == '':
XmlTag = 'Name'
LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)
XmlTag = 'LibraryClass/IncludeHeader'
LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)
XmlTag = 'RecommendedInstanceVersion'
RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion
XmlTag = 'RecommendedInstanceGuid'
RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid
XmlTag = 'SupModuleList'
SupModules = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.SupModuleList = SupModules.split()
SetCommon(LibraryClass, XmlLibraryClass)
return LibraryClass
def LoadBuildOption(XmlBuildOption):
"""Return a new BuildOptionClass object equivalent to XmlBuildOption"""
BuildOption = BuildOptionClass()
BuildOption.Option = XmlElementData(XmlBuildOption)
XmlTag = 'BuildTargets'
BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()
XmlTag = 'ToolChainFamily'
BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'TagName'
BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'ToolCode'
BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'SupArchList'
BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()
return BuildOption
def LoadUserExtensions(XmlUserExtensions):
UserExtensions = UserExtensionsClass()
XmlTag = 'UserID'
UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)
XmlTag = 'Identifier'
UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)
UserExtensions.Content = XmlElementData(XmlUserExtensions)
return UserExtensions
def StoreTextFile(TextFile, Content):
EdkLogger.verbose(Content)
TextFile.write(Content)
def AddToSection(Section, Arch, Item):
SectionArch = Section.get(Arch, [])
if Item not in SectionArch:
SectionArch.append(Item)
Section[Arch] = SectionArch
def GetSection(SectionName, Method, ObjectList):
SupportedArches = ['common', 'Ia32', 'X64', 'Ipf', 'Ebc', 'ARM', 'AARCH64']
SectionDict = {}
for Object in ObjectList:
Item = Method(Object)
if Item == '':
continue
Item = ' %s' % Item
Arches = Object.SupArchList
if len(Arches) == 0:
AddToSection(SectionDict, 'common', Item)
else:
for Arch in SupportedArches:
if Arch.upper() in Arches:
AddToSection(SectionDict, Arch, Item)
Section = ''
for Arch in SupportedArches:
SectionArch = '\n'.join(SectionDict.get(Arch, []))
if SectionArch != '':
Section += '[%s.%s]\n%s\n' % (SectionName, Arch, SectionArch)
Section += '\n'
if Section != '':
Section += '\n'
return Section
def StoreHeader(TextFile, CommonHeader):
CopyRight = CommonHeader.Copyright
Abstract = CommonHeader.Abstract
Description = CommonHeader.Description
License = CommonHeader.License
Header = '#/** @file\n#\n'
Header += '# ' + Abstract + '\n#\n'
Header += '# ' + Description.strip().replace('\n', '\n# ') + '\n'
Header += '# ' + CopyRight + '\n#\n'
    Header += '# ' + License.replace('\n', '\n# ').replace('  ', ' ')
Header += '\n#\n#**/\n\n'
StoreTextFile(TextFile, Header)
def StoreDefinesSection(TextFile, DefinesTupleList):
Section = '[Defines]\n'
for DefineItem in DefinesTupleList:
Section += ' %-30s = %s\n' % DefineItem
Section += '\n\n'
StoreTextFile(TextFile, Section)
def GetUserExtensions(UserExtensions):
UserId = UserExtensions.UserID
Identifier = UserExtensions.Identifier
Content = UserExtensions.Content
return '[UserExtensions.%s.%s]\n %s\n\n' % (UserId, Identifier, Content)
<mask token>
def GetTextFileInfo(FileName, TagTuple):
ValueTuple = [''] * len(TagTuple)
try:
for Line in open(FileName):
Line = Line.split('#', 1)[0]
MatchEquation = mReEquation.match(Line)
if MatchEquation:
Tag = MatchEquation.group(1).upper()
Value = MatchEquation.group(2)
for Index in range(len(TagTuple)):
if TagTuple[Index] == Tag:
ValueTuple[Index] = Value
except:
EdkLogger.info('IO Error in reading file %s' % FileName)
return ValueTuple
def GetXmlFileInfo(FileName, TagTuple):
XmlDom = XmlParseFile(FileName)
return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])
def MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):
UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName
Version = '%s Version %.2f' % (ToolName, VersionNumber)
Copyright = 'Copyright (c) 2007, Intel Corporation. All rights reserved.'
Parser = OptionParser(description=Copyright, version=Version, usage=
UsageString)
Parser.add_option('-o', '--output', dest='OutputFile', help=
'The name of the %s file to be created.' % Destinate)
Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',
default=False, help=
'Automatically create the %s file using the name of the %s file and replacing file extension'
% (Source, Destinate))
    Parser.add_option('-q', '--quiet', action='store_true', type=None,
        help='Disable all messages except FATAL ERRORS.')
Parser.add_option('-v', '--verbose', action='store_true', type=None,
help='Turn on verbose output with informational messages printed.')
Options, Args = Parser.parse_args()
if Options.verbose:
EdkLogger.setLevel(EdkLogger.VERBOSE)
elif Options.quiet:
EdkLogger.setLevel(EdkLogger.QUIET)
else:
EdkLogger.setLevel(EdkLogger.INFO)
if len(Args) == 0:
raise MigrationError(PARAMETER_MISSING, name='Input file', usage=
Parser.get_usage())
if len(Args) > 1:
raise MigrationError(PARAMETER_INVALID, name='Too many input files',
usage=Parser.get_usage())
InputFile = Args[0]
if not os.path.exists(InputFile):
raise MigrationError(FILE_NOT_FOUND, name=InputFile)
if Options.OutputFile:
if Options.AutoWrite:
raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',
usage=Parser.get_usage())
elif Options.AutoWrite:
        Options.OutputFile = os.path.splitext(InputFile)[0] + '.' + Destinate.lower()
    else:
        raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.get_usage())
return Options, InputFile
if __name__ == '__main__':
pass
| <mask token>
def SetCommon(Common, XmlCommon):
XmlTag = 'Usage'
Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = 'FeatureFlag'
Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)
XmlTag = 'SupArchList'
Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = XmlNodeName(XmlCommon) + '/' + 'HelpText'
Common.HelpText = XmlElement(XmlCommon, XmlTag)
def SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):
XmlParentTag = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParentTag + '/' + NameTag
CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'GuidValue'
CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + '/' + 'Version'
CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)
CommonHeader.FileName = os.path.basename(FileName)
CommonHeader.FullPath = os.path.abspath(FileName)
mReSpecification = re.compile('(?P<Specification>\\w+)\\s+(?P<Value>\\w*)')
def AddToSpecificationDict(SpecificationDict, SpecificationString):
"""Abstract specification name, value pair from Specification String"""
for SpecificationMatch in mReSpecification.finditer(SpecificationString):
Specification = SpecificationMatch.group('Specification')
Value = SpecificationMatch.group('Value')
SpecificationDict[Specification] = Value
def SetCommonHeader(CommonHeader, XmlCommonHeader):
"""Set all attributes of CommonHeaderClass object from XmlCommonHeader"""
XmlParent = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParent + '/' + 'Abstract'
CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Description'
CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Copyright'
CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'License'
CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + '/' + 'Specification'
Specification = XmlElement(XmlCommonHeader, XmlTag)
AddToSpecificationDict(CommonHeader.Specification, Specification)
XmlTag = XmlParent + '/' + 'ModuleType'
CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)
def LoadClonedRecord(XmlCloned):
ClonedRecord = ClonedRecordClass()
XmlTag = 'Id'
ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))
XmlTag = 'FarGuid'
ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageGuid'
ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/PackageVersion'
ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleGuid'
ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = 'Cloned/ModuleVersion'
ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)
return ClonedRecord
def LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):
GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()
XmlTag = 'Name'
GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)
if XmlParent == 'Entry':
XmlTag = '%s/C_Name' % XmlParent
elif XmlParent == 'GuidCNames':
XmlTag = '%s/GuidCName' % XmlParent
else:
XmlTag = '%s/%sCName' % (XmlParent, XmlParent)
GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
XmlTag = XmlParent + '/' + 'GuidValue'
GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
if XmlParent.endswith('Notify'):
GuidProtocolPpiCommon.Notify = True
XmlTag = 'GuidTypeList'
GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()
XmlTag = 'SupModuleList'
SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.SupModuleList = SupModules.split()
SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)
return GuidProtocolPpiCommon
def LoadPcd(XmlPcd):
"""Return a new PcdClass object equivalent to XmlPcd"""
Pcd = PcdClass()
XmlTag = 'PcdEntry/C_Name'
Pcd.CName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/Token'
Pcd.Token = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/TokenSpaceGuidCName'
Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DatumType'
Pcd.DatumType = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/MaxDatumSize'
Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/DefaultValue'
Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)
XmlTag = 'PcdItemType'
Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)
XmlTag = 'PcdEntry/ValidUsage'
Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()
XmlTag = 'SupModuleList'
Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()
SetCommon(Pcd, XmlPcd)
return Pcd
def LoadLibraryClass(XmlLibraryClass):
LibraryClass = LibraryClassClass()
XmlTag = 'LibraryClass/Keyword'
LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)
if LibraryClass.LibraryClass == '':
XmlTag = 'Name'
LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)
XmlTag = 'LibraryClass/IncludeHeader'
LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)
XmlTag = 'RecommendedInstanceVersion'
RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion
XmlTag = 'RecommendedInstanceGuid'
RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid
XmlTag = 'SupModuleList'
SupModules = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.SupModuleList = SupModules.split()
SetCommon(LibraryClass, XmlLibraryClass)
return LibraryClass
def LoadBuildOption(XmlBuildOption):
"""Return a new BuildOptionClass object equivalent to XmlBuildOption"""
BuildOption = BuildOptionClass()
BuildOption.Option = XmlElementData(XmlBuildOption)
XmlTag = 'BuildTargets'
BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()
XmlTag = 'ToolChainFamily'
BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'TagName'
BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'ToolCode'
BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = 'SupArchList'
BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()
return BuildOption
def LoadUserExtensions(XmlUserExtensions):
UserExtensions = UserExtensionsClass()
XmlTag = 'UserID'
UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)
XmlTag = 'Identifier'
UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)
UserExtensions.Content = XmlElementData(XmlUserExtensions)
return UserExtensions
def StoreTextFile(TextFile, Content):
EdkLogger.verbose(Content)
TextFile.write(Content)
def AddToSection(Section, Arch, Item):
SectionArch = Section.get(Arch, [])
if Item not in SectionArch:
SectionArch.append(Item)
Section[Arch] = SectionArch
def GetSection(SectionName, Method, ObjectList):
SupportedArches = ['common', 'Ia32', 'X64', 'Ipf', 'Ebc', 'ARM', 'AARCH64']
SectionDict = {}
for Object in ObjectList:
Item = Method(Object)
if Item == '':
continue
Item = ' %s' % Item
Arches = Object.SupArchList
if len(Arches) == 0:
AddToSection(SectionDict, 'common', Item)
else:
for Arch in SupportedArches:
if Arch.upper() in Arches:
AddToSection(SectionDict, Arch, Item)
Section = ''
for Arch in SupportedArches:
SectionArch = '\n'.join(SectionDict.get(Arch, []))
if SectionArch != '':
Section += '[%s.%s]\n%s\n' % (SectionName, Arch, SectionArch)
Section += '\n'
if Section != '':
Section += '\n'
return Section
def StoreHeader(TextFile, CommonHeader):
CopyRight = CommonHeader.Copyright
Abstract = CommonHeader.Abstract
Description = CommonHeader.Description
License = CommonHeader.License
Header = '#/** @file\n#\n'
Header += '# ' + Abstract + '\n#\n'
Header += '# ' + Description.strip().replace('\n', '\n# ') + '\n'
Header += '# ' + CopyRight + '\n#\n'
    Header += '# ' + License.replace('\n', '\n# ').replace('  ', ' ')
Header += '\n#\n#**/\n\n'
StoreTextFile(TextFile, Header)
def StoreDefinesSection(TextFile, DefinesTupleList):
Section = '[Defines]\n'
for DefineItem in DefinesTupleList:
Section += ' %-30s = %s\n' % DefineItem
Section += '\n\n'
StoreTextFile(TextFile, Section)
def GetUserExtensions(UserExtensions):
UserId = UserExtensions.UserID
Identifier = UserExtensions.Identifier
Content = UserExtensions.Content
return '[UserExtensions.%s.%s]\n %s\n\n' % (UserId, Identifier, Content)
mReEquation = re.compile('\\s*(\\S+)\\s*=\\s*(\\S*)\\s*')
def GetTextFileInfo(FileName, TagTuple):
ValueTuple = [''] * len(TagTuple)
try:
for Line in open(FileName):
Line = Line.split('#', 1)[0]
MatchEquation = mReEquation.match(Line)
if MatchEquation:
Tag = MatchEquation.group(1).upper()
Value = MatchEquation.group(2)
for Index in range(len(TagTuple)):
if TagTuple[Index] == Tag:
ValueTuple[Index] = Value
except:
EdkLogger.info('IO Error in reading file %s' % FileName)
return ValueTuple
def GetXmlFileInfo(FileName, TagTuple):
XmlDom = XmlParseFile(FileName)
return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])
def MigrationOptionParser(Source, Destinate, ToolName, VersionNumber=1.0):
UsageString = '%s [-a] [-v|-q] [-o <output_file>] <input_file>' % ToolName
Version = '%s Version %.2f' % (ToolName, VersionNumber)
Copyright = 'Copyright (c) 2007, Intel Corporation. All rights reserved.'
Parser = OptionParser(description=Copyright, version=Version, usage=
UsageString)
Parser.add_option('-o', '--output', dest='OutputFile', help=
'The name of the %s file to be created.' % Destinate)
Parser.add_option('-a', '--auto', dest='AutoWrite', action='store_true',
default=False, help=
'Automatically create the %s file using the name of the %s file and replacing file extension'
% (Source, Destinate))
    Parser.add_option('-q', '--quiet', action='store_true', type=None,
        help='Disable all messages except FATAL ERRORS.')
Parser.add_option('-v', '--verbose', action='store_true', type=None,
help='Turn on verbose output with informational messages printed.')
Options, Args = Parser.parse_args()
if Options.verbose:
EdkLogger.setLevel(EdkLogger.VERBOSE)
elif Options.quiet:
EdkLogger.setLevel(EdkLogger.QUIET)
else:
EdkLogger.setLevel(EdkLogger.INFO)
if len(Args) == 0:
raise MigrationError(PARAMETER_MISSING, name='Input file', usage=
Parser.get_usage())
if len(Args) > 1:
raise MigrationError(PARAMETER_INVALID, name='Too many input files',
usage=Parser.get_usage())
InputFile = Args[0]
if not os.path.exists(InputFile):
raise MigrationError(FILE_NOT_FOUND, name=InputFile)
if Options.OutputFile:
if Options.AutoWrite:
raise MigrationError(OPTION_CONFLICT, arg1='-o', arg2='-a',
usage=Parser.get_usage())
elif Options.AutoWrite:
        Options.OutputFile = os.path.splitext(InputFile)[0] + '.' + Destinate.lower()
    else:
        raise MigrationError(OPTION_MISSING, name='-o', usage=Parser.get_usage())
return Options, InputFile
if __name__ == '__main__':
pass
| ## @file
# Contains several utilities shared by migration tools.
#
# Copyright (c) 2007 - 2014, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import re
import EdkLogger
from optparse import OptionParser
from Common.BuildToolError import *
from XmlRoutines import *
from CommonDataClass.CommonClass import *
from Common.LongFilePathSupport import OpenLongFilePath as open
## Set all fields of CommonClass object.
#
# Set all attributes of CommonClass object from XML Dom object of XmlCommon.
#
# @param Common The destination CommonClass object.
# @param XmlCommon The source XML Dom object.
#
def SetCommon(Common, XmlCommon):
XmlTag = "Usage"
Common.Usage = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = "FeatureFlag"
Common.FeatureFlag = XmlAttribute(XmlCommon, XmlTag)
XmlTag = "SupArchList"
Common.SupArchList = XmlAttribute(XmlCommon, XmlTag).split()
XmlTag = XmlNodeName(XmlCommon) + "/" + "HelpText"
Common.HelpText = XmlElement(XmlCommon, XmlTag)
## Set some fields of CommonHeaderClass object.
#
# Set Name, Guid, Version, FileName and FullPath fields of CommonHeaderClass
# object from XML Dom object of XmlCommonHeader, NameTag and FileName.
#
# @param CommonHeader The destination CommonHeaderClass object.
# @param XmlCommonHeader The source XML Dom object.
# @param NameTag The name tag in XML Dom object.
# @param FileName The file name of the XML file.
#
def SetIdentification(CommonHeader, XmlCommonHeader, NameTag, FileName):
XmlParentTag = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParentTag + "/" + NameTag
CommonHeader.Name = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + "/" + "GuidValue"
CommonHeader.Guid = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParentTag + "/" + "Version"
CommonHeader.Version = XmlElement(XmlCommonHeader, XmlTag)
CommonHeader.FileName = os.path.basename(FileName)
CommonHeader.FullPath = os.path.abspath(FileName)
## Regular expression to match specification and value.
mReSpecification = re.compile(r"(?P<Specification>\w+)\s+(?P<Value>\w*)")
## Add specification to specification dictionary.
#
# Extract specification name/value pairs from Specification String and add
# them to the specification dictionary.
#
# @param SpecificationDict The destination Specification dictionary.
# @param SpecificationString The source Specification String from which the
# specification name and value pairs are extracted.
#
def AddToSpecificationDict(SpecificationDict, SpecificationString):
"""Abstract specification name, value pair from Specification String"""
for SpecificationMatch in mReSpecification.finditer(SpecificationString):
Specification = SpecificationMatch.group("Specification")
Value = SpecificationMatch.group("Value")
SpecificationDict[Specification] = Value
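## Illustration only, not part of the original tool: a minimal sketch of what
# AddToSpecificationDict extracts from a representative specification string.
# The sample specification names and values below are made up for the example.
def _ExampleAddToSpecificationDict():
    SpecificationDict = {}
    SpecificationString = "EFI_SPECIFICATION_VERSION 0x00020000 EDK_RELEASE_VERSION 0x00090000"
    AddToSpecificationDict(SpecificationDict, SpecificationString)
    # SpecificationDict is now:
    #   {'EFI_SPECIFICATION_VERSION': '0x00020000',
    #    'EDK_RELEASE_VERSION': '0x00090000'}
    return SpecificationDict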
## Set all fields of CommonHeaderClass object.
#
# Set all attributes of CommonHeaderClass object from the XML Dom object of
# XmlCommonHeader.
#
# @param CommonHeader The destination CommonHeaderClass object.
# @param XmlCommonHeader The source XML Dom object.
#
def SetCommonHeader(CommonHeader, XmlCommonHeader):
"""Set all attributes of CommonHeaderClass object from XmlCommonHeader"""
XmlParent = XmlNodeName(XmlCommonHeader)
XmlTag = XmlParent + "/" + "Abstract"
CommonHeader.Abstract = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + "/" + "Description"
CommonHeader.Description = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + "/" + "Copyright"
CommonHeader.Copyright = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + "/" + "License"
CommonHeader.License = XmlElement(XmlCommonHeader, XmlTag)
XmlTag = XmlParent + "/" + "Specification"
Specification = XmlElement(XmlCommonHeader, XmlTag)
AddToSpecificationDict(CommonHeader.Specification, Specification)
XmlTag = XmlParent + "/" + "ModuleType"
CommonHeader.ModuleType = XmlElement(XmlCommonHeader, XmlTag)
## Load a new Cloned Record class object.
#
# Read an input XML ClonedRecord DOM object and return an object of Cloned Record
# contained in the DOM object.
#
# @param XmlCloned A child XML DOM object in a Common XML DOM.
#
# @retval ClonedRecord A new Cloned Record object created by XmlCloned.
#
def LoadClonedRecord(XmlCloned):
ClonedRecord = ClonedRecordClass()
XmlTag = "Id"
ClonedRecord.Id = int(XmlAttribute(XmlCloned, XmlTag))
XmlTag = "FarGuid"
ClonedRecord.FarGuid = XmlAttribute(XmlCloned, XmlTag)
XmlTag = "Cloned/PackageGuid"
ClonedRecord.PackageGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = "Cloned/PackageVersion"
ClonedRecord.PackageVersion = XmlElement(XmlCloned, XmlTag)
XmlTag = "Cloned/ModuleGuid"
ClonedRecord.ModuleGuid = XmlElement(XmlCloned, XmlTag)
XmlTag = "Cloned/ModuleVersion"
ClonedRecord.ModuleVersion = XmlElement(XmlCloned, XmlTag)
return ClonedRecord
## Load a new Guid/Protocol/Ppi common class object.
#
# Read an input XML Guid/Protocol/Ppi DOM object and return an object of
# Guid/Protocol/Ppi contained in the DOM object.
#
# @param XmlGuidProtocolPpiCommon A child XML DOM object in a Common XML DOM.
#
# @retval GuidProtocolPpiCommon A new GuidProtocolPpiCommon class object
# created by XmlGuidProtocolPpiCommon.
#
def LoadGuidProtocolPpiCommon(XmlGuidProtocolPpiCommon):
GuidProtocolPpiCommon = GuidProtocolPpiCommonClass()
XmlTag = "Name"
GuidProtocolPpiCommon.Name = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
XmlParent = XmlNodeName(XmlGuidProtocolPpiCommon)
if XmlParent == "Entry":
XmlTag = "%s/C_Name" % XmlParent
elif XmlParent == "GuidCNames":
XmlTag = "%s/GuidCName" % XmlParent
else:
XmlTag = "%s/%sCName" % (XmlParent, XmlParent)
GuidProtocolPpiCommon.CName = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
XmlTag = XmlParent + "/" + "GuidValue"
GuidProtocolPpiCommon.Guid = XmlElement(XmlGuidProtocolPpiCommon, XmlTag)
if XmlParent.endswith("Notify"):
GuidProtocolPpiCommon.Notify = True
XmlTag = "GuidTypeList"
GuidTypes = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.GuidTypeList = GuidTypes.split()
XmlTag = "SupModuleList"
SupModules = XmlAttribute(XmlGuidProtocolPpiCommon, XmlTag)
GuidProtocolPpiCommon.SupModuleList = SupModules.split()
SetCommon(GuidProtocolPpiCommon, XmlGuidProtocolPpiCommon)
return GuidProtocolPpiCommon
## Load a new Pcd class object.
#
# Read an input XML Pcd DOM object and return an object of Pcd
# contained in the DOM object.
#
# @param XmlPcd A child XML DOM object in a Common XML DOM.
#
# @retval Pcd A new Pcd object created by XmlPcd.
#
def LoadPcd(XmlPcd):
"""Return a new PcdClass object equivalent to XmlPcd"""
Pcd = PcdClass()
XmlTag = "PcdEntry/C_Name"
Pcd.CName = XmlElement(XmlPcd, XmlTag)
XmlTag = "PcdEntry/Token"
Pcd.Token = XmlElement(XmlPcd, XmlTag)
XmlTag = "PcdEntry/TokenSpaceGuidCName"
Pcd.TokenSpaceGuidCName = XmlElement(XmlPcd, XmlTag)
XmlTag = "PcdEntry/DatumType"
Pcd.DatumType = XmlElement(XmlPcd, XmlTag)
XmlTag = "PcdEntry/MaxDatumSize"
Pcd.MaxDatumSize = XmlElement(XmlPcd, XmlTag)
XmlTag = "PcdEntry/DefaultValue"
Pcd.DefaultValue = XmlElement(XmlPcd, XmlTag)
XmlTag = "PcdItemType"
Pcd.ItemType = XmlAttribute(XmlPcd, XmlTag)
XmlTag = "PcdEntry/ValidUsage"
Pcd.ValidUsage = XmlElement(XmlPcd, XmlTag).split()
XmlTag = "SupModuleList"
Pcd.SupModuleList = XmlAttribute(XmlPcd, XmlTag).split()
SetCommon(Pcd, XmlPcd)
return Pcd
## Load a new LibraryClass class object.
#
# Read an input XML LibraryClass DOM object and return an object of LibraryClass
# contained in the DOM object.
#
# @param XmlLibraryClass A child XML DOM object in a Common XML DOM.
#
# @retval LibraryClass A new LibraryClass object created by XmlLibraryClass.
#
def LoadLibraryClass(XmlLibraryClass):
LibraryClass = LibraryClassClass()
XmlTag = "LibraryClass/Keyword"
LibraryClass.LibraryClass = XmlElement(XmlLibraryClass, XmlTag)
if LibraryClass.LibraryClass == "":
XmlTag = "Name"
LibraryClass.LibraryClass = XmlAttribute(XmlLibraryClass, XmlTag)
XmlTag = "LibraryClass/IncludeHeader"
LibraryClass.IncludeHeader = XmlElement(XmlLibraryClass, XmlTag)
XmlTag = "RecommendedInstanceVersion"
RecommendedInstanceVersion = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceVersion = RecommendedInstanceVersion
XmlTag = "RecommendedInstanceGuid"
RecommendedInstanceGuid = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.RecommendedInstanceGuid = RecommendedInstanceGuid
XmlTag = "SupModuleList"
SupModules = XmlAttribute(XmlLibraryClass, XmlTag)
LibraryClass.SupModuleList = SupModules.split()
SetCommon(LibraryClass, XmlLibraryClass)
return LibraryClass
## Load a new Build Option class object.
#
# Read an input XML BuildOption DOM object and return an object of Build Option
# contained in the DOM object.
#
# @param XmlBuildOption A child XML DOM object in a Common XML DOM.
#
# @retval BuildOption A new Build Option object created by XmlBuildOption.
#
def LoadBuildOption(XmlBuildOption):
"""Return a new BuildOptionClass object equivalent to XmlBuildOption"""
BuildOption = BuildOptionClass()
BuildOption.Option = XmlElementData(XmlBuildOption)
XmlTag = "BuildTargets"
BuildOption.BuildTargetList = XmlAttribute(XmlBuildOption, XmlTag).split()
XmlTag = "ToolChainFamily"
BuildOption.ToolChainFamily = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = "TagName"
BuildOption.TagName = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = "ToolCode"
BuildOption.ToolCode = XmlAttribute(XmlBuildOption, XmlTag)
XmlTag = "SupArchList"
BuildOption.SupArchList = XmlAttribute(XmlBuildOption, XmlTag).split()
return BuildOption
## Load a new User Extensions class object.
#
# Read an input XML UserExtensions DOM object and return an object of User
# Extensions contained in the DOM object.
#
# @param XmlUserExtensions A child XML DOM object in a Common XML DOM.
#
# @retval UserExtensions A new User Extensions object created by
# XmlUserExtensions.
#
def LoadUserExtensions(XmlUserExtensions):
UserExtensions = UserExtensionsClass()
XmlTag = "UserID"
UserExtensions.UserID = XmlAttribute(XmlUserExtensions, XmlTag)
XmlTag = "Identifier"
UserExtensions.Identifier = XmlAttribute(XmlUserExtensions, XmlTag)
UserExtensions.Content = XmlElementData(XmlUserExtensions)
return UserExtensions
## Store content to a text file object.
#
# Write some text file content to a text file object. The contents may echo
# in screen in a verbose way.
#
# @param TextFile The text file object.
# @param Content The string object to be written to a text file.
#
def StoreTextFile(TextFile, Content):
EdkLogger.verbose(Content)
TextFile.write(Content)
## Add item to a section.
#
# Add an Item with specific CPU architecture to section dictionary.
# Duplicate items are not added.
#
# @param Section Section dictionary indexed by CPU architecture.
# @param Arch CPU architecture: Ia32, X64, Ipf, ARM, AARCH64, Ebc or Common.
# @param Item The Item to be added to section dictionary.
#
def AddToSection(Section, Arch, Item):
SectionArch = Section.get(Arch, [])
if Item not in SectionArch:
SectionArch.append(Item)
Section[Arch] = SectionArch
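## Illustration only, not part of the original tool: a minimal sketch of how
# AddToSection accumulates items per architecture and silently drops
# duplicates. The section dictionary and the item strings are made up.
def _ExampleAddToSection():
    Section = {}
    AddToSection(Section, "Ia32", "  MyLibrary|Path/To/MyLibrary.inf")
    AddToSection(Section, "Ia32", "  MyLibrary|Path/To/MyLibrary.inf")  # duplicate, ignored
    AddToSection(Section, "common", "  SharedLibrary|Path/To/SharedLibrary.inf")
    # Section == {'Ia32': ['  MyLibrary|Path/To/MyLibrary.inf'],
    #             'common': ['  SharedLibrary|Path/To/SharedLibrary.inf']}
    return Section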
## Get section contents.
#
# Return the content of the section named SectionName.
# The content is generated by applying Method to each object in ObjectList.
#
# @param SectionName The name of the section.
# @param Method A function returning a string item of an object.
# @param ObjectList The list of objects.
#
# @retval Section The string content of a section.
#
def GetSection(SectionName, Method, ObjectList):
SupportedArches = ["common", "Ia32", "X64", "Ipf", "Ebc", "ARM", "AARCH64"]
SectionDict = {}
for Object in ObjectList:
Item = Method(Object)
if Item == "":
continue
Item = " %s" % Item
Arches = Object.SupArchList
if len(Arches) == 0:
AddToSection(SectionDict, "common", Item)
else:
for Arch in SupportedArches:
if Arch.upper() in Arches:
AddToSection(SectionDict, Arch, Item)
Section = ""
for Arch in SupportedArches:
SectionArch = "\n".join(SectionDict.get(Arch, []))
if SectionArch != "":
Section += "[%s.%s]\n%s\n" % (SectionName, Arch, SectionArch)
Section += "\n"
if Section != "":
Section += "\n"
return Section
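## Illustration only, not part of the original tool: a hedged sketch of the
# INF/DEC style text GetSection builds. The _FakeItem class, the GUID names
# and the section name are invented for the example.
def _ExampleGetSection():
    class _FakeItem(object):
        def __init__(self, Name, SupArchList):
            self.Name = Name
            self.SupArchList = SupArchList
    Items = [_FakeItem("gEfiExampleGuid", []), _FakeItem("gEfiIa32OnlyGuid", ["IA32"])]
    Section = GetSection("Guids", lambda Item: Item.Name, Items)
    # Section now holds a "[Guids.common]" block listing gEfiExampleGuid and a
    # "[Guids.Ia32]" block listing gEfiIa32OnlyGuid, one indented name per line.
    return Section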
## Store file header to a text file.
#
# Write standard file header to a text file. The content includes copyright,
# abstract, description and license extracted from CommonHeader class object.
#
# @param TextFile The text file object.
# @param CommonHeader The source CommonHeader class object.
#
def StoreHeader(TextFile, CommonHeader):
CopyRight = CommonHeader.Copyright
Abstract = CommonHeader.Abstract
Description = CommonHeader.Description
License = CommonHeader.License
Header = "#/** @file\n#\n"
Header += "# " + Abstract + "\n#\n"
Header += "# " + Description.strip().replace("\n", "\n# ") + "\n"
Header += "# " + CopyRight + "\n#\n"
Header += "# " + License.replace("\n", "\n# ").replace(" ", " ")
Header += "\n#\n#**/\n\n"
StoreTextFile(TextFile, Header)
## Store Defines section to a text file.
#
# Write Defines section to a text file. DefinesTupleList determines the content.
#
# @param TextFile The text file object.
# @param DefinesTupleList The list of (Tag, Value) to be added as one item.
#
def StoreDefinesSection(TextFile, DefinesTupleList):
Section = "[Defines]\n"
for DefineItem in DefinesTupleList:
Section += " %-30s = %s\n" % DefineItem
Section += "\n\n"
StoreTextFile(TextFile, Section)
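## Illustration only, not part of the original tool: a minimal sketch of the
# [Defines] text StoreDefinesSection emits. The in-memory file object and the
# tag/value pairs are made up, and EdkLogger is assumed to have been
# initialized by the calling tool before StoreTextFile echoes the content.
def _ExampleStoreDefinesSection():
    class _FakeTextFile(object):
        def __init__(self):
            self.Content = ""
        def write(self, Text):
            self.Content += Text
    TextFile = _FakeTextFile()
    StoreDefinesSection(TextFile, [("INF_VERSION", "0x00010005"), ("BASE_NAME", "ExampleModule")])
    # TextFile.Content is roughly:
    #   [Defines]
    #     INF_VERSION                    = 0x00010005
    #     BASE_NAME                      = ExampleModule
    return TextFile.Content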
## Return one User Extension section.
#
# Read the input UserExtensions class object and return one section.
#
# @param UserExtensions An input UserExtensions class object.
#
# @retval UserExtensionSection A section representing UserExtensions object.
#
def GetUserExtensions(UserExtensions):
UserId = UserExtensions.UserID
Identifier = UserExtensions.Identifier
Content = UserExtensions.Content
return "[UserExtensions.%s.%s]\n %s\n\n" % (UserId, Identifier, Content)
## Regular expression to match an equation.
mReEquation = re.compile(r"\s*(\S+)\s*=\s*(\S*)\s*")
## Return a value tuple matching information in a text file.
#
# Parse the text file and return a value tuple corresponding to an input tag
# tuple. In case of any error, a tuple of empty strings is returned.
#
# @param FileName The file name of the text file.
# @param TagTuple A tuple of tags as the key to the value.
#
# @retval ValueTuple The returned tuple corresponding to the tag tuple.
#
def GetTextFileInfo(FileName, TagTuple):
ValueTuple = [""] * len(TagTuple)
try:
for Line in open(FileName):
Line = Line.split("#", 1)[0]
MatchEquation = mReEquation.match(Line)
if MatchEquation:
Tag = MatchEquation.group(1).upper()
Value = MatchEquation.group(2)
for Index in range(len(TagTuple)):
if TagTuple[Index] == Tag:
ValueTuple[Index] = Value
except:
EdkLogger.info("IO Error in reading file %s" % FileName)
return ValueTuple
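## Illustration only, not part of the original tool: a hedged sketch of reading
# "TAG = value" pairs with GetTextFileInfo. The temporary file and its contents
# are made up; the standard library os module is imported locally for cleanup
# so the sketch does not rely on the long-file-path os alias above.
def _ExampleGetTextFileInfo():
    import os as _StandardOs
    import tempfile
    TmpFile = tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
    TmpFile.write("INF_VERSION = 0x00010005  # trailing comment is ignored\n")
    TmpFile.write("BASE_NAME = ExampleModule\n")
    TmpFile.close()
    ValueTuple = GetTextFileInfo(TmpFile.name, ("INF_VERSION", "BASE_NAME"))
    _StandardOs.remove(TmpFile.name)
    # ValueTuple == ['0x00010005', 'ExampleModule']
    return ValueTuple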
## Return a value tuple matching information in an XML file.
#
# Parse the XML file and return a value tuple corresponding to an input tag
# tuple. In case of any error, a tuple of empty strings is returned.
#
# @param FileName The file name of the XML file.
# @param TagTuple A tuple of tags as the key to the value.
#
# @retval ValueTuple The returned tuple corresponding to the tag tuple.
#
def GetXmlFileInfo(FileName, TagTuple):
XmlDom = XmlParseFile(FileName)
return tuple([XmlElement(XmlDom, XmlTag) for XmlTag in TagTuple])
## Parse migration command line options
#
# Use the standard Python module optparse to parse the command line options of this tool.
#
# @param Source The source file type.
# @param Destinate The destination file type.
#
# @retval Options An optparse object containing the parsed options.
# @retval InputFile Path of the source file to be migrated.
#
def MigrationOptionParser(Source, Destinate, ToolName, VersionNumber = 1.0):
# use clearer usage to override default usage message
UsageString = "%s [-a] [-v|-q] [-o <output_file>] <input_file>" % ToolName
Version = "%s Version %.2f" % (ToolName, VersionNumber)
Copyright = "Copyright (c) 2007, Intel Corporation. All rights reserved."
Parser = OptionParser(description=Copyright, version=Version, usage=UsageString)
Parser.add_option("-o", "--output", dest="OutputFile", help="The name of the %s file to be created." % Destinate)
Parser.add_option("-a", "--auto", dest="AutoWrite", action="store_true", default=False, help="Automatically create the %s file using the name of the %s file and replacing file extension" % (Source, Destinate))
Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed.")
Options, Args = Parser.parse_args()
# Set logging level
if Options.verbose:
EdkLogger.setLevel(EdkLogger.VERBOSE)
elif Options.quiet:
EdkLogger.setLevel(EdkLogger.QUIET)
else:
EdkLogger.setLevel(EdkLogger.INFO)
# error check
if len(Args) == 0:
raise MigrationError(PARAMETER_MISSING, name="Input file", usage=Parser.get_usage())
if len(Args) > 1:
raise MigrationError(PARAMETER_INVALID, name="Too many input files", usage=Parser.get_usage())
InputFile = Args[0]
if not os.path.exists(InputFile):
raise MigrationError(FILE_NOT_FOUND, name=InputFile)
if Options.OutputFile:
if Options.AutoWrite:
raise MigrationError(OPTION_CONFLICT, arg1="-o", arg2="-a", usage=Parser.get_usage())
else:
if Options.AutoWrite:
Options.OutputFile = os.path.splitext(InputFile)[0] + "." + Destinate.lower()
else:
raise MigrationError(OPTION_MISSING, name="-o", usage=Parser.get_usage())
return Options, InputFile
# This acts like the main() function for the script, unless it is 'import'ed
# into another script.
if __name__ == '__main__':
pass
| [
11,
18,
20,
21,
23
] |
1,314 | 645f8f1ebd3bfa0ba32d5be8058b07e2a30ba9b5 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('barriers', '0011_auto_20170904_1658')]
operations = [migrations.CreateModel(name='BarrierCountry', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100,
verbose_name='Country or Territory Name')), ('code', models.
CharField(blank=True, max_length=100, null=True, verbose_name=
'Country or Territory Code')), ('official_name', models.CharField(
blank=True, max_length=100, null=True, verbose_name=
'Offical Country or Territory name')), ('govuk_index_entry_code',
models.CharField(blank=True, max_length=10, null=True, verbose_name
='GOV.UK index code')), ('country_or_territory', models.CharField(
choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO',
max_length=2, verbose_name='Country or Territory flag'))], options=
{'verbose_name_plural': 'countries or territories'}), migrations.
CreateModel(name='BarrierNotification', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'title', models.TextField(blank=True, verbose_name='Title')), (
'description', models.TextField(blank=True, verbose_name=
'Description')), ('distribution_date', models.DateField(blank=True,
null=True, verbose_name='Distribution Date')), ('barrier_symbol',
models.CharField(blank=True, max_length=500, verbose_name=
'Barrier Symbol')), ('core_symbol', models.CharField(blank=True,
max_length=500, verbose_name='Core Symbol')), ('mab_type', models.
CharField(blank=True, max_length=500, verbose_name='Barrier type')),
('products_text', models.TextField(blank=True, verbose_name=
'Products')), ('product_codes', models.TextField(blank=True,
verbose_name='Product codes')), ('objectives', models.TextField(
blank=True, verbose_name='Objectives')), ('keywords', models.
TextField(blank=True, verbose_name='Keywords')), (
'regions_affected', models.TextField(blank=True, verbose_name=
'Regions affected')), ('comments_due_date', models.DateField(blank=
True, null=True, verbose_name='Final date for comments')), (
'notification_type', models.CharField(blank=True, max_length=50,
verbose_name='Notification type')), ('document_link', models.
CharField(blank=True, max_length=1500, verbose_name='Document link'
)), ('external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierRecord', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Active', 'Active'
)], default=None, max_length=10, null=True)), ('title', models.
TextField(blank=True, verbose_name='Title')), ('description',
models.TextField(blank=True, verbose_name='Description')), (
'products_text', models.TextField(blank=True, verbose_name=
'Products affected')), ('sectors_text', models.TextField(blank=True,
verbose_name='Sectors affected')), ('source_id', models.CharField(
blank=True, max_length=20, null=True, verbose_name=
'ID in source system')), ('distribution_date', models.DateField(
blank=True, null=True, verbose_name='Distribution Date')), (
'external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReport', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Draft', 'Draft'),
('Submitted', 'Submitted')], default=None, max_length=10, null=True
)), ('name', models.CharField(blank=True, max_length=200, null=True
)), ('problem_description', models.TextField(blank=True, null=True)
), ('product_text', models.TextField(blank=True, null=True)), (
'product_code', models.CharField(blank=True, max_length=500, null=
True)), ('business_impact_description', models.TextField(blank=True,
null=True)), ('problem_duration_description', models.TextField(
blank=True, null=True)), ('other_companies_affected_choice', models
.CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), (
'DontKnow', "Don't know")], default=None, max_length=10, null=True)
), ('other_countries_affected_description', models.TextField(blank=
True, null=True)), ('steps_taken_to_resolve', models.TextField(
blank=True, null=True)), ('outcome_looking_for', models.TextField(
blank=True, null=True)), ('support_desired_choice', models.
CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE',
'None - this is for your information only'), (
'SUPPORT_DESIRED_LOCAL',
'Local engagement only with UK Government officials in the country I am trying to export to'
), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'),
('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length
=10, null=True)), ('confidentiality_issues_description', models.
TextField(blank=True, null=True)), ('happy_to_publish_choice',
models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES',
'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE',
'Maybe, following consultation with me')], default=None, max_length
=10, null=True)), ('any_other_details_description', models.
TextField(blank=True, null=True)), ('country', models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='barriers.BarrierCountry'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReporter', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'name', models.CharField(blank=True, max_length=1500, verbose_name=
'Reporter name')), ('company', models.CharField(blank=True,
max_length=1500, verbose_name='Company name'))], options={
'abstract': False}), migrations.CreateModel(name='BarrierSource',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100)), (
'description', models.CharField(blank=True, max_length=500, null=
True)), ('short_name', models.CharField(blank=True, max_length=20,
null=True)), ('remote_url', models.URLField(blank=True, max_length=
20, null=True))], options={'abstract': False}), migrations.
CreateModel(name='BarrierTypeMapping', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'destination_barrier_list', models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='destination_barrier_list',
to='barriers.BarrierSource'))], options={'abstract': False}),
migrations.RemoveField(model_name='marketaccessbarrier', name=
'barrier_types'), migrations.RenameField(model_name='barriertype',
old_name='ec_barrier_code', new_name='barrier_code'), migrations.
AlterField(model_name='barriertype', name='name', field=models.
CharField(max_length=200)), migrations.DeleteModel(name=
'MarketAccessBarrier'), migrations.AddField(model_name=
'barriertypemapping', name='destination_barrier_type', field=models
.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='destination_barrier_type', to='barriers.BarrierType')
), migrations.AddField(model_name='barriertypemapping', name=
'source_barrier_list', field=models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='source_barrier_list', to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriertypemapping', name='source_barrier_type', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='source_barrier_type', to='barriers.BarrierType')),
migrations.AddField(model_name='barrierreport', name='reporter',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.
models.deletion.CASCADE, to='barriers.BarrierReporter')),
migrations.AddField(model_name='barrierreport', name=
'top_level_barrier_type', field=models.ForeignKey(blank=True, null=
True, on_delete=django.db.models.deletion.CASCADE, related_name=
'barrier_reports', to='barriers.BarrierType')), migrations.AddField
(model_name='barrierrecord', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barrierrecord', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name='types',
to='barriers.BarrierType')), migrations.AddField(model_name=
'barrierrecord', name='country', field=models.ForeignKey(blank=True,
null=True, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriernotification', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriernotification', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name=
'barrier_types', to='barriers.BarrierType')), migrations.AddField(
model_name='barriernotification', name='country', field=models.
ForeignKey(blank=True, null=True, on_delete=django.db.models.
deletion.CASCADE, related_name='notification_countries', to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriertype', name='barrier_source', field=models.ForeignKey(
default=1, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource'))]
| from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [('barriers', '0011_auto_20170904_1658')]
operations = [migrations.CreateModel(name='BarrierCountry', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100,
verbose_name='Country or Territory Name')), ('code', models.
CharField(blank=True, max_length=100, null=True, verbose_name=
'Country or Territory Code')), ('official_name', models.CharField(
blank=True, max_length=100, null=True, verbose_name=
'Offical Country or Territory name')), ('govuk_index_entry_code',
models.CharField(blank=True, max_length=10, null=True, verbose_name
='GOV.UK index code')), ('country_or_territory', models.CharField(
choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO',
max_length=2, verbose_name='Country or Territory flag'))], options=
{'verbose_name_plural': 'countries or territories'}), migrations.
CreateModel(name='BarrierNotification', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'title', models.TextField(blank=True, verbose_name='Title')), (
'description', models.TextField(blank=True, verbose_name=
'Description')), ('distribution_date', models.DateField(blank=True,
null=True, verbose_name='Distribution Date')), ('barrier_symbol',
models.CharField(blank=True, max_length=500, verbose_name=
'Barrier Symbol')), ('core_symbol', models.CharField(blank=True,
max_length=500, verbose_name='Core Symbol')), ('mab_type', models.
CharField(blank=True, max_length=500, verbose_name='Barrier type')),
('products_text', models.TextField(blank=True, verbose_name=
'Products')), ('product_codes', models.TextField(blank=True,
verbose_name='Product codes')), ('objectives', models.TextField(
blank=True, verbose_name='Objectives')), ('keywords', models.
TextField(blank=True, verbose_name='Keywords')), (
'regions_affected', models.TextField(blank=True, verbose_name=
'Regions affected')), ('comments_due_date', models.DateField(blank=
True, null=True, verbose_name='Final date for comments')), (
'notification_type', models.CharField(blank=True, max_length=50,
verbose_name='Notification type')), ('document_link', models.
CharField(blank=True, max_length=1500, verbose_name='Document link'
)), ('external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierRecord', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Active', 'Active'
)], default=None, max_length=10, null=True)), ('title', models.
TextField(blank=True, verbose_name='Title')), ('description',
models.TextField(blank=True, verbose_name='Description')), (
'products_text', models.TextField(blank=True, verbose_name=
'Products affected')), ('sectors_text', models.TextField(blank=True,
verbose_name='Sectors affected')), ('source_id', models.CharField(
blank=True, max_length=20, null=True, verbose_name=
'ID in source system')), ('distribution_date', models.DateField(
blank=True, null=True, verbose_name='Distribution Date')), (
'external_link', models.CharField(blank=True, max_length=1500,
verbose_name='External site link'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReport', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'status', models.CharField(blank=True, choices=[('Draft', 'Draft'),
('Submitted', 'Submitted')], default=None, max_length=10, null=True
)), ('name', models.CharField(blank=True, max_length=200, null=True
)), ('problem_description', models.TextField(blank=True, null=True)
), ('product_text', models.TextField(blank=True, null=True)), (
'product_code', models.CharField(blank=True, max_length=500, null=
True)), ('business_impact_description', models.TextField(blank=True,
null=True)), ('problem_duration_description', models.TextField(
blank=True, null=True)), ('other_companies_affected_choice', models
.CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), (
'DontKnow', "Don't know")], default=None, max_length=10, null=True)
), ('other_countries_affected_description', models.TextField(blank=
True, null=True)), ('steps_taken_to_resolve', models.TextField(
blank=True, null=True)), ('outcome_looking_for', models.TextField(
blank=True, null=True)), ('support_desired_choice', models.
CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE',
'None - this is for your information only'), (
'SUPPORT_DESIRED_LOCAL',
'Local engagement only with UK Government officials in the country I am trying to export to'
), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'),
('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length
=10, null=True)), ('confidentiality_issues_description', models.
TextField(blank=True, null=True)), ('happy_to_publish_choice',
models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES',
'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE',
'Maybe, following consultation with me')], default=None, max_length
=10, null=True)), ('any_other_details_description', models.
TextField(blank=True, null=True)), ('country', models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.CASCADE,
to='barriers.BarrierCountry'))], options={'abstract': False}),
migrations.CreateModel(name='BarrierReporter', fields=[('id',
models.AutoField(auto_created=True, primary_key=True, serialize=
False, verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'name', models.CharField(blank=True, max_length=1500, verbose_name=
'Reporter name')), ('company', models.CharField(blank=True,
max_length=1500, verbose_name='Company name'))], options={
'abstract': False}), migrations.CreateModel(name='BarrierSource',
fields=[('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('created_date', models.
DateTimeField(auto_now_add=True)), ('updated_date', models.
DateTimeField(auto_now=True)), ('is_deleted', models.BooleanField(
default=False)), ('name', models.CharField(max_length=100)), (
'description', models.CharField(blank=True, max_length=500, null=
True)), ('short_name', models.CharField(blank=True, max_length=20,
null=True)), ('remote_url', models.URLField(blank=True, max_length=
20, null=True))], options={'abstract': False}), migrations.
CreateModel(name='BarrierTypeMapping', fields=[('id', models.
AutoField(auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')), ('created_date', models.DateTimeField(
auto_now_add=True)), ('updated_date', models.DateTimeField(auto_now
=True)), ('is_deleted', models.BooleanField(default=False)), (
'destination_barrier_list', models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='destination_barrier_list',
to='barriers.BarrierSource'))], options={'abstract': False}),
migrations.RemoveField(model_name='marketaccessbarrier', name=
'barrier_types'), migrations.RenameField(model_name='barriertype',
old_name='ec_barrier_code', new_name='barrier_code'), migrations.
AlterField(model_name='barriertype', name='name', field=models.
CharField(max_length=200)), migrations.DeleteModel(name=
'MarketAccessBarrier'), migrations.AddField(model_name=
'barriertypemapping', name='destination_barrier_type', field=models
.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='destination_barrier_type', to='barriers.BarrierType')
), migrations.AddField(model_name='barriertypemapping', name=
'source_barrier_list', field=models.ForeignKey(on_delete=django.db.
models.deletion.CASCADE, related_name='source_barrier_list', to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriertypemapping', name='source_barrier_type', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='source_barrier_type', to='barriers.BarrierType')),
migrations.AddField(model_name='barrierreport', name='reporter',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.
models.deletion.CASCADE, to='barriers.BarrierReporter')),
migrations.AddField(model_name='barrierreport', name=
'top_level_barrier_type', field=models.ForeignKey(blank=True, null=
True, on_delete=django.db.models.deletion.CASCADE, related_name=
'barrier_reports', to='barriers.BarrierType')), migrations.AddField
(model_name='barrierrecord', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barrierrecord', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name='types',
to='barriers.BarrierType')), migrations.AddField(model_name=
'barrierrecord', name='country', field=models.ForeignKey(blank=True,
null=True, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriernotification', name='barrier_source', field=models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource')), migrations.AddField(model_name=
'barriernotification', name='barrier_types', field=mptt.fields.
TreeManyToManyField(blank=True, db_index=True, related_name=
'barrier_types', to='barriers.BarrierType')), migrations.AddField(
model_name='barriernotification', name='country', field=models.
ForeignKey(blank=True, null=True, on_delete=django.db.models.
deletion.CASCADE, related_name='notification_countries', to=
'barriers.BarrierCountry')), migrations.AddField(model_name=
'barriertype', name='barrier_source', field=models.ForeignKey(
default=1, on_delete=django.db.models.deletion.CASCADE, to=
'barriers.BarrierSource'))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-02 14:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('barriers', '0011_auto_20170904_1658'),
]
operations = [
migrations.CreateModel(
name='BarrierCountry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=100, verbose_name='Country or Territory Name')),
('code', models.CharField(blank=True, max_length=100, null=True, verbose_name='Country or Territory Code')),
                ('official_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='Official Country or Territory name')),
('govuk_index_entry_code', models.CharField(blank=True, max_length=10, null=True, verbose_name='GOV.UK index code')),
('country_or_territory', models.CharField(choices=[('CO', 'Country'), ('TE', 'Territory')], default='CO', max_length=2, verbose_name='Country or Territory flag')),
],
options={
'verbose_name_plural': 'countries or territories',
},
),
migrations.CreateModel(
name='BarrierNotification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('title', models.TextField(blank=True, verbose_name='Title')),
('description', models.TextField(blank=True, verbose_name='Description')),
('distribution_date', models.DateField(blank=True, null=True, verbose_name='Distribution Date')),
('barrier_symbol', models.CharField(blank=True, max_length=500, verbose_name='Barrier Symbol')),
('core_symbol', models.CharField(blank=True, max_length=500, verbose_name='Core Symbol')),
('mab_type', models.CharField(blank=True, max_length=500, verbose_name='Barrier type')),
('products_text', models.TextField(blank=True, verbose_name='Products')),
('product_codes', models.TextField(blank=True, verbose_name='Product codes')),
('objectives', models.TextField(blank=True, verbose_name='Objectives')),
('keywords', models.TextField(blank=True, verbose_name='Keywords')),
('regions_affected', models.TextField(blank=True, verbose_name='Regions affected')),
('comments_due_date', models.DateField(blank=True, null=True, verbose_name='Final date for comments')),
('notification_type', models.CharField(blank=True, max_length=50, verbose_name='Notification type')),
('document_link', models.CharField(blank=True, max_length=1500, verbose_name='Document link')),
('external_link', models.CharField(blank=True, max_length=1500, verbose_name='External site link')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('status', models.CharField(blank=True, choices=[('Active', 'Active')], default=None, max_length=10, null=True)),
('title', models.TextField(blank=True, verbose_name='Title')),
('description', models.TextField(blank=True, verbose_name='Description')),
('products_text', models.TextField(blank=True, verbose_name='Products affected')),
('sectors_text', models.TextField(blank=True, verbose_name='Sectors affected')),
('source_id', models.CharField(blank=True, max_length=20, null=True, verbose_name='ID in source system')),
('distribution_date', models.DateField(blank=True, null=True, verbose_name='Distribution Date')),
('external_link', models.CharField(blank=True, max_length=1500, verbose_name='External site link')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierReport',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('status', models.CharField(blank=True, choices=[('Draft', 'Draft'), ('Submitted', 'Submitted')], default=None, max_length=10, null=True)),
('name', models.CharField(blank=True, max_length=200, null=True)),
('problem_description', models.TextField(blank=True, null=True)),
('product_text', models.TextField(blank=True, null=True)),
('product_code', models.CharField(blank=True, max_length=500, null=True)),
('business_impact_description', models.TextField(blank=True, null=True)),
('problem_duration_description', models.TextField(blank=True, null=True)),
('other_companies_affected_choice', models.CharField(blank=True, choices=[('Yes', 'Yes'), ('No', 'No'), ('DontKnow', "Don't know")], default=None, max_length=10, null=True)),
('other_countries_affected_description', models.TextField(blank=True, null=True)),
('steps_taken_to_resolve', models.TextField(blank=True, null=True)),
('outcome_looking_for', models.TextField(blank=True, null=True)),
('support_desired_choice', models.CharField(blank=True, choices=[('SUPPORT_DESIRED_NONE', 'None - this is for your information only'), ('SUPPORT_DESIRED_LOCAL', 'Local engagement only with UK Government officials in the country I am trying to export to'), ('SUPPORT_DESIRED_BROAD', 'Broader UK Government involvement'), ('SUPPORT_DESIRED_NOT_SURE', 'Not sure')], default=None, max_length=10, null=True)),
('confidentiality_issues_description', models.TextField(blank=True, null=True)),
('happy_to_publish_choice', models.CharField(blank=True, choices=[('HAPPY_TO_PUBLISH_YES', 'Yes'), ('HAPPY_TO_PUBLISH_NO', 'No'), ('HAPPY_TO_PUBLISH_MAYBE', 'Maybe, following consultation with me')], default=None, max_length=10, null=True)),
('any_other_details_description', models.TextField(blank=True, null=True)),
('country', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierCountry')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierReporter',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(blank=True, max_length=1500, verbose_name='Reporter name')),
('company', models.CharField(blank=True, max_length=1500, verbose_name='Company name')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('name', models.CharField(max_length=100)),
('description', models.CharField(blank=True, max_length=500, null=True)),
('short_name', models.CharField(blank=True, max_length=20, null=True)),
('remote_url', models.URLField(blank=True, max_length=20, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='BarrierTypeMapping',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now=True)),
('is_deleted', models.BooleanField(default=False)),
('destination_barrier_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination_barrier_list', to='barriers.BarrierSource')),
],
options={
'abstract': False,
},
),
migrations.RemoveField(
model_name='marketaccessbarrier',
name='barrier_types',
),
migrations.RenameField(
model_name='barriertype',
old_name='ec_barrier_code',
new_name='barrier_code',
),
migrations.AlterField(
model_name='barriertype',
name='name',
field=models.CharField(max_length=200),
),
migrations.DeleteModel(
name='MarketAccessBarrier',
),
migrations.AddField(
model_name='barriertypemapping',
name='destination_barrier_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='destination_barrier_type', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barriertypemapping',
name='source_barrier_list',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source_barrier_list', to='barriers.BarrierSource'),
),
migrations.AddField(
model_name='barriertypemapping',
name='source_barrier_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='source_barrier_type', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barrierreport',
name='reporter',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierReporter'),
),
migrations.AddField(
model_name='barrierreport',
name='top_level_barrier_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='barrier_reports', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barrierrecord',
name='barrier_source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),
),
migrations.AddField(
model_name='barrierrecord',
name='barrier_types',
field=mptt.fields.TreeManyToManyField(blank=True, db_index=True, related_name='types', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barrierrecord',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierCountry'),
),
migrations.AddField(
model_name='barriernotification',
name='barrier_source',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),
),
migrations.AddField(
model_name='barriernotification',
name='barrier_types',
field=mptt.fields.TreeManyToManyField(blank=True, db_index=True, related_name='barrier_types', to='barriers.BarrierType'),
),
migrations.AddField(
model_name='barriernotification',
name='country',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notification_countries', to='barriers.BarrierCountry'),
),
migrations.AddField(
model_name='barriertype',
name='barrier_source',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='barriers.BarrierSource'),
),
]
| [
0,
1,
2,
3,
4
] |
1,315 | a34584a71fdff65e5b1bb15a6304af79774dac2c | <mask token>
def upgrade():
op.drop_constraint('component_files_component_id_fkey', 'component_files')
op.drop_constraint('components_topic_id_fkey', 'components')
op.drop_constraint('files_job_id_fkey', 'files')
op.drop_constraint('files_jobstate_id_fkey', 'files')
op.drop_constraint('files_team_id_fkey', 'files')
op.drop_constraint('files_test_id_fkey', 'files')
op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')
op.drop_constraint('jobs_team_id_fkey', 'jobs')
op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')
op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')
op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')
op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')
op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')
op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')
op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')
op.drop_constraint('jobstates_team_id_fkey', 'jobstates')
op.drop_constraint('jobstates_job_id_fkey', 'jobstates')
op.drop_constraint('logs_team_id_fkey', 'logs')
op.drop_constraint('logs_user_id_fkey', 'logs')
op.drop_constraint('metas_job_id_fkey', 'metas')
op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')
op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')
op.drop_constraint('remotecis_team_id_fkey', 'remotecis')
op.drop_constraint('tests_team_id_fkey', 'tests')
op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')
op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')
op.drop_constraint('topics_next_topic_fkey', 'topics')
op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')
op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')
op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')
op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')
op.drop_constraint('users_team_id_fkey', 'users')
op.execute(
'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.create_foreign_key('component_files_component_id_fkey',
'component_files', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('components_topic_id_fkey', 'components',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [
'jobstate_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [
'test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [
'id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'
)
op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',
'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [
'remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [
'previous_job_id'], ['id'])
op.create_foreign_key('jobs_components_component_id_fkey',
'jobs_components', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',
'jobs', ['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',
'issues', ['issue_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [
'job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_remoteci_id_fkey',
'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [
'next_topic'], ['id'])
op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',
'teams', ['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',
'users', ['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_remoteci_id_fkey',
'user_remotecis', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
<mask token>
| <mask token>
def upgrade():
op.drop_constraint('component_files_component_id_fkey', 'component_files')
op.drop_constraint('components_topic_id_fkey', 'components')
op.drop_constraint('files_job_id_fkey', 'files')
op.drop_constraint('files_jobstate_id_fkey', 'files')
op.drop_constraint('files_team_id_fkey', 'files')
op.drop_constraint('files_test_id_fkey', 'files')
op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')
op.drop_constraint('jobs_team_id_fkey', 'jobs')
op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')
op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')
op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')
op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')
op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')
op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')
op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')
op.drop_constraint('jobstates_team_id_fkey', 'jobstates')
op.drop_constraint('jobstates_job_id_fkey', 'jobstates')
op.drop_constraint('logs_team_id_fkey', 'logs')
op.drop_constraint('logs_user_id_fkey', 'logs')
op.drop_constraint('metas_job_id_fkey', 'metas')
op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')
op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')
op.drop_constraint('remotecis_team_id_fkey', 'remotecis')
op.drop_constraint('tests_team_id_fkey', 'tests')
op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')
op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')
op.drop_constraint('topics_next_topic_fkey', 'topics')
op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')
op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')
op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')
op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')
op.drop_constraint('users_team_id_fkey', 'users')
op.execute(
'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.create_foreign_key('component_files_component_id_fkey',
'component_files', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('components_topic_id_fkey', 'components',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [
'jobstate_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [
'test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [
'id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'
)
op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',
'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [
'remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [
'previous_job_id'], ['id'])
op.create_foreign_key('jobs_components_component_id_fkey',
'jobs_components', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',
'jobs', ['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',
'issues', ['issue_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [
'job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_remoteci_id_fkey',
'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [
'next_topic'], ['id'])
op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',
'teams', ['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',
'users', ['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_remoteci_id_fkey',
'user_remotecis', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
| <mask token>
revision = '1bb42ff54435'
down_revision = '6bbbf58ed9de'
branch_labels = None
depends_on = None
<mask token>
def upgrade():
op.drop_constraint('component_files_component_id_fkey', 'component_files')
op.drop_constraint('components_topic_id_fkey', 'components')
op.drop_constraint('files_job_id_fkey', 'files')
op.drop_constraint('files_jobstate_id_fkey', 'files')
op.drop_constraint('files_team_id_fkey', 'files')
op.drop_constraint('files_test_id_fkey', 'files')
op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')
op.drop_constraint('jobs_team_id_fkey', 'jobs')
op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')
op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')
op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')
op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')
op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')
op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')
op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')
op.drop_constraint('jobstates_team_id_fkey', 'jobstates')
op.drop_constraint('jobstates_job_id_fkey', 'jobstates')
op.drop_constraint('logs_team_id_fkey', 'logs')
op.drop_constraint('logs_user_id_fkey', 'logs')
op.drop_constraint('metas_job_id_fkey', 'metas')
op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')
op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')
op.drop_constraint('remotecis_team_id_fkey', 'remotecis')
op.drop_constraint('tests_team_id_fkey', 'tests')
op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')
op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')
op.drop_constraint('topics_next_topic_fkey', 'topics')
op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')
op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')
op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')
op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')
op.drop_constraint('users_team_id_fkey', 'users')
op.execute(
'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.create_foreign_key('component_files_component_id_fkey',
'component_files', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('components_topic_id_fkey', 'components',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [
'jobstate_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [
'test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [
'id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'
)
op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',
'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [
'remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [
'previous_job_id'], ['id'])
op.create_foreign_key('jobs_components_component_id_fkey',
'jobs_components', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',
'jobs', ['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',
'issues', ['issue_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [
'job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_remoteci_id_fkey',
'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [
'next_topic'], ['id'])
op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',
'teams', ['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',
'users', ['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_remoteci_id_fkey',
'user_remotecis', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
| <mask token>
revision = '1bb42ff54435'
down_revision = '6bbbf58ed9de'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
op.drop_constraint('component_files_component_id_fkey', 'component_files')
op.drop_constraint('components_topic_id_fkey', 'components')
op.drop_constraint('files_job_id_fkey', 'files')
op.drop_constraint('files_jobstate_id_fkey', 'files')
op.drop_constraint('files_team_id_fkey', 'files')
op.drop_constraint('files_test_id_fkey', 'files')
op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')
op.drop_constraint('jobs_team_id_fkey', 'jobs')
op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')
op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')
op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')
op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')
op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')
op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')
op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')
op.drop_constraint('jobstates_team_id_fkey', 'jobstates')
op.drop_constraint('jobstates_job_id_fkey', 'jobstates')
op.drop_constraint('logs_team_id_fkey', 'logs')
op.drop_constraint('logs_user_id_fkey', 'logs')
op.drop_constraint('metas_job_id_fkey', 'metas')
op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')
op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')
op.drop_constraint('remotecis_team_id_fkey', 'remotecis')
op.drop_constraint('tests_team_id_fkey', 'tests')
op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')
op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')
op.drop_constraint('topics_next_topic_fkey', 'topics')
op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')
op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')
op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')
op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')
op.drop_constraint('users_team_id_fkey', 'users')
op.execute(
'ALTER TABLE component_files ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE component_files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE components ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN jobstate_id TYPE UUID USING jobstate_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE files ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE issues ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE UUID USING jobdefinition_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE UUID USING previous_job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN component_id TYPE UUID USING component_id::uuid'
)
op.execute(
'ALTER TABLE jobs_components ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE UUID USING issue_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE jobstates ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE logs ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE metas ALTER COLUMN job_id TYPE UUID USING job_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE remotecis ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE teams ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE tests ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topic_tests ALTER COLUMN test_id TYPE UUID USING test_id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE topics ALTER COLUMN next_topic TYPE UUID USING next_topic::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE UUID USING topic_id::uuid'
)
op.execute(
'ALTER TABLE topics_teams ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE UUID USING user_id::uuid'
)
op.execute(
'ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE UUID USING remoteci_id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN id TYPE UUID USING id::uuid'
)
op.execute(
'ALTER TABLE users ALTER COLUMN team_id TYPE UUID USING team_id::uuid'
)
op.create_foreign_key('component_files_component_id_fkey',
'component_files', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('components_topic_id_fkey', 'components',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_job_id_fkey', 'files', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('files_jobstate_id_fkey', 'files', 'jobstates', [
'jobstate_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_team_id_fkey', 'files', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_test_id_fkey', 'files', 'tests', [
'test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests', 'jobdefinitions', ['jobdefinition_id'], [
'id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests', 'tests', ['test_id'], ['id'], ondelete='CASCADE'
)
op.create_foreign_key('jobdefinitions_topic_id_fkey', 'jobdefinitions',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_team_id_fkey', 'jobs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_jobdefinition_id_fkey', 'jobs',
'jobdefinitions', ['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_remoteci_id_fkey', 'jobs', 'remotecis', [
'remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_previous_job_id_fkey', 'jobs', 'jobs', [
'previous_job_id'], ['id'])
op.create_foreign_key('jobs_components_component_id_fkey',
'jobs_components', 'components', ['component_id'], ['id'], ondelete
='CASCADE')
op.create_foreign_key('jobs_components_job_id_fkey', 'jobs_components',
'jobs', ['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_issue_id_fkey', 'jobs_issues',
'issues', ['issue_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_job_id_fkey', 'jobs_issues', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_team_id_fkey', 'jobstates', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_job_id_fkey', 'jobstates', 'jobs', [
'job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_team_id_fkey', 'logs', 'teams', ['team_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('logs_user_id_fkey', 'logs', 'users', ['user_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('metas_job_id_fkey', 'metas', 'jobs', ['job_id'],
['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_test_id_fkey', 'remoteci_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_remoteci_id_fkey',
'remoteci_tests', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('remotecis_team_id_fkey', 'remotecis', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('tests_team_id_fkey', 'tests', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_test_id_fkey', 'topic_tests',
'tests', ['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_topic_id_fkey', 'topic_tests',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_next_topic_fkey', 'topics', 'topics', [
'next_topic'], ['id'])
op.create_foreign_key('topics_teams_topic_id_fkey', 'topics_teams',
'topics', ['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_teams_team_id_fkey', 'topics_teams',
'teams', ['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_user_id_fkey', 'user_remotecis',
'users', ['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_remoteci_id_fkey',
'user_remotecis', 'remotecis', ['remoteci_id'], ['id'], ondelete=
'CASCADE')
op.create_foreign_key('users_team_id_fkey', 'users', 'teams', [
'team_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
| #
# Copyright (C) 2017 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Change varchar ID to UUID
Revision ID: 1bb42ff54435
Revises: 6bbbf58ed9de
Create Date: 2017-02-07 09:28:37.493302
"""
# revision identifiers, used by Alembic.
revision = '1bb42ff54435'
down_revision = '6bbbf58ed9de'
branch_labels = None
depends_on = None
from alembic import op
def upgrade():
# Drop constraint
op.drop_constraint('component_files_component_id_fkey', 'component_files')
op.drop_constraint('components_topic_id_fkey', 'components')
op.drop_constraint('files_job_id_fkey', 'files')
op.drop_constraint('files_jobstate_id_fkey', 'files')
op.drop_constraint('files_team_id_fkey', 'files')
op.drop_constraint('files_test_id_fkey', 'files')
op.drop_constraint('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests')
op.drop_constraint('jobdefinitions_topic_id_fkey', 'jobdefinitions')
op.drop_constraint('jobs_team_id_fkey', 'jobs')
op.drop_constraint('jobs_jobdefinition_id_fkey', 'jobs')
op.drop_constraint('jobs_remoteci_id_fkey', 'jobs')
op.drop_constraint('jobs_previous_job_id_fkey', 'jobs')
op.drop_constraint('jobs_components_component_id_fkey', 'jobs_components')
op.drop_constraint('jobs_components_job_id_fkey', 'jobs_components')
op.drop_constraint('jobs_issues_issue_id_fkey', 'jobs_issues')
op.drop_constraint('jobs_issues_job_id_fkey', 'jobs_issues')
op.drop_constraint('jobstates_team_id_fkey', 'jobstates')
op.drop_constraint('jobstates_job_id_fkey', 'jobstates')
op.drop_constraint('logs_team_id_fkey', 'logs')
op.drop_constraint('logs_user_id_fkey', 'logs')
op.drop_constraint('metas_job_id_fkey', 'metas')
op.drop_constraint('remoteci_tests_test_id_fkey', 'remoteci_tests')
op.drop_constraint('remoteci_tests_remoteci_id_fkey', 'remoteci_tests')
op.drop_constraint('remotecis_team_id_fkey', 'remotecis')
op.drop_constraint('tests_team_id_fkey', 'tests')
op.drop_constraint('topic_tests_test_id_fkey', 'topic_tests')
op.drop_constraint('topic_tests_topic_id_fkey', 'topic_tests')
op.drop_constraint('topics_next_topic_fkey', 'topics')
op.drop_constraint('topics_teams_topic_id_fkey', 'topics_teams')
op.drop_constraint('topics_teams_team_id_fkey', 'topics_teams')
op.drop_constraint('user_remotecis_user_id_fkey', 'user_remotecis')
op.drop_constraint('user_remotecis_remoteci_id_fkey', 'user_remotecis')
op.drop_constraint('users_team_id_fkey', 'users')
# Change type
# Table component_files
op.execute("ALTER TABLE component_files ALTER COLUMN component_id TYPE \
UUID USING component_id::uuid")
op.execute("ALTER TABLE component_files ALTER COLUMN id TYPE \
UUID USING id::uuid")
# Table components
op.execute("ALTER TABLE components ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE components ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
# Table files
op.execute("ALTER TABLE files ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN jobstate_id TYPE \
UUID USING jobstate_id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
op.execute("ALTER TABLE files ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table issues
op.execute("ALTER TABLE issues ALTER COLUMN id TYPE \
UUID USING id::uuid")
# Table jobdefinition_tests
op.execute("ALTER TABLE jobdefinition_tests ALTER COLUMN jobdefinition_id \
TYPE UUID USING jobdefinition_id::uuid")
op.execute("ALTER TABLE jobdefinition_tests ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table jobdefinitions
op.execute("ALTER TABLE jobdefinitions ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE jobdefinitions ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
# Table jobs
op.execute("ALTER TABLE jobs ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN jobdefinition_id TYPE \
UUID USING jobdefinition_id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN remoteci_id TYPE \
UUID USING remoteci_id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
op.execute("ALTER TABLE jobs ALTER COLUMN previous_job_id TYPE \
UUID USING previous_job_id::uuid")
# Table jobs_components
op.execute("ALTER TABLE jobs_components ALTER COLUMN component_id TYPE \
UUID USING component_id::uuid")
op.execute("ALTER TABLE jobs_components ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
# Table jobs_issues
op.execute("ALTER TABLE jobs_issues ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
op.execute("ALTER TABLE jobs_issues ALTER COLUMN issue_id TYPE \
UUID USING issue_id::uuid")
# Table jobstates
op.execute("ALTER TABLE jobstates ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE jobstates ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
op.execute("ALTER TABLE jobstates ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table logs
op.execute("ALTER TABLE logs ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE logs ALTER COLUMN user_id TYPE \
UUID USING user_id::uuid")
op.execute("ALTER TABLE logs ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table metas
op.execute("ALTER TABLE metas ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE metas ALTER COLUMN job_id TYPE \
UUID USING job_id::uuid")
# Table remoteci_tests
op.execute("ALTER TABLE remoteci_tests ALTER COLUMN remoteci_id TYPE \
UUID USING remoteci_id::uuid")
op.execute("ALTER TABLE remoteci_tests ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table remotecis
op.execute("ALTER TABLE remotecis ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE remotecis ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table teams
op.execute("ALTER TABLE teams ALTER COLUMN id TYPE \
UUID USING id::uuid")
# Table tests
op.execute("ALTER TABLE tests ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE tests ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table topic_tests
op.execute("ALTER TABLE topic_tests ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
op.execute("ALTER TABLE topic_tests ALTER COLUMN test_id TYPE \
UUID USING test_id::uuid")
# Table topics
op.execute("ALTER TABLE topics ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE topics ALTER COLUMN next_topic TYPE \
UUID USING next_topic::uuid")
# Table topics_teams
op.execute("ALTER TABLE topics_teams ALTER COLUMN topic_id TYPE \
UUID USING topic_id::uuid")
op.execute("ALTER TABLE topics_teams ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Table user_remotecis
op.execute("ALTER TABLE user_remotecis ALTER COLUMN user_id TYPE \
UUID USING user_id::uuid")
op.execute("ALTER TABLE user_remotecis ALTER COLUMN remoteci_id TYPE \
UUID USING remoteci_id::uuid")
# Table users
op.execute("ALTER TABLE users ALTER COLUMN id TYPE \
UUID USING id::uuid")
op.execute("ALTER TABLE users ALTER COLUMN team_id TYPE \
UUID USING team_id::uuid")
# Re-Create constraint
op.create_foreign_key('component_files_component_id_fkey',
'component_files', 'components',
['component_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('components_topic_id_fkey',
'components', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_job_id_fkey',
'files', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_jobstate_id_fkey',
'files', 'jobstates',
['jobstate_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_team_id_fkey',
'files', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('files_test_id_fkey',
'files', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_jobdefinition_id_fkey',
'jobdefinition_tests', 'jobdefinitions',
['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinition_tests_test_id_fkey',
'jobdefinition_tests', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobdefinitions_topic_id_fkey',
'jobdefinitions', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_team_id_fkey',
'jobs', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_jobdefinition_id_fkey',
'jobs', 'jobdefinitions',
['jobdefinition_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_remoteci_id_fkey',
'jobs', 'remotecis',
['remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_previous_job_id_fkey',
'jobs', 'jobs',
['previous_job_id'], ['id'])
op.create_foreign_key('jobs_components_component_id_fkey',
'jobs_components', 'components',
['component_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_components_job_id_fkey',
'jobs_components', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_issue_id_fkey',
'jobs_issues', 'issues',
['issue_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobs_issues_job_id_fkey',
'jobs_issues', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_team_id_fkey',
'jobstates', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('jobstates_job_id_fkey',
'jobstates', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_team_id_fkey',
'logs', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('logs_user_id_fkey',
'logs', 'users',
['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('metas_job_id_fkey',
'metas', 'jobs',
['job_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_test_id_fkey',
'remoteci_tests', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remoteci_tests_remoteci_id_fkey',
'remoteci_tests', 'remotecis',
['remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('remotecis_team_id_fkey',
'remotecis', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('tests_team_id_fkey',
'tests', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_test_id_fkey',
'topic_tests', 'tests',
['test_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topic_tests_topic_id_fkey',
'topic_tests', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_next_topic_fkey',
'topics', 'topics',
['next_topic'], ['id'])
op.create_foreign_key('topics_teams_topic_id_fkey',
'topics_teams', 'topics',
['topic_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('topics_teams_team_id_fkey',
'topics_teams', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_user_id_fkey',
'user_remotecis', 'users',
['user_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('user_remotecis_remoteci_id_fkey',
'user_remotecis', 'remotecis',
['remoteci_id'], ['id'], ondelete='CASCADE')
op.create_foreign_key('users_team_id_fkey',
'users', 'teams',
['team_id'], ['id'], ondelete='CASCADE')
def downgrade():
pass
| [
1,
2,
3,
4,
5
] |
1,316 | a288e66e64d386afd13bfc7b5b13d4a47d15cd6d | <mask token>
class Client(Base):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class TakenBook(Base):
__tablename__ = 'taken_books'
id = Column(Integer, primary_key=True)
book_id = Column(Integer, ForeignKey('books.id'))
client_id = Column(Integer, ForeignKey('clients.id'))
taken_date = Column(Date)
return_date = Column(Date, default=None)
| <mask token>
class Author(Base):
<mask token>
<mask token>
<mask token>
<mask token>
class Book(Base):
__tablename__ = 'books'
id = Column(Integer, primary_key=True)
title = Column(String)
count = Column(Integer)
taken_count = Column(Integer)
authors_id = Column(postgresql.ARRAY(Integer), default=None)
publisher_id = Column(Integer)
publishing_year = Column(Integer)
class Client(Base):
__tablename__ = 'clients'
id = Column(Integer, primary_key=True)
type = Column(String)
full_name = Column(String)
taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])
all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])
class TakenBook(Base):
__tablename__ = 'taken_books'
id = Column(Integer, primary_key=True)
book_id = Column(Integer, ForeignKey('books.id'))
client_id = Column(Integer, ForeignKey('clients.id'))
taken_date = Column(Date)
return_date = Column(Date, default=None)
| <mask token>
class Author(Base):
__tablename__ = 'authors'
id = Column(Integer, primary_key=True)
full_name = Column(String)
taken_count = Column(Integer, default=0)
class Book(Base):
__tablename__ = 'books'
id = Column(Integer, primary_key=True)
title = Column(String)
count = Column(Integer)
taken_count = Column(Integer)
authors_id = Column(postgresql.ARRAY(Integer), default=None)
publisher_id = Column(Integer)
publishing_year = Column(Integer)
class Client(Base):
__tablename__ = 'clients'
id = Column(Integer, primary_key=True)
type = Column(String)
full_name = Column(String)
taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])
all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])
class TakenBook(Base):
__tablename__ = 'taken_books'
id = Column(Integer, primary_key=True)
book_id = Column(Integer, ForeignKey('books.id'))
client_id = Column(Integer, ForeignKey('clients.id'))
taken_date = Column(Date)
return_date = Column(Date, default=None)
| <mask token>
class Publisher(Base):
__tablename__ = 'publishers'
id = Column(Integer, primary_key=True)
name = Column(String)
class Author(Base):
__tablename__ = 'authors'
id = Column(Integer, primary_key=True)
full_name = Column(String)
taken_count = Column(Integer, default=0)
class Book(Base):
__tablename__ = 'books'
id = Column(Integer, primary_key=True)
title = Column(String)
count = Column(Integer)
taken_count = Column(Integer)
authors_id = Column(postgresql.ARRAY(Integer), default=None)
publisher_id = Column(Integer)
publishing_year = Column(Integer)
class Client(Base):
__tablename__ = 'clients'
id = Column(Integer, primary_key=True)
type = Column(String)
full_name = Column(String)
taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])
all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])
class TakenBook(Base):
__tablename__ = 'taken_books'
id = Column(Integer, primary_key=True)
book_id = Column(Integer, ForeignKey('books.id'))
client_id = Column(Integer, ForeignKey('clients.id'))
taken_date = Column(Date)
return_date = Column(Date, default=None)
| # created by RomaOkorosso at 21.03.2021
# models.py
from datetime import datetime
from sqlalchemy import (
Column,
Integer,
String,
Boolean,
DateTime,
ForeignKey,
Date
)
from sqlalchemy.dialects import postgresql
from sqlalchemy.orm import relationship
from Database.database import Base
class Publisher(Base):
__tablename__ = "publishers"
id = Column(Integer, primary_key=True)
name = Column(String)
class Author(Base):
__tablename__ = "authors"
id = Column(Integer, primary_key=True)
full_name = Column(String)
taken_count = Column(Integer, default=0)
class Book(Base):
__tablename__ = "books"
id = Column(Integer, primary_key=True)
title = Column(String)
count = Column(Integer)
taken_count = Column(Integer)
authors_id = Column(postgresql.ARRAY(Integer), default=None)
publisher_id = Column(Integer)
publishing_year = Column(Integer)
class Client(Base):
__tablename__ = "clients"
id = Column(Integer, primary_key=True)
type = Column(String)
full_name = Column(String)
taken_books_now_id = Column(postgresql.ARRAY(Integer), default=[])
all_taken_books_id = Column(postgresql.ARRAY(Integer), default=[])
class TakenBook(Base):
__tablename__ = "taken_books"
id = Column(Integer, primary_key=True)
book_id = Column(Integer, ForeignKey("books.id"))
client_id = Column(Integer, ForeignKey("clients.id"))
taken_date = Column(Date)
return_date = Column(Date, default=None)
| [
3,
7,
8,
10,
12
] |
1,317 | 2ec41e02c95a270455c096e85829b7220eeda0c7 | <mask token>
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
<mask token>
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
<mask token>
| <mask token>
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
def upload_handler(file_obj, path_to_save):
destination = open(path_to_save, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
<mask token>
| <mask token>
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
def upload_handler(file_obj, path_to_save):
destination = open(path_to_save, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
if request.method == 'POST':
form = ExternalSubscriberUpload(request.POST, request.FILES)
if form.is_valid():
passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
messages.add_message(request, messages.INFO, _(
                'Subscribers successfully imported. %(passed)d added and %(failed)d failed '
) % {'passed': passed, 'failed': failed})
return redirect('admin:newsletter_externalsubscriber_changelist')
else:
form = ExternalSubscriberUpload()
return {'form': form}
| import xlrd
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.core import validators
from utils.views import render_to
from accounts.models import Account
from .models import ExternalSubscriber
from .forms import ExternalSubscriberUpload
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(validators.email_re, unicode(
error_message % row_number), 'invalid')(value)
def upload_handler(file_obj, path_to_save):
destination = open(path_to_save, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1, sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count += 1
except Exception as e:
fail_count += 1
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only(
'id')):
obj, created = ExternalSubscriber.objects.get_or_create(email=
subscriber['email'], defaults={'first_name': subscriber.get
('first_name'), 'last_name': subscriber.get('last_name')})
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and getattr(obj, field
) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
if request.method == 'POST':
form = ExternalSubscriberUpload(request.POST, request.FILES)
if form.is_valid():
passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
messages.add_message(request, messages.INFO, _(
                'Subscribers successfully imported. %(passed)d added and %(failed)d failed '
) % {'passed': passed, 'failed': failed})
return redirect('admin:newsletter_externalsubscriber_changelist')
else:
form = ExternalSubscriberUpload()
return {'form': form}
| import xlrd
from django.shortcuts import redirect
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.core import validators
from utils.views import render_to
from accounts.models import Account
from .models import ExternalSubscriber
from .forms import ExternalSubscriberUpload
def validate_email(value, row_number):
error_message = _(u'Invalid e-mail address on "%d" line.')
return validators.EmailValidator(
validators.email_re,
unicode(error_message % row_number),
'invalid'
)(value)
def upload_handler(file_obj, path_to_save):
destination = open(path_to_save, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
def get_externalsubscribers(file_obj):
pass_count = 0
fail_count = 0
PATH = '/tmp/import_subscribers.xls'
upload_handler(file_obj, PATH)
sheet = xlrd.open_workbook(PATH).sheet_by_index(0)
for i in range(1,sheet.nrows):
row = sheet.row(i)
if not row[0].value:
continue
subscriber = {}
subscriber['email'] = row[0].value
try:
validate_email(subscriber['email'].strip(), i)
pass_count+=1
except Exception as e:
fail_count+=1
#print e, u'"%s"' % subscriber['email']
continue
try:
subscriber['first_name'] = row[1].value
except IndexError:
pass
try:
subscriber['last_name'] = row[2].value
except IndexError:
pass
if not bool(Account.objects.filter(email=subscriber['email']).only('id')):
obj, created = ExternalSubscriber.objects.get_or_create(
email=subscriber['email'],
defaults={
'first_name': subscriber.get('first_name'),
'last_name': subscriber.get('last_name'),
}
)
if not created:
for field in ['first_name', 'last_name']:
if subscriber.get(field) and\
getattr(obj, field) != subscriber.get(field):
setattr(obj, field, subscriber.get(field))
obj.save()
return pass_count, fail_count
@render_to('newsletter/import_subscribers_form.html')
def import_subscribers(request):
if request.method == 'POST':
form = ExternalSubscriberUpload(request.POST, request.FILES)
if form.is_valid():
passed, failed = get_externalsubscribers(form.cleaned_data['xls'])
            messages.add_message(request, messages.INFO, _('Subscribers successfully imported. %(passed)d added and %(failed)d failed ') % {'passed':passed, 'failed': failed})
return redirect('admin:newsletter_externalsubscriber_changelist')
else:
form = ExternalSubscriberUpload()
return {'form': form}
| [
2,
3,
4,
5,
6
] |
1,318 | b42414b7d8ed80d8794ab7c49dfde1e5df0721f1 | <mask token>
| <mask token>
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
INSTALLED_APPS = ['django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles',
'admin_honeypot', 'bootstrap3', 'el_pagination', 'compressor',
'accounts', 'bot', 'home', 'pages', 'serve_media', 'events', 'gallery',
'groups', 'django_rq', 'surveys']
MIDDLEWARE_CLASSES = ['django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'csp.middleware.CSPMiddleware']
ROOT_URLCONF = 'config.urls'
TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR,
'templates/error_pages')], 'APP_DIRS': True, 'OPTIONS': {
'context_processors': ['django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages']}}]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [{'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'
}, {'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator'}, {
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator'}, {
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'static/locale/')]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
LOGIN_REDIRECT_URL = '/home'
TELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')
TELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')
TELEGRAM_BOT_USERNAME = 'groupsomebot'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_SERVE_USING_NGINX = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder', 'pipeline.finders.PipelineFinder')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
PIPELINE = {'PIPELINE_ENABLED': True, 'COMPILERS': (
'pipeline.compilers.stylus.StylusCompiler',), 'STYLESHEETS': {'main': {
'source_filenames': ('style/main.styl',), 'output_filename':
'style/main.css'}}, 'STYLUS_ARGUMENTS': '-c'}
CSP_STYLE_SRC = "'self'", "'unsafe-inline'", 'fonts.googleapis.com'
CSP_FONT_SRC = "'self'", 'fonts.gstatic.com'
| <mask token>
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ALLOWED_HOSTS = []
INSTALLED_APPS = ['django.contrib.admin', 'django.contrib.auth',
'django.contrib.contenttypes', 'django.contrib.sessions',
'django.contrib.messages', 'django.contrib.staticfiles',
'admin_honeypot', 'bootstrap3', 'el_pagination', 'compressor',
'accounts', 'bot', 'home', 'pages', 'serve_media', 'events', 'gallery',
'groups', 'django_rq', 'surveys']
MIDDLEWARE_CLASSES = ['django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'csp.middleware.CSPMiddleware']
ROOT_URLCONF = 'config.urls'
TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR,
'templates/error_pages')], 'APP_DIRS': True, 'OPTIONS': {
'context_processors': ['django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages']}}]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [{'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'
}, {'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator'}, {
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator'}, {
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [os.path.join(BASE_DIR, 'static/locale/')]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
LOGIN_REDIRECT_URL = '/home'
TELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')
TELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')
TELEGRAM_BOT_USERNAME = 'groupsomebot'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_SERVE_USING_NGINX = False
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder', 'pipeline.finders.PipelineFinder')
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
PIPELINE = {'PIPELINE_ENABLED': True, 'COMPILERS': (
'pipeline.compilers.stylus.StylusCompiler',), 'STYLESHEETS': {'main': {
'source_filenames': ('style/main.styl',), 'output_filename':
'style/main.css'}}, 'STYLUS_ARGUMENTS': '-c'}
CSP_STYLE_SRC = "'self'", "'unsafe-inline'", 'fonts.googleapis.com'
CSP_FONT_SRC = "'self'", 'fonts.gstatic.com'
| """
Django settings for gamelibrary project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# from django.conf.global_settings import TEMPLATE_CONTEXT_PROCESSORS
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'admin_honeypot',
'bootstrap3',
'el_pagination',
'compressor',
# 'pipeline',
'accounts',
'bot',
'home',
'pages',
'serve_media',
'events',
'gallery',
'groups',
'django_rq',
'surveys',
]
MIDDLEWARE_CLASSES = [
# 'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.cache.FetchFromCacheMiddleware',
'csp.middleware.CSPMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), os.path.join(BASE_DIR, 'templates/error_pages')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'static/locale/'),
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'assets/')
# Redirect to here after Login
LOGIN_REDIRECT_URL = '/home'
TELEGRAM_TOKEN = os.environ.get('GROUPSOME_TELEGRAM_TOKEN')
TELEGRAM_WEBHOOK_SECRET = os.environ.get('GROUPSOME_TELEGRAM_WEBHOOK_SECRET')
TELEGRAM_BOT_USERNAME = "groupsomebot"
# Media root directory
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_SERVE_USING_NGINX = False
# Needed for Endless Scrolling
# TEMPLATE_CONTEXT_PROCESSORS += (
# 'django.core.context_processors.request',
# )
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
PIPELINE = {
'PIPELINE_ENABLED': True,
'COMPILERS': (
'pipeline.compilers.stylus.StylusCompiler',
),
'STYLESHEETS': {
'main': {
'source_filenames': (
'style/main.styl',
),
'output_filename': 'style/main.css',
}
},
'STYLUS_ARGUMENTS': '-c',
}
CSP_STYLE_SRC = ("'self'", "'unsafe-inline'", "fonts.googleapis.com")
CSP_FONT_SRC = ("'self'", "fonts.gstatic.com")
| null | [
0,
1,
2,
3
] |
1,319 | 449ae193f8817d4ee2fe67eadf72d9c19b2c5e53 | <mask token>
class MovieRankings(models.Model):
<mask token>
<mask token>
<mask token>
class Movie(models.Model):
"""
    Database table for movies
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
| <mask token>
class MovieRankings(models.Model):
<mask token>
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
    Database table for movies
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
| <mask token>
class MovieRankings(models.Model):
"""
    Various movie ranking lists.
"""
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
    Database table for movies
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
| from __future__ import unicode_literals
import markdown
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class MovieRankings(models.Model):
"""
    Various movie ranking lists.
"""
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
    Database table for movies
"""
movie_name = models.CharField(max_length=64, blank=True)
douban_link = models.CharField(max_length=256, null=True, blank=True)
douban_score = models.CharField(max_length=64, null=True, blank=True)
douban_counter = models.PositiveIntegerField(default=0, blank=True)
imdb_link = models.CharField(max_length=256, null=True, blank=True)
imdb_score = models.CharField(max_length=64, null=True, blank=True)
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
country = models.CharField(max_length=64, null=True, blank=True)
dateyear = models.CharField(max_length=64, null=True, blank=True)
actor = models.CharField(max_length=256, null=True, blank=True)
director = models.CharField(max_length=256, null=True, blank=True)
style = models.CharField(max_length=64, null=True, blank=True)
movie_address = models.CharField(max_length=256, null=True, blank=True)
download_link = models.CharField(max_length=256, null=True, blank=True)
counter = models.PositiveIntegerField(default=0, blank=True)
original = models.CharField(max_length=256, null=True, blank=True)
status = models.IntegerField(null=True, blank=True)
image = models.CharField(max_length=256, null=True, blank=True)
spidertime = models.DateTimeField(auto_now_add=True, null=True)
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
language = models.CharField(max_length=64, null=True, blank=True)
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
class MovieHistory(models.Model):
user = models.ForeignKey(User)
movie = models.ForeignKey(Movie)
date = models.DateTimeField(auto_now_add=True)
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return '{%s}--{%s}' % (self.user.username, self.movie.movie_name)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import markdown
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
class MovieRankings(models.Model):
"""
    Various movie ranking lists.
"""
name = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class Movie(models.Model):
"""
    Database table for movies
"""
movie_name = models.CharField(max_length=64, blank=True)
    # Douban link; the value can be null, or the field can be left blank.
douban_link = models.CharField(max_length=256, null=True, blank=True)
    # Douban score.
douban_score = models.CharField(max_length=64, null=True, blank=True)
    # Number of Douban raters.
douban_counter = models.PositiveIntegerField(default=0, blank=True)
    # Imdb link.
imdb_link = models.CharField(max_length=256, null=True, blank=True)
    # Imdb score.
imdb_score = models.CharField(max_length=64, null=True, blank=True)
    # Number of Imdb raters.
imdb_counter = models.PositiveIntegerField(default=0, blank=True)
    # Link on this site.
nomovie_link = models.CharField(max_length=256, null=True, blank=True)
    # Score on this site.
nomovie_score = models.CharField(max_length=64, null=True, blank=True)
    # Number of raters on this site.
nomovie_counter = models.PositiveIntegerField(default=0, blank=True)
    # Country of release.
country = models.CharField(max_length=64, null=True, blank=True)
    # Release date.
dateyear = models.CharField(max_length=64, null=True, blank=True)
    # Lead actors.
actor = models.CharField(max_length=256, null=True, blank=True)
    # Director.
director = models.CharField(max_length=256, null=True, blank=True)
    # Movie genre.
style = models.CharField(max_length=64, null=True, blank=True)
    # Movie playback address.
movie_address = models.CharField(max_length=256, null=True, blank=True)
    # Movie download link.
download_link = models.CharField(max_length=256, null=True, blank=True)
    # Number of times the movie has been played on this site.
counter = models.PositiveIntegerField(default=0, blank=True)
    # Movie source:
    # 0: Douban top250  1: imdb top250  2: regular Douban  3: regular imdb
    # 4: exists on both Douban and imdb  5: added by a user
original = models.CharField(max_length=256, null=True, blank=True)
    # 1: approved  0: not approved  2: under review
status = models.IntegerField(null=True, blank=True)
    # Image save path
image = models.CharField(max_length=256, null=True, blank=True)
    # Time the crawled movie was added to the database
spidertime = models.DateTimeField(auto_now_add=True, null=True)
    # About the movie
aboutmovie = models.CharField(max_length=256, null=True, blank=True)
    # Movie language
language = models.CharField(max_length=64, null=True, blank=True)
    # Dytt (Movie Heaven) search URL
dyttsearch = models.CharField(max_length=256, null=True, blank=True)
    # Dytt (Movie Heaven) movie detail page from search
dyttdetail = models.CharField(max_length=256, null=True, blank=True)
movierankings = models.ForeignKey(MovieRankings, null=True, blank=True)
def __unicode__(self):
return self.movie_name
# def get_comments(self):
class MovieHistory(models.Model):
    # The user who watched.
    # A user has a one-to-many relation to MovieHistory and can watch multiple movies.
user = models.ForeignKey(User)
    # The movie that was watched.
movie = models.ForeignKey(Movie)
    # The time it was watched.
date = models.DateTimeField(auto_now_add=True)
    # 0 means the user watched the movie, 1 means favorited, 2 means recommended.
marked = models.IntegerField(blank=True, null=True)
def __unicode__(self):
return "{%s}--{%s}" % (self.user.username, self.movie.movie_name)
| [
8,
10,
11,
12,
13
] |
1,320 | 902159d9ad3a1e36b69142518007b5d4bcaef0f3 | <mask token>
| <mask token>
def crawl(file):
gis = GIS()
map = gis.map('United States')
map
job_df = pd.read_csv(Point_v1.CONSULTING_FILE).append(pd.read_csv(
Point_v1.DS_FILE)).append(pd.read_csv(Point_v1.SDE_FILE))
company_loc_df = pd.DataFrame()
company_loc_df['company'] = job_df['company'].unique()
geo_info = company_loc_df['company'].apply(lambda company: geocode(
company)[0] if geocode(company) else None)
company_loc_df['longitude'] = geo_info.apply(lambda info: info[
'location']['x'] if info else None)
company_loc_df['latitude'] = geo_info.apply(lambda info: info[
'location']['y'] if info else None)
company_loc_df['city'] = geo_info.apply(lambda info: info['attributes']
['City'] if info else None)
company_loc_df['state'] = geo_info.apply(lambda info: info['attributes'
]['RegionAbbr'] if info else None)
company_loc_df.to_csv(file, encoding='utf-8', index=False)
| from arcgis.geocoding import geocode
from arcgis.gis import GIS
import pandas as pd
import Point_v1
<mask token>
def crawl(file):
gis = GIS()
map = gis.map('United States')
map
job_df = pd.read_csv(Point_v1.CONSULTING_FILE).append(pd.read_csv(
Point_v1.DS_FILE)).append(pd.read_csv(Point_v1.SDE_FILE))
company_loc_df = pd.DataFrame()
company_loc_df['company'] = job_df['company'].unique()
geo_info = company_loc_df['company'].apply(lambda company: geocode(
company)[0] if geocode(company) else None)
company_loc_df['longitude'] = geo_info.apply(lambda info: info[
'location']['x'] if info else None)
company_loc_df['latitude'] = geo_info.apply(lambda info: info[
'location']['y'] if info else None)
company_loc_df['city'] = geo_info.apply(lambda info: info['attributes']
['City'] if info else None)
company_loc_df['state'] = geo_info.apply(lambda info: info['attributes'
]['RegionAbbr'] if info else None)
company_loc_df.to_csv(file, encoding='utf-8', index=False)
| from arcgis.geocoding import geocode
from arcgis.gis import GIS
import pandas as pd
import Point_v1
"""
This module is used to get the location information of different companies from the arcgis API.
"""
def crawl(file):
gis = GIS()
map = gis.map("United States")
map
# read all kinds of job files
job_df = pd.read_csv(Point_v1.CONSULTING_FILE).append(
pd.read_csv(Point_v1.DS_FILE)).append(
pd.read_csv(Point_v1.SDE_FILE))
company_loc_df = pd.DataFrame()
company_loc_df["company"] = job_df["company"].unique()
geo_info = company_loc_df["company"].apply(lambda company: geocode(company)[0] if geocode(company) else None)
company_loc_df['longitude'] = geo_info.apply(lambda info: info["location"]["x"] if info else None)
company_loc_df['latitude'] = geo_info.apply(lambda info: info["location"]["y"] if info else None)
company_loc_df['city'] = geo_info.apply(lambda info: info['attributes']['City'] if info else None)
company_loc_df['state'] = geo_info.apply(lambda info: info['attributes']['RegionAbbr'] if info else None)
company_loc_df.to_csv(file, encoding='utf-8', index=False)
| null | [
0,
1,
2,
3
] |
1,321 | bad13218a7a9e687fbd29099ca80771296789d36 | <mask token>
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('Cover Letter Developer')
self.label1 = QLabel('Input Company Name')
self.edit1 = QLineEdit('')
self.label2 = QLabel('Input Position Title')
self.edit2 = QLineEdit('')
self.label3 = QLabel('How did you get introduced to the company?')
self.edit3 = QLineEdit('')
self.label4 = QLabel(
'What skills do you have that would help the COOP/Internship')
self.edit4 = QLineEdit('')
self.button = QPushButton('Develop')
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.edit1)
layout.addWidget(self.label2)
layout.addWidget(self.edit2)
layout.addWidget(self.label3)
layout.addWidget(self.edit3)
layout.addWidget(self.label4)
layout.addWidget(self.edit4)
layout.addWidget(self.button)
self.setLayout(layout)
self.button.clicked.connect(self.coverlet)
<mask token>
<mask token>
| <mask token>
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('Cover Letter Developer')
self.label1 = QLabel('Input Company Name')
self.edit1 = QLineEdit('')
self.label2 = QLabel('Input Position Title')
self.edit2 = QLineEdit('')
self.label3 = QLabel('How did you get introduced to the company?')
self.edit3 = QLineEdit('')
self.label4 = QLabel(
'What skills do you have that would help the COOP/Internship')
self.edit4 = QLineEdit('')
self.button = QPushButton('Develop')
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.edit1)
layout.addWidget(self.label2)
layout.addWidget(self.edit2)
layout.addWidget(self.label3)
layout.addWidget(self.edit3)
layout.addWidget(self.label4)
layout.addWidget(self.edit4)
layout.addWidget(self.button)
self.setLayout(layout)
self.button.clicked.connect(self.coverlet)
def coverlet(self):
name = self.edit1.text()
pos = self.edit2.text()
intro = self.edit3.text()
skills = self.edit4.text()
mytext = '\n Dear ' + name + """’s Hiring Team,
""" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + """
""" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
""" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + """ initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
""" + ' ' + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
"""
anothertext = """
Respectfully yours,
Martynas Baranauskas
[email protected]
781-572-9775
Personal Website: https://baranauskasm.wixsite.com/mysite
or scan QR code with smartphone camera
"""
document = Document()
p = document.add_paragraph(mytext)
g = document.add_paragraph(anothertext)
k = document.add_picture('qr_code.png', width=Inches(0.7))
filename = name + '_' + pos + '_baranauskas_.docx'
document.save(filename)
print('-----------------------------------------------------')
        print(name + '_' + pos + '_baranauskas.docx document was developed')
print('------------------------------------------------------')
self.edit1.clear()
self.edit2.clear()
self.edit3.clear()
self.edit4.clear()
<mask token>
| <mask token>
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('Cover Letter Developer')
self.label1 = QLabel('Input Company Name')
self.edit1 = QLineEdit('')
self.label2 = QLabel('Input Position Title')
self.edit2 = QLineEdit('')
self.label3 = QLabel('How did you get introduced to the company?')
self.edit3 = QLineEdit('')
self.label4 = QLabel(
'What skills do you have that would help the COOP/Internship')
self.edit4 = QLineEdit('')
self.button = QPushButton('Develop')
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.edit1)
layout.addWidget(self.label2)
layout.addWidget(self.edit2)
layout.addWidget(self.label3)
layout.addWidget(self.edit3)
layout.addWidget(self.label4)
layout.addWidget(self.edit4)
layout.addWidget(self.button)
self.setLayout(layout)
self.button.clicked.connect(self.coverlet)
def coverlet(self):
name = self.edit1.text()
pos = self.edit2.text()
intro = self.edit3.text()
skills = self.edit4.text()
mytext = '\n Dear ' + name + """’s Hiring Team,
""" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + """
""" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
""" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + """ initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
""" + ' ' + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
"""
anothertext = """
Respectfully yours,
Martynas Baranauskas
[email protected]
781-572-9775
Personal Website: https://baranauskasm.wixsite.com/mysite
or scan QR code with smartphone camera
"""
document = Document()
p = document.add_paragraph(mytext)
g = document.add_paragraph(anothertext)
k = document.add_picture('qr_code.png', width=Inches(0.7))
filename = name + '_' + pos + '_baranauskas_.docx'
document.save(filename)
print('-----------------------------------------------------')
        print(name + '_' + pos + '_baranauskas.docx document was developed')
print('------------------------------------------------------')
self.edit1.clear()
self.edit2.clear()
self.edit3.clear()
self.edit4.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
form = Form()
form.resize(1300, 250)
form.show()
sys.exit(app.exec_())
| import sys
from PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton, QVBoxLayout, QLabel, QWidget
from docx import Document
from docx.shared import Inches
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle('Cover Letter Developer')
self.label1 = QLabel('Input Company Name')
self.edit1 = QLineEdit('')
self.label2 = QLabel('Input Position Title')
self.edit2 = QLineEdit('')
self.label3 = QLabel('How did you get introduced to the company?')
self.edit3 = QLineEdit('')
self.label4 = QLabel(
'What skills do you have that would help the COOP/Internship')
self.edit4 = QLineEdit('')
self.button = QPushButton('Develop')
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.edit1)
layout.addWidget(self.label2)
layout.addWidget(self.edit2)
layout.addWidget(self.label3)
layout.addWidget(self.edit3)
layout.addWidget(self.label4)
layout.addWidget(self.edit4)
layout.addWidget(self.button)
self.setLayout(layout)
self.button.clicked.connect(self.coverlet)
def coverlet(self):
name = self.edit1.text()
pos = self.edit2.text()
intro = self.edit3.text()
skills = self.edit4.text()
mytext = '\n Dear ' + name + """’s Hiring Team,
""" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + """
""" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
""" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + """ initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
""" + ' ' + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
"""
anothertext = """
Respectfully yours,
Martynas Baranauskas
[email protected]
781-572-9775
Personal Website: https://baranauskasm.wixsite.com/mysite
or scan QR code with smartphone camera
"""
document = Document()
p = document.add_paragraph(mytext)
g = document.add_paragraph(anothertext)
k = document.add_picture('qr_code.png', width=Inches(0.7))
filename = name + '_' + pos + '_baranauskas_.docx'
document.save(filename)
print('-----------------------------------------------------')
        print(name + '_' + pos + '_baranauskas.docx document was developed')
print('------------------------------------------------------')
self.edit1.clear()
self.edit2.clear()
self.edit3.clear()
self.edit4.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
form = Form()
form.resize(1300, 250)
form.show()
sys.exit(app.exec_())
| import sys
from PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton,QVBoxLayout, QLabel, QWidget
from docx import Document
from docx.shared import Inches
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
#set the size
#Creat widgets
self.setWindowTitle("Cover Letter Developer")
self.label1 = QLabel('Input Company Name')
self.edit1 = QLineEdit("")
self.label2 = QLabel('Input Position Title')
self.edit2 = QLineEdit("")
self.label3 = QLabel('How did you get introduced to the company?')
self.edit3 = QLineEdit("")
self.label4 = QLabel('What skills do you have that would help the COOP/Internship')
self.edit4 = QLineEdit("")
self.button = QPushButton("Develop")
# Creat layout and add widgets
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.edit1)
layout.addWidget(self.label2)
layout.addWidget(self.edit2)
layout.addWidget(self.label3)
layout.addWidget(self.edit3)
layout.addWidget(self.label4)
layout.addWidget(self.edit4)
layout.addWidget(self.button)
#set dialog layout
self.setLayout(layout)
self.button.clicked.connect(self.coverlet)
def coverlet(self):
name = self.edit1.text()
pos = self.edit2.text()
intro = self.edit3.text()
skills = self.edit4.text()
mytext = """
Dear """ + name + """’s Hiring Team,
\n
""" + """ """ + """ I am writing to apply to the """ + pos + """ Intern/COOP position at """ + name + """. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. """ + intro + """
"""+ """As an intern at """ + name + """ , I will bring my toolset of """ + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
""" + """ """ + """ As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to """ + name + """. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for """ + name + """ and gain experience in engineering and further """+ name +""" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
""" + """ """ + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
"""
anothertext = """
Respectfully yours,
Martynas Baranauskas
[email protected]
781-572-9775
Personal Website: https://baranauskasm.wixsite.com/mysite
or scan QR code with smartphone camera
"""
document = Document()
p = document.add_paragraph(mytext)
g = document.add_paragraph(anothertext)
k = document.add_picture('qr_code.png', width=Inches(0.7))
# document.add_page_break()
# the saving of the document and the path to the
filename = name + '_' + pos + '_baranauskas_.docx'
# filepath = r'C:\Users\baranauskasm\Desktop\COOP Stuff\Summer 2020 COOP (future)\cover letters\automated cover letters'
document.save(filename)
print("-----------------------------------------------------")
print(name + "_" + pos + "_baranauskas.doxc document was developed")
print("------------------------------------------------------")
#clear the form for another submition
self.edit1.clear()
self.edit2.clear()
self.edit3.clear()
self.edit4.clear()
if __name__ == '__main__':
#or you can do a automatic one with something like
# Create the Qt Application
app = QApplication(sys.argv)
# Create and show the form
form = Form()
#the size of the gui
form.resize(1300,250)
form.show()
# Run the main Qt loop
sys.exit(app.exec_())
| [
2,
3,
4,
5,
6
] |
1,322 | 029f4f015f558dbd4d6096b00c53f5f0fe69883d | <mask token>
| <mask token>
class CurriculoSerializer(serializers.ModelSerializer):
class Meta:
model = Curriculo
fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'
| from rest_framework import serializers
from core.models import Curriculo
class CurriculoSerializer(serializers.ModelSerializer):
class Meta:
model = Curriculo
fields = 'id', 'name', 'description', 'image', 'create_at', 'update_at'
| from rest_framework import serializers
from core.models import Curriculo
class CurriculoSerializer(serializers.ModelSerializer):
class Meta:
model = Curriculo
fields = ('id','name', 'description','image','create_at','update_at') | null | [
0,
1,
2,
3
] |
1,323 | 8e1eef3c5a9ca3ea504bbc269b48446527637626 | <mask token>
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
| <mask token>
class BasePageIndex(SearchIndex):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
| <mask token>
class BasePageIndex(SearchIndex):
text = CharField(document=True, use_template=True, template_name=
'search/indexes/varlet/page_text.txt')
url = CharField(model_attr='url')
get_absolute_url = CharField(model_attr='get_absolute_url')
created = DateTimeField(model_attr='created')
modified = DateTimeField(model_attr='modified')
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
| from __future__ import absolute_import, unicode_literals
import swapper
from haystack.constants import Indexable
from haystack.fields import CharField, DateTimeField
from haystack.indexes import SearchIndex
class BasePageIndex(SearchIndex):
text = CharField(document=True, use_template=True, template_name=
'search/indexes/varlet/page_text.txt')
url = CharField(model_attr='url')
get_absolute_url = CharField(model_attr='get_absolute_url')
created = DateTimeField(model_attr='created')
modified = DateTimeField(model_attr='modified')
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import swapper
from haystack.constants import Indexable
from haystack.fields import CharField, DateTimeField
from haystack.indexes import SearchIndex
class BasePageIndex(SearchIndex):
text = CharField(document=True, use_template=True, template_name='search/indexes/varlet/page_text.txt')
url = CharField(model_attr='url')
get_absolute_url = CharField(model_attr='get_absolute_url')
created = DateTimeField(model_attr='created')
modified = DateTimeField(model_attr='modified')
def get_model(self):
return swapper.load_model('varlet', 'page')
class PageIndex(BasePageIndex, Indexable):
template = CharField(model_attr='template')
template_title = CharField(model_attr='get_template_display')
get_template_display = CharField(model_attr='get_template_display')
| [
2,
4,
5,
6,
7
] |
1,324 | 11ed7550c25ca9944ce7073d9655cb9af7bdeae9 | <mask token>
class Modeller:
<mask token>
<mask token>
| <mask token>
class Modeller:
<mask token>
def event_mode(self):
refusals = 0
processed = 0
generated_requests = self._generator.num_requests
generator = self._generator
generator.receivers = self._operators.copy()
self._operators[0].receivers = [self._computers[0]]
self._operators[1].receivers = [self._computers[0]]
self._operators[2].receivers = [self._computers[1]]
generator.next = generator.next_time()
self._operators[0].next = self._operators[0].next_time()
blocks = [generator, self._operators[0], self._operators[1], self.
_operators[2], self._computers[0], self._computers[1]]
while generator.num_requests >= 0:
current_time = generator.next
for block in blocks:
if 0 < block.next < current_time:
current_time = block.next
for block in blocks:
if current_time == block.next:
if not isinstance(block, Processor):
next_generator = generator.generate_request()
if next_generator is not None:
next_generator.next = (current_time +
next_generator.next_time())
processed += 1
else:
refusals += 1
generator.next = current_time + generator.next_time()
else:
block.process_request()
if block.current_queue_size == 0:
block.next = 0
else:
block.next = current_time + block.next_time()
return {'refusal_percentage': refusals / generated_requests * 100,
'refusals': refusals, 'processed': processed}
| <mask token>
class Modeller:
def __init__(self, generator, operators, computers):
self._generator = generator
self._operators = operators
self._computers = computers
def event_mode(self):
refusals = 0
processed = 0
generated_requests = self._generator.num_requests
generator = self._generator
generator.receivers = self._operators.copy()
self._operators[0].receivers = [self._computers[0]]
self._operators[1].receivers = [self._computers[0]]
self._operators[2].receivers = [self._computers[1]]
generator.next = generator.next_time()
self._operators[0].next = self._operators[0].next_time()
blocks = [generator, self._operators[0], self._operators[1], self.
_operators[2], self._computers[0], self._computers[1]]
while generator.num_requests >= 0:
current_time = generator.next
for block in blocks:
if 0 < block.next < current_time:
current_time = block.next
for block in blocks:
if current_time == block.next:
if not isinstance(block, Processor):
next_generator = generator.generate_request()
if next_generator is not None:
next_generator.next = (current_time +
next_generator.next_time())
processed += 1
else:
refusals += 1
generator.next = current_time + generator.next_time()
else:
block.process_request()
if block.current_queue_size == 0:
block.next = 0
else:
block.next = current_time + block.next_time()
return {'refusal_percentage': refusals / generated_requests * 100,
'refusals': refusals, 'processed': processed}
| from Distributions import UniformDistribution
from EventGenerator import Generator
from Processor import Processor
class Modeller:
def __init__(self, generator, operators, computers):
self._generator = generator
self._operators = operators
self._computers = computers
def event_mode(self):
refusals = 0
processed = 0
generated_requests = self._generator.num_requests
generator = self._generator
generator.receivers = self._operators.copy()
self._operators[0].receivers = [self._computers[0]]
self._operators[1].receivers = [self._computers[0]]
self._operators[2].receivers = [self._computers[1]]
generator.next = generator.next_time()
self._operators[0].next = self._operators[0].next_time()
blocks = [generator, self._operators[0], self._operators[1], self.
_operators[2], self._computers[0], self._computers[1]]
while generator.num_requests >= 0:
current_time = generator.next
for block in blocks:
if 0 < block.next < current_time:
current_time = block.next
for block in blocks:
if current_time == block.next:
if not isinstance(block, Processor):
next_generator = generator.generate_request()
if next_generator is not None:
next_generator.next = (current_time +
next_generator.next_time())
processed += 1
else:
refusals += 1
generator.next = current_time + generator.next_time()
else:
block.process_request()
if block.current_queue_size == 0:
block.next = 0
else:
block.next = current_time + block.next_time()
return {'refusal_percentage': refusals / generated_requests * 100,
'refusals': refusals, 'processed': processed}
|
from Distributions import UniformDistribution
from EventGenerator import Generator
from Processor import Processor
class Modeller:
def __init__(self, generator, operators, computers):
self._generator = generator
self._operators = operators
self._computers = computers
def event_mode(self):
refusals = 0
processed = 0
generated_requests = self._generator.num_requests
generator = self._generator
generator.receivers = self._operators.copy()
self._operators[0].receivers = [self._computers[0]]
self._operators[1].receivers = [self._computers[0]]
self._operators[2].receivers = [self._computers[1]]
generator.next = generator.next_time()
self._operators[0].next = self._operators[0].next_time()
blocks = [
generator,
self._operators[0],
self._operators[1],
self._operators[2],
self._computers[0],
self._computers[1],
]
while generator.num_requests >= 0:
            # find the smallest next-event time
current_time = generator.next
for block in blocks:
if 0 < block.next < current_time:
current_time = block.next
            # for each of the blocks
for block in blocks:
                # if the event has occurred for this block
if current_time == block.next:
if not isinstance(block, Processor):
                        # for the generator:
                        # check whether an operator can accept the request
next_generator = generator.generate_request()
if next_generator is not None:
next_generator.next = \
current_time + next_generator.next_time()
processed += 1
else:
refusals += 1
generator.next = current_time + generator.next_time()
else:
block.process_request()
if block.current_queue_size == 0:
block.next = 0
else:
block.next = current_time + block.next_time()
return {"refusal_percentage": refusals / generated_requests * 100,
"refusals": refusals,
"processed": processed,
} | [
1,
2,
3,
4,
5
] |
1,325 | d9a871fb6c889bcff455732007718af734859c72 | # SSURGO_ExportMuRaster.py
#
# Convert MUPOLYGON featureclass to raster for the specified SSURGO geodatabase.
# By default any small NoData areas (< 5000 sq meters) will be filled using
# the Majority value.
#
# Input mupolygon featureclass must have a projected coordinate system or it will skip.
# Input databases and featureclasses must use naming convention established by the
# 'SDM Export By State' tool.
#
# For geographic regions that have USGS NLCD available, the tool wil automatically
# align the coordinate system and raster grid to match.
#
# 10-31-2013 Added gap fill method
#
# 11-05-2014
# 11-22-2013
# 12-10-2013 Problem with using non-unique cellvalues for raster. Going back to
# creating an integer version of MUKEY in the mapunit polygon layer.
# 12-13-2013 Occasionally see error messages related to temporary GRIDs (g_g*) created
# under "C:\Users\steve.peaslee\AppData\Local\Temp\a subfolder". These
# are probably caused by orphaned INFO tables.
# 01-08-2014 Added basic raster metadata (still need process steps)
# 01-12-2014 Restricted conversion to use only input MUPOLYGON featureclass having
# a projected coordinate system with linear units=Meter
# 01-31-2014 Added progressor bar to 'Saving MUKEY values..'. Seems to be a hangup at this
# point when processing CONUS geodatabase
# 02-14-2014 Changed FeatureToLayer (CELL_CENTER) to PolygonToRaster (MAXIMUM_COMBINED_AREA)
# and removed the Gap Fill option.
# 2014-09-27 Added ISO metadata import
#
# 2014-10-18 Noticed that failure to create raster seemed to be related to long
# file names or non-alphanumeric characters such as a dash in the name.
#
# 2014-10-29 Removed ORDER BY MUKEY sql clause because some computers were failing on that line.
# Don't understand why.
#
# 2014-10-31 Added error message if the MUKEY column is not populated in the MUPOLYGON featureclass
#
# 2014-11-04 Problems occur when the user's gp environment points to Default.gdb for the scratchWorkpace.
# Added a fatal error message when that occurs.
#
# 2015-01-15 Hopefully fixed some of the issues that caused the raster conversion to crash at the end.
# Cleaned up some of the current workspace settings and moved the renaming of the final raster.
#
# 2015-02-26 Adding option for tiling raster conversion by areasymbol and then mosaicing. Slower and takes
# more disk space, but gets the job done when otherwise PolygonToRaster fails on big datasets.
# 2015-02-27 Make bTiling variable an integer (0, 2, 5) that can be used to slice the areasymbol value. This will
# give the user an option to tile by state (2) or by survey area (5)
# 2015-03-10 Moved sequence of CheckInExtension. It was at the beginning which seems wrong.
#
# 2015-03-11 Switched tiled raster format from geodatabase raster to TIFF. This should allow the entire
# temporary folder to be deleted instead of deleting rasters one-at-a-time (slow).
# 2015-03-11 Added attribute index (mukey) to raster attribute table
# 2015-03-13 Modified output raster name by incorporating the geodatabase name (after '_' and before ".gdb")
#
# 2015-09-16 Temporarily renamed output raster using a shorter string
#
# 2015-09-16 Trying several things to address 9999 failure on CONUS. Created a couple of ArcInfo workspace in temp
# 2015-09-16 Compacting geodatabase before PolygonToRaster conversion
#
# 2015-09-18 Still having problems with CONUS raster even with ArcGIS 10.3. Even the tiled method failed once
# on AR105. Actually may have been the next survey, but random order so don't know which one for sure.
# Trying to reorder mosaic to match the spatial order of the polygon layers. Need to figure out if
# the 99999 error in PolygonToRaster is occurring with the same soil survey or same count or any
# other pattern.
#
# 2015-09-18 Need to remember to turn off all layers in ArcMap. Redraw is triggered after each tile.
#
# 2015-10-01 Found problem apparently caused by 10.3. SnapRaster functionality was failing with tiles because of
# MakeFeatureLayer where_clause. Perhaps due to cursor lock persistence? Rewrote entire function to
# use SAPOLYGON featureclass to define extents for tiles. This seems to be working better anyway.
#
# 2015-10-02 Need to look at some method for sorting the extents of each tile and sort them in a geographic fashion.
# A similar method was used in the Create gSSURGO database tools for the Append process.
#
# 2015-10-23 Jennifer and I finally figured out what was causing her PolygonToRaster 9999 errors.
# It was dashes in the output GDB path. Will add a check for bad characters in path.
#
# 2015-10-26 Changed up SnapToNLCD function to incorporate SnapRaster input as long as the coordinate
# system matches and the extent coordinates are integer (no floating point!).
#
# 2015-10-27 Looking at possible issue with batchmode processing of rasters. Jennifer had several
# errors when trying to run all states at once.
#
# 2015-11-03 Fixed failure when indexing non-geodatabase rasters such as .IMG.
## ===================================================================================
class MyError(Exception):
pass
## ===================================================================================
def PrintMsg(msg, severity=0):
# prints message to screen if run as a python script
# Adds tool message to the geoprocessor
#
#Split the message on \n first, so that if it's multiple lines, a GPMessage will be added for each line
try:
for string in msg.split('\n'):
#Add a geoprocessing message (in case this is run as a tool)
if severity == 0:
arcpy.AddMessage(string)
elif severity == 1:
arcpy.AddWarning(string)
elif severity == 2:
arcpy.AddMessage(" ")
arcpy.AddError(string)
except:
pass
## ===================================================================================
def errorMsg():
try:
tb = sys.exc_info()[2]
tbinfo = traceback.format_tb(tb)[0]
theMsg = tbinfo + "\n" + str(sys.exc_type)+ ": " + str(sys.exc_value)
PrintMsg(theMsg, 2)
except:
PrintMsg("Unhandled error in errorMsg method", 2)
pass
## ===================================================================================
def WriteToLog(theMsg, theRptFile):
# prints message to screen if run as a python script
# Adds tool message to the geoprocessor
#print msg
#
try:
fh = open(theRptFile, "a")
theMsg = "\n" + theMsg
fh.write(theMsg)
fh.close()
except:
errorMsg()
pass
## ===================================================================================
def elapsedTime(start):
# Calculate amount of time since "start" and return time string
try:
# Stop timer
#
end = time.time()
# Calculate total elapsed seconds
eTotal = end - start
# day = 86400 seconds
# hour = 3600 seconds
# minute = 60 seconds
eMsg = ""
# calculate elapsed days
eDay1 = eTotal / 86400
eDay2 = math.modf(eDay1)
eDay = int(eDay2[1])
eDayR = eDay2[0]
if eDay > 1:
eMsg = eMsg + str(eDay) + " days "
elif eDay == 1:
eMsg = eMsg + str(eDay) + " day "
# Calculated elapsed hours
eHour1 = eDayR * 24
eHour2 = math.modf(eHour1)
eHour = int(eHour2[1])
eHourR = eHour2[0]
if eDay > 0 or eHour > 0:
if eHour > 1:
eMsg = eMsg + str(eHour) + " hours "
else:
eMsg = eMsg + str(eHour) + " hour "
# Calculate elapsed minutes
eMinute1 = eHourR * 60
eMinute2 = math.modf(eMinute1)
eMinute = int(eMinute2[1])
eMinuteR = eMinute2[0]
if eDay > 0 or eHour > 0 or eMinute > 0:
if eMinute > 1:
eMsg = eMsg + str(eMinute) + " minutes "
else:
eMsg = eMsg + str(eMinute) + " minute "
# Calculate elapsed secons
eSeconds = "%.1f" % (eMinuteR * 60)
if eSeconds == "1.00":
eMsg = eMsg + eSeconds + " second "
else:
eMsg = eMsg + eSeconds + " seconds "
return eMsg
except:
errorMsg()
return ""
## ===================================================================================
def Number_Format(num, places=0, bCommas=True):
try:
# Format a number according to locality and given places
#locale.setlocale(locale.LC_ALL, "")
if bCommas:
theNumber = locale.format("%.*f", (places, num), True)
else:
theNumber = locale.format("%.*f", (places, num), False)
return theNumber
except:
errorMsg()
return False
## ===================================================================================
def CheckStatistics(outputRaster):
# For no apparent reason, ArcGIS sometimes fails to build statistics. Might work one
# time and then the next time it may fail without any error message.
#
try:
#PrintMsg(" \n\tChecking raster statistics", 0)
for propType in ['MINIMUM', 'MAXIMUM', 'MEAN', 'STD']:
statVal = arcpy.GetRasterProperties_management (outputRaster, propType).getOutput(0)
#PrintMsg("\t\t" + propType + ": " + statVal, 1)
return True
except:
return False
## ===================================================================================
def UpdateMetadata(outputWS, target, surveyInfo, iRaster):
#
# Used for non-ISO metadata
#
# Search words: xxSTATExx, xxSURVEYSxx, xxTODAYxx, xxFYxx
#
try:
PrintMsg("\tUpdating metadata...")
arcpy.SetProgressor("default", "Updating metadata")
# Set metadata translator file
dInstall = arcpy.GetInstallInfo()
installPath = dInstall["InstallDir"]
prod = r"Metadata/Translator/ARCGIS2FGDC.xml"
mdTranslator = os.path.join(installPath, prod)
# Define input and output XML files
mdImport = os.path.join(env.scratchFolder, "xxImport.xml") # the metadata xml that will provide the updated info
xmlPath = os.path.dirname(sys.argv[0])
mdExport = os.path.join(xmlPath, "gSSURGO_MapunitRaster.xml") # original template metadata in script directory
# Cleanup output XML files from previous runs
if os.path.isfile(mdImport):
os.remove(mdImport)
# Get replacement value for the search words
#
stDict = StateNames()
st = os.path.basename(outputWS)[8:-4]
if st in stDict:
# Get state name from the geodatabase
mdState = stDict[st]
else:
# Leave state name blank. In the future it would be nice to include a tile name when appropriate
mdState = ""
# Set date strings for metadata, based upon today's date
#
d = datetime.date.today()
today = str(d.isoformat().replace("-",""))
# Set fiscal year according to the current month. If run during January thru September,
# set it to the current calendar year. Otherwise set it to the next calendar year.
#
if d.month > 9:
fy = "FY" + str(d.year + 1)
else:
fy = "FY" + str(d.year)
# Convert XML to tree format
tree = ET.parse(mdExport)
root = tree.getroot()
# new citeInfo has title.text, edition.text, serinfo/issue.text
citeInfo = root.findall('idinfo/citation/citeinfo/')
if not citeInfo is None:
# Process citation elements
# title, edition, issue
#
for child in citeInfo:
#PrintMsg("\t\t" + str(child.tag), 0)
if child.tag == "title":
if child.text.find('xxSTATExx') >= 0:
child.text = child.text.replace('xxSTATExx', mdState)
elif mdState != "":
child.text = child.text + " - " + mdState
elif child.tag == "edition":
if child.text == 'xxFYxx':
child.text = fy
elif child.tag == "serinfo":
for subchild in child.iter('issue'):
if subchild.text == "xxFYxx":
subchild.text = fy
# Update place keywords
ePlace = root.find('idinfo/keywords/place')
if not ePlace is None:
#PrintMsg("\t\tplace keywords", 0)
for child in ePlace.iter('placekey'):
if child.text == "xxSTATExx":
child.text = mdState
elif child.text == "xxSURVEYSxx":
child.text = surveyInfo
# Update credits
eIdInfo = root.find('idinfo')
if not eIdInfo is None:
#PrintMsg("\t\tcredits", 0)
for child in eIdInfo.iter('datacred'):
sCreds = child.text
if sCreds.find("xxSTATExx") >= 0:
#PrintMsg("\t\tcredits " + mdState, 0)
child.text = child.text.replace("xxSTATExx", mdState)
if sCreds.find("xxFYxx") >= 0:
#PrintMsg("\t\tcredits " + fy, 0)
child.text = child.text.replace("xxFYxx", fy)
if sCreds.find("xxTODAYxx") >= 0:
#PrintMsg("\t\tcredits " + today, 0)
child.text = child.text.replace("xxTODAYxx", today)
idPurpose = root.find('idinfo/descript/purpose')
if not idPurpose is None:
ip = idPurpose.text
if ip.find("xxFYxx") >= 0:
idPurpose.text = ip.replace("xxFYxx", fy)
#PrintMsg("\t\tpurpose", 0)
# create new xml file which will be imported, thereby updating the table's metadata
tree.write(mdImport, encoding="utf-8", xml_declaration=None, default_namespace=None, method="xml")
# import updated metadata to the geodatabase table
# Using three different methods with the same XML file works for ArcGIS 10.1
#
#PrintMsg("\t\tApplying metadata translators...")
arcpy.MetadataImporter_conversion (mdImport, target)
arcpy.ImportMetadata_conversion(mdImport, "FROM_FGDC", target, "DISABLED")
# delete the temporary xml metadata file
if os.path.isfile(mdImport):
os.remove(mdImport)
pass
# delete metadata tool logs
logFolder = os.path.dirname(env.scratchFolder)
logFile = os.path.basename(mdImport).split(".")[0] + "*"
currentWS = env.workspace
env.workspace = logFolder
logList = arcpy.ListFiles(logFile)
for lg in logList:
arcpy.Delete_management(lg)
env.workspace = currentWS
return True
except:
errorMsg()
        return False
## ===================================================================================
def CheckSpatialReference(muPolygon):
# Make sure that the coordinate system is projected and units are meters
try:
desc = arcpy.Describe(muPolygon)
inputSR = desc.spatialReference
if inputSR.type.upper() == "PROJECTED":
if inputSR.linearUnitName.upper() == "METER":
env.outputCoordinateSystem = inputSR
return True
else:
                raise MyError, os.path.basename(muPolygon) + ": Input soil polygon layer does not have a valid coordinate system for gSSURGO"
else:
            raise MyError, os.path.basename(muPolygon) + ": Input soil polygon layer must have a projected coordinate system"
except MyError, e:
# Example: raise MyError, "This is an error message"
PrintMsg(str(e), 2)
return False
except:
errorMsg()
return False
## ===================================================================================
def ConvertToRaster(muPolygon, rasterName):
# main function used for raster conversion
try:
#
# Set geoprocessing environment
#
env.overwriteOutput = True
arcpy.env.compression = "LZ77"
env.tileSize = "128 128"
gdb = os.path.dirname(muPolygon)
outputRaster = os.path.join(gdb, rasterName)
iRaster = 10 # output resolution is 10 meters
# Make sure that the env.scratchGDB is NOT Default.gdb. This causes problems for
# some unknown reason.
if (os.path.basename(env.scratchGDB).lower() == "default.gdb") or \
(os.path.basename(env.scratchWorkspace).lower() == "default.gdb") or \
(os.path.basename(env.scratchGDB).lower() == gdb):
raise MyError, "Invalid scratch workspace setting (" + env.scratchWorkspace + ")"
# Create an ArcInfo workspace under the scratchFolder. Trying to prevent
# 99999 errors for PolygonToRaster on very large databases
#
aiWorkspace = env.scratchFolder
if not arcpy.Exists(os.path.join(aiWorkspace, "info")):
#PrintMsg(" \nCreating ArcInfo workspace (" + os.path.basename(aiWorkspace) + ") in: " + os.path.dirname(aiWorkspace), 1)
arcpy.CreateArcInfoWorkspace_management(os.path.dirname(aiWorkspace), os.path.basename(aiWorkspace))
# turn off automatic Pyramid creation and Statistics calculation
env.rasterStatistics = "NONE"
env.pyramid = "PYRAMIDS 0"
env.workspace = gdb
# Need to check for dashes or spaces in folder names or leading numbers in database or raster names
desc = arcpy.Describe(muPolygon)
if not arcpy.Exists(muPolygon):
raise MyError, "Could not find input featureclass: " + muPolygon
# Check input layer's coordinate system to make sure horizontal units are meters
# set the output coordinate system for the raster (neccessary for PolygonToRaster)
if CheckSpatialReference(muPolygon) == False:
return False
# Sometimes it helps to compact large databases before raster conversion
#arcpy.SetProgressorLabel("Compacting database prior to rasterization...")
#arcpy.Compact_management(gdb)
# For rasters named using an attribute value, some attribute characters can result in
# 'illegal' names.
outputRaster = outputRaster.replace("-", "")
if arcpy.Exists(outputRaster):
arcpy.Delete_management(outputRaster)
time.sleep(1)
if arcpy.Exists(outputRaster):
err = "Output raster (" + os.path.basename(outputRaster) + ") already exists"
raise MyError, err
#start = time.time() # start clock to measure total processing time
#begin = time.time() # start clock to measure set up time
time.sleep(2)
PrintMsg(" \nBeginning raster conversion process", 0)
# Create Lookup table for storing MUKEY values and their integer counterparts
#
lu = os.path.join(env.scratchGDB, "Lookup")
if arcpy.Exists(lu):
arcpy.Delete_management(lu)
# The Lookup table contains both MUKEY and its integer counterpart (CELLVALUE).
# Using the joined lookup table creates a raster with CellValues that are the
# same as MUKEY (but integer). This will maintain correct MUKEY values
# during a moscaic or clip.
#
arcpy.CreateTable_management(os.path.dirname(lu), os.path.basename(lu))
arcpy.AddField_management(lu, "CELLVALUE", "LONG")
arcpy.AddField_management(lu, "mukey", "TEXT", "#", "#", "30")
# Create list of areasymbols present in the MUPOLYGON featureclass
# Having problems processing CONUS list of MUKEYs. Python seems to be running out of memory,
# but I don't see high usage in Windows Task Manager
#
# PrintMsg(" \nscratchFolder set to: " + env.scratchFolder, 1)
# Create list of MUKEY values from the MUPOLYGON featureclass
#
# Create a list of map unit keys present in the MUPOLYGON featureclass
#
PrintMsg("\tGetting list of mukeys from input soil polygon layer...", 0)
arcpy.SetProgressor("default", "Getting inventory of map units...")
tmpPolys = "SoilPolygons"
sqlClause = ("DISTINCT", None)
with arcpy.da.SearchCursor(muPolygon, ["mukey"], "", "", "", sql_clause=sqlClause) as srcCursor:
# Create a unique, sorted list of MUKEY values in the MUPOLYGON featureclass
mukeyList = [row[0] for row in srcCursor]
mukeyList.sort()
if len(mukeyList) == 0:
raise MyError, "Failed to get MUKEY values from " + muPolygon
muCnt = len(mukeyList)
# Load MUKEY values into Lookup table
#
#PrintMsg("\tSaving " + Number_Format(muCnt, 0, True) + " MUKEY values for " + Number_Format(polyCnt, 0, True) + " polygons" , 0)
arcpy.SetProgressorLabel("Creating lookup table...")
with arcpy.da.InsertCursor(lu, ("CELLVALUE", "mukey") ) as inCursor:
for mukey in mukeyList:
rec = mukey, mukey
inCursor.insertRow(rec)
# Add MUKEY attribute index to Lookup table
arcpy.AddIndex_management(lu, ["mukey"], "Indx_LU")
#
# End of Lookup table code
# Match NLCD raster (snapraster)
cdlRasters = arcpy.ListRasters("wsCDL*")
if len(cdlRasters) == 0:
raise MyError, "Required Cropland Data Layer rasters missing from " + gdb
else:
cdlRaster = cdlRasters[-1]
env.snapRaster = cdlRaster
#env.extent = cdlRaster
# Raster conversion process...
#
PrintMsg(" \nConverting featureclass " + os.path.basename(muPolygon) + " to raster (" + str(iRaster) + " meter)", 0)
tmpPolys = "poly_tmp"
arcpy.MakeFeatureLayer_management (muPolygon, tmpPolys)
arcpy.AddJoin_management (tmpPolys, "mukey", lu, "mukey", "KEEP_ALL")
arcpy.SetProgressor("default", "Running PolygonToRaster conversion...")
# Need to make sure that the join was successful
time.sleep(1)
rasterFields = arcpy.ListFields(tmpPolys)
rasterFieldNames = list()
for rFld in rasterFields:
rasterFieldNames.append(rFld.name.upper())
if not "LOOKUP.CELLVALUE" in rasterFieldNames:
raise MyError, "Join failed for Lookup table (CELLVALUE)"
if (os.path.basename(muPolygon).upper() + ".MUKEY") in rasterFieldNames:
#raise MyError, "Join failed for Lookup table (SPATIALVERSION)"
priorityFld = os.path.basename(muPolygon) + ".MUKEY"
else:
priorityFld = os.path.basename(muPolygon) + ".CELLVALUE"
#ListEnv()
arcpy.PolygonToRaster_conversion(tmpPolys, "Lookup.CELLVALUE", outputRaster, "MAXIMUM_COMBINED_AREA", "", iRaster) # No priority field for single raster
# immediately delete temporary polygon layer to free up memory for the rest of the process
time.sleep(1)
arcpy.Delete_management(tmpPolys)
# End of single raster process
# Now finish up the single temporary raster
#
PrintMsg(" \nFinalizing raster conversion process:", 0)
# Reset the stopwatch for the raster post-processing
#begin = time.time()
# Remove lookup table
if arcpy.Exists(lu):
arcpy.Delete_management(lu)
# ****************************************************
# Build pyramids and statistics
# ****************************************************
if arcpy.Exists(outputRaster):
time.sleep(1)
arcpy.SetProgressor("default", "Calculating raster statistics...")
PrintMsg("\tCalculating raster statistics...", 0)
env.pyramid = "PYRAMIDS -1 NEAREST"
arcpy.env.rasterStatistics = 'STATISTICS 100 100'
arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )
if CheckStatistics(outputRaster) == False:
# For some reason the BuildPyramidsandStatistics command failed to build statistics for this raster.
#
# Try using CalculateStatistics while setting an AOI
PrintMsg("\tInitial attempt to create statistics failed, trying another method...", 0)
time.sleep(3)
if arcpy.Exists(os.path.join(gdb, "SAPOLYGON")):
# Try running CalculateStatistics with an AOI to limit the area that is processed
# if we have to use SAPOLYGON as an AOI, this will be REALLY slow
#arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE", os.path.join(outputWS, "SAPOLYGON") )
arcpy.CalculateStatistics_management (outputRaster, 1, 1, "", "OVERWRITE" )
if CheckStatistics(outputRaster) == False:
time.sleep(3)
PrintMsg("\tFailed in both attempts to create statistics for raster layer", 1)
arcpy.SetProgressor("default", "Building pyramids...")
PrintMsg("\tBuilding pyramids...", 0)
arcpy.BuildPyramids_management(outputRaster, "-1", "NONE", "NEAREST", "DEFAULT", "", "SKIP_EXISTING")
# ****************************************************
# Add MUKEY to final raster
# ****************************************************
# Build attribute table for final output raster. Sometimes it fails to automatically build.
PrintMsg("\tBuilding raster attribute table and updating MUKEY values", )
arcpy.SetProgressor("default", "Building raster attrribute table...")
arcpy.BuildRasterAttributeTable_management(outputRaster)
# Add MUKEY values to final mapunit raster
#
arcpy.SetProgressor("default", "Adding MUKEY attribute to raster...")
arcpy.AddField_management(outputRaster, "MUKEY", "TEXT", "#", "#", "30")
with arcpy.da.UpdateCursor(outputRaster, ["VALUE", "MUKEY"]) as cur:
for rec in cur:
rec[1] = rec[0]
cur.updateRow(rec)
# Add attribute index (MUKEY) for raster
arcpy.AddIndex_management(outputRaster, ["mukey"], "Indx_RasterMukey")
else:
err = "Missing output raster (" + outputRaster + ")"
raise MyError, err
# Compare list of original mukeys with the list of raster mukeys
# Report discrepancies. These are usually thin polygons along survey boundaries,
# added to facilitate a line-join.
#
arcpy.SetProgressor("default", "Looking for missing map units...")
rCnt = int(arcpy.GetRasterProperties_management (outputRaster, "UNIQUEVALUECOUNT").getOutput(0))
if rCnt <> muCnt:
missingList = list()
rList = list()
# Create list of raster mukeys...
with arcpy.da.SearchCursor(outputRaster, ("MUKEY",)) as rcur:
for rec in rcur:
mukey = rec[0]
rList.append(mukey)
missingList = list(set(mukeyList) - set(rList))
queryList = list()
for mukey in missingList:
queryList.append("'" + mukey + "'")
if len(queryList) > 0:
PrintMsg("\tDiscrepancy in mapunit count for new raster", 1)
#PrintMsg("\t\tInput polygon mapunits: " + Number_Format(muCnt, 0, True), 0)
#PrintMsg("\t\tOutput raster mapunits: " + Number_Format(rCnt, 0, True), 0)
PrintMsg("The following MUKEY values were present in the original MUPOLYGON featureclass, ", 1)
PrintMsg("but not in the raster", 1)
PrintMsg("\t\tMUKEY IN (" + ", ".join(queryList) + ") \n ", 0)
# Update metadata file for the geodatabase
#
# Query the output SACATALOG table to get list of surveys that were exported to the gSSURGO
#
#saTbl = os.path.join(theGDB, "sacatalog")
#expList = list()
#with arcpy.da.SearchCursor(saTbl, ("AREASYMBOL", "SAVEREST")) as srcCursor:
# for rec in srcCursor:
# expList.append(rec[0] + " (" + str(rec[1]).split()[0] + ")")
#surveyInfo = ", ".join(expList)
surveyInfo = "" # could get this from SDA
#time.sleep(2)
arcpy.SetProgressorLabel("Updating metadata NOT...")
#bMetaData = UpdateMetadata(outputWS, outputRaster, surveyInfo, iRaster)
del outputRaster
del muPolygon
arcpy.CheckInExtension("Spatial")
return True
except MyError, e:
# Example: raise MyError, "This is an error message"
PrintMsg(str(e), 2)
arcpy.CheckInExtension("Spatial")
return False
except MemoryError:
raise MyError, "Not enough memory to process. Try running again with the 'Use tiles' option"
except:
errorMsg()
arcpy.CheckInExtension("Spatial")
return False
## ===================================================================================
## ===================================================================================
## MAIN
## ===================================================================================
# Import system modules
import sys, string, os, arcpy, locale, traceback, math, time, datetime, shutil
import xml.etree.cElementTree as ET
from arcpy import env
# Create the Geoprocessor object
try:
if __name__ == "__main__":
# get parameters
muPolygon = arcpy.GetParameterAsText(0) # required gSSURGO polygon layer
rasterName = arcpy.GetParameterAsText(1) # required name for output gdb raster
env.overwriteOutput= True
iRaster = 10
# Get Spatial Analyst extension
if arcpy.CheckExtension("Spatial") == "Available":
# try to find the name of the tile from the geodatabase name
# set the name of the output raster using the tilename and cell resolution
from arcpy.sa import *
arcpy.CheckOutExtension("Spatial")
else:
raise MyError, "Required Spatial Analyst extension is not available"
# Call function that does all of the work
        bRaster = ConvertToRaster(muPolygon, rasterName)
arcpy.CheckInExtension("Spatial")
except MyError, e:
# Example: raise MyError, "This is an error message"
PrintMsg(str(e), 2)
except:
errorMsg()
| null | null | null | null | [
0
] |
1,326 | 1b58d294f02ce85bf19da03f94100af87408081d | import stockquote
import time
import datetime
from datetime import date
from connection import db
start_date='20100101'
def prices(symbol):
"""
Loads the prices from the start date for the given symbol
Only new quotes are downloaded.
"""
to = date.today().strftime("%Y%m%d")
c = db.cursor()
c.execute("SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s",
                  (symbol,))
(_from, ) = c.fetchone()
if _from == date.today():
print "Skipping %s" % symbol
return
print "Downloading %s" % symbol
if _from is None:
_from = start_date
else:
_from = _from.strftime("%Y%m%d")
prices = stockquote.get_historical_prices(symbol, _from, to)
headers = prices[0]
try:
close = get_idx(headers, 'Close')
date_ = get_idx(headers, 'Date')
open = get_idx(headers, 'Open')
high = get_idx(headers, 'High')
low = get_idx(headers, 'Low')
quotes = prices[1:]
for l in quotes:
#print "%s %s" % (l[date_], l[close])
try:
insert(symbol, l[date_], l[close], l[high], l[low], l[open])
except Exception, e:
print "Could not insert %s:%s" % (symbol, e)
print "Inserted %s new quotes for %s" % (len(quotes), symbol)
except Exception, e:
print "Could not download %s" % symbol
print e
def get_idx(headers, query):
for index, item in enumerate(headers):
if (item == query):
return index
#print("Could not find requested header [%s]" % query)
#print("Available ones are %s" % headers)
raise "Eror ind downloading quote"
def insert(symbol, date, close, high, low, open):
c = db.cursor()
c.execute("INSERT INTO quote (date, symbol, close, high, low, open) VALUES (%s, %s, %s, %s, %s, %s)",
(date, symbol, close, high, low, open))
| null | null | null | null | [
0
] |
1,327 | 5d0a45b93bd7972333f5574188c65484c065e9cf | '''
Created on June 10 2013
@author: Eugene Shim
This unit test suite is designed to test the unitTestParser module.
At the moment, the functions of that module are too simple to really
unit test effectively
'''
#Standard library modules
import unittest
#the module being tested
import unitTestParser
class TestResultsTestSuite(unittest.TestCase):
#check the verbosity
    def test_results(self):
        pass
| null | null | null | null | [
0
] |
1,328 | 95ea811d38c314f5f19294500e16bae3d00d4fff | <mask token>
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
<mask token>
| <mask token>
if len(sys.argv) < 4:
print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))
print(' PATH: image path')
print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')
print(' BIT : 0~7 (0:MSB, 7:LSB)')
exit(1)
<mask token>
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
<mask token>
if len(arr.shape) < 2 or len(arr.shape) > 3:
print('Unsupported shape of image')
exit(1)
<mask token>
if COLOR != -1 and len(arr.shape) == 4:
arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
arr = bitplane
Image.fromarray(np.uint8(arr)).show()
| <mask token>
if len(sys.argv) < 4:
print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))
print(' PATH: image path')
print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')
print(' BIT : 0~7 (0:MSB, 7:LSB)')
exit(1)
PATH = sys.argv[1]
COLOR = int(sys.argv[2])
BIT = int(sys.argv[3])
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
arr = bp.read_image_as_numpy(PATH)
if len(arr.shape) < 2 or len(arr.shape) > 3:
print('Unsupported shape of image')
exit(1)
arr = bp.to_binary(arr)
bitplane = bp.extract_bitplane(arr, COLOR, BIT)
bitplane[bitplane > 0] = 255
if COLOR != -1 and len(arr.shape) == 4:
arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
arr = bitplane
Image.fromarray(np.uint8(arr)).show()
| import sys
import numpy as np
import bpcs as bp
from PIL import Image
if len(sys.argv) < 4:
print('USAGE: {0} <PATH> <COLOR> <BIT>'.format(sys.argv[0]))
print(' PATH: image path')
print(' COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2')
print(' BIT : 0~7 (0:MSB, 7:LSB)')
exit(1)
PATH = sys.argv[1]
COLOR = int(sys.argv[2])
BIT = int(sys.argv[3])
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:, :, color] = bitplane
return img
arr = bp.read_image_as_numpy(PATH)
if len(arr.shape) < 2 or len(arr.shape) > 3:
print('Unsupported shape of image')
exit(1)
arr = bp.to_binary(arr)
bitplane = bp.extract_bitplane(arr, COLOR, BIT)
bitplane[bitplane > 0] = 255
if COLOR != -1 and len(arr.shape) == 4:
arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
arr = bitplane
Image.fromarray(np.uint8(arr)).show()
| import sys
import numpy as np
import bpcs as bp
from PIL import Image
if len(sys.argv)<4:
print("USAGE: {0} <PATH> <COLOR> <BIT>".format(sys.argv[0]))
print(" PATH: image path")
print(" COLOR: GRAY=-1, RED=0, GREEN=1, BLUE=2")
print(" BIT : 0~7 (0:MSB, 7:LSB)")
exit(1)
PATH = sys.argv[1]
COLOR = int(sys.argv[2])
BIT = int(sys.argv[3])
def merge_bitplane_to_image(bitplane, arr, color):
arr = bp.to_image(arr)
img = np.zeros(arr.shape)
img[:,:,color] = bitplane
return img
arr = bp.read_image_as_numpy(PATH)
if len(arr.shape)<2 or len(arr.shape)>3:
print("Unsupported shape of image")
exit(1)
arr = bp.to_binary(arr) # arr.shape = (h, w, 3(color), 8(byte)) or (h, w, 8(byte))
# arr = bp.to_image(arr) # arr.shape = (h, w, 3) or (h, w)
bitplane = bp.extract_bitplane(arr, COLOR, BIT)
bitplane[bitplane>0] = 255
if COLOR!=-1 and len(arr.shape)==4:
arr = merge_bitplane_to_image(bitplane, arr, COLOR)
else:
arr = bitplane
Image.fromarray(np.uint8(arr)).show() # show image
# Image.fromarray(np.uint8(arr)).save("test.png") # save image
| [
1,
2,
3,
4,
5
] |
1,329 | 60411e922bfec8f98028f959a370f954eef5437e | import re
import itertools
import setpath
import functions
import lib.jopts as jopts
from operator import itemgetter
import random
__docformat__ = 'reStructuredText en'
re_params=re.compile('(\w*):(.*)')
def consumer(func):
"""A decorator, advances func to its first yield point when called.
"""
from functools import wraps
@wraps(func)
def wrapper(*args,**kw):
gen = func(*args, **kw)
gen.next()
return gen
return wrapper
class freqitemsets:
"""
.. function:: freqitemsets(datacol, [threshold, noautothres, stats, maxlen]) -> [itemset_id:int, itemset_length:int, itemset_frequency:int, item:text]
Calculates frequent itemsets on a given column (datacol). The algorithm is tuned for the
case when we have many different items (in the order of millions), many input itemsets, but
small itemset length (10-20).
Returned table schema:
:itemset_id: Automatic itemset id
:itemset_length: Length of itemset
:itemset_frequency: How many times an itemset has been found
:item: Itemset's item value
Parameters:
:datacol:
Column on which to calculate frequent itemsets
:threshold: Default is 2
How many times an freq. itemset must appear for it to appear in the results
:noautothres: 1/0 (Default is 0)
Do not calculate the threshold automatically
:stats: 1/0 (Default is 0)
Return frequent itemset statistics
:maxlen: NUMBER (Default is no limit at all)
Maximum itemset length to search
Examples:
>>> table1('''
... 'car wood bike' 'first group'
... 'car car wood' 'first group'
... 'car wood' 'first group'
... 'car wood ice' 'first group'
... 'ice' 'second group'
... 'car ice' 'second group'
... 'car cream toy' 'second group'
... 'icecream ice car toy' 'second group'
... ''')
>>> sql("select b,freqitemsets(a, 'threshold:2', 'noautothres:1', 'maxlen:2') from table1 group by b")
b | itemset_id | itemset_length | itemset_frequency | item
---------------------------------------------------------------------
first group | 1 | 1 | 4 | wood
first group | 2 | 1 | 4 | car
first group | 3 | 2 | 4 | car
first group | 3 | 2 | 4 | wood
second group | 1 | 1 | 3 | ice
second group | 2 | 1 | 3 | car
second group | 3 | 1 | 2 | toy
second group | 4 | 2 | 2 | car
second group | 4 | 2 | 2 | ice
second group | 5 | 2 | 2 | car
second group | 5 | 2 | 2 | toy
>>> sql("select b,freqitemsets(a, 'stats:1') from table1 group by b")
b | MaxTransactionLength | CombinationCount | PassedTransactions | ValidKeywords
-------------------------------------------------------------------------------------------
first group | 3 | 2 | 3 | 2
first group | 3 | 1 | 1 | 2
first group | 3 | 0 | 0 | 0
second group | 4 | 3 | 3 | 3
second group | 4 | 0 | 3 | 0
"""
registered=True
multiset=True
def __init__(self):
self.threshold=2
self.startingthreshold=2
self.autothres=1
self.compress=0
self.initstatic=False
self.input={}
self.maxlength=0
self.kwcode={}
self.codekw={}
self.maxkwcode=0
self.overthres={}
self.belowthres={}
self.passedkw={}
self.init=True
self.itemset_id=0
self.maxlen=None
self.stats=False
def initargs(self, args):
self.init=False
for i in xrange(1, len(args)):
v=re_params.match(args[i])
if v is not None and v.groups()[0]!='' and v.groups()[1]!='' and i>0:
v=v.groups()
if v[0]=='threshold':
try:
self.threshold=int(v[1])
self.startingthreshold=self.threshold
except KeyboardInterrupt:
raise
except:
raise functions.OperatorError("FreqItemsets",'No integer value given for threshold')
if v[0]=='noautothres':
self.autothres=0
if v[0]=='compress':
self.compress=1
if v[0]=='maxlen':
self.maxlen=int(v[1])
if v[0]=='stats':
self.stats=True
def demultiplex(self, data):
iterable=None
iterpos=-1
for i in xrange(len(data)):
if hasattr(data[i],'__iter__')==True:
iterable=data[i]
iterpos=i
break
if iterpos==-1:
yield list(data)
else:
pre=list(data[0:iterpos])
post=list(data[iterpos+1:])
for i in iterable:
if hasattr(i,'__iter__')==False:
yield pre+[i]+post
else:
yield pre+list(i)+post
def insertcombfreq(self, comb, freq):
if comb in self.overthres:
self.overthres[comb]+=freq
else:
if comb in self.belowthres:
self.belowthres[comb]+=freq
else:
self.belowthres[comb]=freq
if self.belowthres[comb]>=self.threshold:
self.overthres[comb]=self.belowthres[comb]
del(self.belowthres[comb])
for k in comb:
if self.compress==0:
self.passedkw[k]=True
elif not k in self.passedkw:
self.passedkw[k]=self.overthres[comb]
else:
self.passedkw[k]+=self.overthres[comb]
def insertitemset(self, itemset):
if itemset not in self.input:
self.input[itemset]=1
else:
self.input[itemset]+=1
def cleanitemsets(self, minlength):
newitemsets={}
for k,v in self.input.iteritems():
itemset=tuple(i for i in k if i in self.passedkw)
if self.compress==1:
esoteric_itemset=tuple(i for i in itemset if self.passedkw[i]==v)
if len(esoteric_itemset)>0:
if len(itemset)>=minlength:
self.overthres[itemset]=v
itemset=tuple(i for i in itemset if self.passedkw[i]!=v)
if len(itemset)>=minlength:
if itemset not in newitemsets:
newitemsets[itemset]=v
else:
newitemsets[itemset]+=v
self.input=newitemsets
def step(self, *args):
if self.init==True:
self.initargs(args)
if len(args[0])==0:
return
itms=sorted(set(args[0].split(' ')))
itms=[x for x in itms if x!='']
li=len(itms)
if li>0:
if li>self.maxlength:
self.maxlength=li
inputkws=[]
for kw in itms:
if len(kw)==0:
print itms, args[0], len(args[0]), li
if kw not in self.kwcode:
self.kwcode[kw]=self.maxkwcode
self.codekw[self.maxkwcode]=kw
inputkws.append(self.maxkwcode)
self.insertcombfreq( (self.maxkwcode,),1 )
self.maxkwcode+=1
else:
itm=self.kwcode[kw]
self.insertcombfreq( (itm,),1 )
inputkws.append(itm)
if len(inputkws)>1:
self.insertitemset(tuple(inputkws))
def final(self):
if not self.stats:
yield ('itemset_id', 'itemset_length', 'itemset_frequency', 'item')
else:
yield ('MaxTransactionLength', 'CombinationCount', 'PassedTransactions', 'ValidKeywords')
splist=[{},{}]
del(self.kwcode)
splist[1]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[1]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[1].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
if self.maxlen==None:
self.maxlen=self.maxlength
for l in xrange(2, min(self.maxlength+1, self.maxlen+1)):
splist.append({})
self.belowthres={}
self.overthres={}
prevl=l-1
# Autothresholding
if self.autothres==1:
if len(self.input)==0 or len(self.passedkw)==0:
break
else:
self.threshold=self.startingthreshold + int(len(self.passedkw)/len(self.input))
self.cleanitemsets(l)
self.passedkw={}
prevsplist = splist[prevl]
icombs = itertools.combinations
insertcomb = self.insertcombfreq
for k,v in self.input.iteritems():
for k in icombs(k,l):
insertit=True
for i1 in icombs(k, prevl):
if i1 not in prevsplist:
insertit=False
break
if insertit:
insertcomb( k,v )
splist[l-1]={}
splist[l]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[l]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[l].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
del(self.overthres)
del(self.belowthres)
del(self.passedkw)
del(self.input)
del(self.codekw)
del(splist)
class sampledistvals:
"""
.. function:: sampledistvals(sample_size, C1, C2, C3) -> [C1, C2, C3]
Sampledistvals returns sample_size distinct values for each of the input C1..Cn columns.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select sampledistvals(3, a, b, c) from table1")
C1 | C2 | C3
---------------------------------------------
["test1","test2","test4"] | [2,4] | [2,3,"t"]
"""
registered=True
def __init__(self):
self.vals=None
self.lenargs = -1
self.init=True
def step(self, *args):
if self.init:
self.lenargs = len(args)
self.vals = a=[set() for i in xrange(self.lenargs-1)]
self.init = False
for i in xrange(1, self.lenargs):
if len(self.vals[i-1])<args[0] and args[i] not in self.vals[i-1]:
self.vals[i-1].add(args[i])
def final(self):
yield tuple(['C'+str(i) for i in xrange(1, self.lenargs)] )
yield [jopts.toj(list(i)) for i in self.vals]
class sample:
"""
.. function:: sample(sample_size, C1, C2, C3)
Sample returns a random sample_size set of rows.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select sample(2, a, b, c) from table1") # doctest: +ELLIPSIS
C1 | C2 | C3
---------------
...
"""
registered=True
def __init__(self):
self.samplelist = []
self.index = 0
def step(self, *args):
sample_count = args[0]
# Generate the reservoir
if self.index < sample_count:
self.samplelist.append(args[1:])
else:
r = random.randint(0, self.index)
if r < sample_count:
self.samplelist[r] = args[1:]
self.index += 1
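        # Standard reservoir sampling (Algorithm R): the first sample_count rows fill the
        # reservoir; each later row replaces a uniformly chosen slot only when r < sample_count,
        # so every input row ends up retained with equal probability.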
def final(self):
        if len(self.samplelist) == 0:
yield tuple(['C1'])
else:
yield tuple(['C'+str(i) for i in xrange(1, len(self.samplelist[0]) + 1)] )
for r in self.samplelist:
yield list(r)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| null | null | null | null | [
0
] |
1,330 | df6fa0409500f97e5afde8f97796d6ed0cc4d746 | <mask token>
def fcn_model_fn(features, labels, mode):
L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
trainable = False
if mode == tf.estimator.ModeKeys.TRAIN:
trainable = True
seed = 2019
with tf.name_scope('vgg16_pretrained'):
x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',
padding='same', name='conv1_1', kernel_regularizer=L2,
trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_1')
x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=
'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_1')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')
with tf.name_scope('deconv_layers'):
x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=
'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_1')
x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=
'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_2')
x = tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=
'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_3')
heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32,
32), activation='linear', padding='same', name='deconv6_1',
kernel_regularizer=L2, trainable=trainable)
logit = tf.nn.sigmoid(heatmap, name='logit')
pred = tf.to_int32(logit > 0.5)
pred = tf.squeeze(pred, axis=3)
predictions = {'classes': pred, 'probabilities': logit}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
if False:
logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))
logit_f = tf.squeeze(logit_f, axis=[2, 3])
label_f = tf.reshape(labels, (-1, 1))
keep = tf.where(tf.greater_equal(labels, 0))
logit_f = tf.gather(logit_f, keep)
label_f = tf.gather(label_f, keep)
tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=logit_f)
heatmap = tf.squeeze(heatmap, axis=3)
label_f = tf.to_int32(labels > 0)
tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=heatmap)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
momentum=0.99)
train_op = optimizer.minimize(loss=loss, global_step=tf.train.
get_global_step())
tf.summary.scalar('train_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=
train_op)
iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=
2, name='mean_iou')
eval_metric_ops = {'IoU': iou}
tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}
tensors_to_log_iou = {'mean_iou': iou}
tf.summary.scalar('mean_iou', iou[0])
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,
every_n_iter=200)
if mode == tf.estimator.ModeKeys.EVAL:
tf.summary.scalar('eval_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
eval_metric_ops=eval_metric_ops)
<mask token>
| <mask token>
tf.logging.set_verbosity(tf.logging.INFO)
<mask token>
def fcn_model_fn(features, labels, mode):
L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
trainable = False
if mode == tf.estimator.ModeKeys.TRAIN:
trainable = True
seed = 2019
with tf.name_scope('vgg16_pretrained'):
x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',
padding='same', name='conv1_1', kernel_regularizer=L2,
trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_1')
x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=
'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_1')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')
with tf.name_scope('deconv_layers'):
x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=
'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_1')
x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=
'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_2')
x = tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=
'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_3')
heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32,
32), activation='linear', padding='same', name='deconv6_1',
kernel_regularizer=L2, trainable=trainable)
logit = tf.nn.sigmoid(heatmap, name='logit')
pred = tf.to_int32(logit > 0.5)
pred = tf.squeeze(pred, axis=3)
predictions = {'classes': pred, 'probabilities': logit}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
if False:
logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))
logit_f = tf.squeeze(logit_f, axis=[2, 3])
label_f = tf.reshape(labels, (-1, 1))
keep = tf.where(tf.greater_equal(labels, 0))
logit_f = tf.gather(logit_f, keep)
label_f = tf.gather(label_f, keep)
tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=logit_f)
heatmap = tf.squeeze(heatmap, axis=3)
label_f = tf.to_int32(labels > 0)
tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=heatmap)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
momentum=0.99)
train_op = optimizer.minimize(loss=loss, global_step=tf.train.
get_global_step())
tf.summary.scalar('train_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=
train_op)
iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=
2, name='mean_iou')
eval_metric_ops = {'IoU': iou}
tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}
tensors_to_log_iou = {'mean_iou': iou}
tf.summary.scalar('mean_iou', iou[0])
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,
every_n_iter=200)
if mode == tf.estimator.ModeKeys.EVAL:
tf.summary.scalar('eval_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
eval_metric_ops=eval_metric_ops)
if __name__ == '__main__':
root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
train_data, eval_data, test_data, gt = data_load.load()
TRAIN = False
PREDICT = True
DRAW_SAMPLE = False
if DRAW_SAMPLE == True:
pic = np.random.randint(len(test_data['x']))
image_sample = test_data['x'][pic]
label_sample = test_data['y'][pic]
plt.figure(figsize=(20, 40))
plt.title('data')
plt.imshow(image_sample)
plt.figure(figsize=(20, 40))
plt.title('gt')
plt.imshow(label_sample)
pretrained_weights = tf.estimator.WarmStartSettings(ckpt_to_initialize_from
=os.path.join(root_dir, 'pretrained_weights', 'vgg_16.ckpt'),
vars_to_warm_start=tf.get_collection(tf.GraphKeys.
TRAINABLE_VARIABLES, scope='vgg16_pretrained'))
fcn_segmentor = tf.estimator.Estimator(model_fn=fcn_model_fn, model_dir
=os.path.join(root_dir, 'ckpts'), warm_start_from=pretrained_weights)
if TRAIN == True:
for epoch in range(100):
train_input_fn = tf.estimator.inputs.numpy_input_fn(x=
train_data['x'], y=train_data['y'], batch_size=1,
num_epochs=None, shuffle=True)
fcn_segmentor.train(input_fn=train_input_fn, steps=200)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(x=eval_data[
'x'], y=eval_data['y'], num_epochs=1, batch_size=10,
shuffle=False)
eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
print('eval_loss :', eval_results)
if PREDICT == True:
pred_input_fn = tf.estimator.inputs.numpy_input_fn(x=test_data['x'],
y=test_data['y'], batch_size=1, num_epochs=1, shuffle=False)
pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))
pred = [p['classes'] for p in pred]
fig = plt.figure(1, figsize=(32, 16))
for i, p in enumerate(pred):
fig.add_subplot(3, 1, 1)
plt.title('camera photo')
plt.imshow(test_data['x'][i])
fig.add_subplot(3, 1, 2)
plt.title('prediction')
plt.imshow(p)
fig.add_subplot(3, 1, 3)
plt.title('ground truth')
plt.imshow(gt['test'][i])
filename = 'pred_{}.png'.format(i)
plt.savefig(os.path.join(root_dir, 'predictions', filename))
| <mask token>
tf.logging.set_verbosity(tf.logging.INFO)
now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
root_logdir = 'logs'
logdir = '{}/run-{}/'.format(root_logdir, now)
def fcn_model_fn(features, labels, mode):
L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
trainable = False
if mode == tf.estimator.ModeKeys.TRAIN:
trainable = True
seed = 2019
with tf.name_scope('vgg16_pretrained'):
x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',
padding='same', name='conv1_1', kernel_regularizer=L2,
trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_1')
x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=
'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_1')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')
with tf.name_scope('deconv_layers'):
x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=
'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_1')
x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=
'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_2')
x = tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=
'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_3')
heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32,
32), activation='linear', padding='same', name='deconv6_1',
kernel_regularizer=L2, trainable=trainable)
logit = tf.nn.sigmoid(heatmap, name='logit')
pred = tf.to_int32(logit > 0.5)
pred = tf.squeeze(pred, axis=3)
predictions = {'classes': pred, 'probabilities': logit}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
if False:
logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))
logit_f = tf.squeeze(logit_f, axis=[2, 3])
label_f = tf.reshape(labels, (-1, 1))
keep = tf.where(tf.greater_equal(labels, 0))
logit_f = tf.gather(logit_f, keep)
label_f = tf.gather(label_f, keep)
tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=logit_f)
heatmap = tf.squeeze(heatmap, axis=3)
label_f = tf.to_int32(labels > 0)
tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=heatmap)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
momentum=0.99)
train_op = optimizer.minimize(loss=loss, global_step=tf.train.
get_global_step())
tf.summary.scalar('train_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=
train_op)
iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=
2, name='mean_iou')
eval_metric_ops = {'IoU': iou}
tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}
tensors_to_log_iou = {'mean_iou': iou}
tf.summary.scalar('mean_iou', iou[0])
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,
every_n_iter=200)
if mode == tf.estimator.ModeKeys.EVAL:
tf.summary.scalar('eval_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
eval_metric_ops=eval_metric_ops)
if __name__ == '__main__':
root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
train_data, eval_data, test_data, gt = data_load.load()
TRAIN = False
PREDICT = True
DRAW_SAMPLE = False
if DRAW_SAMPLE == True:
pic = np.random.randint(len(test_data['x']))
image_sample = test_data['x'][pic]
label_sample = test_data['y'][pic]
plt.figure(figsize=(20, 40))
plt.title('data')
plt.imshow(image_sample)
plt.figure(figsize=(20, 40))
plt.title('gt')
plt.imshow(label_sample)
pretrained_weights = tf.estimator.WarmStartSettings(ckpt_to_initialize_from
=os.path.join(root_dir, 'pretrained_weights', 'vgg_16.ckpt'),
vars_to_warm_start=tf.get_collection(tf.GraphKeys.
TRAINABLE_VARIABLES, scope='vgg16_pretrained'))
fcn_segmentor = tf.estimator.Estimator(model_fn=fcn_model_fn, model_dir
=os.path.join(root_dir, 'ckpts'), warm_start_from=pretrained_weights)
if TRAIN == True:
for epoch in range(100):
train_input_fn = tf.estimator.inputs.numpy_input_fn(x=
train_data['x'], y=train_data['y'], batch_size=1,
num_epochs=None, shuffle=True)
fcn_segmentor.train(input_fn=train_input_fn, steps=200)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(x=eval_data[
'x'], y=eval_data['y'], num_epochs=1, batch_size=10,
shuffle=False)
eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
print('eval_loss :', eval_results)
if PREDICT == True:
pred_input_fn = tf.estimator.inputs.numpy_input_fn(x=test_data['x'],
y=test_data['y'], batch_size=1, num_epochs=1, shuffle=False)
pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))
pred = [p['classes'] for p in pred]
fig = plt.figure(1, figsize=(32, 16))
for i, p in enumerate(pred):
fig.add_subplot(3, 1, 1)
plt.title('camera photo')
plt.imshow(test_data['x'][i])
fig.add_subplot(3, 1, 2)
plt.title('prediction')
plt.imshow(p)
fig.add_subplot(3, 1, 3)
plt.title('ground truth')
plt.imshow(gt['test'][i])
filename = 'pred_{}.png'.format(i)
plt.savefig(os.path.join(root_dir, 'predictions', filename))
| <mask token>
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import data_load
from datetime import datetime
tf.logging.set_verbosity(tf.logging.INFO)
now = datetime.utcnow().strftime('%Y%m%d%H%M%S')
root_logdir = 'logs'
logdir = '{}/run-{}/'.format(root_logdir, now)
def fcn_model_fn(features, labels, mode):
L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
trainable = False
if mode == tf.estimator.ModeKeys.TRAIN:
trainable = True
seed = 2019
with tf.name_scope('vgg16_pretrained'):
x = tf.layers.conv2d(features, 64, (3, 3), activation='relu',
padding='same', name='conv1_1', kernel_regularizer=L2,
trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_1')
x = tf.layers.conv2d(x, 64, (3, 3), activation='relu', padding=
'same', name='conv1_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp1_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_1')
x = tf.layers.conv2d(x, 128, (3, 3), activation='relu', padding=
'same', name='conv2-2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp2_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_1')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_2')
x = tf.layers.conv2d(x, 256, (3, 3), activation='relu', padding=
'same', name='conv3_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp3_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv4_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp4_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_1')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_2')
x = tf.layers.conv2d(x, 512, (3, 3), activation='relu', padding=
'same', name='conv5_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp5_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')
with tf.name_scope('deconv_layers'):
x = tf.layers.conv2d(x, 4096, (7, 7), activation='relu', padding=
'same', name='conv6_1', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_1')
x = tf.layers.conv2d(x, 4096, (1, 1), activation='relu', padding=
'same', name='conv6_2', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_2')
x = tf.layers.conv2d(x, 1, (1, 1), activation='relu', padding=
'same', name='conv6_3', kernel_regularizer=L2, trainable=trainable)
x = tf.layers.dropout(x, rate=0.4, seed=seed, training=trainable,
name='dp6_3')
heatmap = tf.layers.conv2d_transpose(x, 1, (64, 64), strides=(32,
32), activation='linear', padding='same', name='deconv6_1',
kernel_regularizer=L2, trainable=trainable)
logit = tf.nn.sigmoid(heatmap, name='logit')
pred = tf.to_int32(logit > 0.5)
pred = tf.squeeze(pred, axis=3)
predictions = {'classes': pred, 'probabilities': logit}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
if False:
logit_f = tf.reshape(heatmap, (-1, 1, 1, 1))
logit_f = tf.squeeze(logit_f, axis=[2, 3])
label_f = tf.reshape(labels, (-1, 1))
keep = tf.where(tf.greater_equal(labels, 0))
logit_f = tf.gather(logit_f, keep)
label_f = tf.gather(label_f, keep)
tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=logit_f)
heatmap = tf.squeeze(heatmap, axis=3)
label_f = tf.to_int32(labels > 0)
tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=label_f,
logits=heatmap)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001,
momentum=0.99)
train_op = optimizer.minimize(loss=loss, global_step=tf.train.
get_global_step())
tf.summary.scalar('train_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=
train_op)
iou = tf.metrics.mean_iou(label_f, predictions['classes'], num_classes=
2, name='mean_iou')
eval_metric_ops = {'IoU': iou}
tensors_to_log_prob = {'probabilities': 'deconv_layers/logit'}
tensors_to_log_iou = {'mean_iou': iou}
tf.summary.scalar('mean_iou', iou[0])
logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log_iou,
every_n_iter=200)
if mode == tf.estimator.ModeKeys.EVAL:
tf.summary.scalar('eval_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
eval_metric_ops=eval_metric_ops)
if __name__ == '__main__':
root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
train_data, eval_data, test_data, gt = data_load.load()
TRAIN = False
PREDICT = True
DRAW_SAMPLE = False
if DRAW_SAMPLE == True:
pic = np.random.randint(len(test_data['x']))
image_sample = test_data['x'][pic]
label_sample = test_data['y'][pic]
plt.figure(figsize=(20, 40))
plt.title('data')
plt.imshow(image_sample)
plt.figure(figsize=(20, 40))
plt.title('gt')
plt.imshow(label_sample)
pretrained_weights = tf.estimator.WarmStartSettings(ckpt_to_initialize_from
=os.path.join(root_dir, 'pretrained_weights', 'vgg_16.ckpt'),
vars_to_warm_start=tf.get_collection(tf.GraphKeys.
TRAINABLE_VARIABLES, scope='vgg16_pretrained'))
fcn_segmentor = tf.estimator.Estimator(model_fn=fcn_model_fn, model_dir
=os.path.join(root_dir, 'ckpts'), warm_start_from=pretrained_weights)
if TRAIN == True:
for epoch in range(100):
train_input_fn = tf.estimator.inputs.numpy_input_fn(x=
train_data['x'], y=train_data['y'], batch_size=1,
num_epochs=None, shuffle=True)
fcn_segmentor.train(input_fn=train_input_fn, steps=200)
eval_input_fn = tf.estimator.inputs.numpy_input_fn(x=eval_data[
'x'], y=eval_data['y'], num_epochs=1, batch_size=10,
shuffle=False)
eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
print('eval_loss :', eval_results)
if PREDICT == True:
pred_input_fn = tf.estimator.inputs.numpy_input_fn(x=test_data['x'],
y=test_data['y'], batch_size=1, num_epochs=1, shuffle=False)
pred = list(fcn_segmentor.predict(input_fn=pred_input_fn))
pred = [p['classes'] for p in pred]
fig = plt.figure(1, figsize=(32, 16))
for i, p in enumerate(pred):
fig.add_subplot(3, 1, 1)
plt.title('camera photo')
plt.imshow(test_data['x'][i])
fig.add_subplot(3, 1, 2)
plt.title('prediction')
plt.imshow(p)
fig.add_subplot(3, 1, 3)
plt.title('ground truth')
plt.imshow(gt['test'][i])
filename = 'pred_{}.png'.format(i)
plt.savefig(os.path.join(root_dir, 'predictions', filename))
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 26 23:42:11 2018
@author: pohsuanh
Fully Convolutional Network FCN-32s.
FCN-32s network is based on VGG-16
"""
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import data_load
from datetime import datetime
tf.logging.set_verbosity(tf.logging.INFO)
# assign each run to a separate log file, so the tensorboard can function properly.
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "logs"
logdir = "{}/run-{}/".format(root_logdir,now)
def fcn_model_fn(features, labels, mode):
L2 = tf.contrib.layers.l2_regularizer(scale=0.1)
trainable = False
if mode == tf.estimator.ModeKeys.TRAIN :
trainable = True
seed = 2019
with tf.name_scope("vgg16_pretrained"):
x = tf.layers.conv2d(features, 64, (3, 3),
activation='relu',
padding='same',
name='conv1_1',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp1_1')
x = tf.layers.conv2d(x, 64, (3, 3),
activation='relu',
padding='same',
name='conv1_2',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp1_2')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool1')
# Block 2
x = tf.layers.conv2d(x, 128, (3, 3),
activation='relu',
padding='same',
name='conv2_1',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp2_1')
x = tf.layers.conv2d(x, 128, (3, 3),
activation='relu',
padding='same',
name='conv2-2',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp2_2')
x = tf.layers.max_pooling2d(x,(2, 2), strides=(2, 2), name='pool2')
# Block 3
x = tf.layers.conv2d (x, 256, (3, 3),
activation='relu',
padding='same',
name='conv3_1',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp3_1')
x = tf.layers.conv2d (x, 256, (3, 3),
activation='relu',
padding='same',
name='conv3_2',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp3_2')
x = tf.layers.conv2d (x, 256, (3, 3),
activation='relu',
padding='same',
name='conv3_3',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp3_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool3')
# Block 4
x = tf.layers.conv2d (x, 512, (3, 3),
activation='relu',
padding='same',
name='conv4_1',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp4_1')
x = tf.layers.conv2d (x, 512, (3, 3),
activation='relu',
padding='same',
name='conv4_2',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp4_2')
x = tf.layers.conv2d (x, 512, (3, 3),
activation='relu',
padding='same',
name='conv4_3',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp4_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool4')
# Block 5
x = tf.layers.conv2d (x, 512, (3, 3),
activation='relu',
padding='same',
name='conv5_1',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp5_1')
x = tf.layers.conv2d (x, 512, (3, 3),
activation='relu',
padding='same',
name='conv5_2',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp5_2')
x = tf.layers.conv2d (x, 512, (3, 3),
activation='relu',
padding='same',
name='conv5_3',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp5_3')
x = tf.layers.max_pooling2d(x, (2, 2), strides=(2, 2), name='pool5')
with tf.name_scope("deconv_layers"):
# Block 6
x = tf.layers.conv2d(x, 4096, (7,7),
activation='relu',
padding='same',
name='conv6_1',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp6_1')
x = tf.layers.conv2d(x, 4096, (1,1),
activation='relu',
padding='same',
name='conv6_2',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp6_2')
x = tf.layers.conv2d(x, 1, (1,1),
activation='relu',
padding='same',
name='conv6_3',
kernel_regularizer= L2,
trainable = trainable)
x = tf.layers.dropout(x, rate = 0.4, seed = seed, training = trainable , name ='dp6_3')
# There are two classes [1: road, 0: non-road]
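        # The five 2x2 max-pools above shrink the input by 2^5 = 32, so a single stride-32
        # transposed convolution restores the original resolution in one step -- this is the
        # "32s" upsampling that gives FCN-32s its name.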
heatmap = tf.layers.conv2d_transpose(x, 1, (64,64), strides=(32,32),
activation='linear',
padding='same',
name='deconv6_1',
kernel_regularizer= L2,
trainable = trainable)
logit = tf.nn.sigmoid(heatmap, name = 'logit')
pred = tf.to_int32(logit > 0.5)
pred = tf.squeeze(pred, axis = 3)
# print(heatmap.shape)
# Do pixel-wise classification :
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": pred, # tf.argmax(logit, axis =3 )
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the logging_hook`.
"probabilities": logit #tf.nn.softmax(logit, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
# Homework requires tf.nn.sigmoid_cross_entropy_with_logits()
if False :
# ignore where label is -1 , which corresponds to Void.
logit_f = tf.reshape(heatmap, (-1,1,1,1)) # flatten the output
logit_f = tf.squeeze(logit_f, axis = [2,3])
label_f = tf.reshape(labels,(-1,1))
keep = tf.where(tf.greater_equal(labels, 0) )
logit_f = tf.gather(logit_f, keep)
label_f = tf.gather(label_f, keep)
tf.assert_equal(tf.shape(label_f)[0], tf.shape(logit_f)[0])
tf.assert_non_negative(label_f) # Void is labelled -1, which should be excluded from the loss func
# sigmoid_cross_entorpy implements tf.nn.sparse_signoid_cross_entropy_with_logit,
# it will convert output to logit in the op
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels = label_f, logits=logit_f)
heatmap = tf.squeeze(heatmap, axis =3)
label_f = tf.to_int32(labels > 0)
tf.assert_equal(tf.shape(label_f), tf.shape(heatmap))
tf.assert_non_negative(label_f)
loss = tf.losses.sigmoid_cross_entropy( multi_class_labels = label_f ,logits = heatmap)
# Configure the trainable Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum = 0.99)
train_op = optimizer.minimize(loss=loss, global_step = tf.train.get_global_step())
tf.summary.scalar('train_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
# Set up logging for metrics
iou = tf.metrics.mean_iou(label_f,predictions['classes'], num_classes = 2 , name = 'mean_iou')
eval_metric_ops = {"IoU": iou}
tensors_to_log_prob = {"probabilities": "deconv_layers/logit"}
tensors_to_log_iou = {"mean_iou": iou}
tf.summary.scalar('mean_iou', iou[0])
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log_iou, every_n_iter=200)
if mode == tf.estimator.ModeKeys.EVAL :
tf.summary.scalar('eval_loss', loss)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops = eval_metric_ops)
#%%
if __name__ == "__main__":
root_dir = '/home/pohsuanh/Documents/Computer_Vision/HW6'
# Load training and eval data
train_data, eval_data, test_data, gt = data_load.load()
# Flags
TRAIN = False
PREDICT = True
DRAW_SAMPLE = False
# Construct model
if DRAW_SAMPLE == True :
# pic = np.random.randint((test_data['x']).shape[0])
pic = np.random.randint(len(test_data['x']))
image_sample = test_data['x'][pic]
label_sample = test_data['y'][pic]
# image_sample = tf.Session().run(image_sample)
#
# label_sample = tf.Session().run(label_sample)
plt.figure(figsize=(20,40))
plt.title('data')
plt.imshow(image_sample)
plt.figure(figsize =(20,40))
plt.title('gt')
plt.imshow(label_sample)
# Create the Estimator
pretrained_weights = tf.estimator.WarmStartSettings(
ckpt_to_initialize_from=os.path.join(root_dir,'pretrained_weights','vgg_16.ckpt'),
vars_to_warm_start= tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope = 'vgg16_pretrained'))
fcn_segmentor = tf.estimator.Estimator(
model_fn=fcn_model_fn, model_dir=os.path.join(root_dir, 'ckpts'), warm_start_from= pretrained_weights)
if TRAIN == True :
for epoch in range(100):
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x=train_data['x'],
y=train_data['y'],
batch_size=1,
num_epochs=None, # number of epochs to iterate over data. If None will run forever.
shuffle=True)
fcn_segmentor.train(
input_fn=train_input_fn,
steps=200
)
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x=eval_data['x'],
y=eval_data['y'],
num_epochs=1,
batch_size=10,
shuffle=False)
eval_results = fcn_segmentor.evaluate(input_fn=eval_input_fn)
print('eval_loss :', eval_results)
#%% We withhold the predction from test set untill all the hyperparameters are finetuned.
if PREDICT == True :
pred_input_fn = tf.estimator.inputs.numpy_input_fn(
x=test_data['x'],
y=test_data['y'],
batch_size =1,
num_epochs=1,
shuffle=False)
# predict method returns a generator
pred = list( fcn_segmentor.predict(input_fn = pred_input_fn))
pred = [p['classes'] for p in pred]
fig = plt.figure(1, figsize=(32,16))
for i, p in enumerate(pred) :
fig.add_subplot(3,1,1)
plt.title('camera photo')
plt.imshow(test_data['x'][i])
fig.add_subplot(3,1,2)
plt.title('prediction')
plt.imshow(p)
fig.add_subplot(3,1,3)
plt.title('ground truth')
plt.imshow(gt['test'][i])
filename = 'pred_{}.png'.format(i)
plt.savefig(os.path.join(root_dir,'predictions',filename)) | [
1,
2,
3,
4,
5
] |
1,331 | 20d09a616133295a6162a7ab1d7970ccbaf6de95 | <mask token>
| <mask token>
torch.nn.functional.adaptive_avg_pool3d(**data)
| <mask token>
data = pickle.load(open('dd0eb7901523d494d4aa324f474c782063e9e231.p', 'rb'))
torch.nn.functional.adaptive_avg_pool3d(**data)
| import pickle
import torch
data = pickle.load(open('dd0eb7901523d494d4aa324f474c782063e9e231.p', 'rb'))
torch.nn.functional.adaptive_avg_pool3d(**data)
| null | [
0,
1,
2,
3
] |
1,332 | f69b4d022ebed5a0b660f55704bbe762d5d765d5 | <mask token>
| <mask token>
def checkio(data):
return True or False
| '''
Given an expression with numbers, brackets and operators. But in this task only brackets are important. Brackets can be one of three types -- "{}" "()" "[]". Brackets are determine the scope or restricted some expression. So each if was opened, then must be closed with the same type. The scopes of brackets must not intersected. You should to make a decision correct an expression or not. Don't care about operators and operands.
Input: An expression with different of types brackets.
Output: A boolean. Correct an expression or not.
Example:
?
1
2
3
4
5
checkio("((5+3)*2+1)") == True
checkio("{[(3+1)+2]+}") == True
checkio("(3+{1-1)}") == False
checkio("[1+1]+(2*2)-{3/3}") == True
checkio("(({[(((1)-2)+3)-3]/3}-3)") == False
'''
def checkio(data):
#replace this for solution
return True or False | null | null | [
0,
1,
2
] |
1,333 | 97a51d959ad642467c508cedc8786f636e4050bb | <mask token>
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
<mask token>
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
<mask token>
| <mask token>
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
def test_000_basic_functionality() ->None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() ->None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime='chartreuse1')
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
<mask token>
def test_004_when_df_is_named() ->None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = 'Named dataframe'
skim(df)
| <mask token>
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
def test_000_basic_functionality() ->None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() ->None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime='chartreuse1')
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
def test_003_not_enough_datetimes() ->None:
"""Tests logic branch with too few datetimes for freq inference."""
df = generate_test_data()
df = df.head(2)
skim(df)
def test_004_when_df_is_named() ->None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = 'Named dataframe'
skim(df)
| <mask token>
import pytest
from click.testing import CliRunner
from skimpy import __main__
from skimpy import generate_test_data
from skimpy import skim
@pytest.fixture
def runner() ->CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) ->None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv('test_file.csv', index=False)
result = runner.invoke(__main__.main, ['test_file.csv'])
assert result.exit_code == 0
def test_000_basic_functionality() ->None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() ->None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime='chartreuse1')
def test_002_header_style() ->None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style='italic green')
def test_003_not_enough_datetimes() ->None:
"""Tests logic branch with too few datetimes for freq inference."""
df = generate_test_data()
df = df.head(2)
skim(df)
def test_004_when_df_is_named() ->None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = 'Named dataframe'
skim(df)
| """Test cases for the __main__ module."""
import pytest
from click.testing import CliRunner
from skimpy import __main__
from skimpy import generate_test_data
from skimpy import skim
@pytest.fixture
def runner() -> CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner()
def test_main_succeeds(runner: CliRunner) -> None:
"""It exits with a status code of zero."""
with runner.isolated_filesystem():
df = generate_test_data()
df.to_csv("test_file.csv", index=False)
result = runner.invoke(__main__.main, ["test_file.csv"])
assert result.exit_code == 0
def test_000_basic_functionality() -> None:
"""Tests that a skim of the test data works."""
df = generate_test_data()
skim(df)
def test_001_colour_kwargs() -> None:
"""Tests that colour keyword arguments work."""
df = generate_test_data()
skim(df, datetime="chartreuse1")
def test_002_header_style() -> None:
"""Tests that the header style optional argument works."""
df = generate_test_data()
skim(df, header_style="italic green")
def test_003_not_enough_datetimes() -> None:
"""Tests logic branch with too few datetimes for freq inference."""
df = generate_test_data()
df = df.head(2)
skim(df)
def test_004_when_df_is_named() -> None:
"""Tests what happens when df has a name."""
df = generate_test_data()
df.name = "Named dataframe"
skim(df)
| [
3,
6,
7,
8,
9
] |
1,334 | 005650e2747c61b730960a29891b6ba6c8bd381b | <mask token>
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<mask token>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
<mask token>
| <mask token>
def factorial(n):
value = 1
for i in range(n, 1, -1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<mask token>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
<mask token>
| <mask token>
def factorial(n):
value = 1
for i in range(n, 1, -1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<mask token>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
def overlap_s_gaussians(expo1, expo2, power_of_r):
norm1 = pow(2 * expo1 / pi, 0.75)
norm2 = pow(2 * expo2 / pi, 0.75)
value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2,
power_of_r + 2)
return value
| import os, sys
import numpy as np
from math import exp, sqrt, pi
def factorial(n):
value = 1
for i in range(n, 1, -1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
return k
<mask token>
def gaussian_integral(alpha, m):
if int(m / 2) * 2 == m:
n = int(m / 2)
value = double_factorial(2 * n - 1) * sqrt(pi) / pow(2, n + 1) / pow(
alpha, n + 0.5)
else:
n = int((m - 1) / 2)
value = factorial(n) / 2 / pow(alpha, n + 1)
return value
def overlap_s_gaussians(expo1, expo2, power_of_r):
norm1 = pow(2 * expo1 / pi, 0.75)
norm2 = pow(2 * expo2 / pi, 0.75)
value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1 + expo2,
power_of_r + 2)
return value
| # coding: utf-8
import os, sys
import numpy as np
from math import exp, sqrt, pi
def factorial(n):
value = 1
for i in range(n,1,-1):
value *= i
return value
def double_factorial(n):
k = 1
for i in range(n, 1, -2):
k *= i
#print("n:", n, "double factorial:", k)
return k
"""\int_0^\infty r^m e^{-alpha * r^2} dr"""
def gaussian_integral(alpha, m):
if int(m/2)*2 == m: # even number
n = int(m/2)
value = double_factorial(2*n-1) * sqrt(pi) / pow(2, n+1) / pow(alpha, n+0.5)
else:
n = int((m-1)/2)
value = factorial(n) / 2 / pow(alpha, n+1)
return value
def overlap_s_gaussians(expo1, expo2, power_of_r):
norm1 = pow(2*expo1/pi, 0.75)
norm2 = pow(2*expo2/pi, 0.75)
value = norm1 * norm2 * 4 * pi * gaussian_integral(expo1+expo2, power_of_r+2)
return value
| [
2,
3,
4,
5,
6
] |
1,335 | 26f466a6a2fd09bb108ca89e4537192c070ff83b | <mask token>
| <mask token>
print(len(c))
| c = 'こ に ち わ '
print(len(c))
| c = "こ に ち わ "
print (len(c))
| null | [
0,
1,
2,
3
] |
1,336 | 8c0a4d5a86d9ebd38ea05efb5b5b570368ce1449 | <mask token>
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ''
<mask token>
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ''
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ''
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for i, f in enumerate(source_files[::-1]):
if '/core/' not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split('(')[1].split(')')[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return f, source_line
return '', ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ELF', help='ELF file for analysis')
parser.add_argument('--verbose', '-v', action='store_true', help=
'Output additional DWARF info for each panic location in the binary')
parser.add_argument('--riscv', action='store_true', help=
'Use risc-v based objdump')
return parser.parse_args()
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, '-d', elf), capture_output=True, text
=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile('.*:.*#.*' + function + '.*')
if not is_riscv:
function_re = re.compile('.*:.*<.*' + function + '.*')
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(':')[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
capture_output=True, text=True)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ''
line_string = ''
line_info_string = ''
abstract_origin_string = ''
linkage_name_string = ''
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo['addr'] = addr
panicinfo['function'] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo['line_info'] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
file_string):
raise RuntimeError('I misunderstand DWARF')
if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
file_string):
filename = file_string.split('"')[1]
line_num = line_string.split('(')[1].split(')')[0]
if 'DW_AT_call_file' in file_string:
panicinfo['call_file'] = filename
panicinfo['call_line'] = line_num
if 'DW_AT_decl_file' in file_string:
panicinfo['decl_file'] = filename
panicinfo['decl_line'] = line_num
if not '/core/' in filename:
if not 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'call/decl'
else:
panicinfo['best_guess_source'
] = 'call-closure-line-info'
panic_list.append(panicinfo)
continue
else:
parent_file, parent_line = check_for_source_in_parent(elf,
addr)
if parent_file:
panicinfo['parent_call_file'] = parent_file
panicinfo['parent_call_line'] = parent_line
panicinfo['best_guess_source'] = 'parent'
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if 'core' in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr
)
name3 = any_linkage_matches_panic_func(elf,
addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'lineinfo'
panic_list.append(panicinfo)
continue
else:
raise RuntimeError('Unhandled')
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
.format(linkage_name_string, addr))
continue
                no_info_panic_list.append(panicinfo)
print('did not find source for panic: {}'.format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo['abstract_origin'] = origin
if 'core' in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
'Probably could add this origin or one of its parents to the panic function list: {}'
.format(abstract_origin_string))
continue
else:
panicinfo['best_guess_source'] = 'abstract_origin + line'
panic_list.append(panicinfo)
continue
else:
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
].strip()
function_name = dw_at_name_string.split('"')[1]
if 'OUTLINED_FUNCTION_' in function_name:
if function_name not in panic_functions:
panic_functions.append(function_name + '>')
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError('BUG: Should not reach here')
return panic_list, within_core_panic_list, no_info_panic_list
<mask token>
def main():
args = parse_args()
if sys.version_info.minor < 7:
print('This tool requires Python 3.7+')
return -1
print('Tock panic report for ' + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
objdump, args.ELF, args.riscv)
print('num_panics: {}'.format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic['function']].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print('{}: {}'.format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
print('num panics for which no info available: {}'.format(len(
no_info_panic_list)))
if args.verbose:
print(
'If more debug info is needed, run dwarfdump directly on the address in question.'
)
<mask token>
| <mask token>
if platform.system() == 'Darwin':
DWARFDUMP = 'dwarfdump'
elif platform.system() == 'Linux':
DWARFDUMP = 'llvm-dwarfdump'
else:
raise NotImplementedError('Unknown platform')
<mask token>
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ''
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ''
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ''
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for i, f in enumerate(source_files[::-1]):
if '/core/' not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split('(')[1].split(')')[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return f, source_line
return '', ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ELF', help='ELF file for analysis')
parser.add_argument('--verbose', '-v', action='store_true', help=
'Output additional DWARF info for each panic location in the binary')
parser.add_argument('--riscv', action='store_true', help=
'Use risc-v based objdump')
return parser.parse_args()
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, '-d', elf), capture_output=True, text
=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile('.*:.*#.*' + function + '.*')
if not is_riscv:
function_re = re.compile('.*:.*<.*' + function + '.*')
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(':')[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
capture_output=True, text=True)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ''
line_string = ''
line_info_string = ''
abstract_origin_string = ''
linkage_name_string = ''
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo['addr'] = addr
panicinfo['function'] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo['line_info'] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
file_string):
raise RuntimeError('I misunderstand DWARF')
if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
file_string):
filename = file_string.split('"')[1]
line_num = line_string.split('(')[1].split(')')[0]
if 'DW_AT_call_file' in file_string:
panicinfo['call_file'] = filename
panicinfo['call_line'] = line_num
if 'DW_AT_decl_file' in file_string:
panicinfo['decl_file'] = filename
panicinfo['decl_line'] = line_num
if not '/core/' in filename:
if not 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'call/decl'
else:
panicinfo['best_guess_source'
] = 'call-closure-line-info'
panic_list.append(panicinfo)
continue
else:
parent_file, parent_line = check_for_source_in_parent(elf,
addr)
if parent_file:
panicinfo['parent_call_file'] = parent_file
panicinfo['parent_call_line'] = parent_line
panicinfo['best_guess_source'] = 'parent'
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if 'core' in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr
)
name3 = any_linkage_matches_panic_func(elf,
addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'lineinfo'
panic_list.append(panicinfo)
continue
else:
raise RuntimeError('Unhandled')
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
.format(linkage_name_string, addr))
continue
                no_info_panic_list.append(panicinfo)
print('did not find source for panic: {}'.format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo['abstract_origin'] = origin
if 'core' in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
'Probably could add this origin or one of its parents to the panic function list: {}'
.format(abstract_origin_string))
continue
else:
panicinfo['best_guess_source'] = 'abstract_origin + line'
panic_list.append(panicinfo)
continue
else:
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
].strip()
function_name = dw_at_name_string.split('"')[1]
if 'OUTLINED_FUNCTION_' in function_name:
if function_name not in panic_functions:
panic_functions.append(function_name + '>')
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError('BUG: Should not reach here')
return panic_list, within_core_panic_list, no_info_panic_list
def pretty_print(panicinfo):
if panicinfo['best_guess_source'] == 'call/decl':
try:
print('\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[
'call_file'], panicinfo['call_line']))
except:
print('\t{} -- in function starting at {}:{}'.format(panicinfo[
'addr'], panicinfo['decl_file'], panicinfo['decl_line']))
elif panicinfo['best_guess_source'] == 'parent':
print('\t{} -- at or in function starting at {}:{}'.format(
panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[
'parent_call_line']))
elif panicinfo['best_guess_source'] == 'lineinfo':
print('\t{} -- in closure, try: {}'.format(panicinfo['addr'],
panicinfo['line_info']))
elif panicinfo['best_guess_source'] == 'abstract_origin + line':
print('\t{} -- line_info: {} from origin :{}'.format(panicinfo[
'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))
elif panicinfo['best_guess_source'] == 'call-closure-line-info':
print('\t{} -- in closure starting on line_info: {}'.format(
panicinfo['addr'], panicinfo['line_info']))
else:
raise RuntimeError('Missing best guess source: {}'.format(panicinfo))
def main():
args = parse_args()
if sys.version_info.minor < 7:
print('This tool requires Python 3.7+')
return -1
print('Tock panic report for ' + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
objdump, args.ELF, args.riscv)
print('num_panics: {}'.format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic['function']].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print('{}: {}'.format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
print('num panics for which no info available: {}'.format(len(
no_info_panic_list)))
if args.verbose:
print(
'If more debug info is needed, run dwarfdump directly on the address in question.'
)
if __name__ == '__main__':
main()
| <mask token>
if platform.system() == 'Darwin':
DWARFDUMP = 'dwarfdump'
elif platform.system() == 'Linux':
DWARFDUMP = 'llvm-dwarfdump'
else:
raise NotImplementedError('Unknown platform')
ARM_OBJDUMP = 'arm-none-eabi-objdump'
RISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'
panic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',
'slice_index_order_fail', 'slice_end_index_len_fail',
'slice_start_index_len_fail', 'slice17len_mismatch_fail',
'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',
'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',
'11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',
'write_char', 'write_str', 'printable5check',
'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',
'10unwrap_err17h6', '13is_whitespace17',
'$u20$core..slice..index..SliceIndex$LT',
'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',
'_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'
,
'_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'
]
dw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')
dw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')
line_info_re = re.compile('.*Line info.*')
abstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')
dw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')
dw_at_name_re = re.compile('.*DW_AT_name.*')
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ''
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ''
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ''
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for i, f in enumerate(source_files[::-1]):
if '/core/' not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split('(')[1].split(')')[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return f, source_line
return '', ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ELF', help='ELF file for analysis')
parser.add_argument('--verbose', '-v', action='store_true', help=
'Output additional DWARF info for each panic location in the binary')
parser.add_argument('--riscv', action='store_true', help=
'Use risc-v based objdump')
return parser.parse_args()
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, '-d', elf), capture_output=True, text
=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile('.*:.*#.*' + function + '.*')
if not is_riscv:
function_re = re.compile('.*:.*<.*' + function + '.*')
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(':')[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
capture_output=True, text=True)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ''
line_string = ''
line_info_string = ''
abstract_origin_string = ''
linkage_name_string = ''
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo['addr'] = addr
panicinfo['function'] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo['line_info'] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
file_string):
raise RuntimeError('I misunderstand DWARF')
if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
file_string):
filename = file_string.split('"')[1]
line_num = line_string.split('(')[1].split(')')[0]
if 'DW_AT_call_file' in file_string:
panicinfo['call_file'] = filename
panicinfo['call_line'] = line_num
if 'DW_AT_decl_file' in file_string:
panicinfo['decl_file'] = filename
panicinfo['decl_line'] = line_num
if not '/core/' in filename:
if not 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'call/decl'
else:
panicinfo['best_guess_source'
] = 'call-closure-line-info'
panic_list.append(panicinfo)
continue
else:
parent_file, parent_line = check_for_source_in_parent(elf,
addr)
if parent_file:
panicinfo['parent_call_file'] = parent_file
panicinfo['parent_call_line'] = parent_line
panicinfo['best_guess_source'] = 'parent'
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if 'core' in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr
)
name3 = any_linkage_matches_panic_func(elf,
addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'lineinfo'
panic_list.append(panicinfo)
continue
else:
raise RuntimeError('Unhandled')
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
.format(linkage_name_string, addr))
continue
                no_info_panic_list.append(panicinfo)
print('did not find source for panic: {}'.format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo['abstract_origin'] = origin
if 'core' in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
'Probably could add this origin or one of its parents to the panic function list: {}'
.format(abstract_origin_string))
continue
else:
panicinfo['best_guess_source'] = 'abstract_origin + line'
panic_list.append(panicinfo)
continue
else:
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
].strip()
function_name = dw_at_name_string.split('"')[1]
if 'OUTLINED_FUNCTION_' in function_name:
if function_name not in panic_functions:
panic_functions.append(function_name + '>')
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError('BUG: Should not reach here')
return panic_list, within_core_panic_list, no_info_panic_list
def pretty_print(panicinfo):
if panicinfo['best_guess_source'] == 'call/decl':
try:
print('\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[
'call_file'], panicinfo['call_line']))
except:
print('\t{} -- in function starting at {}:{}'.format(panicinfo[
'addr'], panicinfo['decl_file'], panicinfo['decl_line']))
elif panicinfo['best_guess_source'] == 'parent':
print('\t{} -- at or in function starting at {}:{}'.format(
panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[
'parent_call_line']))
elif panicinfo['best_guess_source'] == 'lineinfo':
print('\t{} -- in closure, try: {}'.format(panicinfo['addr'],
panicinfo['line_info']))
elif panicinfo['best_guess_source'] == 'abstract_origin + line':
print('\t{} -- line_info: {} from origin :{}'.format(panicinfo[
'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))
elif panicinfo['best_guess_source'] == 'call-closure-line-info':
print('\t{} -- in closure starting on line_info: {}'.format(
panicinfo['addr'], panicinfo['line_info']))
else:
raise RuntimeError('Missing best guess source: {}'.format(panicinfo))
def main():
args = parse_args()
if sys.version_info.minor < 7:
print('This tool requires Python 3.7+')
return -1
print('Tock panic report for ' + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
objdump, args.ELF, args.riscv)
print('num_panics: {}'.format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic['function']].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print('{}: {}'.format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
print('num panics for which no info available: {}'.format(len(
no_info_panic_list)))
if args.verbose:
print(
'If more debug info is needed, run dwarfdump directly on the address in question.'
)
if __name__ == '__main__':
main()
| import argparse
import platform
import re
import subprocess
import sys
if platform.system() == 'Darwin':
DWARFDUMP = 'dwarfdump'
elif platform.system() == 'Linux':
DWARFDUMP = 'llvm-dwarfdump'
else:
raise NotImplementedError('Unknown platform')
ARM_OBJDUMP = 'arm-none-eabi-objdump'
RISCV_OBJDUMP = 'riscv64-unknown-elf-objdump'
panic_functions = ['expect_failed', 'unwrap_failed', 'panic_bounds_check',
'slice_index_order_fail', 'slice_end_index_len_fail',
'slice_start_index_len_fail', 'slice17len_mismatch_fail',
'str16slice_error_fail', 'copy_from_slice17len_mismatch_fail',
'copy_from_slice17', 'panicking5panic', '6unwrap17', '6expect17',
'11copy_within17', 'core..fmt..builders..PadAdapter', '11copy_within17',
'write_char', 'write_str', 'printable5check',
'char$u20$as$u20$core..fmt..Debug', 'GenericRadix7fmt_int',
'10unwrap_err17h6', '13is_whitespace17',
'$u20$core..slice..index..SliceIndex$LT',
'core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter',
'_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE'
,
'_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE'
]
dw_at_file_re = re.compile('.*(?:DW_AT_call_file|DW_AT_decl_file).*')
dw_at_line_re = re.compile('.*(?:DW_AT_call_line|DW_AT_decl_line).*')
line_info_re = re.compile('.*Line info.*')
abstract_origin_re = re.compile('.*DW_AT_abstract_origin.*')
dw_at_linkage_name_re = re.compile('.*DW_AT_linkage_name.*')
dw_at_name_re = re.compile('.*DW_AT_name.*')
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ''
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ''
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ''
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, '-p', elf),
capture_output=True, text=True)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for i, f in enumerate(source_files[::-1]):
if '/core/' not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split('(')[1].split(')')[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return f, source_line
return '', ''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('ELF', help='ELF file for analysis')
parser.add_argument('--verbose', '-v', action='store_true', help=
'Output additional DWARF info for each panic location in the binary')
parser.add_argument('--riscv', action='store_true', help=
'Use risc-v based objdump')
return parser.parse_args()
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, '-d', elf), capture_output=True, text
=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile('.*:.*#.*' + function + '.*')
if not is_riscv:
function_re = re.compile('.*:.*<.*' + function + '.*')
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(':')[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run((DWARFDUMP, '--lookup=0x' + addr, elf),
capture_output=True, text=True)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ''
line_string = ''
line_info_string = ''
abstract_origin_string = ''
linkage_name_string = ''
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo['addr'] = addr
panicinfo['function'] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo['line_info'] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if ('DW_AT_call_file' in file_string and 'DW_AT_decl_file' in
file_string):
raise RuntimeError('I misunderstand DWARF')
if ('DW_AT_call_file' in file_string or 'DW_AT_decl_file' in
file_string):
filename = file_string.split('"')[1]
line_num = line_string.split('(')[1].split(')')[0]
if 'DW_AT_call_file' in file_string:
panicinfo['call_file'] = filename
panicinfo['call_line'] = line_num
if 'DW_AT_decl_file' in file_string:
panicinfo['decl_file'] = filename
panicinfo['decl_line'] = line_num
if not '/core/' in filename:
if not 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'call/decl'
else:
panicinfo['best_guess_source'
] = 'call-closure-line-info'
panic_list.append(panicinfo)
continue
else:
parent_file, parent_line = check_for_source_in_parent(elf,
addr)
if parent_file:
panicinfo['parent_call_file'] = parent_file
panicinfo['parent_call_line'] = parent_line
panicinfo['best_guess_source'] = 'parent'
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if 'core' in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr
)
name3 = any_linkage_matches_panic_func(elf,
addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif 'closure' in abstract_origin_string:
panicinfo['best_guess_source'] = 'lineinfo'
panic_list.append(panicinfo)
continue
else:
raise RuntimeError('Unhandled')
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
'Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}'
.format(linkage_name_string, addr))
continue
                no_info_panic_list.append(panicinfo)
print('did not find source for panic: {}'.format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo['abstract_origin'] = origin
if 'core' in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
'Probably could add this origin or one of its parents to the panic function list: {}'
.format(abstract_origin_string))
continue
else:
panicinfo['best_guess_source'] = 'abstract_origin + line'
panic_list.append(panicinfo)
continue
else:
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[-1
].strip()
function_name = dw_at_name_string.split('"')[1]
if 'OUTLINED_FUNCTION_' in function_name:
if function_name not in panic_functions:
panic_functions.append(function_name + '>')
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError('BUG: Should not reach here')
return panic_list, within_core_panic_list, no_info_panic_list
def pretty_print(panicinfo):
if panicinfo['best_guess_source'] == 'call/decl':
try:
print('\t{} -- {}:{}'.format(panicinfo['addr'], panicinfo[
'call_file'], panicinfo['call_line']))
except:
print('\t{} -- in function starting at {}:{}'.format(panicinfo[
'addr'], panicinfo['decl_file'], panicinfo['decl_line']))
elif panicinfo['best_guess_source'] == 'parent':
print('\t{} -- at or in function starting at {}:{}'.format(
panicinfo['addr'], panicinfo['parent_call_file'], panicinfo[
'parent_call_line']))
elif panicinfo['best_guess_source'] == 'lineinfo':
print('\t{} -- in closure, try: {}'.format(panicinfo['addr'],
panicinfo['line_info']))
elif panicinfo['best_guess_source'] == 'abstract_origin + line':
print('\t{} -- line_info: {} from origin :{}'.format(panicinfo[
'addr'], panicinfo['line_info'], panicinfo['abstract_origin']))
elif panicinfo['best_guess_source'] == 'call-closure-line-info':
print('\t{} -- in closure starting on line_info: {}'.format(
panicinfo['addr'], panicinfo['line_info']))
else:
raise RuntimeError('Missing best guess source: {}'.format(panicinfo))
def main():
args = parse_args()
if sys.version_info.minor < 7:
print('This tool requires Python 3.7+')
return -1
print('Tock panic report for ' + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
panic_list, within_core_panic_list, no_info_panic_list = find_all_panics(
objdump, args.ELF, args.riscv)
print('num_panics: {}'.format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic['function']].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print('{}: {}'.format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print('num panics in core ignored: {}'.format(len(within_core_panic_list)))
print('num panics for which no info available: {}'.format(len(
no_info_panic_list)))
if args.verbose:
print(
'If more debug info is needed, run dwarfdump directly on the address in question.'
)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 or the MIT License.
# SPDX-License-Identifier: Apache-2.0 OR MIT
# Copyright Tock Contributors 2023.
# Prints out the source locations of panics in a Tock kernel ELF
#
# This tool attempts to trace all panic locations in a Tock kernel ELF by
# tracing calls to panic functions in the core library, using the debug information
# embedded in the ELF file. This tool requires an ELF which includes debug information.
# In its current state, it cannot always accurately provide the source locations
# corresponding to each panic, but tries to be honest about its confidence in
# each guess. In general, each guess is usually enough to locate the relevant panic.
# More creative analysis might be able to increase
# the accuracy with which this tool can identify source locations of panics. For now,
# this tool is useful for:
#
# - obtaining a rough count of the number of panics in a Tock kernel binary
#
# - finding and removing panics in a Tock kernel binary
#
# - roughly determining which components of a Tock kernel binary contain the most panic
# paths
#
# There are several assumptions built into this tool which may not always hold. For one,
# the list of panic_functions is assumed not to match any strings in the actual
# codebase, despite the fact they are incomplete function names and overlap is possible.
# I could solve this by using full names of these functions, but I am unsure how often
# the name mangling of these functions will change as the rust compiler changes so this
# approach felt potentially more stable.
#
# Several assumptions are made about DWARF locations that do not always hold, so source
# locations are not always accurate -- sometimes, the printed location just points to
# the function containing a panic, rather than the actual line on which the panic
# occurs. Some assumptions about which panics are in the core library and will be
# caught by grepping for other calls may also not always hold. The best way to inspect
# these is by manually inspecting the panics in the `within_core_panic_list`.
#
# This script stores panics which it cannot trace out of the core library in the
# `no_info_panic_list`. If this list contains some panics, that is a sign that some
# panics have not been identified. You can manually look at the addresses stored in
# this list, attempt to find the core library function which leads to these instructions
# being called, and then add those core library functions to the list of panic functions.
#
# The output of this script is *not* stable.
#
# Usage: find_panics.py ELF [--riscv]
#
# Requires Python 3.7+
#
# Author: Hudson Ayers <[email protected]>
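#
# Example invocation (the ELF path below is hypothetical and the output is
# abbreviated to the summary lines printed by main()):
#
#   $ ./find_panics.py target/thumbv7em-none-eabi/release/kernel.elf
#   Tock panic report for target/thumbv7em-none-eabi/release/kernel.elf
#   num_panics: ...
#
# For a RISC-V kernel, pass --riscv so the RISC-V objdump is used instead.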
import argparse
import platform
import re
import subprocess
import sys
if platform.system() == 'Darwin':
DWARFDUMP = "dwarfdump"
elif platform.system() == 'Linux':
DWARFDUMP = "llvm-dwarfdump"
else:
raise NotImplementedError("Unknown platform")
# Note: In practice, GCC objdumps are better at symbol resolution than LLVM objdump
ARM_OBJDUMP = "arm-none-eabi-objdump"
RISCV_OBJDUMP = "riscv64-unknown-elf-objdump"
# TODO: For all functions below the initial batch, it would likely be preferable to
# automatically populate the list with additional functions in the core library using
# debug info. For now, however, I do this manually.
panic_functions = [
"expect_failed",
"unwrap_failed",
"panic_bounds_check",
"slice_index_order_fail",
"slice_end_index_len_fail",
"slice_start_index_len_fail",
"slice17len_mismatch_fail",
"str16slice_error_fail",
"copy_from_slice17len_mismatch_fail",
"copy_from_slice17",
"panicking5panic",
# below are functions I have manually traced up from the above, more "core" panics, on a riscv binary with a low inline threshold
"6unwrap17",
"6expect17",
"11copy_within17",
"core..fmt..builders..PadAdapter", # calls slice_error_fail
"11copy_within17", # calls panicking::panic
"write_char", # calls PadAdapter one above
"write_str", # calls write_char
"printable5check", # calls slice_index_order_fail
"char$u20$as$u20$core..fmt..Debug", # calls printable5check
"GenericRadix7fmt_int", # calls slice_start_index_len_fail
# below are functions I manually traced on an arm binary,
# with a somewhat higher inline threshold.
"10unwrap_err17h6",
"13is_whitespace17",
"$u20$core..slice..index..SliceIndex$LT",
"core..iter..adapters..filter..Filter$LT$I$C$P$GT$$u20$as$u20$core..iter",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17h4c77379bd26a525bE",
"_ZN4core5slice5index74_$LT$impl$u20$core..ops..index..Index$LT$I$GT$$u20$for$u20$$u5b$T$u5d$$GT$5index17hfe7e43aa2388c47bE",
]
# Pre-compiled regex lookups
dw_at_file_re = re.compile(r""".*(?:DW_AT_call_file|DW_AT_decl_file).*""")
dw_at_line_re = re.compile(r""".*(?:DW_AT_call_line|DW_AT_decl_line).*""")
line_info_re = re.compile(r""".*Line info.*""")
abstract_origin_re = re.compile(r""".*DW_AT_abstract_origin.*""")
dw_at_linkage_name_re = re.compile(r""".*DW_AT_linkage_name.*""")
dw_at_name_re = re.compile(r""".*DW_AT_name.*""")
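# Note (added for clarity): the string splitting used below assumes dwarfdump
# prints attribute lines shaped roughly like `DW_AT_call_file ("/path/to/file.rs")`
# and `DW_AT_call_line (123)`; any concrete path shown here is hypothetical.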
def matches_panic_funcs(name):
"""If the passed name contains one of the known panic_functions,
return the match
"""
for func in panic_functions:
if func in name:
return func
return ""
def linkage_or_origin_all_parents(elf, addr, linkage=False):
"""Returns a list of the abstract origin or linkage of all parents of the dwarf
location for the passed address
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
regex = abstract_origin_re
if linkage:
regex = dw_at_linkage_name_re
matches = re.findall(regex, dwarfdump)
def getFunction(line):
return line.strip().split('"')[1]
origins = list(map(getFunction, matches))
return origins
def any_origin_matches_panic_func(elf, addr):
"""returns name if any origin for the passed addr matches one
of the functions in the panic_functions array
"""
origins = linkage_or_origin_all_parents(elf, addr)
for origin in origins:
name = matches_panic_funcs(origin)
if name:
return name
return ""
def any_linkage_matches_panic_func(elf, addr):
"""returns True + name if any linkage for the passed addr matches one
of the functions in the panic_functions array
"""
linkages = linkage_or_origin_all_parents(elf, addr, True)
for linkage in linkages:
name = matches_panic_funcs(linkage)
if name:
return name
return ""
def check_for_source_in_parent(elf, addr):
"""Takes in a dwarfdump lookup including parents of the source DWARF
location, returns the first parent with a call file not in
the core library. If found, this often indicates the source of the panic
in the Tock source code.
"""
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, "-p", elf), capture_output=True, text=True
)
dwarfdump = result.stdout
matches = re.findall(dw_at_file_re, dwarfdump)
def getFile(line):
return line.strip().split('"')[1]
source_files = list(map(getFile, matches))
for (i, f) in enumerate(source_files[::-1]):
if "/core/" not in f:
line_matches = re.findall(dw_at_line_re, dwarfdump)
def getLine(line):
return line.strip().split("(")[1].split(")")[0]
source_lines = list(map(getLine, line_matches))
source_line = source_lines[::-1][i]
return (f, source_line)
return ("", "")
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("ELF", help="ELF file for analysis")
parser.add_argument(
"--verbose",
"-v",
action="store_true",
help="Output additional DWARF info for each panic location in the binary",
)
parser.add_argument("--riscv", action="store_true", help="Use risc-v based objdump")
return parser.parse_args()
# Find all addresses that panic, and get basic dwarf info on those addresses
def find_all_panics(objdump, elf, is_riscv):
panic_list = []
within_core_panic_list = []
no_info_panic_list = []
result = subprocess.run((objdump, "-d", elf), capture_output=True, text=True)
objdump_out = result.stdout
for function in panic_functions:
function_re = re.compile(".*:.*#.*" + function + ".*")
if not is_riscv:
# Arm-none-eabi-objdump uses ';' for comments instead of '#'
function_re = re.compile(".*:.*<.*" + function + ".*")
# TODO: arm elfs include loads of offsets from symbols in such a way that these lines
# are matched by this regex. In general, these loads occur within the instruction stream
# associated with the symbol at hand, and will usually be excluded by logic later in
# this function. This leads to `within_core_panic_list` and `no_info_panic_list`
# containing more "panics" than when analyzing a risc-v binary. We could fix this
# by matching *only* on functions with instructions that actually jump to a new symbol,
# but this would require a list of such instructions for each architecture. However
# as written it actually lets us identify panics which are jumped to via addresses
# stored in registers, which may actually catch additional valid panics.
matches = re.findall(function_re, objdump_out)
def getAddr(line):
return line.strip().split(":")[0]
addrs = list(map(getAddr, matches))
for addr in addrs:
result = subprocess.run(
(DWARFDUMP, "--lookup=0x" + addr, elf), capture_output=True, text=True
)
dwarfdump = result.stdout
dw_at_file = re.search(dw_at_file_re, dwarfdump)
dw_at_line = re.search(dw_at_line_re, dwarfdump)
line_info = re.search(line_info_re, dwarfdump)
abstract_origin = re.search(abstract_origin_re, dwarfdump)
linkage_name = re.search(dw_at_linkage_name_re, dwarfdump)
file_string = ""
line_string = ""
line_info_string = ""
abstract_origin_string = ""
linkage_name_string = ""
if dw_at_file:
file_string = dw_at_file.group(0).strip()
line_string = dw_at_line.group(0).strip()
panicinfo = {}
panicinfo["addr"] = addr
panicinfo["function"] = function
if line_info:
line_info_string = line_info.group(0).strip()
panicinfo["line_info"] = line_info_string
if abstract_origin:
abstract_origin_string = abstract_origin.group(0).strip()
if linkage_name:
linkage_name_string = linkage_name.group(0).strip()
if "DW_AT_call_file" in file_string and "DW_AT_decl_file" in file_string:
raise RuntimeError("I misunderstand DWARF")
if "DW_AT_call_file" in file_string or "DW_AT_decl_file" in file_string:
filename = file_string.split('"')[1]
line_num = line_string.split("(")[1].split(")")[0]
if "DW_AT_call_file" in file_string:
panicinfo["call_file"] = filename
panicinfo["call_line"] = line_num
if "DW_AT_decl_file" in file_string:
panicinfo["decl_file"] = filename
panicinfo["decl_line"] = line_num
if not "/core/" in filename:
if not "closure" in abstract_origin_string:
panicinfo["best_guess_source"] = "call/decl"
else:
panicinfo["best_guess_source"] = "call-closure-line-info"
panic_list.append(panicinfo)
continue
else: # 'core' in filename
(parent_file, parent_line) = check_for_source_in_parent(elf, addr)
if parent_file:
panicinfo["parent_call_file"] = parent_file
panicinfo["parent_call_line"] = parent_line
panicinfo["best_guess_source"] = "parent"
panic_list.append(panicinfo)
continue
elif not abstract_origin and not linkage_name:
no_info_panic_list.append(panicinfo)
continue
elif abstract_origin:
if "core" in abstract_origin_string:
name = matches_panic_funcs(abstract_origin_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
name2 = any_origin_matches_panic_func(elf, addr)
name3 = any_linkage_matches_panic_func(elf, addr)
if name2:
within_core_panic_list.append(panicinfo)
continue
elif name3:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
continue
elif "closure" in abstract_origin_string:
# not in core, in closure, line info is probably sufficient
panicinfo["best_guess_source"] = "lineinfo"
panic_list.append(panicinfo)
continue
else:
# i have not seen this happen -- core in file, not closure, origin not core
raise RuntimeError("Unhandled")
if linkage_name:
name = matches_panic_funcs(linkage_name_string)
if name:
within_core_panic_list.append(panicinfo)
continue
else:
no_info_panic_list.append(panicinfo)
print(
"Failed to match panic but we probably have enough info to trace it up. Linkage name: {}, addr: {}".format(
linkage_name_string, addr
)
)
continue
            no_info_panic_list.append(panicinfo)
print("did not find source for panic: {}".format(addr))
continue
elif abstract_origin:
origin = abstract_origin_string.split('"')[1]
panicinfo["abstract_origin"] = origin
if "core" in origin:
if matches_panic_funcs(origin):
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
print(
"Probably could add this origin or one of its parents to the panic function list: {}".format(
abstract_origin_string
)
)
continue
else:
panicinfo["best_guess_source"] = "abstract_origin + line"
panic_list.append(panicinfo)
continue
else:
# This gets hit for OUTLINED_FUNCTION_XX a bunch on ARM
try:
dw_at_name_string = re.findall(dw_at_name_re, dwarfdump)[
-1
].strip() # see multiple matches for this string sometimes
function_name = dw_at_name_string.split('"')[1]
if "OUTLINED_FUNCTION_" in function_name:
                        # This is a common pattern where panicking paths are repeated in many
                        # places throughout the binary, and LLVM's optimizer outlines the repeated code.
                        # Let's add these to the list of panicking functions, dynamically so this is resilient to
# changes in the binary.
if function_name not in panic_functions:
# don't double insert
panic_functions.append(
function_name + ">"
) # so FUNCTION_22 does not catch FUNCTION_222
within_core_panic_list.append(panicinfo)
continue
no_info_panic_list.append(panicinfo)
continue
except:
# There seem to be a places where lookup fails completely
# Not easy to recover, log these and continue on.
no_info_panic_list.append(panicinfo)
continue
raise RuntimeError("BUG: Should not reach here")
return (panic_list, within_core_panic_list, no_info_panic_list)
def pretty_print(panicinfo):
if panicinfo["best_guess_source"] == "call/decl":
try:
print(
"\t{} -- {}:{}".format(
panicinfo["addr"], panicinfo["call_file"], panicinfo["call_line"]
)
)
except:
print(
"\t{} -- in function starting at {}:{}".format(
panicinfo["addr"], panicinfo["decl_file"], panicinfo["decl_line"]
)
)
elif panicinfo["best_guess_source"] == "parent":
print(
"\t{} -- at or in function starting at {}:{}".format(
panicinfo["addr"],
panicinfo["parent_call_file"],
panicinfo["parent_call_line"],
)
)
elif panicinfo["best_guess_source"] == "lineinfo":
print(
"\t{} -- in closure, try: {}".format(
panicinfo["addr"], panicinfo["line_info"]
)
)
elif panicinfo["best_guess_source"] == "abstract_origin + line":
print(
"\t{} -- line_info: {} from origin :{}".format(
panicinfo["addr"], panicinfo["line_info"], panicinfo["abstract_origin"]
)
)
elif panicinfo["best_guess_source"] == "call-closure-line-info":
print(
"\t{} -- in closure starting on line_info: {}".format(
panicinfo["addr"], panicinfo["line_info"]
)
)
else:
raise RuntimeError("Missing best guess source: {}".format(panicinfo))
def main():
args = parse_args()
if sys.version_info.minor < 7:
print("This tool requires Python 3.7+")
return -1
print("Tock panic report for " + args.ELF)
objdump = ARM_OBJDUMP
if args.riscv:
objdump = RISCV_OBJDUMP
(panic_list, within_core_panic_list, no_info_panic_list) = find_all_panics(
objdump, args.ELF, args.riscv
)
print("num_panics: {}".format(len(panic_list)))
buckets_list = {}
for f in panic_functions:
buckets_list[f] = []
for panic in panic_list:
buckets_list[panic["function"]].append(panic)
for f, l in buckets_list.items():
if len(l) > 0:
print("{}: {}".format(f, len(l)))
for p in l:
pretty_print(p)
if args.verbose:
print(p)
print()
print("num panics in core ignored: {}".format(len(within_core_panic_list)))
print("num panics for which no info available: {}".format(len(no_info_panic_list)))
if args.verbose:
print(
"If more debug info is needed, run dwarfdump directly on the address in question."
)
if __name__ == "__main__":
main()
| [
7,
10,
11,
12,
13
] |
1,337 | 9b8db3407313a3e39d429b7c10897fc447fcdc27 | <mask token>
class Solution(object):
<mask token>
| <mask token>
class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
def dfs(i, j, word, visited=set()):
if not word:
return True
for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:
if board[ni][nj] == word[0]:
if dfs(ni, nj, word[1:], visited | {(ni, nj)}):
return True
return False
m, n = len(board), len(board[0])
for i in range(m):
for j in range(n):
if board[i][j] == word[0]:
if dfs(i, j, word[1:], set([(i, j)])):
return True
return False
| class Solution(object):
<mask token>
class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
def dfs(i, j, word, visited=set()):
if not word:
return True
for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:
if board[ni][nj] == word[0]:
if dfs(ni, nj, word[1:], visited | {(ni, nj)}):
return True
return False
m, n = len(board), len(board[0])
for i in range(m):
for j in range(n):
if board[i][j] == word[0]:
if dfs(i, j, word[1:], set([(i, j)])):
return True
return False
| class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
if not board or not board[0]:
return not word
self.length = len(word)
def hasPathCore(row, col, depth=0):
if self.length == depth:
return True
hasPath = False
if 0 <= row and row < len(board) and 0 <= col and col < len(board
[0]) and board[row][col] == word[depth] and not visited[row][
col]:
visited[row][col] = True
up = hasPathCore(row - 1, col, depth + 1)
down = hasPathCore(row + 1, col, depth + 1)
left = hasPathCore(row, col - 1, depth + 1)
right = hasPathCore(row, col + 1, depth + 1)
hasPath = up or down or left or right
if not hasPath:
visited[row][col] = False
return hasPath
visited = [([False] * len(board[0])) for _ in range(len(board))]
for i in range(len(board)):
for j in range(len(board[0])):
if hasPathCore(i, j, 0):
return True
return False
class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
def dfs(i, j, word, visited=set()):
if not word:
return True
for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:
if board[ni][nj] == word[0]:
if dfs(ni, nj, word[1:], visited | {(ni, nj)}):
return True
return False
m, n = len(board), len(board[0])
for i in range(m):
for j in range(n):
if board[i][j] == word[0]:
if dfs(i, j, word[1:], set([(i, j)])):
return True
return False
| class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
if not board or not board[0]: return not word
self.length = len(word)
def hasPathCore(row, col, depth=0):
if self.length == depth:
return True
hasPath = False
if 0 <= row and row < len(board) and \
0 <= col and col < len(board[0]) and \
board[row][col] == word[depth] and \
not visited[row][col]:
visited[row][col] = True
up = hasPathCore(row - 1, col, depth + 1)
down = hasPathCore(row + 1, col, depth + 1)
left = hasPathCore(row, col - 1, depth + 1)
right = hasPathCore(row, col + 1, depth + 1)
hasPath = up or down or left or right
if not hasPath:
visited[row][col] = False
return hasPath
visited = [[False] * len(board[0]) for _ in range(len(board))]
for i in range(len(board)):
for j in range(len(board[0])):
if hasPathCore(i, j, 0): return True
return False
# Python, DFS solution
class Solution(object):
def exist(self, board, word):
"""
:type board: List[List[str]]
:type word: str
:rtype: bool
"""
def dfs(i, j, word, visited=set()):
# Base case
if not word:
return True
for ni, nj in ((i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)):
                # Search the adjacent cells that have not been visited yet
if 0 <= ni < m and 0 <= nj < n and (ni, nj) not in visited:
                    # The character at this cell matches the first character of word
if board[ni][nj] == word[0]:
                        # A successful direction was found in the next level, so return True immediately
if dfs(ni, nj, word[1:], visited | {(ni, nj)}):
return True
return False
m, n = len(board), len(board[0])
for i in range(m):
for j in range(n):
                # The first cell matches, so descend into the DFS for the rest of the word
if board[i][j] == word[0]:
                    # If the rest of the word still matches, return True
if dfs(i, j, word[1:], set([(i, j)])):
return True
return False
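# A minimal usage sketch (added illustration, not part of the original snippet).
# The board and the two target words below are assumed example inputs, used only
# to exercise the DFS solution above.
if __name__ == '__main__':
    grid = [['A', 'B', 'C', 'E'], ['S', 'F', 'C', 'S'], ['A', 'D', 'E', 'E']]
    print(Solution().exist(grid, 'ABCCED'))  # expected: True
    print(Solution().exist(grid, 'ABCB'))    # expected: False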
| [
1,
2,
3,
4,
5
] |
1,338 | 01ede703e36268dc9b3331b21726c24674a43817 | <mask token>
| <mask token>
for i in range(0, 10):
lista.append(int(input()))
while z < j:
c = lista[z]
lista[z] = lista[j]
lista[j] = c
z += 1
j -= 1
print(lista)
| lista = []
z = 0
j = 9
for i in range(0, 10):
lista.append(int(input()))
while z < j:
c = lista[z]
lista[z] = lista[j]
lista[j] = c
z += 1
j -= 1
print(lista)
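# Added note: the two-pointer swap loop above reverses lista in place; the same
# result could also be obtained with lista.reverse() or lista[::-1].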
| null | null | [
0,
1,
2
] |
1,339 | 2c960685eaa14861c1c5b3ddb38b366a3e0e8e86 | #!/usr/bin/evn python
#-*-coding:utf8 -*-
import os, sys, json
class settings(object):
filename = ''
config = {}
def __init__(self):
self.DEBUG = os.environ.get('RdsMonitor_DEBUG', 0)
def get_settings(self):
"""Parses the settings from redis-live.conf.
"""
# TODO: Consider YAML. Human writable, machine readable.
with open(self.filename) as fp:
try:
return json.load(fp)
except Exception, e:
if self.DEBUG:
print >>sys.stderr, 'get_settings exception:', e
return {}
def get_redis_servers(self):
if self.DEBUG:
print >>sys.stderr, "get_redis_servers config:%s"%self.config
return self.config.get("RedisServers", '')
def get_redis_stats_server(self):
if self.DEBUG:
print >>sys.stderr, "get_redis_stats_server config:%s"%self.config
return self.config.get("RedisStatsServer", '')
    def get_data_store_type(self):
        if self.DEBUG:
            print >>sys.stderr, "get_data_store_type config:%s"%self.config
return self.config.get("DataStoreType", '')
    def get_sqlite_stats_store(self):
        if self.DEBUG:
            print >>sys.stderr, "get_sqlite_stats_store config:%s"%self.config
return self.config.get("SqliteStatsStore", '') | null | null | null | null | [
0
] |
1,340 | 5b33615e1890631bac68801310e4b606ac41cb13 | <mask token>
class TestTimeDehydration(_TestTemporalDehydrationV1):
@pytest.fixture
def hydration_handler(self):
return HydrationHandler()
<mask token>
<mask token>
def test_pandas_date_time_fixed_offset(self, assert_transforms):
dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')
assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))
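        # Note (added for clarity, not part of the original test): the first
        # field of this structure is seconds since the Unix epoch in UTC
        # (2018-10-12T11:37:41+01:00 == 1539340661), the second is the
        # nanosecond component, and the third is the UTC offset in seconds
        # (+01:00 == 3600).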
def test_date_time_fixed_negative_offset(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset
(-60))
assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))
<mask token>
<mask token>
<mask token>
def test_native_date_time_zone_id(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)
dt = pytz.timezone('Europe/Stockholm').localize(dt)
assert_transforms(dt, Structure(b'i', 1539337061, 474716000,
'Europe/Stockholm'))
@pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(
'2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (
1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 *
24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((
1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.
Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=
'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,
'Europe/London'))))
def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):
assert_transforms(dt, Structure(b'i', *fields))
| <mask token>
class TestTimeDehydration(_TestTemporalDehydrationV1):
@pytest.fixture
def hydration_handler(self):
return HydrationHandler()
<mask token>
<mask token>
def test_pandas_date_time_fixed_offset(self, assert_transforms):
dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')
assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))
def test_date_time_fixed_negative_offset(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset
(-60))
assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))
def test_native_date_time_fixed_negative_offset(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.
FixedOffset(-60))
assert_transforms(dt, Structure(b'I', 1539347861, 474716000, -3600))
<mask token>
<mask token>
def test_native_date_time_zone_id(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)
dt = pytz.timezone('Europe/Stockholm').localize(dt)
assert_transforms(dt, Structure(b'i', 1539337061, 474716000,
'Europe/Stockholm'))
@pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(
'2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (
1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 *
24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((
1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.
Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=
'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,
'Europe/London'))))
def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):
assert_transforms(dt, Structure(b'i', *fields))
| <mask token>
class TestTimeDehydration(_TestTemporalDehydrationV1):
@pytest.fixture
def hydration_handler(self):
return HydrationHandler()
<mask token>
def test_native_date_time_fixed_offset(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.
FixedOffset(60))
assert_transforms(dt, Structure(b'I', 1539340661, 474716000, 3600))
def test_pandas_date_time_fixed_offset(self, assert_transforms):
dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')
assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))
def test_date_time_fixed_negative_offset(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset
(-60))
assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))
def test_native_date_time_fixed_negative_offset(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.
FixedOffset(-60))
assert_transforms(dt, Structure(b'I', 1539347861, 474716000, -3600))
<mask token>
<mask token>
def test_native_date_time_zone_id(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)
dt = pytz.timezone('Europe/Stockholm').localize(dt)
assert_transforms(dt, Structure(b'i', 1539337061, 474716000,
'Europe/Stockholm'))
@pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(
'2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (
1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 *
24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((
1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.
Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=
'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,
'Europe/London'))))
def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):
assert_transforms(dt, Structure(b'i', *fields))
| <mask token>
class TestTimeDehydration(_TestTemporalDehydrationV1):
@pytest.fixture
def hydration_handler(self):
return HydrationHandler()
def test_date_time_fixed_offset(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset(60)
)
assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))
def test_native_date_time_fixed_offset(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.
FixedOffset(60))
assert_transforms(dt, Structure(b'I', 1539340661, 474716000, 3600))
def test_pandas_date_time_fixed_offset(self, assert_transforms):
dt = pd.Timestamp('2018-10-12T11:37:41.474716862+0100')
assert_transforms(dt, Structure(b'I', 1539340661, 474716862, 3600))
def test_date_time_fixed_negative_offset(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862, pytz.FixedOffset
(-60))
assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))
def test_native_date_time_fixed_negative_offset(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716, pytz.
FixedOffset(-60))
assert_transforms(dt, Structure(b'I', 1539347861, 474716000, -3600))
def test_pandas_date_time_fixed_negative_offset(self, assert_transforms):
dt = pd.Timestamp('2018-10-12T11:37:41.474716862-0100')
assert_transforms(dt, Structure(b'I', 1539347861, 474716862, -3600))
def test_date_time_zone_id(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862)
dt = pytz.timezone('Europe/Stockholm').localize(dt)
assert_transforms(dt, Structure(b'i', 1539337061, 474716862,
'Europe/Stockholm'))
def test_native_date_time_zone_id(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)
dt = pytz.timezone('Europe/Stockholm').localize(dt)
assert_transforms(dt, Structure(b'i', 1539337061, 474716000,
'Europe/Stockholm'))
@pytest.mark.parametrize(('dt', 'fields'), ((pd.Timestamp(
'2018-10-12T11:37:41.474716862+0200', tz='Europe/Stockholm'), (
1539337061, 474716862, 'Europe/Stockholm')), (pd.Timestamp((1032 *
24 + 2) * 3600 * 1000000000 + 1001000001, tz='Europe/London'), ((
1032 * 24 + 2) * 3600 + 1, 1000001, 'Europe/London')), (pd.
Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001, tz=
'Europe/London'), ((1032 * 24 + 1) * 3600 + 1, 1000001,
'Europe/London'))))
def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):
assert_transforms(dt, Structure(b'i', *fields))
| # Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pandas as pd
import pytest
import pytz
from neo4j._codec.hydration.v2 import HydrationHandler
from neo4j._codec.packstream import Structure
from neo4j.time import DateTime
from ..v1.test_temporal_dehydration import (
TestTimeDehydration as _TestTemporalDehydrationV1,
)
class TestTimeDehydration(_TestTemporalDehydrationV1):
@pytest.fixture
def hydration_handler(self):
return HydrationHandler()
def test_date_time_fixed_offset(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862,
pytz.FixedOffset(60))
assert_transforms(
dt,
Structure(b"I", 1539340661, 474716862, 3600)
)
def test_native_date_time_fixed_offset(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716,
pytz.FixedOffset(60))
assert_transforms(
dt,
Structure(b"I", 1539340661, 474716000, 3600)
)
def test_pandas_date_time_fixed_offset(self, assert_transforms):
dt = pd.Timestamp("2018-10-12T11:37:41.474716862+0100")
assert_transforms(dt, Structure(b"I", 1539340661, 474716862, 3600))
def test_date_time_fixed_negative_offset(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862,
pytz.FixedOffset(-60))
assert_transforms(
dt,
Structure(b"I", 1539347861, 474716862, -3600)
)
def test_native_date_time_fixed_negative_offset(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716,
pytz.FixedOffset(-60))
assert_transforms(
dt,
Structure(b"I", 1539347861, 474716000, -3600)
)
def test_pandas_date_time_fixed_negative_offset(self, assert_transforms):
dt = pd.Timestamp("2018-10-12T11:37:41.474716862-0100")
assert_transforms(dt, Structure(b"I", 1539347861, 474716862, -3600))
def test_date_time_zone_id(self, assert_transforms):
dt = DateTime(2018, 10, 12, 11, 37, 41, 474716862)
dt = pytz.timezone("Europe/Stockholm").localize(dt)
# offset should be UTC+2 (7200 seconds)
assert_transforms(
dt,
Structure(b"i", 1539337061, 474716862, "Europe/Stockholm")
)
def test_native_date_time_zone_id(self, assert_transforms):
dt = datetime.datetime(2018, 10, 12, 11, 37, 41, 474716)
dt = pytz.timezone("Europe/Stockholm").localize(dt)
# offset should be UTC+2 (7200 seconds)
assert_transforms(
dt,
Structure(b"i", 1539337061, 474716000, "Europe/Stockholm")
)
@pytest.mark.parametrize(("dt", "fields"), (
(
pd.Timestamp("2018-10-12T11:37:41.474716862+0200",
tz="Europe/Stockholm"),
(1539337061, 474716862, "Europe/Stockholm"),
),
(
# 1972-10-29 02:00:01.001000001+0100 pre DST change
pd.Timestamp((1032 * 24 + 2) * 3600 * 1000000000 + 1001000001,
tz="Europe/London"),
((1032 * 24 + 2) * 3600 + 1, 1000001, "Europe/London"),
),
(
# 1972-10-29 02:00:01.001000001+0000 post DST change
pd.Timestamp((1032 * 24 + 1) * 3600 * 1000000000 + 1001000001,
tz="Europe/London"),
((1032 * 24 + 1) * 3600 + 1, 1000001, "Europe/London"),
)
))
def test_pandas_date_time_zone_id(self, dt, fields, assert_transforms):
assert_transforms(dt, Structure(b"i", *fields))
| [
6,
7,
8,
11,
13
] |
1,341 | 33867677611ceb757f6973eb70368c9f75f3ce92 | # system
import os
import numpy as np
import random
import copy
import time
# ROS
import rospy
import std_msgs.msg
import sensor_msgs.msg
import geometry_msgs.msg
import visualization_msgs.msg
import tf2_ros
import rosbag
import actionlib
from actionlib_msgs.msg import GoalStatus
import ros_numpy
# spartan ROS
import spartan_grasp_msgs.msg
import spartan_grasp_msgs.srv
import pdc_ros_msgs.msg
import fusion_server.msg
import fusion_server.srv
# spartan
import spartan.utils.utils as spartanUtils
import spartan.utils.ros_utils as rosUtils
import spartan.utils.director_utils as director_utils
import spartan.utils.control_utils as control_utils
from spartan.manipulation.schunk_driver import SchunkDriver
import fusion_server
from fusion_server.srv import *
import spartan.manipulation.gripper
from spartan.poser.poser_visualizer import PoserVisualizer
from spartan.manipulation.grasp_data import GraspData
from spartan.manipulation.object_manipulation import ObjectManipulation
from spartan.manipulation.category_manipulation_type import CategoryManipulationType
from spartan.utils.director_ros_visualizer import DirectorROSVisualizer
# director
from director import transformUtils
from director import visualization as vis
import director.objectmodel as om
import director.vtkNumpy as vnp
from director.debugVis import DebugData
import director.vtkAll as vtk
import director.segmentation as segmentation
import director.filterUtils as filterUtils
USING_DIRECTOR = True
if USING_DIRECTOR:
from spartan.utils.taskrunner import TaskRunner
MUG_RACK_CONFIG_FILE = os.path.join(spartanUtils.getSpartanSourceDir(), "src/catkin_projects/station_config/RLG_iiwa_1/manipulation/mug_rack.yaml")
# IF true limits you to this speed
DEBUG_SPEED = 20 # degrees per second
USE_DEBUG_SPEED = False
MANIP_TYPE = CategoryManipulationType.SHOE_ON_RACK
# MANIP_TYPE = CategoryManipulationType.MUG_ON_SHELF_3D
EXPERIMENT_MODE = True
class GraspSupervisorState(object):
STATUS_LIST = ["ABOVE_TABLE", "PRE_GRASP", "GRASP", "IK_FAILED", "NO_GRASP_FOUND", "GRASP_FOUND", "OBJECT_IN_GRIPPER", "GRASP_FAILED", "SAFETY_CHECK_FAILED", "PLANNING_FAILED", "FAILED"]
def __init__(self):
self.setPickFront()
self.clear()
def setPickFront(self):
self.graspingLocation = "front"
self.stowLocation = "left"
def setPickLeft(self):
self.graspingLocation = "left"
self.stowLocation = "front"
@property
def grasp_data(self):
return self._grasp_data
@grasp_data.setter
def grasp_data(self, value):
"""
:param value: GraspData
:return:
"""
self._grasp_data = value
@property
def cache(self):
return self._cache
def clear(self):
"""
Clear any stateful elements of the state
:return:
"""
self._grasp_data = None
self._status = None
self._cache = dict()
self._trajectory_result = None
def clear_cache(self):
"""
Clears only the cache
:return:
"""
self._cache = dict()
def set_status(self, status):
assert status in GraspSupervisorState.STATUS_LIST
self._status = status
@property
def status(self):
return self._status
@status.setter
def status(self, status):
assert status in GraspSupervisorState.STATUS_LIST
self._status = status
def set_status_ik_failed(self):
self.status = "IK_FAILED"
def print_status(self):
"""
Prints the status
:return:
"""
if self._status is None:
print "Current Status: None"
else:
print "Current Status: " + self._status
class GraspSupervisor(object):
def __init__(self, graspingParamsFile=None, cameraSerialNumber="carmine_1", tfBuffer=None):
self.graspingParamsFile = graspingParamsFile
self.reloadParams()
self.cameraSerialNumber = cameraSerialNumber
self.cameraName = 'camera_' + str(cameraSerialNumber)
self.pointCloudTopic = '/' + str(self.cameraName) + '/depth/points'
self.rgbImageTopic = '/' + str(self.cameraName) + '/rgb/image_rect_color'
self.depthImageTopic = '/' + str(self.cameraName) + '/depth_registered/sw_registered/image_rect'
self.camera_info_topic = '/' + str(self.cameraName) + '/rgb/camera_info'
self.graspFrameName = 'base'
self.ggcnn_grasp_frame_camera_axes_id = "ggcnn_grasp"
self.depthOpticalFrameName = self.cameraName + "_depth_optical_frame"
self.rgbOpticalFrameName = self.cameraName + "_rgb_optical_frame"
self.state = GraspSupervisorState()
self.robotService = rosUtils.RobotService.makeKukaRobotService()
self.robotService._use_debug_speed = USE_DEBUG_SPEED
self.robotService._debug_speed = DEBUG_SPEED
self.usingDirector = True
self.tfBuffer = tfBuffer # don't create a new one if it is passed in
self.setupConfig()
self._grasp_point = None # stores the grasp point to be used in grasp3DLocation
self._cache = dict()
self._gripper = spartan.manipulation.gripper.Gripper.make_schunk_gripper()
self._poser_visualizer = PoserVisualizer.make_default()
self.poser_result = None
self._object_manipulation = None
self._category_manip = None # can be assigned later as needed
self._shoe_manipulation_counter = 0
filename = os.path.join(os.path.join(spartanUtils.getSpartanSourceDir(), 'src/catkin_projects/station_config/RLG_iiwa_1/stored_poses.yaml'))
self._stored_poses_director = spartanUtils.getDictFromYamlFilename(filename)
if USING_DIRECTOR:
self.taskRunner = TaskRunner()
self.taskRunner.callOnThread(self.setup)
else:
self.setup()
self.debugMode = False
if self.debugMode:
print "\n\n----------WARNING GRASP SUPERVISOR IN DEBUG MODE----------\n"
# if self.debugMode:
# self.pointCloudListMsg = GraspSupervisor.getDefaultPointCloudListMsg()
def reloadParams(self):
self.graspingParams = spartanUtils.getDictFromYamlFilename(self.graspingParamsFile)
def setup(self):
self.setupSubscribers()
self.setupPublishers()
self.setupTF()
self.setupROSActions()
self.gripperDriver = SchunkDriver()
self.setup_visualization()
def _clear_cache(self):
"""
Clears our local cache of variables
:return:
"""
self._cache = dict()
def setupDirector(self):
self.taskRunner.callOnThread(self.setup)
def setupConfig(self):
self.config = dict()
self.config['base_frame_id'] = "base"
self.config['end_effector_frame_id'] = "iiwa_link_ee"
self.config['pick_up_distance'] = 0.25 # distance to move above the table after grabbing the object
self.config["sleep_time_for_sensor_collect"] = 0.1
self.config['scan'] = dict()
self.config['scan']['pose_list'] = ['scan_left_close', 'scan_above_table', 'scan_right']
self.config['scan']['joint_speed'] = 45
self.config['grasp_speed'] = 20
normal_speed = 30
self.config['speed'] = dict()
self.config['speed']['stow'] = normal_speed
self.config['speed']['pre_grasp'] = normal_speed
self.config['speed']['grasp'] = 10
self.config['home_pose_name'] = 'above_table_pre_grasp'
self.config['grasp_nominal_direction'] = np.array([1, 0, 0]) # x forwards
self.config['grasp_to_ee'] = dict()
self.config["object_interaction"] = dict()
self.config["object_interaction"]["speed"] = 10
self.config["object_interaction"]["rotate_speed"] = 30
self.config["object_interaction"]["pickup_distance"] = 0.15
# self.config["object_interaction"]["drop_distance_above_grasp"] = 0.035 # good for shoes
self.config["object_interaction"]["drop_distance_above_grasp"] = 0.002 # good for mugs
self.config["object_interaction"]["drop_location"] = [0.65, 0, 0.5] # z coordinate is overwritten later
self.graspToIiwaLinkEE = spartanUtils.transformFromPose(
self.graspingParams['gripper_palm_to_ee'])
self.iiwaLinkEEToGraspFrame = self.graspToIiwaLinkEE.GetLinearInverse()
self.gripper_fingertip_to_iiwa_link_ee = spartanUtils.transformFromPose(
self.graspingParams['gripper_fingertip_to_ee'])
self.T_gripper_fingertip__iiwa_link_ee = self.gripper_fingertip_to_iiwa_link_ee.GetLinearInverse()
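# the pre-grasp frame sits 15 cm back along the grasp frame's x-axis, with no rotation (identity quaternion)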
pos = [-0.15, 0, 0]
quat = [1, 0, 0, 0]
self.preGraspToGraspTransform = transformUtils.transformFromPose(pos, quat)
def setupSubscribers(self):
self.pointCloudSubscriber = rosUtils.SimpleSubscriber(self.pointCloudTopic, sensor_msgs.msg.PointCloud2)
self.rgbImageSubscriber = rosUtils.SimpleSubscriber(self.rgbImageTopic, sensor_msgs.msg.Image)
self.depthImageSubscriber = rosUtils.SimpleSubscriber(self.depthImageTopic, sensor_msgs.msg.Image)
self.camera_info_subscriber = rosUtils.SimpleSubscriber(self.camera_info_topic, sensor_msgs.msg.CameraInfo)
self.pointCloudSubscriber.start()
self.rgbImageSubscriber.start()
self.depthImageSubscriber.start()
self.camera_info_subscriber.start()
self.clicked_point_subscriber = rosUtils.SimpleSubscriber("/clicked_point", geometry_msgs.msg.PointStamped,
self.on_clicked_point)
self.clicked_point_subscriber.start()
self.ggcnn_subscriber = rosUtils.SimpleSubscriber('ggcnn/out/command', std_msgs.msg.Float32MultiArray)
def setupPublishers(self):
"""
Sets up some ROS publishers
"""
self.rviz_marker_publisher = rospy.Publisher("/spartan_grasp/visualization_marker",
visualization_msgs.msg.Marker, queue_size=1)
self.rviz_marker_array_publisher = rospy.Publisher("/grasp_supervisor/visualization_marker_array",
visualization_msgs.msg.MarkerArray, queue_size=1)
self.grasp_pointcloud_publisher = rospy.Publisher("/grasp_supervisor/points", sensor_msgs.msg.PointCloud2,
queue_size=1)
def setup_visualization(self):
self._vis_container = om.getOrCreateContainer("grasp supervisor")
def on_clicked_point(self, clicked_point_msg):
"""
Visualizes the clicked point in rviz
"""
print "received a /clicked_point message . . . visualizing"
pos = clicked_point_msg.point
x, y, z = pos.x, pos.y, pos.z
marker = visualization_msgs.msg.Marker()
marker.header.frame_id = "base"
marker.header.stamp = rospy.Time.now()
marker.ns = "clicked_point"
marker.id = 0
marker.type = visualization_msgs.msg.Marker.SPHERE
marker.action = visualization_msgs.msg.Marker.ADD
marker.pose.position.x = x
marker.pose.position.y = y
marker.pose.position.z = z
marker.pose.orientation.x = 0.0
marker.pose.orientation.y = 0.0
marker.pose.orientation.z = 0.0
marker.pose.orientation.w = 1.0
marker.scale.x = 0.03
marker.scale.y = 0.03
marker.scale.z = 0.03
marker.color.a = 1.0
marker.color.r = 1.0
marker.color.g = 0.0
marker.color.b = 0.0
# hack to get around director funny business
for i in xrange(0, 5):
self.rviz_marker_publisher.publish(marker)
rospy.sleep(0.02)
def get_clicked_point(self):
"""
Returns the stored clicked point. If there is none it raises an error.
rtype: geometry_msgs.Point
"""
lastMsg = self.clicked_point_subscriber.lastMsg
if lastMsg is None:
raise ValueError("No /clicked_point messages found.")
return lastMsg.point
def setupROSActions(self):
actionName = '/spartan_grasp/GenerateGraspsFromPointCloudList'
self.generate_grasps_client = actionlib.SimpleActionClient(actionName,
spartan_grasp_msgs.msg.GenerateGraspsFromPointCloudListAction)
actionName = '/spartan_grasp/Grasp3DLocation'
self.grasp_3D_location_client = actionlib.SimpleActionClient(actionName,
spartan_grasp_msgs.msg.Grasp3DLocationAction)
findBestBatchActionName = '/FindBestMatch'
self.find_best_match_client = actionlib.SimpleActionClient(findBestBatchActionName,
pdc_ros_msgs.msg.FindBestMatchAction)
poser_action_name = '/Poser'
self.poser_client = actionlib.SimpleActionClient(poser_action_name,
pdc_ros_msgs.msg.DeformableRegistrationAction)
category_manipulation_name = "/CategoryManipulation"
self.category_manip_client = actionlib.SimpleActionClient(category_manipulation_name, pdc_ros_msgs.msg.CategoryManipulationAction)
action_name = "/KeypointDetection"
self.keypoint_detection_client = actionlib.SimpleActionClient(action_name, pdc_ros_msgs.msg.KeypointDetectionAction)
action_name = "/PoseEstimation"
self.pose_estimation_client = actionlib.SimpleActionClient(action_name,
pdc_ros_msgs.msg.EstimatePoseAction)
action_name = "/SaveRGBD"
self.save_RGBD_client = actionlib.SimpleActionClient(action_name,
pdc_ros_msgs.msg.KeypointDetectionAction)
def setupTF(self):
if self.tfBuffer is None:
self.tfBuffer = tf2_ros.Buffer()
self.tfListener = tf2_ros.TransformListener(self.tfBuffer)
self.tfBroadcaster = tf2_ros.TransformBroadcaster()
def getDepthOpticalFrameToWorldTransform(self):
depth_optical_frame_to_world = self.tfBuffer.lookup_transform("base", self.depthOpticalFrameName,
rospy.Time(0))
return depth_optical_frame_to_world
def get_transform(self, from_name, to_name, ros_time=None):
if ros_time is None:
ros_time = rospy.Time(0)
transform_stamped_msg = self.tfBuffer.lookup_transform(to_name, from_name, ros_time)
# convert to vtkTransform
pos, quat = rosUtils.poseFromROSTransformMsg(transform_stamped_msg.transform)
return pos, quat
def getRgbOpticalFrameToWorldTransform(self, time=None):
"""
:param time:
:type time:
:return: geometry_msgs/TransformStamped
:rtype:
"""
if time is None:
time = rospy.Time(0)
rgb_optical_frame_to_world = self.tfBuffer.lookup_transform("base", self.rgbOpticalFrameName,
time)
return rgb_optical_frame_to_world
def capturePointCloudAndCameraTransform(self, cameraOrigin=[0, 0, 0]):
"""
Captures the current PointCloud2 from the sensor. Also records the pose of the camera frame.
"""
# sleep so transforms can update
msg = spartan_grasp_msgs.msg.PointCloudWithTransform()
msg.header.stamp = rospy.Time.now()
msg.camera_origin.x = cameraOrigin[0]
msg.camera_origin.y = cameraOrigin[1]
msg.camera_origin.z = cameraOrigin[2]
msg.point_cloud_to_base_transform = self.getDepthOpticalFrameToWorldTransform()
msg.point_cloud = self.pointCloudSubscriber.waitForNextMessage()
self.testData = msg # for debugging
return msg
def captureRgbdAndCameraTransform(self, cameraOrigin=[0, 0, 0]):
# sleep so transforms can update
msg = pdc_ros_msgs.msg.RGBDWithPose()
msg.header.stamp = rospy.Time.now()
msg.camera_pose = self.getRgbOpticalFrameToWorldTransform()
msg.rgb_image = self.rgbImageSubscriber.waitForNextMessage()
msg.depth_image = self.depthImageSubscriber.waitForNextMessage()
# maybe be careful about rostime here
msg.point_cloud = self.pointCloudSubscriber.waitForNextMessage()
msg.point_cloud_pose = self.getDepthOpticalFrameToWorldTransform()
return msg
def moveHome(self, speed=None):
rospy.loginfo("moving home")
if speed is None:
speed = self.graspingParams['speed']['nominal']
homePose = self.graspingParams[self.state.graspingLocation]['poses']['scan_above_table']
self.robotService.moveToJointPosition(homePose,
maxJointDegreesPerSecond=speed)
def getStowPose(self):
stow_location = self.state.stowLocation
params = self.graspingParams[stow_location]
return params['poses']['stow']
# scans to several positions
def collectSensorData(self, saveToBagFile=False, **kwargs):
"""
Collects PointCloud Messages, also RGB and Depth images.
Writes the result to two class variables
- self.pointCloudListMsg
- self.listOfRgbdWithPose
also returns these two values
"""
self.moveHome()
rospy.loginfo("collecting sensor data")
graspLocationData = self.graspingParams[self.state.graspingLocation]
pointCloudListMsg = spartan_grasp_msgs.msg.PointCloudList()
pointCloudListMsg.header.stamp = rospy.Time.now()
data = dict()
pose_list = graspLocationData['scan_pose_list']
listOfRgbdWithPoseMsg = []
for poseName in pose_list:
rospy.loginfo("moving to pose = " + poseName)
joint_positions = graspLocationData['poses'][poseName]
self.robotService.moveToJointPosition(joint_positions,
maxJointDegreesPerSecond=self.config['scan']['joint_speed'])
rospy.sleep(self.config["sleep_time_for_sensor_collect"])
pointCloudWithTransformMsg = self.capturePointCloudAndCameraTransform()
pointCloudListMsg.point_cloud_list.append(pointCloudWithTransformMsg)
data[poseName] = pointCloudWithTransformMsg
rgbdWithPoseMsg = self.captureRgbdAndCameraTransform()
listOfRgbdWithPoseMsg.append(rgbdWithPoseMsg)
self.sensorData = data
self.pointCloudListMsg = pointCloudListMsg
self.listOfRgbdWithPoseMsg = listOfRgbdWithPoseMsg
if saveToBagFile:
self.saveSensorDataToBagFile(pointCloudListMsg=pointCloudListMsg, **kwargs)
return pointCloudListMsg, listOfRgbdWithPoseMsg
def findBestBatch(self):
"""
This function will:
- collect a small handful of RGBDWithPose msgs
- call the FindBestMatch service (a service of pdc-ros)
- return what was found from FindBestMatch
"""
self.moveHome()
_, listOfRgbdWithPoseMsg = self.collectSensorData()
self.list_rgbd_with_pose_msg = listOfRgbdWithPoseMsg
# request via a ROS Action
rospy.loginfo("waiting for find best match server")
self.find_best_match_client.wait_for_server()
goal = pdc_ros_msgs.msg.FindBestMatchGoal()
goal.rgbd_with_pose_list = listOfRgbdWithPoseMsg
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
rospy.loginfo("requesting best match from server")
self.find_best_match_client.send_goal(goal)
self.moveHome()
rospy.loginfo("waiting for find best match result")
self.find_best_match_client.wait_for_result()
result = self.find_best_match_client.get_result()
rospy.loginfo("received best match result")
self.best_match_result = result
if result.match_found:
print "match found"
print "location:", result.best_match_location
else:
print "NO MATCH FOUND"
return result
def run_poser(self):
"""
This function will:
- collect a small handful of RGBDWithPose msgs
- call the Poser / DeformableRegistration service (a service of pdc-ros)
- return the registration result from Poser
"""
# self.moveHome()
rgbdWithPoseMsg = self.captureRgbdAndCameraTransform()
listOfRgbdWithPoseMsg = [rgbdWithPoseMsg]
self.list_rgbd_with_pose_msg = listOfRgbdWithPoseMsg
# request via a ROS Action
rospy.loginfo("waiting for poser server")
self.poser_client.wait_for_server()
rospy.loginfo("connected to poser server")
goal = pdc_ros_msgs.msg.DeformableRegistrationGoal()
goal.rgbd_with_pose_list = listOfRgbdWithPoseMsg
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
rospy.loginfo("requesting registration from poser")
self.poser_client.send_goal(goal)
self.moveHome()
rospy.loginfo("waiting for poser result")
self.poser_client.wait_for_result()
result = self.poser_client.get_result()
state = self.poser_client.get_state()
rospy.loginfo("received poser result")
print("result:\n", result)
succeeded = (state == GoalStatus.SUCCEEDED)
if not succeeded:
rospy.loginfo("Poser failed")
self.poser_result = result
self._cache['poser_result'] = result
result_dict = dict()
result_dict['result'] = result
result_dict['output_dir'] = result.output_dir
result_dict['state'] = state
result_dict['succeeded'] = succeeded
result_dict['type'] = "mankey"
self._cache["keypoint_detection_result"] = result_dict
self.taskRunner.callOnMain(self.visualize_poser_result)
def run_keypoint_detection(self, wait_for_result=True, move_to_stored_pose=True, clear_state=True):
"""
Runs keypoint detection using ManKey in pdc-ros. Note that this clears the cache
:return:
:rtype:
"""
if clear_state:
self._clear_cache()
self.state.clear()
if move_to_stored_pose:
CMT = CategoryManipulationType
q = self._stored_poses_director["General"]["home"] # for mugs
if MANIP_TYPE in [CMT.SHOE_ON_RACK, CMT.SHOE_ON_TABLE]:
q = self._stored_poses_director['General']['center_back']
else: # basically all mugs
q = self._stored_poses_director["General"]["home"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=self.graspingParams['speed']['fast'])
rgbdWithPoseMsg = self.captureRgbdAndCameraTransform()
self.state.cache['rgbd_with_pose_list'] = []
self.state.cache['rgbd_with_pose_list'].append(rgbdWithPoseMsg)
# request via a ROS Action
rospy.loginfo("waiting for KeypointDetection server")
self.keypoint_detection_client.wait_for_server()
rospy.loginfo("connected to KeypointDetection server")
goal = pdc_ros_msgs.msg.KeypointDetectionGoal()
goal.rgbd_with_pose_list = self.state.cache['rgbd_with_pose_list']
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
if EXPERIMENT_MODE:
goal.output_dir = "mankey_experiments/%s" %(spartanUtils.get_current_YYYY_MM_DD_hh_mm_ss())
rospy.loginfo("requesting action from KeypointDetection server")
self.keypoint_detection_client.send_goal(goal)
self.state.set_status("ABOVE_TABLE")
if wait_for_result:
self.wait_for_keypoint_detection_result()
def wait_for_keypoint_detection_result(self):
"""
Wait for keypoint detection result, save it to cache
"""
rospy.loginfo("waiting for KeypointDetection result")
self.keypoint_detection_client.wait_for_result()
result = self.keypoint_detection_client.get_result()
state = self.keypoint_detection_client.get_state()
rospy.loginfo("received KeypointDetection result")
print "result:\n", result
self.keypoint_detection_result = result
succeeded = (state == GoalStatus.SUCCEEDED)
if not succeeded:
rospy.loginfo("KeypointDetection failed")
result_dict = dict()
result_dict['result'] = result
result_dict['output_dir'] = result.output_dir
result_dict['state'] = state
result_dict['succeeded'] = succeeded
result_dict['type'] = "mankey"
self._cache["keypoint_detection_result"] = result_dict
self.state._cache["keypoint_detection_result"] = result_dict
return result_dict
def check_keypoint_detection_succeeded(self):
"""
Checks whether keypoint detection succeeded or not
:return:
:rtype:
"""
# you should have run keypoint detection before this
keypoint_detection_result = self.state.cache['keypoint_detection_result']
if keypoint_detection_result["state"] == GoalStatus.SUCCEEDED:
return True
else:
print("keypoint detection failed, ABORTING")
return False
def check_category_goal_estimation_succeeded(self):
"""
Returns a bool as to whether category goal estimation succeeded or not
:return:
:rtype:
"""
state = self.state.cache['category_manipulation_goal']['state']
if state == GoalStatus.SUCCEEDED:
return True
else:
print("category goal estimation failed, ABORTING")
return False
def estimate_mug_rack_pose(self):
"""
:return:
:rtype:
"""
# fusion_params_file = os.path.join(spartanUtils.getSpartanSourceDir(), "src/catkin_projects/station_config/RLG_iiwa_1/fusion/fusion_params.yaml")
#
#
# fusion_params = spartanUtils.getDictFromYamlFilename(fusion_params_file)
# bbox_min = np.array(fusion_params['left']['bbox_min'])
# bbox_min[2] += 0.05 # be conservative on where bottom of table is
# bbox_max = np.array(fusion_params['left']['bbox_max'])
bbox_min = np.array([0.07001, 0.49, 0.01026])
bbox_max = np.array([0.47195, 0.85201, 0.75])
rgbd_with_pose_list = []
# move to pose 1, capture RGBD
q = self._stored_poses_director["left_table"]["look_at_rack"]
speed = self.graspingParams["speed"]["fast"]
self.robotService.moveToJointPosition(q, maxJointDegreesPerSecond=speed)
rgbd_with_pose = self.captureRgbdAndCameraTransform()
rgbd_with_pose_list.append(rgbd_with_pose)
# move to pose 2, capture RGBD
q = self._stored_poses_director["left_table"]["look_at_rack_2"]
speed = self.graspingParams["speed"]["fast"]
self.robotService.moveToJointPosition(q, maxJointDegreesPerSecond=speed)
rgbd_with_pose = self.captureRgbdAndCameraTransform()
rgbd_with_pose_list.append(rgbd_with_pose)
# convert to VTK poly data and crop
d = DebugData()
for msg in rgbd_with_pose_list:
pointcloud_numpy = DirectorROSVisualizer.numpy_from_pointcloud2_msg(msg.point_cloud)
pointcloud_vtk = vnp.getVtkPolyDataFromNumpyPoints(pointcloud_numpy)
T_world_pointcloud = ros_numpy.numpify(msg.point_cloud_pose.transform)
T_world_pointcloud_vtk = transformUtils.getTransformFromNumpy(T_world_pointcloud)
pointcloud_vtk = filterUtils.transformPolyData(pointcloud_vtk, T_world_pointcloud_vtk)
d.addPolyData(pointcloud_vtk)
pointcloud = d.getPolyData()
print "pointcloud.GetNumberOfPoints()", pointcloud.GetNumberOfPoints()
# crop
transform = vtk.vtkTransform()
bounds = np.zeros([2,3])
bounds[0,:] = bbox_min
bounds[1,:] = bbox_max
print "bounds", bounds
cropped_pointcloud = segmentation.cropToBounds(pointcloud, transform, bounds)
print "cropped_pointcloud.GetNumberOfPoints()", cropped_pointcloud.GetNumberOfPoints()
# visualize it
def vis_function():
print "visualizing pointcloud"
vis.showPolyData(pointcloud, "pointcloud")
vis.showPolyData(cropped_pointcloud, "Mug rack pointcloud")
self.mug_rack_pointcloud = cropped_pointcloud
# not working for some reason
print "visualizing"
self.taskRunner.callOnMain(vis_function)
return
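# NOTE: the early return above makes the rest of this method unreachable; it appears to be kept as a reference for the pose-estimation call path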
rgbd_with_pose = pdc_ros_msgs.msg.RGBDWithPose()
# N x 3
cropped_pointcloud_numpy = vnp.getNumpyFromVtk(cropped_pointcloud)
print "cropped_pointcloud_numpy.shape", cropped_pointcloud_numpy.shape
# save numpy to file
save_file = "/home/manuelli/sandbox/spartan/pointcloud.npy"
np.save(save_file, cropped_pointcloud_numpy)
return
# it's already in world frame
rgbd_with_pose.point_cloud = DirectorROSVisualizer.pointcloud2_msg_from_numpy(cropped_pointcloud_numpy)
# convert it back to ROS msg
goal = pdc_ros_msgs.msg.EstimatePoseGoal()
goal.rgbd_with_pose_list.append(rgbd_with_pose)
T_world_rack_vtk = self._category_manip.mug_rack_vis_obj.getChildFrame().transform
T_world_rack = transformUtils.getNumpyFromTransform(T_world_rack_vtk)
goal.T_init = ros_numpy.msgify(geometry_msgs.Pose, T_world_rack)
# send out service call
self.pose_estimation_client.wait_for_server()
self.pose_estimation_client.send_goal(goal)
# wait for result
self.pose_estimation_client.wait_for_result()
result = self.pose_estimation_client.get_result()
T_world_rack_estimated = ros_numpy.numpify(result.T_world_model)
T_world_rack_estimated_vtk = transformUtils.getTransformFromNumpy(T_world_rack_estimated)
self._category_manip.mug_rack_vis_obj.getChildFrame().copyFrame(T_world_rack_estimated_vtk)
def run_category_manipulation_goal_estimation(self, wait_for_result=True, capture_rgbd=True):
"""
Calls the CategoryManipulation service of pdc-ros
which is provided by category_manip_server.py.
Uses the keypoint detection result from either
`run_poser` or `run_keypoint_detection`
:return: bool
:rtype:
"""
if not self.check_keypoint_detection_succeeded():
return False
keypoint_detection_result = self.state.cache['keypoint_detection_result']
# don't specify poser output dir for now
goal = pdc_ros_msgs.msg.CategoryManipulationGoal()
goal.output_dir = keypoint_detection_result['output_dir']
goal.keypoint_detection_type = keypoint_detection_result['type']
if capture_rgbd:
self.moveHome()
rgbd_with_pose = self.captureRgbdAndCameraTransform()
self.state.cache['rgbd_with_pose_list'].append(rgbd_with_pose)
goal.rgbd_with_pose_list = self.state.cache['rgbd_with_pose_list']
if 'rgbd_with_pose_list' in self.state.cache:
goal.rgbd_with_pose_list = self.state.cache['rgbd_with_pose_list']
if MANIP_TYPE == CategoryManipulationType.SHOE_ON_RACK:
print("applying T_adjust")
print("self._shoe_manipulation_counter", self._shoe_manipulation_counter)
goal.apply_T_adjust = True
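# offset each successive shoe placement along x by shoe_offset, scaled by how many shoes have already been placed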
pos = np.array([self.graspingParams["shoe_offset"], 0, 0]) * self._shoe_manipulation_counter
quat = [1,0,0,0]
T_adjust_vtk = transformUtils.transformFromPose(pos, quat)
T_adjust = transformUtils.getNumpyFromTransform(T_adjust_vtk)
goal.T_adjust = ros_numpy.msgify(geometry_msgs.msg.Pose, T_adjust)
else:
goal.apply_T_adjust = False
rospy.loginfo("waiting for CategoryManip server")
self.category_manip_client.wait_for_server()
rospy.loginfo("connected to CategoryManip server")
self.category_manip_client.send_goal(goal)
if wait_for_result:
self.wait_for_category_manipulation_goal_result()
return True
def wait_for_category_manipulation_goal_result(self):
"""
Waits for category manipulation goal result
"""
print("waiting for category manipulation result")
self.category_manip_client.wait_for_result()
result = self.category_manip_client.get_result()
state = self.category_manip_client.get_state()
T_goal_obs = ros_numpy.numpify(result.T_goal_obs)
print "T_goal_obs:\n", T_goal_obs
T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)
print transformUtils.poseFromTransform(T_goal_obs_vtk)
self.state.cache['category_manipulation_goal'] = dict()
self.state.cache['category_manipulation_goal']['result'] = result
self.state.cache['category_manipulation_goal']["T_goal_obs"] = T_goal_obs_vtk
self.state.cache['category_manipulation_goal']['state'] = state
self.state.cache['category_manipulation_goal']["type"] = CategoryManipulationType.from_string(result.category_manipulation_type)
def run_mug_shelf_3D_pipeline(self):
"""
Runs entire pipeline for mug shelf 3D
:return:
:rtype:
"""
self.state.clear()
self._clear_cache()
# move home
speed = self.graspingParams['speed']['fast']
super_fast_speed = self.graspingParams['speed']['fast']
# q = self._stored_poses_director["General"]["home"]
# q = self._stored_poses_director["mug"]["image_capture_for_mug_shelf"]
q = self._stored_poses_director["General"]["center_back"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=super_fast_speed)
self.run_keypoint_detection(wait_for_result=False, move_to_stored_pose=False, clear_state=False)
# run keypoint detection
# move to center back to capture another RGBD image
q = self._stored_poses_director["General"]["home"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=super_fast_speed)
rgbd_with_pose = self.captureRgbdAndCameraTransform()
self.state.cache['rgbd_with_pose_list'].append(rgbd_with_pose)
self.wait_for_keypoint_detection_result()
if not self.check_keypoint_detection_succeeded():
self.state.set_status("FAILED")
return False
# run category manip
code = self.run_category_manipulation_goal_estimation(capture_rgbd=False)
if not code:
self.state.set_status("FAILED")
return False
self.wait_for_category_manipulation_goal_result()
if not self.check_category_goal_estimation_succeeded():
self.state.set_status("PLANNING_FAILED")
return False
# run the manipulation
# need safety checks in there before running autonomously
code = self.run_mug_shelf_manipulation()
if not (code == True):
self.state.set_status("FAILED")
return False
# if the place was successful then retract
self.retract_from_mug_shelf()
if EXPERIMENT_MODE:
output_dir = self.state.cache['keypoint_detection_result']['output_dir']
print "\n\n", os.path.split(output_dir)[1]
def run_mug_on_rack_pipeline(self, side_view=False):
"""
Runs the entire pipeline for placing a mug on the rack
:return:
:rtype:
"""
self.state.clear()
self._clear_cache()
# move home
speed = self.graspingParams['speed']['fast']
q = self._stored_poses_director["General"]["home"]
if side_view:
print "\nusing side view\n"
q = self._stored_poses_director["General"]["center_back"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=speed)
# run keypoint detection
self.run_keypoint_detection(wait_for_result=False, move_to_stored_pose=False, clear_state=False)
self.wait_for_keypoint_detection_result()
# move to center back to capture another RGBD image
q = self._stored_poses_director["General"]["center_back"]
if side_view:
q = self._stored_poses_director["General"]["home"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=speed)
rgbd_with_pose = self.captureRgbdAndCameraTransform()
self.state.cache['rgbd_with_pose_list'].append(rgbd_with_pose)
q = self._stored_poses_director["General"]["home"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=speed)
if not self.check_keypoint_detection_succeeded():
self.state.set_status("FAILED")
return False
# run category manip
code = self.run_category_manipulation_goal_estimation(capture_rgbd=False)
if not code:
self.state.set_status("FAILED")
return False
self.wait_for_category_manipulation_goal_result()
if not self.check_category_goal_estimation_succeeded():
self.state.set_status("PLANNING_FAILED")
return False
# run the manipulation
# need safety checks in there before running autonomously
code = self.run_mug_on_rack_manipulation()
if not (code == True):
self.state.set_status("FAILED")
return False
if EXPERIMENT_MODE:
output_dir = self.state.cache['keypoint_detection_result']['output_dir']
print "\n\n", os.path.split(output_dir)[1]
def run_shoe_on_rack_pipeline(self):
"""
Runs the entire pipeline for placing a shoe on the rack
:return:
:rtype:
"""
if EXPERIMENT_MODE:
self._shoe_manipulation_counter = 0 # for testing
self.state.clear()
self._clear_cache()
# move home
speed = self.graspingParams['speed']['fast']
# q = self._stored_poses_director["General"]["center_back"]
q = self._stored_poses_director["General"]["home"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=speed)
# run keypoint detection
self.run_keypoint_detection(wait_for_result=False, move_to_stored_pose=False, clear_state=False)
self.wait_for_keypoint_detection_result()
if not self.check_keypoint_detection_succeeded():
self.state.set_status("FAILED")
return False
# run category manip
code = self.run_category_manipulation_goal_estimation(capture_rgbd=False)
if not code:
self.state.set_status("FAILED")
return False
self.wait_for_category_manipulation_goal_result()
if not self.check_category_goal_estimation_succeeded():
self.state.set_status("PLANNING_FAILED")
return False
# run the manipulation
# need safety checks in there before running autonomously
code = self.run_shoe_rack_manipulation()
if not code:
self.state.set_status("FAILED")
return False
# if the place was successful then retract
self.retract_from_shoe_rack()
if EXPERIMENT_MODE:
print "\n\n", self.state.cache['keypoint_detection_result']['output_dir']
def run_manipulate_object(self, debug=False):
"""
Runs the object manipulation code. Will put the object into the
specified target pose from `run_category_manipulation_goal_estimation`
:return:
"""
# self.taskRunner.callOnMain(self._poser_visualizer.visualize_result)
if not self.check_category_goal_estimation_succeeded():
return False
if debug:
self._object_manipulation = ObjectManipulation()
self._object_manipulation.assign_defaults()
self._object_manipulation.compute_transforms()
return
self.moveHome()
grasp_found, grasp_data = self.request_spartan_grasp(clear_state=False)
if not grasp_found:
print "no grasp found, returning\n"
return False
# execute the grasp
object_in_gripper = self.execute_grasp(self.state.grasp_data, close_gripper=True, use_cartesian_plan=True)
print "object_in_gripper:", object_in_gripper
T_goal_obs = self.state.cache['category_manipulation_T_goal_obs']
T_W_G = self.state.cache['gripper_frame_at_grasp']
self._object_manipulation = ObjectManipulation(T_goal_object=T_goal_obs, T_W_G=T_W_G)
self._object_manipulation.grasp_data = self.state.grasp_data
self._object_manipulation.compute_transforms()
self.taskRunner.callOnMain(self._object_manipulation.visualize)
pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position
pickup_speed = self.graspingParams['speed']['pickup']
if not object_in_gripper:
# open the gripper and back away
self.gripperDriver.send_open_gripper_set_distance_from_current()
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
return False
# pickup the object
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
# place the object
grasp_data_place = self._object_manipulation.get_place_grasp_data()
self.execute_place(grasp_data_place)
# open the gripper and back away
pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position
pickup_speed = self.graspingParams['speed']['pickup']
self.gripperDriver.send_open_gripper_set_distance_from_current()
# pickup the object
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
# move home
self.moveHome()
def run_shoe_rack_manipulation(self, debug=False, push_in_distance=0.00):
"""
Runs the object manipulation code. Will put the object into the
specified target pose from `run_category_manipulation_goal_estimation`
:return:
"""
print("\n\n--- Running Shoe Manipulation-------\n\n")
# self.taskRunner.callOnMain(self._poser_visualizer.visualize_result)
if not self.check_category_goal_estimation_succeeded():
return False
# check that we really are doing mug
category_manipulation_type = self.state.cache['category_manipulation_goal']['type']
assert category_manipulation_type == CategoryManipulationType.SHOE_ON_RACK
speed = self.graspingParams['speed']['fast']
self.moveHome(speed=speed)
result = self.state.cache['category_manipulation_goal']['result']
T_W_fingertip = ros_numpy.numpify(result.T_world_gripper_fingertip)
T_W_fingertip_vtk = transformUtils.getTransformFromNumpy(T_W_fingertip)
grasp_data = GraspData.from_gripper_fingertip_frame(T_W_fingertip)
grasp_data.gripper.params["hand_inner_diameter"] = result.gripper_width
grasp_data.gripper.params["hand_inner_diameter"] = 0.07
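# NOTE: the hard-coded 0.07 m above overrides the gripper width returned in the category-manipulation result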
self.state.grasp_data = grasp_data
# rotate the grasp to align with nominal
params = self.getParamsForCurrentLocation()
grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])
grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)
def vis_function():
vis.updateFrame(T_W_fingertip_vtk, "gripper fingertip frame", scale=0.15, parent=self._vis_container)
vis.updateFrame(grasp_data.grasp_frame, "grasp frame", scale=0.15, parent=self._vis_container)
self.visualize_grasp(grasp_data)
self.taskRunner.callOnMain(vis_function)
# execute the grasp
force_threshold_magnitude = 30
object_in_gripper = self.execute_grasp(grasp_data, close_gripper=True, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude, push_in_distance=0.04, ee_speed_m_s=0.1)
if not object_in_gripper:
print("grasp failed, returning")
return False
print "object_in_gripper:", object_in_gripper
T_goal_obs = self.state.cache['category_manipulation_goal']["T_goal_obs"]
T_W_G = self.state.cache['gripper_frame_at_grasp']
pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position
pickup_speed = self.graspingParams['speed']['pickup']
if not object_in_gripper:
# open the gripper and back away
self.gripperDriver.send_open_gripper_set_distance_from_current()
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
return False
# pickup the object
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
# move home
self.moveHome()
# move to approach pose
speed = self.graspingParams['speed']['fast']
q_approach = np.array(self._stored_poses_director["left_table"]["shoe_approach"])
self.robotService.moveToJointPosition(q_approach, maxJointDegreesPerSecond=speed)
# compute some poses
T_goal_obs = ros_numpy.numpify(result.T_goal_obs) # 4 x 4 numpy matrix
T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)
object_manip = ObjectManipulation(T_goal_object=T_goal_obs_vtk, T_W_G=T_W_G)
object_manip.compute_transforms()
T_W_Gn_vtk = object_manip.T_W_Gn # gripper to world for place pose
T_pre_goal_obs = ros_numpy.numpify(result.T_pre_goal_obs)
T_pre_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_pre_goal_obs)
object_manip_approach = ObjectManipulation(T_goal_object=T_pre_goal_obs_vtk, T_W_G=T_W_G)
object_manip_approach.compute_transforms()
T_W_Gn_approach_vtk = object_manip_approach.T_W_Gn
# move this down by push_in_distance
pos, quat = transformUtils.poseFromTransform(T_W_Gn_approach_vtk)
T_W_Gn_approach_vtk = transformUtils.transformFromPose(pos, quat)
# now convert these to ee poses for running IK
pos, quat = transformUtils.poseFromTransform(T_W_Gn_vtk)
pos[2] -= push_in_distance
T_W_Gn_vtk = transformUtils.transformFromPose(pos, quat)
T_W_ee_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_vtk)
T_W_ee = transformUtils.getNumpyFromTransform(T_W_ee_vtk)
T_W_ee_approach_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_approach_vtk)
T_W_ee_approach = transformUtils.getNumpyFromTransform(T_W_ee_approach_vtk)
# place the object
force_threshold_magnitude = 50 # shoes are heavy
q_nom = np.array(self._stored_poses_director["Grasping"]["above_table_pre_grasp"])
q_nom = np.array(self._stored_poses_director["left_table"]["above_table_pre_grasp"])
code = self.execute_place_new(T_W_ee, T_W_ee_approach, q_nom=q_nom, use_cartesian_plan=True, force_threshold_magnitude=force_threshold_magnitude)
print("\n\n--- Finished Shoe Manipulation-------\n\n")
self._shoe_manipulation_counter += 1
return code
def retract_from_shoe_rack(self):
"""
Retract from the shoe rack
:return:
:rtype:
"""
# open the gripper and back away
self.gripperDriver.send_open_gripper_set_distance_from_current(distance=0.045)
# back away along gripper x-direction
ee_speed_m_s = 0.05
xyz_goal = [-0.15, 0, 0] # 15 cm
duration = np.linalg.norm(xyz_goal) / ee_speed_m_s
ee_frame_id = "iiwa_link_ee"
base_frame_id = "base"
expressed_in_frame = ee_frame_id
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
duration=duration,
speed=0.1)
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
action_client.wait_for_result()
result = action_client.get_result()
self.state.cache['cartesian_traj_result'] = result
speed = self.graspingParams['speed']['fast']
if EXPERIMENT_MODE:
# move to pose
q = self._stored_poses_director["left_table"]["shoe_evaluation_side"]
self.robotService.moveToJointPosition(q, maxJointDegreesPerSecond=speed)
msg = self.captureRgbdAndCameraTransform()
save_dir = os.path.join(spartanUtils.get_sandbox_dir(), self.state.cache['keypoint_detection_result']['output_dir'], "evaluation")
self.save_RGBD_client.wait_for_server()
goal = pdc_ros_msgs.msg.KeypointDetectionGoal()
goal.rgbd_with_pose_list.append(msg)
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
goal.output_dir = save_dir
self.save_RGBD_client.send_goal(goal)
self.save_RGBD_client.wait_for_result()
self.moveHome(speed=speed)
def run_mug_on_rack_manipulation(self):
"""
Runs the object manipulation code. Will put the object into the
specified target pose from `run_category_manipulation_goal_estimation`
:return:
"""
self.wait_for_category_manipulation_goal_result()
if not self.check_category_goal_estimation_succeeded():
return False
category_manipulation_type = self.state.cache['category_manipulation_goal']['type']
assert category_manipulation_type == CategoryManipulationType.MUG_ON_RACK
self.moveHome()
# extract grasp from gripper fingertip pose
result = self.state.cache["category_manipulation_goal"]["result"]
T_W_fingertip = ros_numpy.numpify(result.T_world_gripper_fingertip)
T_W_fingertip_vtk = transformUtils.getTransformFromNumpy(T_W_fingertip)
grasp_data = GraspData.from_gripper_fingertip_frame(T_W_fingertip)
grasp_data.gripper.params["hand_inner_diameter"] = 0.05 # 5 cm wide
self.state.grasp_data = grasp_data
self.visualize_grasp(grasp_data)
debug_speed = 10
def vis_function():
vis.updateFrame(T_W_fingertip_vtk, "gripper fingertip frame", scale=0.15, parent=self._vis_container)
vis.updateFrame(grasp_data.grasp_frame, "grasp frame", scale=0.15, parent=self._vis_container)
self.taskRunner.callOnThread(vis_function)
# debugging
print("visualizing grasp")
self.visualize_grasp(grasp_data)
# execute the grasp
object_in_gripper = self.execute_grasp(self.state.grasp_data, close_gripper=True, use_cartesian_plan=True, push_in_distance=0.01, ee_speed_m_s=0.1)
T_W_G = self.state.cache['gripper_frame_at_grasp'] # this is set in execute_grasp
pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position
pickup_speed = self.graspingParams['speed']['pickup']
if not object_in_gripper:
# open the gripper and back away
self.gripperDriver.send_open_gripper_set_distance_from_current()
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
return False
# pickup the object
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
# now move to nominal position for the place
# speed = self.graspingParams["speed"]["nominal"]
speed = self.graspingParams["speed"]["fast"]
# q_nom_left_table = self._stored_poses_director["left_table"]["above_table_pre_grasp"]
q_nom_left_table = self._stored_poses_director["left_table"]["above_table_pre_grasp_right"]
self.robotService.moveToJointPosition(q_nom_left_table,
maxJointDegreesPerSecond=
speed)
# compute some poses
T_goal_obs = ros_numpy.numpify(result.T_goal_obs) # 4 x 4 numpy matrix
T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)
object_manip = ObjectManipulation(T_goal_object=T_goal_obs_vtk, T_W_G=T_W_G)
object_manip.compute_transforms()
T_W_Gn_vtk = object_manip.T_W_Gn # gripper to world for place pose
T_pre_goal_obs = ros_numpy.numpify(result.T_pre_goal_obs)
T_pre_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_pre_goal_obs)
object_manip_approach = ObjectManipulation(T_goal_object=T_pre_goal_obs_vtk, T_W_G=T_W_G)
object_manip_approach.compute_transforms()
T_W_Gn_approach_vtk = object_manip_approach.T_W_Gn
# now convert these to ee poses
T_W_ee_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_vtk)
T_W_ee = transformUtils.getNumpyFromTransform(T_W_ee_vtk)
T_W_ee_approach_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_approach_vtk)
T_W_ee_approach = transformUtils.getNumpyFromTransform(T_W_ee_approach_vtk)
# execute the place
print("executing place on rack")
return self.execute_place_new(T_W_ee, T_W_ee_approach, q_nom=q_nom_left_table, use_cartesian_plan=True, force_threshold_magnitude=30, ee_speed_m_s=0.1)
def retract_from_mug_rack(self, gripper_open=True):
"""
Move backwards from the mug rack
:return:
:rtype:
"""
category_manipulation_type = self.state.cache['category_manipulation_goal']['type']
assert category_manipulation_type == CategoryManipulationType.MUG_ON_RACK
if gripper_open:
self.gripperDriver.send_open_gripper_set_distance_from_current()
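# back away 10 cm along the gripper x-axis (the goal below is expressed in the iiwa_link_ee frame)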
xyz_goal = np.array([-0.10, 0, 0])
ee_frame_id = "iiwa_link_ee"
expressed_in_frame = ee_frame_id
cartesian_grasp_speed = self.graspingParams['speed']['cartesian_grasp']
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
speed=cartesian_grasp_speed)
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
action_client.wait_for_result()
result = action_client.get_result()
# now move to nominal position for the place
speed = self.graspingParams["speed"]["fast"]
super_fast_speed = self.graspingParams["speed"]["super_fast"]
# q_nom_left_table = self._stored_poses_director["left_table"]["above_table_pre_grasp"]
q_nom_left_table = self._stored_poses_director["left_table"]["above_table_pre_grasp_right"]
self.robotService.moveToJointPosition(q_nom_left_table,
maxJointDegreesPerSecond=
speed)
if EXPERIMENT_MODE:
q = self._stored_poses_director["left_table"]["mug_rack_evaluation"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=
speed)
msg = self.captureRgbdAndCameraTransform()
save_dir = os.path.join(spartanUtils.get_sandbox_dir(),
self.state.cache['keypoint_detection_result']['output_dir'], "evaluation")
self.save_RGBD_client.wait_for_server()
goal = pdc_ros_msgs.msg.KeypointDetectionGoal()
goal.rgbd_with_pose_list.append(msg)
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
goal.output_dir = save_dir
self.save_RGBD_client.send_goal(goal)
self.save_RGBD_client.wait_for_result()
self.moveHome(speed=super_fast_speed)
if EXPERIMENT_MODE:
output_dir = self.state.cache['keypoint_detection_result']['output_dir']
print "\n\n", os.path.split(output_dir)[1]
# clear the cache, to avoid you doing it twice
self.state.clear()
self._clear_cache()
def run_mug_shelf_manipulation(self, use_debug_speed=True):
"""
Runs the object manipulation code. Will put the object into the
specified target pose from `run_category_manipulation_goal_estimation`
:return:
"""
self.wait_for_category_manipulation_goal_result()
if not self.check_category_goal_estimation_succeeded():
self.state.set_status("PLANNING_FAILED")
return False
category_manipulation_type = self.state.cache['category_manipulation_goal']['type']
assert category_manipulation_type == CategoryManipulationType.MUG_ON_SHELF_3D
self.moveHome()
result = self.state.cache['category_manipulation_goal']['result']
print("\n\n---result----\n\n", result)
print("\n\n\n")
T_W_fingertip = ros_numpy.numpify(result.T_world_gripper_fingertip)
T_W_fingertip_vtk = transformUtils.getTransformFromNumpy(T_W_fingertip)
grasp_data = GraspData.from_gripper_fingertip_frame(T_W_fingertip)
grasp_data.gripper.params["hand_inner_diameter"] = result.gripper_width
# rotate grasp frame to align with nominal if we are doing a vertical grasp
force_threshold_magnitude = 30
push_in_distance = 0.0
if result.mug_orientation == "HORIZONTAL":
push_in_distance = -0.005
force_threshold_magnitude = 30
elif result.mug_orientation == "UPRIGHT":
push_in_distance = 0.01
force_threshold_magnitude = 30
# params = self.getParamsForCurrentLocation()
# grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])
# grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)
self.state.grasp_data = grasp_data
self.visualize_grasp(grasp_data)
def vis_function():
vis.updateFrame(T_W_fingertip_vtk, "gripper fingertip frame", scale=0.15, parent=self._vis_container)
vis.updateFrame(grasp_data.grasp_frame, "grasp frame", scale=0.15, parent=self._vis_container)
self.taskRunner.callOnThread(vis_function)
# debugging
print("visualizing grasp")
self.visualize_grasp(grasp_data)
# execute the grasp
object_in_gripper = self.execute_grasp(self.state.grasp_data, close_gripper=True, use_cartesian_plan=True, push_in_distance=push_in_distance, force_threshold_magnitude=force_threshold_magnitude, ee_speed_m_s=0.1)
T_W_G = self.state.cache['gripper_frame_at_grasp'] # this is set in execute_grasp
pre_grasp_pose = self.state.cache['pre_grasp_ik_response'].joint_state.position
pickup_speed = self.graspingParams['speed']['pickup']
if not object_in_gripper:
# open the gripper and back away
self.gripperDriver.send_open_gripper_set_distance_from_current()
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
return False
# pickup the object
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pickup_speed)
# move to above table pre grasp
speed = self.graspingParams["speed"]["fast"]
q = self._stored_poses_director["Grasping"]["above_table_pre_grasp"]
self.robotService.moveToJointPosition(q,
maxJointDegreesPerSecond=
speed)
q_approach = None
if result.mug_orientation == "HORIZONTAL":
q_nom = self._stored_poses_director["mug"]["horizontal_grasp_nominal"]
q_approach_2 = self._stored_poses_director["mug"]["horizontal_grasp_approach_2"]
self.robotService.moveToJointPosition(q_approach_2,
maxJointDegreesPerSecond=
speed)
elif result.mug_orientation == "UPRIGHT":
q_nom = self._stored_poses_director["mug"]["vertical_grasp_nominal"]
q_approach_1 = self._stored_poses_director["mug"]["vertical_grasp_above_table"]
self.robotService.moveToJointPosition(q_approach_1,
maxJointDegreesPerSecond=
speed)
else:
raise ValueError("unknown mug orientation: %s" %(result.mug_orientation))
# compute some poses
T_goal_obs = ros_numpy.numpify(result.T_goal_obs) # 4 x 4 numpy matrix
T_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_goal_obs)
object_manip = ObjectManipulation(T_goal_object=T_goal_obs_vtk, T_W_G=T_W_G)
object_manip.compute_transforms()
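        # ObjectManipulation is assumed to compose the desired object motion (T_goal_obs)
        # with the gripper pose recorded at grasp time (T_W_G); since the object is held
        # rigidly, the gripper pose for the place is roughly T_W_Gn = T_goal_obs * T_W_G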
T_W_Gn_vtk = object_manip.T_W_Gn # gripper to world for place pose
T_pre_goal_obs = ros_numpy.numpify(result.T_pre_goal_obs)
T_pre_goal_obs_vtk = transformUtils.getTransformFromNumpy(T_pre_goal_obs)
object_manip_approach = ObjectManipulation(T_goal_object=T_pre_goal_obs_vtk, T_W_G=T_W_G)
object_manip_approach.compute_transforms()
T_W_Gn_approach_vtk = object_manip_approach.T_W_Gn
# now convert these to ee poses
T_W_ee_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_vtk)
T_W_ee = transformUtils.getNumpyFromTransform(T_W_ee_vtk)
T_W_ee_approach_vtk = self.getIiwaLinkEEFrameFromGraspFrame(T_W_Gn_approach_vtk)
T_W_ee_approach = transformUtils.getNumpyFromTransform(T_W_ee_approach_vtk)
# execute the place
print("executing place on shelf")
code = self.execute_place_new(T_W_ee, T_W_ee_approach, q_nom=q_nom, use_cartesian_plan=True, force_threshold_magnitude=30)
return code
def retract_from_mug_shelf(self, gripper_open=True, use_debug_speed=True):
"""
        Move backwards from the mug shelf
:return:
:rtype:
"""
category_manipulation_type = self.state.cache['category_manipulation_goal']['type']
assert category_manipulation_type == CategoryManipulationType.MUG_ON_SHELF_3D
result = self.state.cache['category_manipulation_goal']['result']
if gripper_open:
if result.mug_orientation == "HORIZONTAL":
self.gripperDriver.sendOpenGripperCommand()
else:
self.gripperDriver.send_open_gripper_set_distance_from_current()
# do different things depending on whether it was horizontal or vertical drop
result = self.state.cache['category_manipulation_goal']['result']
mug_orientation = result.mug_orientation
xyz_goal = np.array([-0.10, 0, 0])
ee_frame_id = "iiwa_link_ee"
expressed_in_frame = ee_frame_id
cartesian_grasp_speed = self.graspingParams['speed']['cartesian_grasp']
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
speed=cartesian_grasp_speed)
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
action_client.wait_for_result()
result = action_client.get_result()
# now move to nominal position for the place
speed = self.graspingParams["speed"]["fast"]
        super_fast_speed = self.graspingParams["speed"]["super_fast"]
if use_debug_speed:
speed = DEBUG_SPEED
if mug_orientation == "UPRIGHT":
q_pose_1 = self._stored_poses_director["mug"]["vertical_grasp_above_table"]
self.robotService.moveToJointPosition(q_pose_1,
maxJointDegreesPerSecond=
super_fast_speed)
elif mug_orientation=="HORIZONTAL":
q_pose_1 = self._stored_poses_director["mug"]["horizontal_grasp_approach"]
self.robotService.moveToJointPosition(q_pose_1,
maxJointDegreesPerSecond=
speed)
q_pose_2 = self._stored_poses_director["Grasping"]["above_table_pre_grasp"]
self.robotService.moveToJointPosition(q_pose_2,
maxJointDegreesPerSecond=
super_fast_speed)
if EXPERIMENT_MODE:
# move to pose
q = self._stored_poses_director["left_table"]["look_at_mug_shelf_2"]
self.robotService.moveToJointPosition(q, maxJointDegreesPerSecond=super_fast_speed)
msg = self.captureRgbdAndCameraTransform()
save_dir = os.path.join(spartanUtils.get_sandbox_dir(),
self.state.cache['keypoint_detection_result']['output_dir'], "evaluation")
self.save_RGBD_client.wait_for_server()
goal = pdc_ros_msgs.msg.KeypointDetectionGoal()
goal.rgbd_with_pose_list.append(msg)
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
goal.output_dir = save_dir
self.save_RGBD_client.send_goal(goal)
self.save_RGBD_client.wait_for_result()
        super_fast_speed = self.graspingParams["speed"]["super_fast"]
self.moveHome(speed=super_fast_speed)
def run_category_manipulation_pipeline(self):
self._clear_cache()
self.run_keypoint_detection()
self.run_category_manipulation_goal_estimation()
self.run_manipulate_object()
def visualize_poser_result(self):
"""
DEPRECATED (this code is best used from pdc_ros)
Visualize the poser output
"""
# debugging
if self.poser_result is None:
# use the default path for debugging purposes
path_to_poser_output = os.path.join(spartanUtils.get_sandbox_dir(), "poser")
else:
path_to_poser_output = os.path.join(spartanUtils.get_sandbox_dir(), self.poser_result.poser_output_folder)
self._poser_visualizer = PoserVisualizer(path_to_poser_output)
poser_response = self._poser_visualizer.load_poser_response()
self._poser_visualizer.visualize_result(poser_response)
def grasp_best_match(self):
assert self.best_match_result.match_found
best_match_location_msg = self.best_match_result.best_match_location
best_match_location = np.zeros(3)
best_match_location[0] = best_match_location_msg.x
best_match_location[1] = best_match_location_msg.y
best_match_location[2] = best_match_location_msg.z
# check that it is above table
min_pt = np.array([0.4, -0.357198029757, 0.0])
max_pt = np.array([0.822621226311, 0.3723, 0.5])
greater_than_min = (best_match_location > min_pt).all()
less_than_max = (best_match_location < max_pt).all()
if not (greater_than_min and less_than_max):
print "best match location is outside of workspace bounds"
print "best_match_location:", best_match_location
return False
print "requesting Grasp 3D location"
self.grasp_3D_location_request(best_match_location)
result = self.wait_for_grasp_3D_location_result()
print "received Grasp 3D Location Response"
print "result:\n", result
grasp_found = self.processGenerateGraspsResult(result)
if not grasp_found:
print "no grasp found, returning"
return False
print "attempting grasp"
return self.attemptGrasp(self.graspFrame)
def find_best_match_and_grasp_and_stow(self):
# find best match
result = self.findBestBatch()
if not result.match_found:
return False
# attempt grasp best match
grasp_successful = self.grasp_best_match()
if not grasp_successful:
self.gripperDriver.send_open_gripper_set_distance_from_current()
self.moveHome()
print "grasp attempt failed, resetting"
return False
# stow
stow_pose = self.graspingParams["poses"]["hand_to_human_right"]
# stow_pose = self.graspingParams["poses"]["stow_in_bin"]
self.pickupObject(stow=True, stow_pose=stow_pose)
def request_best_match(self):
goal = pdc_ros_msgs.msg.FindBestMatchGoal()
goal.rgbd_with_pose_list = self.list_rgbd_with_pose_msg
goal.camera_info = self.camera_info_subscriber.waitForNextMessage()
self.find_best_match_client.send_goal(goal)
self.moveHome()
# From: https://www.programcreek.com/python/example/99841/sensor_msgs.msg.PointCloud2
def pointcloud2_to_array(self, cloud_msg):
'''
Converts a rospy PointCloud2 message to a numpy recordarray
Assumes all fields 32 bit floats, and there is no padding.
'''
dtype_list = [(f.name, np.float32) for f in cloud_msg.fields]
cloud_arr = np.fromstring(cloud_msg.data, dtype_list)
        return cloud_arr
def processGenerateGraspsResult(self, result):
"""
Takes the result of spartan_grasp and parses it into a usable form
:param result:
:return:
"""
print "num antipodal grasps = ", len(result.antipodal_grasps)
print "num volume grasps = ", len(result.volume_grasps)
if (len(result.antipodal_grasps) == 0) and (len(result.volume_grasps) == 0):
self.topGrasp = None
self._grasp_found = False
rospy.loginfo("no valid grasps found")
return False
if len(result.antipodal_grasps) > 0:
self._grasp_found = True
grasp_msg = result.antipodal_grasps[0]
print "top grasp was ANTIPODAL"
elif len(result.volume_grasps) > 0:
self._grasp_found = True
grasp_msg = result.volume_grasps[0]
print "top grasp was VOLUME"
self.topGrasp = grasp_msg
rospy.loginfo("-------- top grasp score = %.3f", self.topGrasp.score)
self.graspFrame = spartanUtils.transformFromROSPoseMsg(self.topGrasp.pose.pose)
self.rotateGraspFrameToAlignWithNominal(self.graspFrame)
return True
def make_grasp_data_from_spartan_grasp_result(self, result):
"""
Takes the result of spartan_grasp and parses it into a usable form
:param result:
:return: bool, GraspData
"""
print "num antipodal grasps = ", len(result.antipodal_grasps)
print "num volume grasps = ", len(result.volume_grasps)
if (len(result.antipodal_grasps) == 0) and (len(result.volume_grasps) == 0):
rospy.loginfo("no valid grasps found")
return False, False
if len(result.antipodal_grasps) > 0:
grasp_msg = result.antipodal_grasps[0]
type = "antipodal"
print "top grasp was ANTIPODAL"
elif len(result.volume_grasps) > 0:
grasp_msg = result.volume_grasps[0]
type = "volume"
print "top grasp was VOLUME"
rospy.loginfo("-------- top grasp score = %.3f", grasp_msg.score)
grasp_data = GraspData.from_spartan_grasp(grasp_msg)
grasp_data.data['type'] = type
# rotate the grasp to align with nominal
params = self.getParamsForCurrentLocation()
grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])
grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)
return True, grasp_data
def getIiwaLinkEEFrameFromGraspFrame(self, graspFrame):
return transformUtils.concatenateTransforms([self.iiwaLinkEEToGraspFrame, graspFrame])
def get_iiwa_link_ee_from_gripper_fingertip_frame(self, T_W__gripper_fingertip):
"""
        :param T_W__gripper_fingertip: gripper fingertip frame expressed in the world frame
:return:
"""
return transformUtils.concatenateTransforms([self.T_gripper_fingertip__iiwa_link_ee, T_W__gripper_fingertip])
def moveToFrame(self, graspFrame, speed=None):
if speed is None:
speed = self.config['grasp_speed']
poseStamped = self.makePoseStampedFromGraspFrame(graspFrame)
return self.robotService.moveToCartesianPosition(poseStamped, speed)
def makePoseStampedFromGraspFrame(self, graspFrame):
"""
Make PoseStamped message for the end effector frame from a given grasp frame
:param graspFrame: vtkTransform of the gripper frame
:return : pose of the end-effector for that grasp frame location
:rtype : geometry_msgs/PoseStamped
"""
iiwaLinkEEFrame = self.getIiwaLinkEEFrameFromGraspFrame(graspFrame)
poseDict = spartanUtils.poseFromTransform(iiwaLinkEEFrame)
poseMsg = rosUtils.ROSPoseMsgFromPose(poseDict)
poseStamped = geometry_msgs.msg.PoseStamped()
poseStamped.pose = poseMsg
poseStamped.header.frame_id = "base"
return poseStamped
def make_ee_pose_stamped_from_grasp(self, T_W_gripper_fingertip):
"""
Make PoseStamped message for the end effector frame from a given grasp frame.
        :param T_W_gripper_fingertip: pose of the gripper fingertips in the world frame; the
            corresponding iiwa_link_ee pose is recovered via the fixed fingertip-to-ee transform
:return : pose of the end-effector for that grasp frame location
:rtype : geometry_msgs/PoseStamped
"""
iiwaLinkEEFrame = self.get_iiwa_link_ee_from_gripper_fingertip_frame(T_W_gripper_fingertip)
poseDict = spartanUtils.poseFromTransform(iiwaLinkEEFrame)
poseMsg = rosUtils.ROSPoseMsgFromPose(poseDict)
poseStamped = geometry_msgs.msg.PoseStamped()
poseStamped.pose = poseMsg
poseStamped.header.frame_id = "base"
return poseStamped
def execute_grasp(self, grasp_data=None, close_gripper=True, use_cartesian_plan=True, stop_at_pre_grasp=False, push_in_distance=None, use_debug_speed=False, force_threshold_magnitude=None, ee_speed_m_s=0.05):
"""
Moves to pre-grasp frame, then grasp frame
        attempts to close the gripper if `close_gripper=True` was passed in
:return: bool (whether or not grasp was successful)
"""
if grasp_data is None:
grasp_data = self.state.grasp_data
if push_in_distance is None:
push_in_distance = self.graspingParams['grasp_push_in_distance']
gripper_width = grasp_data.grasp_inner_diameter
if gripper_width is not None:
gripper_driver_width = gripper_width + self.graspingParams['gripper_width_offset']
self.gripperDriver.sendGripperCommand(gripper_driver_width, force=20.0)
else:
self.gripperDriver.send_open_gripper_set_distance_from_current()
rospy.sleep(0.5) # wait for 0.5 for gripper to move
# compute the pre-grasp frame
pre_grasp_distance = self.graspingParams['pre_grasp_distance']
pre_grasp_frame_gripper = grasp_data.compute_pre_grasp_frame(distance=pre_grasp_distance)
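        # the pre-grasp frame is the grasp frame backed off along the approach axis by
        # pre_grasp_distance, so the fingers start clear of the object before the final approach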
pre_grasp_ee_pose_stamped = self.makePoseStampedFromGraspFrame(pre_grasp_frame_gripper)
# safety check
is_safe = (GraspData.grasp_frame_safety_check(grasp_data.grasp_frame) and GraspData.grasp_frame_safety_check(pre_grasp_frame_gripper))
if not is_safe:
self.state.set_status("SAFETY_CHECK_FAILED")
return False
# run the ik for moving to pre-grasp location
graspLocationData = self.graspingParams[self.state.graspingLocation]
above_table_pre_grasp = graspLocationData['poses']['above_table_pre_grasp']
pre_grasp_ik_response = self.robotService.runIK(pre_grasp_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
pre_grasp_pose = pre_grasp_ik_response.joint_state.position
if not pre_grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# run the ik for moving to grasp location
# for now just do IK, otherwise use cartesian space plan with force guards
grasp_frame_ee_pose_stamped = self.makePoseStampedFromGraspFrame(grasp_data.grasp_frame)
grasp_ik_response = self.robotService.runIK(grasp_frame_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
grasp_pose = grasp_ik_response.joint_state.position
if not grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# store for later use
self.state.cache['grasp_ik_response'] = grasp_ik_response
self.state.cache['pre_grasp_ik_response'] = pre_grasp_ik_response
# move to pre-grasp position
# we do this using a position trajectory
print "moving to pre-grasp"
pre_grasp_speed = self.graspingParams['speed']['pre_grasp']
#### debugging
speed = pre_grasp_speed
if use_debug_speed:
speed = DEBUG_SPEED
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
speed)
self.state.set_status("PRE_GRASP")
print "at pre-grasp pose"
if stop_at_pre_grasp:
return
if use_cartesian_plan:
# move to grasp position using compliant cartesian plan
move_forward_distance = pre_grasp_distance + push_in_distance
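            # approach distance covers the pre-grasp standoff plus a small push-in so the
            # fingertips reach (or gently contact) the object before the gripper closes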
print "move_forward_distance", move_forward_distance
xyz_goal = move_forward_distance * np.array([1, 0, 0])
ee_frame_id = "iiwa_link_ee"
expressed_in_frame = ee_frame_id
            cartesian_grasp_speed = ee_speed_m_s
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
speed=cartesian_grasp_speed)
# add force guards
# -z (gripper) direction in frame iiwa_link_ee,
if force_threshold_magnitude is None:
force_threshold_magnitude = self.graspingParams['force_threshold_magnitude']
force_vector = force_threshold_magnitude * np.array([-1, 0, 0])
force_guard = control_utils.make_force_guard_msg(force_vector)
cartesian_traj_goal.force_guard.append(force_guard)
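            # the force guard is intended to stop the cartesian motion early if the measured
            # force along the guarded direction exceeds force_threshold_magnitude (i.e. on contact)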
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
action_client.wait_for_result()
result = action_client.get_result()
grasp_data.data['cartesian_trajectory_result'] = result
print "Cartesian Trajectory Result\n", result
else:
# move to grasp pose using standard IK
speed = self.graspingParams['speed']['grasp']
if use_debug_speed:
speed = DEBUG_SPEED
self.robotService.moveToJointPosition(grasp_pose,
maxJointDegreesPerSecond=
speed)
# record current location of gripper (in world frame)
# before closing the gripper
pos, quat = self.get_transform("iiwa_link_ee", "base")
T_world_ee = transformUtils.transformFromPose(pos, quat)
T_world_grasp = transformUtils.concatenateTransforms([self.graspToIiwaLinkEE, T_world_ee])
self.state.cache['gripper_frame_at_grasp'] = T_world_grasp
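        # cached so later place routines can compute how the rigidly-held object moves
        # relative to this grasp-time gripper pose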
has_object = False
if close_gripper:
print "closing gripper"
has_object = self.gripperDriver.closeGripper()
if has_object:
self.state.set_status("OBJECT_IN_GRIPPER")
print "object in gripper"
else:
self.state.set_status("GRASP_FAILED")
print "grasp failed"
return has_object
def execute_place(self, grasp_data=None, use_cartesian_plan=True):
if grasp_data is None:
grasp_data = self.state.grasp_data
# compute the pre-grasp frame
pre_grasp_distance = self.graspingParams['pre_grasp_distance']
pre_grasp_frame_gripper = grasp_data.compute_pre_grasp_frame(distance=pre_grasp_distance)
pre_grasp_ee_pose_stamped = self.makePoseStampedFromGraspFrame(pre_grasp_frame_gripper)
# run the ik for moving to pre-grasp location
graspLocationData = self.graspingParams[self.state.graspingLocation]
above_table_pre_grasp = graspLocationData['poses']['above_table_pre_grasp']
pre_grasp_ik_response = self.robotService.runIK(pre_grasp_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
pre_grasp_pose = pre_grasp_ik_response.joint_state.position
if not pre_grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# run the ik for moving to grasp location
# for now just do IK, otherwise use cartesian space plan with force guards
grasp_frame_ee_pose_stamped = self.makePoseStampedFromGraspFrame(grasp_data.grasp_frame)
grasp_ik_response = self.robotService.runIK(grasp_frame_ee_pose_stamped,
seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
grasp_pose = grasp_ik_response.joint_state.position
if not grasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# store for later use
self.state.cache['grasp_ik_response'] = grasp_ik_response
self.state.cache['pre_grasp_ik_response'] = pre_grasp_ik_response
# move to pre-grasp position
# we do this using a position trajectory
print "moving to pre-grasp"
pre_grasp_speed = self.graspingParams['speed']['pre_grasp']
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pre_grasp_speed)
self.state.set_status("PRE_GRASP")
print "at pre-grasp pose"
if use_cartesian_plan:
# move to grasp position using compliant cartesian plan
push_distance = self.graspingParams['grasp_push_in_distance']
move_forward_distance = pre_grasp_distance + push_distance
print "move_forward_distance", move_forward_distance
xyz_goal = move_forward_distance * np.array([1, 0, 0])
ee_frame_id = "iiwa_link_ee"
expressed_in_frame = ee_frame_id
cartesian_grasp_speed = self.graspingParams['speed']['cartesian_grasp']
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
speed=cartesian_grasp_speed)
# add force guards
# -z (gripper) direction in frame iiwa_link_ee,
force_magnitude = self.graspingParams['force_threshold_magnitude']
force_vector = force_magnitude * np.array([-1, 0, 0])
force_guard = control_utils.make_force_guard_msg(force_vector)
cartesian_traj_goal.force_guard.append(force_guard)
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
action_client.wait_for_result()
result = action_client.get_result()
grasp_data.data['cartesian_trajectory_result'] = result
print "Cartesian Trajectory Result\n", result
else:
# move to grasp pose using standard IK
speed = self.graspingParams['speed']['grasp']
self.robotService.moveToJointPosition(grasp_pose,
maxJointDegreesPerSecond=
speed)
self.gripperDriver.send_open_gripper_set_distance_from_current()
return True
def execute_place_new(self, T_W_ee, T_W_ee_approach, q_nom=None, use_cartesian_plan=False, use_debug_speed=False, force_threshold_magnitude=10, ee_speed_m_s=0.05):
"""
:param T_W_ee: ee location for place
:type T_W_ee:
:param T_W_ee_approach: ee location for approach
:type T_W_ee_approach:
:param q_nom: pose for use as nominal and seed for ik
:type q_nom:
        :param use_cartesian_plan: whether or not to use the cartesian plan
:type use_cartesian_plan:
:return:
:rtype:
"""
# safety check
is_safe = (GraspData.grasp_frame_safety_check(T_W_ee) and GraspData.grasp_frame_safety_check(T_W_ee_approach))
if not is_safe:
self.state.set_status("SAFETY_CHECK_FAILED")
return False
# run the ik for moving to pre-grasp location
debug_speed = 10
if q_nom is None:
graspLocationData = self.graspingParams[self.state.graspingLocation]
q_nom = graspLocationData['poses']['above_table_pre_grasp']
T_W_ee_vtk = transformUtils.getTransformFromNumpy(T_W_ee)
T_W_ee_approach_vtk = transformUtils.getTransformFromNumpy(T_W_ee_approach)
# pose stamped
frame_id = "base"
T_W_ee_approach_stamped = geometry_msgs.msg.PoseStamped()
T_W_ee_approach_stamped.pose = ros_numpy.msgify(geometry_msgs.msg.Pose,
T_W_ee_approach)
T_W_ee_approach_stamped.header.frame_id = frame_id
T_W_ee_approach_stamped.header.stamp = rospy.Time.now()
print T_W_ee_approach_stamped
pre_place_ik_response = self.robotService.runIK(T_W_ee_approach_stamped,
seedPose=q_nom,
nominalPose=q_nom)
pre_place_pose = pre_place_ik_response.joint_state.position
self.state.cache["pre_place_ik_response"] = pre_place_ik_response
if not pre_place_ik_response.success:
rospy.loginfo("pre place pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# run the ik for moving to grasp location
frame_id = "base"
T_W_ee_stamped = geometry_msgs.msg.PoseStamped()
T_W_ee_stamped.pose = ros_numpy.msgify(geometry_msgs.msg.Pose,
T_W_ee)
T_W_ee_stamped.header.frame_id = frame_id
T_W_ee_stamped.header.stamp = rospy.Time.now()
# for now just do IK, otherwise use cartesian space plan with force guards
place_ik_response = self.robotService.runIK(T_W_ee_stamped,
seedPose=q_nom,
nominalPose=q_nom)
place_pose = place_ik_response.joint_state.position
if not place_ik_response.success:
rospy.loginfo("place pose ik failed, returning")
self.state.set_status_ik_failed()
self.state.print_status()
return False
# store for later use
self.state.cache['place_ik_response'] = place_ik_response
# move to pre-grasp position
# we do this using a position trajectory
print "moving to approach pose"
# pre_grasp_speed = self.graspingParams['speed']['pre_grasp']
speed = self.graspingParams['speed']['grasp']
if use_debug_speed:
speed = debug_speed
self.robotService.moveToJointPosition(pre_place_pose,
maxJointDegreesPerSecond=
speed)
self.state.set_status("PRE_GRASP")
print "at approach pose"
if use_cartesian_plan:
# move to grasp position using compliant cartesian plan
# for now doesn't deal with orientations
xyz_approach = np.array(T_W_ee_approach_vtk.GetPosition())
xyz_place = np.array(T_W_ee_vtk.GetPosition())
distance = np.linalg.norm(xyz_place - xyz_approach)
duration = distance/ee_speed_m_s
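            # duration is chosen so the straight-line approach-to-place motion runs at
            # approximately ee_speed_m_s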
xyz_goal = xyz_place
ee_frame_id = "iiwa_link_ee"
base_frame_id = "base"
expressed_in_frame = base_frame_id
cartesian_grasp_speed = self.graspingParams['speed']['cartesian_grasp']
cartesian_traj_goal = \
control_utils.make_cartesian_trajectory_goal(xyz_goal,
ee_frame_id,
expressed_in_frame,
duration=duration)
# add force guards
# x_axis in frame iiwa_link_ee,
force_vector = force_threshold_magnitude * np.array([-1, 0, 0])
force_guard = control_utils.make_force_guard_msg(force_vector)
cartesian_traj_goal.force_guard.append(force_guard)
# z_axis in frame iiwa_link_ee
force_vector = force_threshold_magnitude * np.array([0, 0, 1])
force_guard = control_utils.make_force_guard_msg(force_vector)
cartesian_traj_goal.force_guard.append(force_guard)
action_client = self.robotService.cartesian_trajectory_action_client
action_client.send_goal(cartesian_traj_goal)
# wait for result
action_client.wait_for_result()
result = action_client.get_result()
self.state.cache['cartesian_traj_result'] = result
print "Cartesian Trajectory Result\n", result
else:
# move to grasp pose using standard IK
speed = self.graspingParams['speed']['grasp']
self.robotService.moveToJointPosition(place_pose,
maxJointDegreesPerSecond=
speed)
# now back off
# self.gripperDriver.send_open_gripper_set_distance_from_current()
return True
def attemptGrasp(self, graspFrame):
"""
Attempt a grasp
        :return: bool, whether or not the grasp was successful
"""
self._clear_cache()
self._cache["grasp_frame"] = graspFrame
preGraspFrame = transformUtils.concatenateTransforms([self.preGraspToGraspTransform, self.graspFrame])
graspLocationData = self.graspingParams[self.state.graspingLocation]
above_table_pre_grasp = graspLocationData['poses']['above_table_pre_grasp']
preGraspFramePoseStamped = self.makePoseStampedFromGraspFrame(preGraspFrame)
preGrasp_ik_response = self.robotService.runIK(preGraspFramePoseStamped, seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
if not preGrasp_ik_response.success:
rospy.loginfo("pre grasp pose ik failed, returning")
return False
graspFramePoseStamped = self.makePoseStampedFromGraspFrame(graspFrame)
preGraspPose = preGrasp_ik_response.joint_state.position
grasp_ik_response = self.robotService.runIK(graspFramePoseStamped, seedPose=preGraspPose,
nominalPose=preGraspPose)
self._cache['grasp_ik_response'] = grasp_ik_response
self._cache['pre_grasp_ik_response'] = preGrasp_ik_response
if not grasp_ik_response.success:
rospy.loginfo("grasp pose not reachable, returning")
return False
graspPose = grasp_ik_response.joint_state.position
# store for future use
self.preGraspFrame = preGraspFrame
self.graspFrame = graspFrame
self.gripperDriver.send_open_gripper_set_distance_from_current()
rospy.sleep(0.5) # wait for the gripper to open
self.robotService.moveToJointPosition(preGraspPose,
maxJointDegreesPerSecond=self.graspingParams['speed']['pre_grasp'])
self.robotService.moveToJointPosition(graspPose, maxJointDegreesPerSecond=self.graspingParams['speed']['grasp'])
objectInGripper = self.gripperDriver.closeGripper()
return objectInGripper
def vtkFrameToPoseMsg(self, vtkFrame):
poseDict = spartanUtils.poseFromTransform(vtkFrame)
poseMsg = rosUtils.ROSPoseMsgFromPose(poseDict)
poseStamped = geometry_msgs.msg.PoseStamped()
poseStamped.pose = poseMsg
poseStamped.header.frame_id = "base"
return poseStamped
"""
Moves the gripper up 15cm then moves home
"""
def pickupObject(self, stow=True, stow_pose=None):
endEffectorFrame = self.tfBuffer.lookup_transform(self.config['base_frame_id'],
self.config['end_effector_frame_id'], rospy.Time(0))
eeFrameVtk = spartanUtils.transformFromROSTransformMsg(endEffectorFrame.transform)
eeFrameVtk.PostMultiply()
eeFrameVtk.Translate(0, 0, self.config['pick_up_distance'])
vis.updateFrame(eeFrameVtk, 'pickup frame')
self._cache['eeFrameVtk'] = eeFrameVtk
self._cache['endEffectorFrame'] = endEffectorFrame
poseStamped = self.vtkFrameToPoseMsg(eeFrameVtk)
speed = 10 # joint degrees per second
params = self.getParamsForCurrentLocation()
above_table_pre_grasp = params['poses']['above_table_pre_grasp']
ik_response = self.robotService.runIK(poseStamped, seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
if ik_response.success:
self.robotService.moveToJointPosition(ik_response.joint_state.position,
maxJointDegreesPerSecond=self.graspingParams['speed']['slow'])
if stow_pose is None:
stow_pose = self.getStowPose()
# move to above_table_pre_grasp
# self.robotService.moveToJointPosition(above_table_pre_grasp, maxJointDegreesPerSecond=self.graspingParams['speed']['stow'])
# move to stow_pose
if stow:
self.robotService.moveToJointPosition(stow_pose,
maxJointDegreesPerSecond=self.graspingParams['speed']['stow'])
# release object
self.gripperDriver.send_open_gripper_set_distance_from_current()
rospy.sleep(0.5)
# move Home
self.moveHome()
def pickup_object(self):
"""
Just moves to pre-grasp frame
:return:
"""
if "pre_grasp_ik_response" not in self.state.cache:
return False
pre_grasp_ik_response = self.state.cache['pre_grasp_ik_response']
pre_grasp_pose = pre_grasp_ik_response.joint_state.position
pre_grasp_speed = self.graspingParams['speed']['stow']
self.robotService.moveToJointPosition(pre_grasp_pose,
maxJointDegreesPerSecond=
pre_grasp_speed)
def pickup_object_and_reorient_on_table(self):
"""
Places the object back on the table in a random orientation
Relies on variables in self._cache being set from when we picked up the object
:return:
"""
def set_position(t, pos):
_, quat = transformUtils.poseFromTransform(t)
return transformUtils.transformFromPose(pos, quat)
speed = self.config["object_interaction"]["speed"]
pick_up_distance = self.config["object_interaction"]["pickup_distance"]
drop_distance_above_grasp = self.config["object_interaction"]["drop_distance_above_grasp"]
rotate_speed = self.config["object_interaction"]["rotate_speed"]
drop_location = self.config["object_interaction"]["drop_location"] # z coordinate is overwritten later
endEffectorFrame = self.tfBuffer.lookup_transform(self.config['base_frame_id'],
self.config['end_effector_frame_id'], rospy.Time(0))
grasp_ee_frame = spartanUtils.transformFromROSTransformMsg(endEffectorFrame.transform)
# the frame of the end-effector after we have picked up the object
pickup_ee_frame_vtk = transformUtils.copyFrame(grasp_ee_frame)
pickup_ee_frame_vtk.PostMultiply()
pickup_ee_frame_vtk.Translate(0, 0, pick_up_distance)
vis.updateFrame(pickup_ee_frame_vtk, 'pickup frame', scale=0.15)
self._cache['grasped_ee_frame'] = endEffectorFrame
self._cache['pickup_ee_frame_vtk'] = pickup_ee_frame_vtk
poseStamped = self.vtkFrameToPoseMsg(pickup_ee_frame_vtk)
speed = 10 # joint degrees per second
params = self.getParamsForCurrentLocation()
above_table_pre_grasp = params['poses']['above_table_pre_grasp']
pickup_ik_response = self.robotService.runIK(poseStamped, seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
# compute the drop frame location
# This is done by rotating along the z-axis of the grasp frame by some random
# amount in [-90, 90] and then just releasing
rotate_x_angle = random.uniform(45, 90)
# if random.random() < 0.5:
# rotate_x_angle *= -1
pre_drop_frame = transformUtils.copyFrame(pickup_ee_frame_vtk)
pre_drop_frame.PreMultiply()
pre_drop_frame.RotateX(rotate_x_angle)
pre_drop_frame_pos, _ = transformUtils.poseFromTransform(pre_drop_frame)
pre_drop_frame_pos[0:2] = drop_location[0:2]
pre_drop_frame = set_position(pre_drop_frame, pre_drop_frame_pos)
grasp_ee_height = grasp_ee_frame.GetPosition()[2]
drop_frame_pos = copy.copy(pre_drop_frame_pos)
drop_frame_pos[2] = grasp_ee_height + drop_distance_above_grasp
print "drop_frame_pos", drop_frame_pos
drop_frame = transformUtils.copyFrame(pre_drop_frame)
drop_frame = set_position(drop_frame, drop_frame_pos)
vis.updateFrame(pre_drop_frame, "pre drop frame", scale=0.15)
vis.updateFrame(drop_frame, "drop frame", scale=0.15)
# run IK
pre_drop_frame_pose_stamped = self.vtkFrameToPoseMsg(pre_drop_frame)
pre_drop_ik_response = self.robotService.runIK(pre_drop_frame_pose_stamped, seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
drop_frame_pose_stamped = self.vtkFrameToPoseMsg(drop_frame)
drop_ik_response = self.robotService.runIK(drop_frame_pose_stamped, seedPose=above_table_pre_grasp,
nominalPose=above_table_pre_grasp)
if pickup_ik_response.success and pre_drop_ik_response.success and drop_ik_response.success:
# pickup object
self.robotService.moveToJointPosition(pickup_ik_response.joint_state.position,
maxJointDegreesPerSecond=speed)
# move to pre-drop
self.robotService.moveToJointPosition(pre_drop_ik_response.joint_state.position,
maxJointDegreesPerSecond=rotate_speed)
# move to drop location
self.robotService.moveToJointPosition(drop_ik_response.joint_state.position,
maxJointDegreesPerSecond=speed)
self.gripperDriver.send_open_gripper_set_distance_from_current()
rospy.sleep(0.5)
# move to pre-drop
self.robotService.moveToJointPosition(pre_drop_ik_response.joint_state.position,
maxJointDegreesPerSecond=rotate_speed)
self.moveHome()
else:
print "ik failed"
return False
return True
def planGraspAndPickupObject(self, stow=True):
self.collectSensorData()
self.requestGrasp()
self.moveHome()
result = self.waitForGenerateGraspsResult()
graspFound = self.processGenerateGraspsResult(result)
if not graspFound:
rospy.loginfo("no grasp found, returning")
return False
graspSuccessful = self.attemptGrasp(self.graspFrame)
if not graspSuccessful:
rospy.loginfo("grasp not successful returning")
return False
self.pickupObject(stow)
def graspAndStowObject(self):
graspSuccessful = self.attemptGrasp(self.graspFrame)
if not graspSuccessful:
rospy.loginfo("grasp not successful returning")
return False
stow = True
self.pickupObject(stow)
def askForCaptureScene(self):
"""
This function just waits for, then asks for the capture_scene service
provided by fusion_server.
This only collects fusion data without performing fusion, so it's
fast. See fusion_server for documentation.
"""
rospy.wait_for_service('capture_scene')
print "Found it!, starting capture..."
try:
capture_scene = rospy.ServiceProxy('capture_scene', fusion_server.srv.CaptureScene)
resp = capture_scene()
print "bag_filepath = %s" % resp.bag_filepath
rospy.loginfo("bag_filepath = %s", resp.bag_filepath)
except rospy.ServiceException, e:
print "Service call failed: %s" % e
def interact_with_object(self):
"""
Runs one iteration of picking up the object re-orienting it
and then placing it back on the table
"""
self.collectSensorData()
self.moveHome()
self.requestGrasp()
result = self.waitForGenerateGraspsResult()
graspFound = self.processGenerateGraspsResult(result)
if not graspFound:
print "no grasp found"
return False
grasp_successful = self.attemptGrasp(self.graspFrame)
if not grasp_successful:
print "grasp attemp was not successful"
return False
else:
print "grasped object"
reoriented_object = self.pickup_object_and_reorient_on_table()
if not reoriented_object:
print "didn't manage to reorient object"
return False
return True
def interactAndCollectFusionDataLoop(self, num_interactions):
"""
Attempts to pickup the object and move it around
:param num_interactions:
:return:
"""
for i in range(num_interactions):
success = self.interact_with_object()
if not success:
print "Human, please go move the object? \n"
print "If you don't want to keep doing this,"
print "then go implement a 'smack-the-object' primitive."
# in future:
# self.smackObject()
rospy.sleep(4.0)
rospy.sleep(1.0)
self.askForCaptureScene()
def testMoveToFrame(self):
pos = [0.51148583, 0.0152224, 0.50182436]
quat = [0.68751512, 0.15384615, 0.69882778, -0.12366916]
targetFrame = transformUtils.transformFromPose(pos, quat)
poseDict = spartanUtils.poseFromTransform(targetFrame)
poseMsg = rosUtils.ROSPoseMsgFromPose(poseDict)
poseStamped = geometry_msgs.msg.PoseStamped()
poseStamped.pose = poseMsg
poseStamped.header.frame_id = "base"
self.poseStamped = poseStamped
self.robotService.moveToCartesianPosition(poseStamped, 30)
def showGraspFrame(self):
vis.updateFrame(self.graspFrame, 'grasp frame', scale=0.15)
vis.updateFrame(self.getIiwaLinkEEFrameFromGraspFrame(self.graspFrame), 'iiwa_link_ee_grasp_frame', scale=0.15)
def showGripperFrame(self):
iiwaLinkEE = self.robotSystem.robotStateModel.getLinkFrame('iiwa_link_ee')
gripperFrame = transformUtils.concatenateTransforms([self.graspToIiwaLinkEE, iiwaLinkEE])
vis.updateFrame(gripperFrame, 'Gripper Frame', scale=0.15)
def show_gripper_fingertip_frame(self):
iiwaLinkEE = self.robotSystem.robotStateModel.getLinkFrame('iiwa_link_ee')
gripperFrame = transformUtils.concatenateTransforms([self.gripper_fingertip_to_iiwa_link_ee, iiwaLinkEE])
vis.updateFrame(gripperFrame, 'Gripper Fingertip Frame', scale=0.15)
def getParamsForCurrentLocation(self):
return self.graspingParams[self.state.graspingLocation]
def rotateGraspFrameToAlignWithNominal(self, graspFrame):
"""
Rotate the grasp frame to align with the nominal direction. In this case we want
the ZAxis of the grasp to be aligned with (1,0,0) in world frame.
If it's not aligned rotate it by 180 degrees about the x-axis of the grasp
:param graspFrame:
:return:
"""
graspFrameZAxis = graspFrame.TransformVector(0, 0, 1)
params = self.getParamsForCurrentLocation()
graspNominalDirection = params['grasp']['grasp_nominal_direction']
if (np.dot(graspFrameZAxis, graspNominalDirection) < 0):
graspFrame.PreMultiply()
graspFrame.RotateX(180)
def saveSensorDataToBagFile(self, pointCloudListMsg=None, filename=None, overwrite=True):
"""
Save sensor data to a bag file
"""
if pointCloudListMsg is None:
return
if filename is None:
filename = os.path.join(spartanUtils.get_sandbox_dir(), "rosbag", 'grasp_sensor_data_%s.bag' %(spartanUtils.get_current_time_unique_name()))
if not os.path.isdir(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
if overwrite and os.path.isfile(filename):
os.remove(filename)
bag = rosbag.Bag(filename, 'w')
bag.write('data', pointCloudListMsg)
bag.close()
def requestGrasp(self, pointCloudListMsg=None):
"""
Requests a grasp from the SpartanGrasp ROS service
Doesn't collect new sensor data
"""
# request the grasp via a ROS Action
if pointCloudListMsg is None:
pointCloudListMsg = self.pointCloudListMsg
rospy.loginfo("waiting for spartan grasp server")
self.generate_grasps_client.wait_for_server()
rospy.loginfo("requsting grasps spartan grasp server")
params = self.getParamsForCurrentLocation()
goal = spartan_grasp_msgs.msg.GenerateGraspsFromPointCloudListGoal()
goal.point_clouds = self.pointCloudListMsg
if 'grasp_volume' in params:
node = params['grasp_volume']
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(node)
goal.params.grasp_volume.append(rectangle)
if 'collision_volume' in params:
node = params['collision_volume']
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(node)
goal.params.collision_volume.append(rectangle)
if 'collision_objects' in params:
for key, val in params['collision_objects'].iteritems():
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(val)
goal.params.collision_objects.append(rectangle)
self.generate_grasps_client.send_goal(goal)
def call_spartan_grasp(self):
"""
Better named wrapper method
:return:
"""
self.requestGrasp()
def waitForGenerateGraspsResult(self):
rospy.loginfo("waiting for result")
self.generate_grasps_client.wait_for_result()
result = self.generate_grasps_client.get_result()
self.generate_grasps_result = result
rospy.loginfo("received result")
return result
def wait_for_grasp_3D_location_result(self):
"""
Waits for the result of the Grasp3DLocation action
:return:
"""
rospy.loginfo("waiting for result")
self.grasp_3D_location_client.wait_for_result()
result = self.grasp_3D_location_client.get_result()
self.grasp_3D_location_result = result # debugging
rospy.loginfo("received result")
return result
def request_grasp_3D_location(self, pointCloudListMsg=None, grasp_point=None):
"""
Requests a grasp3DLocation from the SpartanGrasp ROS service
Doesn't collect new sensor data
"""
# request the grasp via a ROS Action
if pointCloudListMsg is None:
pointCloudListMsg = self.pointCloudListMsg
rospy.loginfo("waiting for spartan grasp server")
self.grasp_3D_location_client.wait_for_server()
rospy.loginfo("requsting grasps spartan grasp server")
params = self.getParamsForCurrentLocation()
goal = spartan_grasp_msgs.msg.Grasp3DLocationGoal()
if grasp_point is None:
grasp_point = self.get_clicked_point()
        goal.grasp_point = grasp_point
goal.point_clouds = pointCloudListMsg
if 'grasp_volume' in params:
node = params['grasp_volume']
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(node)
goal.params.grasp_volume.append(rectangle)
if 'collision_volume' in params:
node = params['collision_volume']
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(node)
goal.params.collision_volume.append(rectangle)
if 'collision_objects' in params:
for key, val in params['collision_objects'].iteritems():
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(val)
goal.params.collision_objects.append(rectangle)
self.grasp_3D_location_client.send_goal(goal)
def request_spartan_grasp(self, clear_state=True):
"""
- collect sensor data
- send request to spartan grasp
:return: bool, GraspData
"""
self.moveHome()
self.collectSensorData()
self.moveHome()
self.requestGrasp()
result = self.waitForGenerateGraspsResult()
grasp_found, grasp_data = self.make_grasp_data_from_spartan_grasp_result(result)
if clear_state:
self.state.clear()
if grasp_found:
self.state.set_status("GRASP_FOUND")
self.state.grasp_data = grasp_data
else:
self.state.set_status("NO_GRASP_FOUND")
if grasp_found and self.debugMode:
# visualize the grasp frame
self.visualize_grasp(grasp_data)
return grasp_found, grasp_data
def grasp_3D_location_request(self, grasp_point, pointCloudListMsg=None):
"""
Sends a request to grasp a specific 3D location
:param : grasp_point is numpy array or list of size [3]
"""
params = self.getParamsForCurrentLocation()
goal = spartan_grasp_msgs.msg.Grasp3DLocationGoal()
if pointCloudListMsg is None:
goal.point_clouds = self.pointCloudListMsg
goal.grasp_point.x = grasp_point[0]
goal.grasp_point.y = grasp_point[1]
goal.grasp_point.z = grasp_point[2]
if 'grasp_volume' in params:
node = params['grasp_volume']
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(node)
goal.params.grasp_volume.append(rectangle)
if 'collision_volume' in params:
node = params['collision_volume']
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(node)
goal.params.collision_volume.append(rectangle)
if 'collision_objects' in params:
for key, val in params['collision_objects'].iteritems():
rectangle = GraspSupervisor.rectangleMessageFromYamlNode(val)
goal.params.collision_objects.append(rectangle)
self.grasp_3D_location_client.send_goal(goal)
def grasp_3D_location(self):
"""
Runs the grasping_3D_location pipeline
1. Checks to make sure there is a clicked_point
2. Collects sensor data
3. Sends off the request to spartan_grasp server
:return: None
"""
self.get_clicked_point()
self.collectSensorData()
self.request_grasp_3D_location()
self.moveHome()
result = self.wait_for_grasp_3D_location_result()
grasp_found = self.processGenerateGraspsResult(result)
def visualize_grasp(self, grasp_data):
stamp = rospy.Time.now()
vis.updateFrame(grasp_data.grasp_frame, "grasp frame", parent=self._vis_container,
scale=0.15)
point_cloud_msg = None
if 'point_cloud_msg' in grasp_data.data:
point_cloud_msg = grasp_data.data['point_cloud_msg']
# publish grasp to world transform
pose = director_utils.poseFromTransform(grasp_data.grasp_frame)
transform_msg = rosUtils.ROSTransformMsgFromPose(pose)
ts = geometry_msgs.msg.TransformStamped()
ts.header.stamp = stamp
ts.header.frame_id = self.config["base_frame_id"]
frame_id = "grasp_frame"
ts.child_frame_id = frame_id
ts.transform = transform_msg
# use the gripper stored in the grasp data if it exists
gripper = grasp_data.gripper
if gripper is None:
gripper = self._gripper
marker_array = gripper.make_rviz_visualization_msg(frame_id, stamp)
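        # publish the transform, markers and (optional) point cloud several times in a short
        # loop so RViz reliably picks them up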
for i in xrange(0, 5):
if point_cloud_msg is not None:
self.grasp_pointcloud_publisher.publish(point_cloud_msg)
self.rviz_marker_array_publisher.publish(marker_array)
self.tfBroadcaster.sendTransform(ts)
rospy.sleep(0.02)
def get_ggcnn_grasp(self):
"""
Looks up the ggcnn grasp frame from the tf server
Also need to think about gripper width etc.
:return: tuple (bool, dict)
:rtype:
"""
# just do a transform lookup
return_data = dict()
self.state.clear()
try:
ggcnn_grasp_frame_camera_axes = self.tfBuffer.lookup_transform(self.config["base_frame_id"],
self.ggcnn_grasp_frame_camera_axes_id,
rospy.Time.now(), rospy.Duration(2.0))
except Exception as e:
rospy.loginfo("Unable to get ggcnn grasp frame from tf, returning")
print(e)
return False, return_data
return_data['ggcnn_grasp_frame_camera_axes'] = ggcnn_grasp_frame_camera_axes
# make grasp object
T_W_GC = director_utils.transformFromROSTransformMsg(ggcnn_grasp_frame_camera_axes.transform)
grasp_data = GraspData.from_ggcnn_grasp_frame_camera_axes(T_W_GC)
# get the pointcloud associated with this grasp
point_cloud_msg = self.pointCloudSubscriber.waitForNextMessage()
grasp_data.data['point_cloud_msg'] = point_cloud_msg
# rotate the grasp to align with nominal
params = self.getParamsForCurrentLocation()
grasp_z_axis_nominal = np.array(params['grasp']['grasp_nominal_direction'])
grasp_data.rotate_grasp_frame_to_nominal(grasp_z_axis_nominal)
self.state.grasp_data = grasp_data
return_data['grasp_data'] = grasp_data
if self.debugMode:
# visualize the grasp frame
self.visualize_grasp(grasp_data)
return True, return_data
def start_bagging(self):
print "Waiting for 'start_bagging_fusion_data' service..."
rospy.wait_for_service('start_bagging_fusion_data')
print "Found it!, starting bagging..."
try:
start_bagging_fusion_data = rospy.ServiceProxy('start_bagging_fusion_data', StartBaggingFusionData)
resp1 = start_bagging_fusion_data()
# return resp1.data_filepath
except rospy.ServiceException, e:
print "Service call failed: %s" % e
def stop_bagging(self):
print "Waiting for 'stop_bagging_fusion_data' service..."
rospy.wait_for_service('stop_bagging_fusion_data')
print "Found it!, stopping bagging..."
try:
stop_bagging_fusion_data = rospy.ServiceProxy('stop_bagging_fusion_data', StopBaggingFusionData)
resp1 = stop_bagging_fusion_data()
return resp1.status
except rospy.ServiceException, e:
print "Service call failed: %s" % e
def testInThread(self):
"""
DEPRECATED
Runs the grasping pipeline
1. Move the robot to collect sensor data
2. Request the grasp (via a Ros Action)
3. Move Home
4. Wait for the response from SpartanGrasp
5. Process the result
"""
self.collectSensorData()
self.moveHome()
self.requestGrasp()
result = self.waitForGenerateGraspsResult()
graspFound = self.processGenerateGraspsResult(result)
return graspFound
def testMoveHome(self):
self.taskRunner.callOnThread(self.moveHome)
def test(self):
self.taskRunner.callOnThread(self.testInThread)
def test_grasp_3D_location(self):
"""
Calls grasp_3D_location in a thread
:return:
"""
self.taskRunner.callOnThread(self.grasp_3D_location)
def testAttemptGrasp(self):
self.taskRunner.callOnThread(self.attemptGrasp, self.graspFrame)
def testPickupObject(self):
self.taskRunner.callOnThread(self.pickupObject)
def test_pickup_object(self):
self.taskRunner.callOnThread(self.pickup_object)
def testGraspAndStowObject(self):
self.taskRunner.callOnThread(self.graspAndStowObject)
def testPipeline(self):
self.taskRunner.callOnThread(self.planGraspAndPickupObject)
def testCollectSensorData(self, **kwargs):
self.taskRunner.callOnThread(self.collectSensorData, **kwargs)
def testRequestGrasp(self):
self.taskRunner.callOnThread(self.requestGrasp)
def testInteractionLoop(self, num_interactions=3):
self.taskRunner.callOnThread(self.interactAndCollectFusionDataLoop, num_interactions)
def test_on_clicked_point(self):
self.taskRunner.callOnThread(self.on_clicked_point)
def testFindBestMatch(self):
self.taskRunner.callOnThread(self.findBestBatch)
def test_grasp_best_match(self):
self.taskRunner.callOnThread(self.grasp_best_match)
def test_find_best_match_and_grasp_and_stow(self):
self.taskRunner.callOnThread(self.find_best_match_and_grasp_and_stow)
def test_best_match_no_data(self):
self.taskRunner.callOnThread(self.request_best_match)
def test_reorient(self):
self.taskRunner.callOnThread(self.pickup_object_and_reorient_on_table)
def test_interact_with_object(self):
self.taskRunner.callOnThread(self.interact_with_object)
def test_start_bagging(self):
self.taskRunner.callOnThread(self.start_bagging)
def test_stop_bagging(self):
self.taskRunner.callOnThread(self.stop_bagging)
def test_execute_grasp(self):
self.taskRunner.callOnThread(self.execute_grasp)
def test_request_spartan_grasp(self, *args, **kwargs):
"""
Collect sensor data and send request to spartan_grasp
Visualize resulting grasp
:return:
"""
self.taskRunner.callOnThread(self.request_spartan_grasp, *args, **kwargs)
def test_run_poser(self, *args, **kwargs):
self.taskRunner.callOnThread(self.run_poser, *args, **kwargs)
def test_run_manipulate_object(self, *args, **kwargs):
self.taskRunner.callOnThread(self.run_manipulate_object, *args, **kwargs)
def test_run_category_manipulation_goal_estimation(self,*args, **kwargs):
self.taskRunner.callOnThread(self.run_category_manipulation_goal_estimation, *args, **kwargs)
def test_run_category_manipulation_pipeline(self, *args, **kwargs):
self.taskRunner.callOnThread(self.run_category_manipulation_pipeline, *args, **kwargs)
def test_run_keypoint_detection(self, *args, **kwargs):
self.taskRunner.callOnThread(self.run_keypoint_detection, *args, **kwargs)
def test_run_mug_on_rack_manipulation(self, *args, **kwargs):
self.taskRunner.callOnThread(self.run_mug_on_rack_manipulation, *args, **kwargs)
def test_retract_from_rack(self, *args, **kwargs):
self.taskRunner.callOnThread(self.retract_from_mug_rack, *args, **kwargs)
def test_retract_from_mug_shelf(self, *args, **kwargs):
self.taskRunner.callOnThread(self.retract_from_mug_shelf, *args, **kwargs)
def test_run_mug_shelf_manipulation(self, *args, **kwargs):
self.taskRunner.callOnThread(self.run_mug_shelf_manipulation, *args, **kwargs)
def test_run_shoe_manipulation(self, *args, **kwargs):
self.taskRunner.callOnThread(self.run_shoe_rack_manipulation, *args, **kwargs)
def loadDefaultPointCloud(self):
self.pointCloudListMsg = GraspSupervisor.getDefaultPointCloudListMsg()
def test_dev(self):
def thread_fun():
self.run_keypoint_detection(wait_for_result=False, move_to_stored_pose=True)
speed = self.graspingParams['speed']['fast']
self.moveHome(speed=speed)
self.run_category_manipulation_goal_estimation()
self.taskRunner.callOnThread(thread_fun)
def test_mug_shelf_3D_pipeline(self):
self.taskRunner.callOnThread(self.run_mug_shelf_3D_pipeline)
def test_mug_rack_pipeline(self, *args, **kwargs):
# time.sleep(10.0) # sleep for 10 seconds
self.taskRunner.callOnThread(self.run_mug_on_rack_pipeline, *args, **kwargs)
def test_shoe_rack_pipeline(self):
self.taskRunner.callOnThread(self.run_shoe_on_rack_pipeline)
def test_category_manip_pipeline(self):
"""
Runs the appropriate category manip pipeline
:return:
:rtype:
"""
raise NotImplementedError("")
def test_estimate_mug_rack_pose(self):
self.taskRunner.callOnThread(self.estimate_mug_rack_pose)
def r(self):
self.test_retract_from_rack()
@staticmethod
def rectangleMessageFromYamlNode(node):
msg = spartan_grasp_msgs.msg.Rectangle()
msg.min_pt = rosUtils.listToPointMsg(node['min_pt'])
msg.max_pt = rosUtils.listToPointMsg(node['max_pt'])
msg.pose = rosUtils.ROSPoseMsgFromPose(node)
return msg
@staticmethod
def makeDefault(**kwargs):
graspingParamsFile = os.path.join(spartanUtils.getSpartanSourceDir(), 'src', 'catkin_projects',
'station_config', 'RLG_iiwa_1', 'manipulation', 'params.yaml')
return GraspSupervisor(graspingParamsFile=graspingParamsFile, **kwargs)
@staticmethod
def getPointCloudListMsg(rosBagFilename):
bag = rosbag.Bag(rosBagFilename)
pointCloudListMsg = None
for topic, msg, t in bag.read_messages(topics=['data']):
pointCloudListMsg = msg
bag.close()
return pointCloudListMsg
@staticmethod
def getDefaultPointCloudListMsg():
spartanSourceDir = spartanUtils.getSpartanSourceDir()
# filename = "grasp_sensor_data.bag"
filename = "sr300_box.bag"
rosBagFilename = os.path.join(spartanSourceDir, 'data', 'rosbag', 'iiwa', filename)
return GraspSupervisor.getPointCloudListMsg(rosBagFilename)
| null | null | null | null | [
0
] |
1,342 | b59dfd97a2b52ddef4e37557ea96bff9edf34989 | <mask token>
| <mask token>
class Solution(object):
<mask token>
| <mask token>
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
hashmap = {}
for i, val in enumerate(inorder):
hashmap[val] = i
global post_index
post_index = len(inorder) - 1
def helper(left_index, right_index):
if left_index >= right_index:
return None
global post_index
root_val = postorder[post_index]
root = TreeNode(root_val)
post_index -= 1
index = hashmap[root_val]
root.right = helper(index + 1, right_index)
root.left = helper(left_index, index)
return root
return helper(0, len(inorder))
| <mask token>
global post_index
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
hashmap = {}
for i, val in enumerate(inorder):
hashmap[val] = i
global post_index
post_index = len(inorder) - 1
def helper(left_index, right_index):
if left_index >= right_index:
return None
global post_index
root_val = postorder[post_index]
root = TreeNode(root_val)
post_index -= 1
index = hashmap[root_val]
root.right = helper(index + 1, right_index)
root.left = helper(left_index, index)
return root
return helper(0, len(inorder))
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using recursion to construct binary tree from postorder and inorder traversal
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
global post_index
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
hashmap = {}
for i, val in enumerate(inorder):
hashmap[val] = i
global post_index
post_index = len(inorder)-1
def helper(left_index, right_index):
if left_index >= right_index:
return None
global post_index
root_val = postorder[post_index]
root = TreeNode(root_val)
post_index -= 1
index = hashmap[root_val]
root.right = helper(index+1, right_index)
root.left = helper(left_index, index)
return root
return helper(0, len(inorder))
| [
0,
1,
2,
3,
4
] |
1,343 | 22afc6b9df87ef1eba284da20a807366278c24d4 | <mask token>
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
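# Usage sketch (assumes a Jupyter notebook where ipywidgets render inline):
#   from IPython.display import display
#   display(rest_api())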
<mask token>
def direct_settings():
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
| <mask token>
def widget_box():
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(options=[('JRC RESTful API.', 0), (
'Direct access to database and object storage.', 1)], value=source,
layout={'width': 'max-content'})
sources_box = Box([Label(value='Data sources:'), sources])
info_api = Label('RESTful API Settings.')
info_direct = Label('Direct access settings')
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options], layout=Layout(border=
'1px solid black'))
info_general = Label(value='General settings:')
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
<mask token>
def direct_settings():
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
| <mask token>
def widget_box():
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(options=[('JRC RESTful API.', 0), (
'Direct access to database and object storage.', 1)], value=source,
layout={'width': 'max-content'})
sources_box = Box([Label(value='Data sources:'), sources])
info_api = Label('RESTful API Settings.')
info_direct = Label('Direct access settings')
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options], layout=Layout(border=
'1px solid black'))
info_general = Label(value='General settings:')
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
def direct():
tab_box = Tab(children=[settings.direct_conn(), direct_settings()])
tab_box.set_title(0, 'Connection')
tab_box.set_title(1, 'db Configuration')
return tab_box
def direct_settings():
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
| from ipywidgets import Text, VBox, HBox, Label, Password, RadioButtons, Button, Layout, Box, Tab, Output, Dropdown, FloatText, BoundedIntText, Combobox
from cbm.utils import config, data_options
from cbm.ipycbm.utils import settings
from cbm.sources import database
def widget_box():
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(options=[('JRC RESTful API.', 0), (
'Direct access to database and object storage.', 1)], value=source,
layout={'width': 'max-content'})
sources_box = Box([Label(value='Data sources:'), sources])
info_api = Label('RESTful API Settings.')
info_direct = Label('Direct access settings')
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options], layout=Layout(border=
'1px solid black'))
info_general = Label(value='General settings:')
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(value=values['api']['url'], placeholder='Add URL',
description='API URL:', disabled=False)
wt_user = Text(value=values['api']['user'], placeholder='Username',
description='API User:', disabled=False)
wt_pass = Password(value=values['api']['pass'], placeholder='******',
description='API Password:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog('API information is updated')
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
def direct():
tab_box = Tab(children=[settings.direct_conn(), direct_settings()])
tab_box.set_title(0, 'Connection')
tab_box.set_title(1, 'db Configuration')
return tab_box
def direct_settings():
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(options=[d for d in values['ds_conf']], value=ds_def,
description='Default:', disabled=False, layout=Layout(width='200px'))
dsy = Dropdown(options=[int(y) for y in values['ds_conf'][dsc.value][
'years']], value=int(ds_dye), description='Dataset year:', disabled
=False, layout=Layout(width='180px'))
btn_refresh = Button(layout=Layout(width='35px'), icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs', tooltip=
'Configure this dataset')
bt_new = Button(layout=Layout(width='40px'), icon='plus', tooltip=
'Add new dataset configuration')
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt', tooltip=
'Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(options=['1'], value='1', description='Database:',
disabled=False, layout=Layout(width='140px'))
try:
with open(f"{config.get_value(['paths', 'temp'])}tb_prefix", 'r'
) as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(value=code_value, placeholder='abc', options=[m for
m in data_options.eu_ms()] + [''], description='AOI code:',
ensure_option=False, disabled=False, layout=Layout(width=
'200px'), tooltip=
'Lowercase AOI code name for the dataset (5chr max).')
ds_year = BoundedIntText(value=int(dsy.value), min=1980, max=2100,
step=1, description='Dataset year:', disabled=False, layout=
Layout(width='180px'))
ds_desc = Text(value=values['ds_conf'][dsc_value]['desc'],
description='Description:', disabled=False)
info_map_text = ['Set default map view options. ',
'You can get automatically the dataset ', 'center coordinates.']
lat, lon = values['ds_conf'][dsc_value]['center'].split(',')
map_cent_lat = FloatText(value=float(lat), description='Lat:',
disabled=False, layout=Layout(width='160px'))
map_cent_lon = FloatText(value=float(lon), description='Lon:',
disabled=False, layout=Layout(width='160px'))
map_zoom = BoundedIntText(value=values['ds_conf'][dsc_value]['zoom'
], min=0, max=20, step=1, description='Zoom:', disabled=False,
layout=Layout(width='140px'))
bt_get_center = Button(layout=Layout(width='40px'), icon='bullseye',
tooltip='Get center point from database.')
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label('Map center: '), map_cent_lat, map_cent_lon,
bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one."""
)
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['dias_catalog'], get_tb_list(), False), description=
'DIAS catalog:', disabled=False)
tb_pr = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['parcels'], get_tb_list(), False), description=
'Parcels:', disabled=False)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['parcels_id'], get_pr_columns(), False), description
='Parcels ID:', disabled=False, layout=Layout(width='180px'))
tc_cn = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_names'], get_pr_columns(), False), description
='Crop names:', disabled=False, layout=Layout(width='180px'))
tc_cc = Dropdown(options=get_pr_columns(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'columns']['crop_codes'], get_pr_columns(), False), description
='Crop codes:', disabled=False, layout=Layout(width='180px'))
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['s2'], get_tb_list(), False), description=
'S2 signatures:', disabled=False)
tb_bs = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['bs'], get_tb_list(), False), description=
'Backscattering:', disabled=False)
tb_6c = Dropdown(options=get_tb_list(), value=config.autoselect(
values['ds_conf'][dsc_value]['years'][str(ds_year.value)][
'tables']['c6'], get_tb_list(), False), description=
'6 day coherence:', disabled=False)
wb_save = Button(description='Save', disabled=False, icon='save')
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(database.getTableCentroid(tb_pr.value)
['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode, 'db'], str(ds_db.value))
config.update(['ds_conf', dscode, 'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f'{map_cent_lat.value},{map_cent_lon.value}')
config.update(['ds_conf', dscode, 'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog('The configurations are saved.')
return VBox([info_config, ds_box, parcel_box, tb_dc, tb_s2, tb_bs,
tb_6c, Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog('Can not remove last configuration.')
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf'][str(dsc.value)
]['years']]
else:
outlog('Can not remove last configuration.')
wbox = VBox([Label('Datasets configurations.'), dsc_box, dsc_new_box,
progress])
return wbox
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : Konstantinos Anastasakis
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
from ipywidgets import (Text, VBox, HBox, Label, Password, RadioButtons,
Button, Layout, Box, Tab, Output, Dropdown,
FloatText, BoundedIntText, Combobox)
from cbm.utils import config, data_options
from cbm.ipycbm.utils import settings
from cbm.sources import database
def widget_box():
source = int(config.get_value(['set', 'data_source']))
sources = RadioButtons(
options=[
("JRC RESTful API.", 0),
("Direct access to database and object storage.", 1)
],
value=source,
layout={'width': 'max-content'}
)
sources_box = Box([
Label(value="Data sources:"),
sources]
)
info_api = Label("RESTful API Settings.")
info_direct = Label("Direct access settings")
view_options = VBox([info_direct])
if source == 0:
view_options.children = [info_api, rest_api()]
elif source == 1:
view_options.children = [info_direct, direct()]
def on_source_change(change):
view_options.children = []
if sources.value == 0:
view_options.children = [info_api, rest_api()]
elif sources.value == 1:
view_options.children = [info_direct, direct()]
config.update(['set', 'data_source'], str(sources.value))
sources.observe(on_source_change, 'value')
wbox_sources = VBox([sources_box, view_options],
layout=Layout(border='1px solid black'))
info_general = Label(value="General settings:")
wbox = VBox([wbox_sources, info_general, settings.widget_box()])
return wbox
def rest_api(mode=None):
""""""
values = config.read()
wt_url = Text(
value=values['api']['url'],
placeholder='Add URL',
description='API URL:',
disabled=False
)
wt_user = Text(
value=values['api']['user'],
placeholder='Username',
description='API User:',
disabled=False
)
wt_pass = Password(
value=values['api']['pass'],
placeholder='******',
description='API Password:',
disabled=False
)
wb_save = Button(
description='Save',
disabled=False,
icon='save'
)
progress = Output()
def outlog(*text):
with progress:
print(*text)
@wb_save.on_click
def wb_save_on_click(b):
config.update(['api', 'url'], str(wt_url.value))
config.update(['api', 'user'], str(wt_user.value))
if wt_pass.value != '':
config.update(['api', 'pass'], str(wt_pass.value))
outlog("API information is updated")
wbox = VBox([wt_url, wt_user, wt_pass, wb_save, progress])
return wbox
def direct():
# try:
tab_box = Tab(children=[settings.direct_conn(), direct_settings()])
tab_box.set_title(0, 'Connection')
tab_box.set_title(1, 'db Configuration')
# except:
# tab_box = Tab(children=[direct_conn()])
# tab_box.set_title(0, 'Connection')
# print("!WARNING! Can not load direct configuration settings.")
return tab_box
def direct_settings():
values = config.read()
ds_def = values['set']['ds_conf']
ds_dye = values['set']['ds_year']
if ds_def not in [d for d in values['ds_conf']]:
ds_def = [d for d in values['ds_conf']][0]
dsc = Dropdown(
options=[d for d in values['ds_conf']],
value=ds_def,
description='Default:',
disabled=False,
layout=Layout(width='200px')
)
dsy = Dropdown(
options=[int(y) for y in values['ds_conf'][dsc.value]['years']],
value=int(ds_dye),
description='Dataset year:',
disabled=False,
layout=Layout(width='180px')
)
btn_refresh = Button(
layout=Layout(width='35px'),
icon='fa-refresh')
@btn_refresh.on_click
def btn_refresh_on_click(b):
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
def on_dsc_change(change):
config.update(['set', 'ds_conf'], dsc.value)
values = config.read()
ds_c = values['set']['ds_conf']
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.observe(on_dsc_change, 'value')
def on_dsy_change(change):
config.update(['set', 'ds_year'], str(dsy.value))
dsy.observe(on_dsy_change, 'value')
bt_set = Button(layout=Layout(width='40px'), icon='cogs',
tooltip="Configure this dataset")
bt_new = Button(layout=Layout(width='40px'), icon='plus',
tooltip="Add new dataset configuration")
bt_rec = Button(layout=Layout(width='40px'), icon='trash-alt',
tooltip='Delete dataset configuration')
bt_rey = Button(layout=Layout(width='40px'), icon='trash-alt',
tooltip='Delete only the selected year.')
dsc_box = HBox([dsc, btn_refresh, bt_rec, dsy, bt_set, bt_rey, bt_new])
progress = Output()
def outlog(*text):
with progress:
print(*text)
def dsc_config(dsc_value):
values = config.read()
ds_db = Dropdown(
options=["1"],
value="1",
description='Database:',
disabled=False,
layout=Layout(width='140px')
)
try:
with open(f"{config.get_value(['paths','temp'])}tb_prefix", 'r') as f:
code_value = f.read()
except Exception:
code_value = dsc_value
ds_code = Combobox(
value=code_value,
placeholder='abc',
options=[m for m in data_options.eu_ms()]+[''],
description='AOI code:',
ensure_option=False,
disabled=False,
layout=Layout(width='200px'),
tooltip='Lowercase AOI code name for the dataset (5chr max).'
)
ds_year = BoundedIntText(
value=int(dsy.value),
min=1980,
max=2100,
step=1,
description='Dataset year:',
disabled=False,
layout=Layout(width='180px')
)
ds_desc = Text(
value=values['ds_conf'][dsc_value]['desc'],
description='Description:',
disabled=False
)
info_map_text = ["Set default map view options. ",
"You can get automatically the dataset ",
"center coordinates."]
lat, lon = values['ds_conf'][dsc_value]['center'].split(",")
map_cent_lat = FloatText(
value=float(lat),
description='Lat:',
disabled=False,
layout=Layout(width='160px')
)
map_cent_lon = FloatText(
value=float(lon),
description='Lon:',
disabled=False,
layout=Layout(width='160px')
)
map_zoom = BoundedIntText(
value=values['ds_conf'][dsc_value]['zoom'],
min=0,
max=20,
step=1,
description='Zoom:',
disabled=False,
layout=Layout(width='140px')
)
bt_get_center = Button(
layout=Layout(width='40px'),
icon='bullseye',
tooltip='Get center point from database.'
)
ds_box = HBox([ds_code, ds_year, ds_desc])
map_box = HBox([Label("Map center: "), map_cent_lat,
map_cent_lon, bt_get_center, map_zoom])
info_config = Label(
"""Change 'AOI code' value to create a new configuration set or
leave the same 'AOI code' value to configure the selected one.""")
db = int(values['ds_conf'][dsc_value]['db'])
def get_tb_list():
tbls = database.tables(db, None, False)
if tbls is None:
return []
else:
return tbls
tb_dc = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['dias_catalog'],
get_tb_list(), False),
description='DIAS catalog:',
disabled=False
)
tb_pr = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['parcels'],
get_tb_list(), False),
description='Parcels:',
disabled=False
)
def get_pr_columns():
try:
colms = database.table_columns(tb_pr.value, 1, None)
if colms is None:
return []
else:
return colms
except Exception:
return []
tc_id = Dropdown(
options=get_pr_columns(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['columns']['parcels_id'],
get_pr_columns(), False),
description='Parcels ID:',
disabled=False,
layout=Layout(width='180px')
)
tc_cn = Dropdown(
options=get_pr_columns(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['columns']['crop_names'],
get_pr_columns(), False),
description='Crop names:',
disabled=False,
layout=Layout(width='180px')
)
tc_cc = Dropdown(
options=get_pr_columns(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['columns']['crop_codes'],
get_pr_columns(), False),
description='Crop codes:',
disabled=False,
layout=Layout(width='180px')
)
def on_tb_pr_change(change):
tc_id.options = get_pr_columns()
tc_cn.options = get_pr_columns()
tc_cc.options = get_pr_columns()
tb_pr.observe(on_tb_pr_change, 'value')
parcel_box = HBox([tb_pr, tc_id, tc_cn, tc_cc])
tb_s2 = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['s2'],
get_tb_list(), False),
description='S2 signatures:',
disabled=False
)
tb_bs = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['bs'],
get_tb_list(), False),
description='Backscattering:',
disabled=False
)
tb_6c = Dropdown(
options=get_tb_list(),
value=config.autoselect(
values['ds_conf'][dsc_value]['years'][
str(ds_year.value)]['tables']['c6'],
get_tb_list(), False),
description='6 day coherence:',
disabled=False
)
wb_save = Button(
description='Save',
disabled=False,
icon='save'
)
@bt_get_center.on_click
def bt_get_center_on_click(b):
import json
center_json = json.loads(
database.getTableCentroid(tb_pr.value)['center'][0])
map_cent_lat.value = round(center_json['coordinates'][1], 2)
map_cent_lon.value = round(center_json['coordinates'][0], 2)
map_zoom.value = 10
@wb_save.on_click
def wb_save_on_click(b):
progress.clear_output()
dscode = ds_code.value
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'dias_catalog'], str(tb_dc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'parcels'], str(tb_pr.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'parcels_id'], str(tc_id.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_names'], str(tc_cn.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'columns', 'crop_codes'], str(tc_cc.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 's2'], str(tb_s2.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'bs'], str(tb_bs.value))
config.update(['ds_conf', dscode, 'years', str(ds_year.value),
'tables', 'c6'], str(tb_6c.value))
config.update(['ds_conf', dscode,
'db'], str(ds_db.value))
config.update(['ds_conf', dscode,
'desc'], str(ds_desc.value))
config.update(['ds_conf', dscode, 'center'],
f"{map_cent_lat.value},{map_cent_lon.value}")
config.update(['ds_conf', dscode,
'zoom'], str(map_zoom.value))
config.update(['set', 'ds_conf'], str(dscode))
config.update(['set', 'ds_year'], str(ds_year.value))
values = config.read()
ds_c = values['set']['ds_conf']
ds_y = values['set']['ds_year']
dsc.options = [d for d in values['ds_conf']]
dsy.options = [int(y) for y in values['ds_conf'][ds_c]['years']]
dsc.value = ds_c
dsy.value = int(ds_y)
outlog("The configurations are saved.")
return VBox([info_config, ds_box, parcel_box,
tb_dc, tb_s2, tb_bs, tb_6c,
Label(''.join(info_map_text)), map_box, wb_save])
dsc_new_box = HBox([])
@bt_set.on_click
def bt_set_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_new.on_click
def bt_new_on_click(b):
if dsc_new_box.children == ():
dsc_new_box.children = [dsc_config(dsc.value)]
bt_set.icon = 'chevron-up'
else:
dsc_new_box.children = ()
bt_set.icon = 'cogs'
@bt_rec.on_click
def bt_rec_on_click(b):
progress.clear_output()
if len(dsc.options) > 1:
config.delete(['ds_conf', dsc.value])
outlog(f"Dataset configuration '{dsc.value}' is deleted.")
values = config.read()
dsc.options = [d for d in values['ds_conf']]
else:
outlog("Can not remove last configuration.")
@bt_rey.on_click
def bt_rey_on_click(b):
progress.clear_output()
if len(dsy.options) > 1:
config.delete(['ds_conf', dsc.value, 'years', str(dsy.value)])
outlog(f"Year {dsy.value} of dataset '{dsc.value}' is deleted.")
values = config.read()
dsy.options = [int(y) for y in values['ds_conf']
[str(dsc.value)]['years']]
else:
outlog("Can not remove last configuration.")
wbox = VBox([Label("Datasets configurations."), dsc_box,
dsc_new_box, progress])
return wbox
| [
2,
3,
4,
5,
6
] |
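The settings widgets above lean on config.autoselect() to keep a previously saved table or column name only while it is still offered by the database; that helper lives in cbm.utils and is not shown in this file. The stand-in below is an assumption about its behaviour, written only to make the pattern concrete; the real implementation (and its fallback value) may differ.
def autoselect(saved_value, options, match_substring=False):
    # Hedged sketch (assumption): return the saved value only if it is still a
    # valid option; otherwise fall back to a containing option or to None.
    if saved_value in options:
        return saved_value
    if match_substring and saved_value:
        for opt in options:
            if saved_value in opt:
                return opt
    return None

print(autoselect("parcels_2020", ["parcels_2020", "parcels_2021"]))  # parcels_2020
print(autoselect("parcels_2019", ["parcels_2020", "parcels_2021"]))  # None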
1,344 | 83ce5ee4d2a18caeb364b74c3739015fc0e1474c | #!/usr/bin/env python
import rospy
import numpy as np
from sensor_msgs.msg import Image
import cv2, cv_bridge
from geometry_msgs.msg import Twist, Pose2D
from std_msgs.msg import String
import pytesseract as ocr
from PIL import Image as imagePil
import os
import time
from roseli.srv import CreateMap, CreateMapRequest
from roseli.srv import TagImage, TagImageResponse
from roseli.srv import ResetEnc, ResetEncRequest
from dynamic_reconfigure.server import Server
from roseli.cfg import ocr_tagConfig
class ReadTag:
def __init__(self):
self.bridge = cv_bridge.CvBridge()
self.twist=Twist()
self.image_server = rospy.Service('/cropTag', TagImage, self.image_callback) #/cropTag
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.range_param = Server(ocr_tagConfig, self.reconfigure)
self.string = String()
self._pose2d_ = Pose2D()
self.rate = rospy.Rate(1)
def reconfigure(self, config, level):
#print(config)
self.min_h = config.min_hue_ocr
self.min_s = config.min_saturation_ocr
self.min_v = config.min_value_ocr
self.max_h = config.max_hue_ocr
self.max_s = config.max_saturation_ocr
self.max_v = config.max_value_ocr
return config
def creating_map_client(self, pose2d, ip):
rospy.wait_for_service('/pose2D')
try:
create_map = rospy.ServiceProxy('/pose2D', CreateMap)
resp = CreateMapRequest(pose2d, ip)
return create_map(resp)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def reset_enc_func(self):
rospy.wait_for_service('/reset_enc_server')
try:
reset = rospy.ServiceProxy('/reset_enc_server', ResetEnc)
resp = ResetEncRequest()
return reset(resp)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def image_callback (self, msg):
self.twist.linear.x = 0
self.twist.angular.z = 0
self.cmd_vel_pub.publish(self.twist)
self.rate.sleep()
try:
img = self.bridge.imgmsg_to_cv2(msg.tag, "bgr8")
except cv_bridge.CvBridgeError as e:
print ("Error: Imagem da Tag nao recebida")
print(e)
lowerBound1=np.array([self.min_h, self.min_s, self.min_v]) #lower boundary of the HSV image
upperBound1=np.array([self.max_h, self.max_s, self.max_v]) #Upper boundary of the HSV image
img_HSV=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
imgThresholder=cv2.inRange(img_HSV,lowerBound1,upperBound1,1)
cv2.imshow('picamera', img)
cv2.waitKey(500)
kernel = np.ones((3, 3), np.uint8)
imgFilter=cv2.morphologyEx(imgThresholder, cv2.MORPH_DILATE, kernel)
#imgFilter=cv2.adaptiveThreshold(imgThresholder, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 1)
cv2.imshow('window_tag', imgFilter)
cv2.waitKey(500)
#cv2.destroyAllWindows()
#cv2.waitKey(1000)
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, imgFilter)
text = ocr.image_to_string(imagePil.open(filename),config="-c tessedit_char_whitelist=1234567890.")
os.remove(filename)
print(text)
separated= text.split(' ')
if (not len(separated) == 3):
print("It doesn't read a tag!")
return TagImageResponse()
else:
self._pose2d_.x = float(separated[0])
self._pose2d_.y = float(separated[1])
self._pose2d_.theta = float(separated[2])
_resp_ = self.creating_map_client(self._pose2d_, 0)
flag = self.reset_enc_func()
self.twist.linear.x = 0.3
self.twist.angular.z = 0
for x in range(0, 10):
self.cmd_vel_pub.publish(self.twist)
time.sleep(0.5)
return TagImageResponse()
if __name__=='__main__':
try:
rospy.init_node('readtag')
readtag = ReadTag()
rospy.spin()
except rospy.ROSInterruptException:
pass
 | null | null | null | null | [0] |
1,345 | 0b2fd671b99b7012a14b132db2322318873b826c | <mask token>
class Other_Operations_Stack(Stack):
def min_value(self):
min_value = self.peek()
for value in self._data:
if value < min_value:
min_value = value
self.pop()
return min_value
<mask token>
| <mask token>
class Other_Operations_Stack(Stack):
def min_value(self):
min_value = self.peek()
for value in self._data:
if value < min_value:
min_value = value
self.pop()
return min_value
<mask token>
content_stack.push(1)
content_stack.push(-2)
content_stack.push(3)
print(content_stack.min_value())
| <mask token>
class Other_Operations_Stack(Stack):
def min_value(self):
min_value = self.peek()
for value in self._data:
if value < min_value:
min_value = value
self.pop()
return min_value
content_stack = Other_Operations_Stack()
content_stack.push(1)
content_stack.push(-2)
content_stack.push(3)
print(content_stack.min_value())
| <mask token>
from stack import Stack
class Other_Operations_Stack(Stack):
def min_value(self):
min_value = self.peek()
for value in self._data:
if value < min_value:
min_value = value
self.pop()
return min_value
content_stack = Other_Operations_Stack()
content_stack.push(1)
content_stack.push(-2)
content_stack.push(3)
print(content_stack.min_value())
 | '''
Exercise 1: Extend the Stack class, which we wrote during the content
explanations, by adding a new function called min_value() that returns the
smallest integer value present in the stack.
'''
from stack import Stack
class Other_Operations_Stack(Stack):
def min_value(self):
min_value = self.peek()
for value in self._data:
if value < min_value:
min_value = value
self.pop()
return min_value
content_stack = Other_Operations_Stack()
content_stack.push(1)
content_stack.push(-2)
content_stack.push(3)
print(content_stack.min_value())  # output: -2
 | [2, 3, 4, 5, 6] |
1,346 | 68c2fd1d8ca9e1dd9373ca9f641c2920c87b2392 | <mask token>
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
| <mask token>
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
| <mask token>
def read_code(string):
"""
string should be a comma-separated string.
"""
code = defaultdict(int)
for i, x in enumerate(string.split(',')):
code[i] = int(x)
return code
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
| from collections import defaultdict
def read_code(string):
"""
string should be a comma-separated string.
"""
code = defaultdict(int)
for i, x in enumerate(string.split(',')):
code[i] = int(x)
return code
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
return self.code[value]
elif mode == 1:
return value
elif mode == 2:
return self.code[value + self.base]
def get_values(self, modes):
return [self.get_value(mode, self.code[self.idx + i]) for i, mode in
enumerate(modes, start=1)]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
self.code[param] = value
elif mode == 1:
raise ValueError
elif mode == 2:
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] +
values[1])
self.idx += 4
elif opcode == 2:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx + 3], values[0] *
values[1])
self.idx += 4
elif opcode == 3:
if inputs is None or input_idx >= len(inputs):
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx + 1], input_val)
self.idx += 2
elif opcode == 4:
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx + 1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 8:
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx + 3], compare_val)
self.idx += 4
elif opcode == 9:
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
raise ValueError
| # helper functions to handle intcode
from collections import defaultdict
def read_code(string):
"""
string should be a comma-separated string.
"""
code = defaultdict(int)
for i, x in enumerate(string.split(',')):
code[i] = int(x)
return code
def to_ascii(line):
"""
Writes a string as ASCII code. Appends a newline at the end.
"""
data = [ord(c) for c in line]
data.append(10)
return data
class IntCode:
def __init__(self, code):
self.code = code
self.base = 0
# instruction pointer
self.idx = 0
self.terminated = False
@staticmethod
def load_code(code_string):
return IntCode(read_code(code_string))
@staticmethod
def load_from_file(filename):
return IntCode.load_code(open(filename, 'r').read())
def copy(self):
"""
Returns a fresh copy of the code, **in the same state**.
"""
return IntCode(self.code.copy())
def get_value(self, mode, value):
if mode == 0:
# position mode
return self.code[value]
elif mode == 1:
# immediate mode
return value
elif mode == 2:
# relative mode
return self.code[value + self.base]
def get_values(self, modes):
return [
self.get_value(mode, self.code[self.idx + i])
for i, mode in enumerate(modes, start=1)
]
def get_modes(self, value, n_modes):
value = value // 100
modes = []
for _ in range(n_modes):
modes.append(int(value % 10))
value //= 10
return modes
def write_to(self, mode, param, value):
"""
write value to the location given by param, based on the mode.
"""
if mode == 0:
# position mode
self.code[param] = value
elif mode == 1:
# cannot be in immediate mode
raise ValueError
elif mode == 2:
# relative mode
self.code[param + self.base] = value
def run(self, inputs=None, print_outputs=False):
"""
Resumes the code from the current instruction, using the
given 'inputs' for any required inputs.
When it halts, the outputs from this run are returned.
If the program has terminated, the 'terminated' flag is set.
"""
input_idx = 0
outputs = []
while True:
# parse the value
value = self.code[self.idx]
opcode = value % 100
if opcode == 1:
# Day 2
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx+3], values[0] + values[1])
self.idx += 4
elif opcode == 2:
# Day 2
modes = self.get_modes(value, 3)
values = self.get_values(modes)
self.write_to(modes[2], self.code[self.idx+3], values[0] * values[1])
self.idx += 4
elif opcode == 3:
# Day 5
if inputs is None or input_idx >= len(inputs):
# halt if we are expecting an input, resume later
return outputs
input_val = inputs[input_idx]
input_idx += 1
modes = self.get_modes(value, 1)
self.write_to(modes[0], self.code[self.idx+1], input_val)
self.idx += 2
elif opcode == 4:
# Day 5
modes = self.get_modes(value, 1)
v = self.get_value(modes[0], self.code[self.idx+1])
outputs.append(v)
if print_outputs:
print(v)
self.idx += 2
elif opcode == 5:
# Day 5
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] != 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 6:
# Day 5
modes = self.get_modes(value, 2)
values = self.get_values(modes)
if values[0] == 0:
self.idx = values[1]
else:
self.idx += 3
elif opcode == 7:
# Day 5
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] < values[1] else 0
self.write_to(modes[2], self.code[self.idx+3], compare_val)
self.idx += 4
elif opcode == 8:
# Day 5
modes = self.get_modes(value, 3)
values = self.get_values(modes)
compare_val = 1 if values[0] == values[1] else 0
self.write_to(modes[2], self.code[self.idx+3], compare_val)
self.idx += 4
elif opcode == 9:
# Day 9
modes = self.get_modes(value, 1)
values = self.get_values(modes)
self.base += values[0]
self.idx += 2
elif opcode == 99:
self.terminated = True
return outputs
else:
                raise ValueError | [10, 11, 12, 13, 14] |
1,347 | b7ebee3c96fd9cd3d8ddc69838363925085a944d | <mask token>
| <mask token>
def rotate_left3(nums):
if len(nums) < 3:
return 0
nums.append(nums[0])
del nums[0]
return nums
| '''
Given an array of ints length 3, return an array with the elements "rotated
left" so {1, 2, 3} yields {2, 3, 1}.
rotate_left3([1, 2, 3]) → [2, 3, 1]
rotate_left3([5, 11, 9]) → [11, 9, 5]
rotate_left3([7, 0, 0]) → [0, 0, 7]
'''
#Was stuck on this for a while; still not familiar enough with basic list operations
#Reference: https://zhidao.baidu.com/question/1244520812319200859.html
def rotate_left3(nums):
if len(nums) < 3:
return 0
    nums.append(nums[0])# use nums.append() here, and del nums[index] below
del nums[0]
return nums
 | null | null | [0, 1, 2] |
1,348 | 51ef1c0f6a17e12b2324a80f962b2ce47cc05bcc | <mask token>
| def _get_single_variable(self, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, partition_info=None, reuse=None,
trainable=True, collections=None, caching_device=None, validate_shape=
True, use_resource=None):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError('If initializer is a constant, do not specify shape.')
should_check = reuse is not None
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
if should_check and not reuse:
tb = self._vars[name].op.traceback[::-1]
tb = [x for x in tb if 'tensorflow/python' not in x[0]][:3]
raise ValueError(
"""Variable %s already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:
%s"""
% (name, ''.join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError(
'Trying to share variable %s, but specified shape %s and found shape %s.'
% (name, shape, found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError(
'Trying to share variable %s, but specified dtype %s and found dtype %s.'
% (name, dtype_str, found_type_str))
return found_var
if should_check and reuse:
raise ValueError(
'Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?'
% name)
if not shape.is_fully_defined() and not initializing_from_value:
raise ValueError(
'Shape of a new variable (%s) must be fully defined, but instead was %s.'
% (name, shape))
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
init_val = lambda : initializer(shape.as_list(), dtype=dtype,
partition_info=partition_info)
variable_dtype = dtype.base_dtype
if use_resource is None:
use_resource = False
if use_resource:
v = resource_variable_ops.ResourceVariable(initial_value=init_val,
name=name, trainable=trainable, collections=collections,
caching_device=caching_device, dtype=variable_dtype,
validate_shape=validate_shape)
else:
v = variables.Variable(initial_value=init_val, name=name, trainable
=trainable, collections=collections, caching_device=
caching_device, dtype=variable_dtype, validate_shape=validate_shape
)
self._vars[name] = v
logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name,
format(shape), initializer)
if regularizer:
with ops.colocate_with(v.op):
with ops.name_scope(name + '/Regularizer/'):
loss = regularizer(v)
if loss is not None:
logging.vlog(1,
'Applied regularizer to %s and added the result %s to REGULARIZATION_LOSSES.'
, v.name, loss.name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss
)
return v
| def _get_single_variable(self, name, shape=None, dtype=dtypes.float32, initializer=None, regularizer=None, partition_info=None, reuse=None, trainable=True, collections=None, caching_device=None, validate_shape=True, use_resource=None):
'Get or create a single Variable (e.g. a shard or entire variable).\n\n See the documentation of get_variable above (ignore partitioning components)\n for details.\n\n Args:\n name: see get_variable.\n shape: see get_variable.\n dtype: see get_variable.\n initializer: see get_variable.\n regularizer: see get_variable.\n partition_info: _PartitionInfo object.\n reuse: see get_variable.\n trainable: see get_variable.\n collections: see get_variable.\n caching_device: see get_variable.\n validate_shape: see get_variable.\n use_resource: see get_variable.\n\n Returns:\n A Variable. See documentation of get_variable above.\n\n Raises:\n ValueError: See documentation of get_variable above.\n '
initializing_from_value = False
if ((initializer is not None) and (not callable(initializer))):
initializing_from_value = True
if ((shape is not None) and initializing_from_value):
raise ValueError('If initializer is a constant, do not specify shape.')
should_check = (reuse is not None)
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if (name in self._vars):
if (should_check and (not reuse)):
tb = self._vars[name].op.traceback[::(- 1)]
tb = [x for x in tb if ('tensorflow/python' not in x[0])][:3]
raise ValueError(('Variable %s already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:\n\n%s' % (name, ''.join(traceback.format_list(tb)))))
found_var = self._vars[name]
if (not shape.is_compatible_with(found_var.get_shape())):
raise ValueError(('Trying to share variable %s, but specified shape %s and found shape %s.' % (name, shape, found_var.get_shape())))
if (not dtype.is_compatible_with(found_var.dtype)):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError(('Trying to share variable %s, but specified dtype %s and found dtype %s.' % (name, dtype_str, found_type_str)))
return found_var
if (should_check and reuse):
raise ValueError(('Variable %s does not exist, or was not created with tf.get_variable(). Did you mean to set reuse=None in VarScope?' % name))
if ((not shape.is_fully_defined()) and (not initializing_from_value)):
raise ValueError(('Shape of a new variable (%s) must be fully defined, but instead was %s.' % (name, shape)))
if (initializer is None):
(initializer, initializing_from_value) = self._get_default_initializer(name=name, shape=shape, dtype=dtype)
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
init_val = (lambda : initializer(shape.as_list(), dtype=dtype, partition_info=partition_info))
variable_dtype = dtype.base_dtype
if (use_resource is None):
use_resource = False
if use_resource:
v = resource_variable_ops.ResourceVariable(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape)
else:
v = variables.Variable(initial_value=init_val, name=name, trainable=trainable, collections=collections, caching_device=caching_device, dtype=variable_dtype, validate_shape=validate_shape)
self._vars[name] = v
logging.vlog(1, 'Created variable %s with shape %s and init %s', v.name, format(shape), initializer)
if regularizer:
with ops.colocate_with(v.op):
with ops.name_scope((name + '/Regularizer/')):
loss = regularizer(v)
if (loss is not None):
logging.vlog(1, 'Applied regularizer to %s and added the result %s to REGULARIZATION_LOSSES.', v.name, loss.name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
    return v | null | null | [0, 1, 2] |
1,349 | 26a778f16cc50d1a8791fb672fb8907464865f3f | n = 5
# Look-and-say ("count and say"): build and print terms 2..n of the sequence.
a = '1'
if n == 1:
    print(a)
else:
    for i in range(2, n + 1):
        if i == 2:
            a = '11'
        else:
            next_a = ''
            count = 1
            for j in range(len(a) - 1):
                if a[j] == a[j + 1]:
                    count += 1
                else:
                    next_a = next_a + str(count) + a[j]
                    count = 1
            # append the final run of identical characters
            next_a = next_a + str(count) + a[-1]
            a = next_a
        print(a)
 | null | null | null | null | [0] |
1,350 | ab12468b1da20c896e3578091fd9ba245dcfa0a4 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('core', '0003_auto_20200310_1620')]
operations = [migrations.AddField(model_name='tag', name='name', field=
models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',
'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',
'HELPER FUNCTION')], default='code', max_length=100)), migrations.
AddField(model_name='tag', name='slug', field=models.CharField(
default='code', max_length=100, unique=True))]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('core', '0003_auto_20200310_1620')]
operations = [migrations.AddField(model_name='tag', name='name', field=
models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION',
'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION',
'HELPER FUNCTION')], default='code', max_length=100)), migrations.
AddField(model_name='tag', name='slug', field=models.CharField(
default='code', max_length=100, unique=True))]
| # Generated by Django 3.0.4 on 2020-03-11 17:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20200310_1620'),
]
operations = [
migrations.AddField(
model_name='tag',
name='name',
field=models.CharField(choices=[('METHOD', 'METHOD'), ('FUNCTION', 'FUNCTION'), ('OPERATOR', 'OPERATOR'), ('HELPER FUNCTION', 'HELPER FUNCTION')], default='code', max_length=100),
),
migrations.AddField(
model_name='tag',
name='slug',
field=models.CharField(default='code', max_length=100, unique=True),
),
]
 | [0, 1, 2, 3, 4] |
1,351 | 6e253747182716f84aa6326aafe15ff82be17378 | <mask token>
class MyDaemon(DaemonBase):
<mask token>
def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'):
self.api_url = api_url
self.monitor_port = monitor_port
super().__init__(pidfile, stdin, stdout, stderr)
@staticmethod
def get_host_addrs(family):
for nic, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family:
yield nic, snic.address
<mask token>
def tasks(self):
pnic_before = get_net_io_counters()
while 1:
time.sleep(60)
pnic_after = get_net_io_counters()
send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self
.get_host_addrs(socket.AF_INET) if n[0] == self.
monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':
get_mem_usage(), 'disk_perf': get_disk_usage(),
'disk_speed': get_disk_speed(), 'net_perf':
get_network_traffic(pnic_before, pnic_after)}
self.do_post(send_datas)
pnic_before = get_net_io_counters()
<mask token>
| <mask token>
class MyDaemon(DaemonBase):
<mask token>
def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'):
self.api_url = api_url
self.monitor_port = monitor_port
super().__init__(pidfile, stdin, stdout, stderr)
@staticmethod
def get_host_addrs(family):
for nic, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family:
yield nic, snic.address
<mask token>
def tasks(self):
pnic_before = get_net_io_counters()
while 1:
time.sleep(60)
pnic_after = get_net_io_counters()
send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self
.get_host_addrs(socket.AF_INET) if n[0] == self.
monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':
get_mem_usage(), 'disk_perf': get_disk_usage(),
'disk_speed': get_disk_speed(), 'net_perf':
get_network_traffic(pnic_before, pnic_after)}
self.do_post(send_datas)
pnic_before = get_net_io_counters()
def run(self):
sys.stdout.write('Daemon started with pid %s\n' % os.getpid())
_p = Process(target=self.tasks, daemon=True)
_p.start()
p = psutil.Process(_p.pid)
while 1:
current_cpu = p.cpu_percent()
current_mem = p.memory_percent()
if p.is_running() and (current_mem > 1 or current_cpu > 1):
p.terminate()
p.wait()
with open('/tmp/test_daemon.log', 'a') as f:
f.write('CPU: %s - MEM: %s - at: %s\n' % (current_cpu,
current_mem, time.ctime()))
_p = Process(target=self.tasks, daemon=True)
_p.start()
sys.stdout.write('The subprocess restart pid %s\n' % _p.pid)
p = psutil.Process(_p.pid)
time.sleep(60)
| <mask token>
class MyDaemon(DaemonBase):
"""Real Daemon class"""
def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'):
self.api_url = api_url
self.monitor_port = monitor_port
super().__init__(pidfile, stdin, stdout, stderr)
@staticmethod
def get_host_addrs(family):
for nic, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family:
yield nic, snic.address
def do_post(self, params):
data = json.dumps(params)
data = parse.urlencode({'data': data})
req = request.Request(self.api_url, data=data.encode('utf-8'))
try:
with request.urlopen(req, timeout=3) as resp:
return resp.status
except Exception as e:
with open('/tmp/test_daemon.err', 'a') as f:
print('%s at: %s' % (e, time.ctime()), file=f)
def tasks(self):
pnic_before = get_net_io_counters()
while 1:
time.sleep(60)
pnic_after = get_net_io_counters()
send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self
.get_host_addrs(socket.AF_INET) if n[0] == self.
monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':
get_mem_usage(), 'disk_perf': get_disk_usage(),
'disk_speed': get_disk_speed(), 'net_perf':
get_network_traffic(pnic_before, pnic_after)}
self.do_post(send_datas)
pnic_before = get_net_io_counters()
def run(self):
sys.stdout.write('Daemon started with pid %s\n' % os.getpid())
_p = Process(target=self.tasks, daemon=True)
_p.start()
p = psutil.Process(_p.pid)
while 1:
current_cpu = p.cpu_percent()
current_mem = p.memory_percent()
if p.is_running() and (current_mem > 1 or current_cpu > 1):
p.terminate()
p.wait()
with open('/tmp/test_daemon.log', 'a') as f:
f.write('CPU: %s - MEM: %s - at: %s\n' % (current_cpu,
current_mem, time.ctime()))
_p = Process(target=self.tasks, daemon=True)
_p.start()
sys.stdout.write('The subprocess restart pid %s\n' % _p.pid)
p = psutil.Process(_p.pid)
time.sleep(60)
| import os
import sys
import time
import json
import socket
from urllib import request, parse
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
import psutil
from daemon import DaemonBase
from host_performence import *
class MyDaemon(DaemonBase):
"""Real Daemon class"""
def __init__(self, api_url, monitor_port, pidfile, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'):
self.api_url = api_url
self.monitor_port = monitor_port
super().__init__(pidfile, stdin, stdout, stderr)
@staticmethod
def get_host_addrs(family):
for nic, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family:
yield nic, snic.address
def do_post(self, params):
data = json.dumps(params)
data = parse.urlencode({'data': data})
req = request.Request(self.api_url, data=data.encode('utf-8'))
try:
with request.urlopen(req, timeout=3) as resp:
return resp.status
except Exception as e:
with open('/tmp/test_daemon.err', 'a') as f:
print('%s at: %s' % (e, time.ctime()), file=f)
def tasks(self):
pnic_before = get_net_io_counters()
while 1:
time.sleep(60)
pnic_after = get_net_io_counters()
send_datas = {'type': 8, 'ip_addr': ''.join([n[1] for n in self
.get_host_addrs(socket.AF_INET) if n[0] == self.
monitor_port]), 'cpu_perf': get_cpu_percent(), 'mem_perf':
get_mem_usage(), 'disk_perf': get_disk_usage(),
'disk_speed': get_disk_speed(), 'net_perf':
get_network_traffic(pnic_before, pnic_after)}
self.do_post(send_datas)
pnic_before = get_net_io_counters()
def run(self):
sys.stdout.write('Daemon started with pid %s\n' % os.getpid())
_p = Process(target=self.tasks, daemon=True)
_p.start()
p = psutil.Process(_p.pid)
while 1:
current_cpu = p.cpu_percent()
current_mem = p.memory_percent()
if p.is_running() and (current_mem > 1 or current_cpu > 1):
p.terminate()
p.wait()
with open('/tmp/test_daemon.log', 'a') as f:
f.write('CPU: %s - MEM: %s - at: %s\n' % (current_cpu,
current_mem, time.ctime()))
_p = Process(target=self.tasks, daemon=True)
_p.start()
sys.stdout.write('The subprocess restart pid %s\n' % _p.pid)
p = psutil.Process(_p.pid)
time.sleep(60)
| import os
import sys
import time
import json
import socket
from urllib import request, parse
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
import psutil
from daemon import DaemonBase
from host_performence import *
class MyDaemon(DaemonBase):
"""Real Daemon class"""
def __init__(self,
api_url,
monitor_port,
pidfile,
stdin='/dev/null',
stdout='/dev/null',
stderr='/dev/null'):
self.api_url = api_url
self.monitor_port = monitor_port
super().__init__(pidfile, stdin, stdout, stderr)
@staticmethod
def get_host_addrs(family):
for nic, snics in psutil.net_if_addrs().items():
for snic in snics:
if snic.family == family:
yield (nic, snic.address)
def do_post(self, params):
data = json.dumps(params)
# Json Post
# headers = {'Content-Type': 'application/json'}
# req = request.Request(self.api_url, data=data.encode('utf-8'), headers=headers)
# Form Post eg. ?data=params&code=1
data = parse.urlencode({'data': data})
req = request.Request(self.api_url, data=data.encode('utf-8'))
try:
with request.urlopen(req, timeout=3) as resp:
# print(resp.read().decode('utf-8'))
return resp.status
except Exception as e:
with open('/tmp/test_daemon.err', 'a') as f:
print('%s at: %s' % (e, time.ctime()), file=f)
def tasks(self):
pnic_before = get_net_io_counters()
while 1:
time.sleep(60)
pnic_after = get_net_io_counters()
send_datas = {
'type': 8,
'ip_addr': ''.join([
n[1] for n in self.get_host_addrs(socket.AF_INET)
if n[0] == self.monitor_port
]),
'cpu_perf': get_cpu_percent(),
'mem_perf': get_mem_usage(),
'disk_perf': get_disk_usage(),
'disk_speed': get_disk_speed(),
'net_perf': get_network_traffic(pnic_before, pnic_after)
}
self.do_post(send_datas)
pnic_before = get_net_io_counters()
def run(self):
sys.stdout.write('Daemon started with pid %s\n' % os.getpid())
_p = Process(target=self.tasks, daemon=True)
_p.start()
p = psutil.Process(_p.pid)
while 1:
current_cpu = p.cpu_percent()
current_mem = p.memory_percent()
# print(current_cpu, current_mem, time.ctime(), p.pid, p.ppid())
if p.is_running() and (current_mem > 1 or current_cpu > 1):
p.terminate()
p.wait()
with open('/tmp/test_daemon.log', 'a') as f:
f.write('CPU: %s - MEM: %s - at: %s\n' %
(current_cpu, current_mem, time.ctime()))
_p = Process(target=self.tasks, daemon=True)
_p.start()
sys.stdout.write('The subprocess restart pid %s\n' % _p.pid)
p = psutil.Process(_p.pid)
            time.sleep(60) | [4, 5, 7, 8, 9] |
1,352 | 603708c830dadb6f1a3e5de00536d558f448b5fb | <mask token>
class Getter(object):
<mask token>
def __call__(self, url, **kwargs):
try:
return self._inner_call(url, **kwargs)
except (Timeout, ConnectionError, RequestException) as ex:
message = ex.response.reason if getattr(ex, 'response', None
) is not None else type(ex).__name__
raise GetterError(message, ex, not isinstance(ex, RequestException)
)
def _inner_call(self, url, **kwargs):
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 401:
if self.login():
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 404:
return
result.raise_for_status()
return result
class GetterError(Exception):
def __init__(self, message, cause, connection_error):
super(GetterError, self).__init__()
self.message = message
self.cause = cause
self.connection_error = connection_error
self.request = getattr(cause, 'request', None)
self.response = getattr(cause, 'response', None)
| <mask token>
class Getter(object):
def __init__(self, contenttype=None, login=lambda : False, session=None):
self.session = session or retryable_session()
self.login = login
if contenttype:
self.session.headers['Accept'] = contenttype
def __call__(self, url, **kwargs):
try:
return self._inner_call(url, **kwargs)
except (Timeout, ConnectionError, RequestException) as ex:
message = ex.response.reason if getattr(ex, 'response', None
) is not None else type(ex).__name__
raise GetterError(message, ex, not isinstance(ex, RequestException)
)
def _inner_call(self, url, **kwargs):
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 401:
if self.login():
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 404:
return
result.raise_for_status()
return result
class GetterError(Exception):
def __init__(self, message, cause, connection_error):
super(GetterError, self).__init__()
self.message = message
self.cause = cause
self.connection_error = connection_error
self.request = getattr(cause, 'request', None)
self.response = getattr(cause, 'response', None)
| <mask token>
def retryable_session(retries=3, backoff_factor=0.5, status_forcelist=(500,
502, 504, 520), session=None):
session = session or requests.Session()
retry = Retry(total=retries, read=retries, connect=retries,
backoff_factor=backoff_factor, status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class Getter(object):
def __init__(self, contenttype=None, login=lambda : False, session=None):
self.session = session or retryable_session()
self.login = login
if contenttype:
self.session.headers['Accept'] = contenttype
def __call__(self, url, **kwargs):
try:
return self._inner_call(url, **kwargs)
except (Timeout, ConnectionError, RequestException) as ex:
message = ex.response.reason if getattr(ex, 'response', None
) is not None else type(ex).__name__
raise GetterError(message, ex, not isinstance(ex, RequestException)
)
def _inner_call(self, url, **kwargs):
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 401:
if self.login():
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 404:
return
result.raise_for_status()
return result
class GetterError(Exception):
def __init__(self, message, cause, connection_error):
super(GetterError, self).__init__()
self.message = message
self.cause = cause
self.connection_error = connection_error
self.request = getattr(cause, 'request', None)
self.response = getattr(cause, 'response', None)
| import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, Timeout, RequestException
from requests.packages.urllib3.util.retry import Retry
def retryable_session(retries=3, backoff_factor=0.5, status_forcelist=(500,
502, 504, 520), session=None):
session = session or requests.Session()
retry = Retry(total=retries, read=retries, connect=retries,
backoff_factor=backoff_factor, status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class Getter(object):
def __init__(self, contenttype=None, login=lambda : False, session=None):
self.session = session or retryable_session()
self.login = login
if contenttype:
self.session.headers['Accept'] = contenttype
def __call__(self, url, **kwargs):
try:
return self._inner_call(url, **kwargs)
except (Timeout, ConnectionError, RequestException) as ex:
message = ex.response.reason if getattr(ex, 'response', None
) is not None else type(ex).__name__
raise GetterError(message, ex, not isinstance(ex, RequestException)
)
def _inner_call(self, url, **kwargs):
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 401:
if self.login():
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 404:
return
result.raise_for_status()
return result
class GetterError(Exception):
def __init__(self, message, cause, connection_error):
super(GetterError, self).__init__()
self.message = message
self.cause = cause
self.connection_error = connection_error
self.request = getattr(cause, 'request', None)
self.response = getattr(cause, 'response', None)
| import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, Timeout, RequestException
# import from `requests` because Jarvis / some platforms still have old urllib3
from requests.packages.urllib3.util.retry import Retry
def retryable_session(retries=3, backoff_factor=0.5, status_forcelist=(500, 502, 504, 520), session=None):
# from https://www.peterbe.com/plog/best-practice-with-retries-with-requests
session = session or requests.Session()
# 'Retry-After' 413/503/529 headers are respected by default
retry = Retry(total=retries, read=retries, connect=retries,
backoff_factor=backoff_factor, status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class Getter(object):
def __init__(self, contenttype=None, login=lambda: False, session=None):
self.session = session or retryable_session()
self.login = login
if contenttype:
self.session.headers['Accept'] = contenttype
def __call__(self, url, **kwargs):
try:
return self._inner_call(url, **kwargs)
except (Timeout, ConnectionError, RequestException) as ex:
message = ex.response.reason if getattr(ex, 'response', None) is not None else type(ex).__name__
raise GetterError(message, ex, not isinstance(ex, RequestException))
def _inner_call(self, url, **kwargs):
if 'timeout' not in kwargs:
kwargs['timeout'] = 20
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 401:
if self.login():
result = self.session.get(url, **kwargs)
if result is None:
return
if result.status_code == 404:
return
result.raise_for_status()
return result
class GetterError(Exception):
def __init__(self, message, cause, connection_error):
super(GetterError, self).__init__()
self.message = message
self.cause = cause
self.connection_error = connection_error
self.request = getattr(cause, 'request', None)
self.response = getattr(cause, 'response', None)
 | [5, 6, 7, 8, 9] |
1,353 | b8ab6b8c111876d6a781c82438f79307a849c47a | # -*- coding: utf-8 -*-
import requests
import Queue
import codecs
import os
import urllib
import base64
from threading import Thread
from Crypto.Cipher import AES
requests.packages.urllib3.disable_warnings()
def check(q):
while True:
try:
c = q.get()
user = c.split(':')[0]
passw = c.split(':')[1]
work = False
proxy = {
'http': '127.0.0.1:8888',
'https': '127.0.0.1:8888'
}
s = requests.session()
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/48.0.2564.97 Safari/537.36',
'Accept-Encoding': 'gzip',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'X-Requested-With': 'XMLHttpRequest'
}
r = s.get(
'https://www.namecheap.com/Cart/ajax/DomainSelection.ashx?action=checkuser&username={0}'.format(user),
verify=False,
headers=headers,
proxies=proxy
)
if 'UserExist' in r.text:
print user, 'is registered!'
f = open("registered.txt", "a")
f.write('{0}\n'.format(c))
f.close()
else:
print user, 'does not work!'
except Exception, e:
print e
raw_input("Please Send Me The Error Message!")
q.task_done()
def main():
with codecs.open('tocheck.txt', 'r', encoding='utf-8') as f:
users = f.readlines()
with codecs.open('regthreads.txt', 'r', encoding='utf-8') as f:
threads = f.read()
queue = Queue.Queue()
for _ in range(int(threads)):
worker = Thread(target=check, args=(queue,))
worker.start()
for user in users:
queue.put(user.strip().encode('ascii', 'ignore'))
if __name__ == '__main__':
try:
key = os.environ['COMPUTERNAME']
f = open("data.txt", "r")
data = f.read()
f.close()
while len(key) < 32:
key += 'A'
IV = 16 * '\x00'
mode = AES.MODE_CBC
encryptor = AES.new(key, mode, IV=IV)
l = base64.b16encode(encryptor.encrypt(data))
r = requests.get(
'http://divcentral.xyz/login.php?l={0}&serial={1}'.format(urllib.quote_plus(l), data)
)
if encryptor.decrypt(base64.b16decode(urllib.unquote(r.text))):
main()
else:
print 'Could not log in!'
except Exception, e:
print 'Error! PM Me with the message!'
print e
raw_input()
 | null | null | null | null | [0] |
1,354 | 3ac13cc74a7eabef686ceb9d9e46f2ef109a225e | #!/usr/bin/env python
# -*-coding:utf-8 -*-
from common import http_requests_get,is_domain
import re
class Crt(object):
def __init__(self, domain):
self.domain=domain
self.site='http://crt.sh/?q=%25.'
self.result=[]
def run(self):
url = self.site + self.domain
print url
try:
r=http_requests_get(url=url)
# self.result.append(re)
results = re.findall('</TD>\n <TD>(.*?)</TD>\n <TD><A',r.content,re.S)
for result in results:
if is_domain(result):
self.result.append(result)
return list(set(self.result))
except Exception,e:
            return self.result | null | null | null | null | [0] |
1,355 | af4d2380f92ea636594695e5ad4ba766d6874dd3 | <mask token>
def dadata_clean(method, data):
return dadata_proxy.dadata_clean(method, data)
def get_detailed_address(address):
from fw.utils.address_utils import get_detailed_address as _get_detailed_address
return _get_detailed_address(address)
def dadata_standardize_address(address):
from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address
return _dadata_standardize_address(address)
def get_ifns_by_address(address, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address
return _get_ifns_by_address(address, service_nalog_ru_url)
<mask token>
def get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,
internal_ifns_service, logger):
from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots
return _get_nalog_ru_time_slots(person_data, company_data,
internal_ifns_number, internal_ifns_service, logger)
def book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger):
from services.ifns.ifns_manager import book_ifns as _book_ifns
return _book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger)
def get_registration_ifns(service_nalog_ru_url, address_ifns=None):
from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns
return _get_registration_ifns(service_nalog_ru_url, address_ifns=
address_ifns)
def get_ifns_registrations(name, company_type='ooo', date_from=None,
date_to=None, service=None, ifns=None, service_nalog_ru_url=None,
logger=None):
from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations
return _get_ifns_registrations(name, company_type=company_type,
date_from=date_from, date_to=date_to, service=service, ifns=ifns,
service_nalog_ru_url=service_nalog_ru_url, logger=logger)
def check_car_policy(policy_series, policy_number, timeout=20.0):
from services.car_assurance.integration import check_car_policy as _check_car_policy
return _check_car_policy(policy_series, policy_number, timeout=timeout)
| <mask token>
def dadata_suggest(method, data):
return dadata_proxy.dadata_suggest(method, data)
def dadata_clean(method, data):
return dadata_proxy.dadata_clean(method, data)
def get_detailed_address(address):
from fw.utils.address_utils import get_detailed_address as _get_detailed_address
return _get_detailed_address(address)
def dadata_standardize_address(address):
from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address
return _dadata_standardize_address(address)
def get_ifns_by_address(address, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address
return _get_ifns_by_address(address, service_nalog_ru_url)
def get_ifns_by_code(tax_office, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code
return _get_ifns_by_code(tax_office, service_nalog_ru_url)
def get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,
internal_ifns_service, logger):
from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots
return _get_nalog_ru_time_slots(person_data, company_data,
internal_ifns_number, internal_ifns_service, logger)
def book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger):
from services.ifns.ifns_manager import book_ifns as _book_ifns
return _book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger)
def get_registration_ifns(service_nalog_ru_url, address_ifns=None):
from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns
return _get_registration_ifns(service_nalog_ru_url, address_ifns=
address_ifns)
def get_ifns_registrations(name, company_type='ooo', date_from=None,
date_to=None, service=None, ifns=None, service_nalog_ru_url=None,
logger=None):
from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations
return _get_ifns_registrations(name, company_type=company_type,
date_from=date_from, date_to=date_to, service=service, ifns=ifns,
service_nalog_ru_url=service_nalog_ru_url, logger=logger)
def check_car_policy(policy_series, policy_number, timeout=20.0):
from services.car_assurance.integration import check_car_policy as _check_car_policy
return _check_car_policy(policy_series, policy_number, timeout=timeout)
| <mask token>
cache = CacheWrapper()
def dadata_suggest(method, data):
return dadata_proxy.dadata_suggest(method, data)
def dadata_clean(method, data):
return dadata_proxy.dadata_clean(method, data)
def get_detailed_address(address):
from fw.utils.address_utils import get_detailed_address as _get_detailed_address
return _get_detailed_address(address)
def dadata_standardize_address(address):
from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address
return _dadata_standardize_address(address)
def get_ifns_by_address(address, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address
return _get_ifns_by_address(address, service_nalog_ru_url)
def get_ifns_by_code(tax_office, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code
return _get_ifns_by_code(tax_office, service_nalog_ru_url)
def get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,
internal_ifns_service, logger):
from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots
return _get_nalog_ru_time_slots(person_data, company_data,
internal_ifns_number, internal_ifns_service, logger)
def book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger):
from services.ifns.ifns_manager import book_ifns as _book_ifns
return _book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger)
def get_registration_ifns(service_nalog_ru_url, address_ifns=None):
from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns
return _get_registration_ifns(service_nalog_ru_url, address_ifns=
address_ifns)
def get_ifns_registrations(name, company_type='ooo', date_from=None,
date_to=None, service=None, ifns=None, service_nalog_ru_url=None,
logger=None):
from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations
return _get_ifns_registrations(name, company_type=company_type,
date_from=date_from, date_to=date_to, service=service, ifns=ifns,
service_nalog_ru_url=service_nalog_ru_url, logger=logger)
def check_car_policy(policy_series, policy_number, timeout=20.0):
from services.car_assurance.integration import check_car_policy as _check_car_policy
return _check_car_policy(policy_series, policy_number, timeout=timeout)
| from fw.api import dadata_proxy
from flask import current_app
from fw.cache.cache_wrapper import CacheWrapper
cache = CacheWrapper()
def dadata_suggest(method, data):
return dadata_proxy.dadata_suggest(method, data)
def dadata_clean(method, data):
return dadata_proxy.dadata_clean(method, data)
def get_detailed_address(address):
from fw.utils.address_utils import get_detailed_address as _get_detailed_address
return _get_detailed_address(address)
def dadata_standardize_address(address):
from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address
return _dadata_standardize_address(address)
def get_ifns_by_address(address, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address
return _get_ifns_by_address(address, service_nalog_ru_url)
def get_ifns_by_code(tax_office, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code
return _get_ifns_by_code(tax_office, service_nalog_ru_url)
def get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number,
internal_ifns_service, logger):
from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots
return _get_nalog_ru_time_slots(person_data, company_data,
internal_ifns_number, internal_ifns_service, logger)
def book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger):
from services.ifns.ifns_manager import book_ifns as _book_ifns
return _book_ifns(person_data, company_data, internal_ifns_number,
internal_ifns_service, dt, logger)
def get_registration_ifns(service_nalog_ru_url, address_ifns=None):
from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns
return _get_registration_ifns(service_nalog_ru_url, address_ifns=
address_ifns)
def get_ifns_registrations(name, company_type='ooo', date_from=None,
date_to=None, service=None, ifns=None, service_nalog_ru_url=None,
logger=None):
from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations
return _get_ifns_registrations(name, company_type=company_type,
date_from=date_from, date_to=date_to, service=service, ifns=ifns,
service_nalog_ru_url=service_nalog_ru_url, logger=logger)
def check_car_policy(policy_series, policy_number, timeout=20.0):
from services.car_assurance.integration import check_car_policy as _check_car_policy
return _check_car_policy(policy_series, policy_number, timeout=timeout)
| # -*- coding: utf-8 -*-
from fw.api import dadata_proxy
from flask import current_app
from fw.cache.cache_wrapper import CacheWrapper
cache = CacheWrapper()
def dadata_suggest(method, data):
return dadata_proxy.dadata_suggest(method, data)
def dadata_clean(method, data):
return dadata_proxy.dadata_clean(method, data)
def get_detailed_address(address):
from fw.utils.address_utils import get_detailed_address as _get_detailed_address
return _get_detailed_address(address)
def dadata_standardize_address(address):
from fw.utils.address_utils import dadata_standardize_address as _dadata_standardize_address
return _dadata_standardize_address(address)
def get_ifns_by_address(address, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_address as _get_ifns_by_address
return _get_ifns_by_address(address, service_nalog_ru_url)
def get_ifns_by_code(tax_office, service_nalog_ru_url):
from services.ifns.ifns_manager import get_ifns_by_code as _get_ifns_by_code
return _get_ifns_by_code(tax_office, service_nalog_ru_url)
def get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number, internal_ifns_service, logger):
from services.ifns.ifns_manager import get_nalog_ru_time_slots as _get_nalog_ru_time_slots
return _get_nalog_ru_time_slots(person_data, company_data, internal_ifns_number, internal_ifns_service, logger)
def book_ifns(person_data, company_data, internal_ifns_number, internal_ifns_service, dt, logger):
from services.ifns.ifns_manager import book_ifns as _book_ifns
return _book_ifns(person_data, company_data, internal_ifns_number, internal_ifns_service, dt, logger)
def get_registration_ifns(service_nalog_ru_url, address_ifns=None):
from services.ifns.ifns_manager import get_registration_ifns as _get_registration_ifns
return _get_registration_ifns(service_nalog_ru_url, address_ifns=address_ifns)
def get_ifns_registrations(name, company_type='ooo', date_from=None, date_to=None,
service=None, ifns=None, service_nalog_ru_url=None, logger=None):
from services.ifns.ifns_manager import get_ifns_registrations as _get_ifns_registrations
return _get_ifns_registrations(name, company_type=company_type, date_from=date_from, date_to=date_to,
service=service, ifns=ifns, service_nalog_ru_url=service_nalog_ru_url, logger=logger)
def check_car_policy(policy_series, policy_number, timeout=20.0):
from services.car_assurance.integration import check_car_policy as _check_car_policy
return _check_car_policy(policy_series, policy_number, timeout=timeout)
| [
9,
11,
12,
13,
14
] |
1,356 | a8ae59bb525c52ef852655f0ef1e32d96c8914d6 | <mask token>
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
<mask token>
| <mask token>
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find('.model') == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload all models'})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload {model}'.
format(model=model_name)})
| <mask token>
module_path = os.path.abspath(os.path.join(os.curdir))
model_path = os.path.join(module_path, 'model')
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find('.model') == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload all models'})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload {model}'.
format(model=model_name)})
| from src.handler.base.base_handler import BaseHandler
from src.utils.tools import read_model
from tornado.options import options
import os
module_path = os.path.abspath(os.path.join(os.curdir))
model_path = os.path.join(module_path, 'model')
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs
)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find('.model') == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload all models'})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={'message': 'server has reload {model}'.
format(model=model_name)})
| # -*- coding: utf-8 -*-
# @Time : 2019/3/5 上午9:55
# @Author : yidxue
from src.handler.base.base_handler import BaseHandler
from src.utils.tools import read_model
from tornado.options import options
import os
module_path = os.path.abspath(os.path.join(os.curdir))
model_path = os.path.join(module_path, 'model')
class ReloadModelHandler(BaseHandler):
def __init__(self, application, request, **kwargs):
super(ReloadModelHandler, self).__init__(application, request, **kwargs)
def do_action(self):
model_name = self.get_argument('modelname', None)
if model_name is None:
for model_name in os.listdir(model_path):
if model_name.find(".model") == -1:
continue
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={"message": "server has reload all models"})
else:
model = read_model(os.path.join(model_path, model_name))
options.models[model_name] = model
self.set_result(result={"message": "server has reload {model}".format(model=model_name)})
| [
2,
3,
4,
5,
6
] |
1,357 | 9ed674513bebe65ece538e9ce2b3945bb0c532cc | <mask token>
class GoogleTTS:
<mask token>
def check_google_connection(self):
try:
message = 'Hallo'
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de')
tts.save(filename)
os.remove(filename)
return True
except Exception as err:
logging.error('Error during Google TTS testing {}'.format(err))
return False
class SapiTTS:
def __init__(self):
self.engine = pyttsx3.init('sapi5')
rate = self.engine.getProperty('rate')
self.engine.setProperty('rate', rate - 20)
self.engine.setProperty('volume', 0.9)
def utter_voice_message(self, message):
try:
self.engine.say(message)
self.engine.runAndWait()
return 'TTS finished'
except Exception as err:
logging.error('Error during TTS {}'.format(err))
return None
<mask token>
| <mask token>
class GoogleTTS:
def utter_voice_message(self, message):
try:
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de', slow=False)
tts.save(filename)
media = pyglet.media.load(filename, streaming=True)
media.play()
time.sleep(media.duration)
return 'TTS finished'
except Exception as err:
logging.error('Error during TTS {}'.format(err))
return None
def check_google_connection(self):
try:
message = 'Hallo'
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de')
tts.save(filename)
os.remove(filename)
return True
except Exception as err:
logging.error('Error during Google TTS testing {}'.format(err))
return False
class SapiTTS:
def __init__(self):
self.engine = pyttsx3.init('sapi5')
rate = self.engine.getProperty('rate')
self.engine.setProperty('rate', rate - 20)
self.engine.setProperty('volume', 0.9)
def utter_voice_message(self, message):
try:
self.engine.say(message)
self.engine.runAndWait()
return 'TTS finished'
except Exception as err:
logging.error('Error during TTS {}'.format(err))
return None
if __name__ == '__main__':
gtts = GoogleTTS()
gtts.utter_voice_message('Guten Tag, mein Name ist Carina')
| <mask token>
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class GoogleTTS:
def utter_voice_message(self, message):
try:
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de', slow=False)
tts.save(filename)
media = pyglet.media.load(filename, streaming=True)
media.play()
time.sleep(media.duration)
return 'TTS finished'
except Exception as err:
logging.error('Error during TTS {}'.format(err))
return None
def check_google_connection(self):
try:
message = 'Hallo'
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de')
tts.save(filename)
os.remove(filename)
return True
except Exception as err:
logging.error('Error during Google TTS testing {}'.format(err))
return False
class SapiTTS:
def __init__(self):
self.engine = pyttsx3.init('sapi5')
rate = self.engine.getProperty('rate')
self.engine.setProperty('rate', rate - 20)
self.engine.setProperty('volume', 0.9)
def utter_voice_message(self, message):
try:
self.engine.say(message)
self.engine.runAndWait()
return 'TTS finished'
except Exception as err:
logging.error('Error during TTS {}'.format(err))
return None
if __name__ == '__main__':
gtts = GoogleTTS()
gtts.utter_voice_message('Guten Tag, mein Name ist Carina')
| import pyttsx3
import pyglet
import time
import logging
import os
from gtts import gTTS
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class GoogleTTS:
def utter_voice_message(self, message):
try:
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de', slow=False)
tts.save(filename)
media = pyglet.media.load(filename, streaming=True)
media.play()
time.sleep(media.duration)
return 'TTS finished'
except Exception as err:
logging.error('Error during TTS {}'.format(err))
return None
def check_google_connection(self):
try:
message = 'Hallo'
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de')
tts.save(filename)
os.remove(filename)
return True
except Exception as err:
logging.error('Error during Google TTS testing {}'.format(err))
return False
class SapiTTS:
def __init__(self):
self.engine = pyttsx3.init('sapi5')
rate = self.engine.getProperty('rate')
self.engine.setProperty('rate', rate - 20)
self.engine.setProperty('volume', 0.9)
def utter_voice_message(self, message):
try:
self.engine.say(message)
self.engine.runAndWait()
return 'TTS finished'
except Exception as err:
logging.error('Error during TTS {}'.format(err))
return None
if __name__ == '__main__':
gtts = GoogleTTS()
gtts.utter_voice_message('Guten Tag, mein Name ist Carina')
| import pyttsx3
import pyglet
import time
import logging
import os
from gtts import gTTS
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class GoogleTTS:
def utter_voice_message(self, message):
try:
# Google Text-to-Speech API - needs internet connectivity
#filename = ROOT_DIR + '\\temp_voice.mp3'
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de', slow=False)
tts.save(filename)
media = pyglet.media.load(filename, streaming=True)
media.play()
time.sleep(media.duration)
#os.remove(filename)
return 'TTS finished'
except Exception as err:
logging.error("Error during TTS {}".format(err))
return None
def check_google_connection(self):
try:
message = "Hallo"
filename = 'temp_voice.mp3'
tts = gTTS(text=message, lang='de')
tts.save(filename)
os.remove(filename)
return True
except Exception as err:
logging.error("Error during Google TTS testing {}".format(err))
return False
class SapiTTS:
def __init__(self):
# Sapi Microsoft speech engine - works offline
self.engine = pyttsx3.init('sapi5') # use SAPI5 engine
rate = self.engine.getProperty('rate')
self.engine.setProperty('rate', rate - 20) # words per minute
self.engine.setProperty('volume', 0.9)
def utter_voice_message(self, message):
try:
self.engine.say(message)
self.engine.runAndWait()
return 'TTS finished'
except Exception as err:
logging.error("Error during TTS {}".format(err))
return None
if __name__ == '__main__':
gtts = GoogleTTS()
gtts.utter_voice_message('Guten Tag, mein Name ist Carina')
| [
5,
7,
8,
9,
10
] |
1,358 | 3496216de9f6b7d9d3db69eb4d8f8c0fdcd5123c | <mask token>
class RSAGraphModel(SimpleLasagneModel):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class RSALearner(NeuralLearner):
def __init__(self, id=None):
self.get_options()
self.init_submodels(id)
super(RSALearner, self).__init__(id=id)
color_resolution = (self.options.listener_color_resolution if self.
options.listener else self.options.speaker_color_resolution)
self.seq_vec = SequenceVectorizer()
self.color_vec = BucketsVectorizer(color_resolution, hsv=self.
options.speaker_hsv)
def init_submodels(self, id=None):
id_tag = id + '/' if id else ''
self.get_options()
listener_classes = self.options.listener_class
speaker_classes = self.options.speaker_class
if len(listener_classes) != self.options.rsa_listeners:
assert len(listener_classes) == 1, len(listener_classes)
listener_classes = listener_classes * self.options.rsa_listeners
if len(speaker_classes) != self.options.rsa_speakers:
assert len(speaker_classes) == 1, len(speaker_classes)
speaker_classes = speaker_classes * self.options.rsa_speakers
self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (
id_tag, j)) for j in range(self.options.rsa_listeners)]
self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,
k)) for k in range(self.options.rsa_speakers)]
agents = self.listeners if self.options.listener else self.speakers
self.eval_agent = agents[self.options.eval_agent]
def predict(self, eval_instances, verbosity=0):
return self.eval_agent.predict(eval_instances, verbosity=verbosity)
def score(self, eval_instances, verbosity=0):
return self.eval_agent.score(eval_instances, verbosity=verbosity)
def predict_and_score(self, eval_instances, verbosity=0):
return self.eval_agent.predict_and_score(eval_instances, verbosity=
verbosity)
def on_iter_end(self, step, writer):
for agent in (self.speakers + self.listeners):
agent.on_iter_end(step, writer)
def sample_joint_smooth(self, num_samples):
return self.eval_agent.sample_joint_smooth(num_samples)
def _data_to_arrays(self, training_instances, init_vectorizer=False,
test=False, inverted=False):
input_arrays = []
target_arrays = []
if self.options.listener != inverted:
listener_dataset = training_instances
speaker_dataset = [inst.inverted() for inst in training_instances]
else:
listener_dataset = [inst.inverted() for inst in training_instances]
speaker_dataset = training_instances
for listener in self.listeners:
if not test:
listener.dataset = listener_dataset
inputs, targets = listener._data_to_arrays(listener_dataset,
test=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
for speaker in self.speakers:
if not test:
speaker.dataset = speaker_dataset
inputs, targets = speaker._data_to_arrays(speaker_dataset, test
=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
return input_arrays, target_arrays
def _build_model(self):
for agent in (self.listeners + self.speakers):
agent._build_model(RSASubModel)
self.build_aggregate_model()
def train_priors(self, training_instances, listener_data=False):
prior_class = LISTENER_PRIORS[self.options.listener_prior
] if self.options.listener else SPEAKER_PRIORS[self.options.
speaker_prior]
self.prior_emp = prior_class()
self.prior_smooth = prior_class()
self.prior_emp.train(training_instances, listener_data=listener_data)
self.prior_smooth.train(training_instances, listener_data=listener_data
)
for agent in (self.listeners + self.speakers):
agent.train_priors(training_instances, listener_data=listener_data)
def build_aggregate_model(self):
self.model = RSAGraphModel(self.listeners, self.speakers, self.
eval_agent)
self.prior_emp = AggregatePrior(self.listeners, self.speakers,
'prior_emp')
self.prior_smooth = AggregatePrior(self.listeners, self.speakers,
'prior_smooth')
def __getstate__(self):
return self.seq_vec, self.color_vec, [agent.__getstate__() for
agent in self.listeners + self.speakers]
def __setstate__(self, state):
self.seq_vec, self.color_vec, submodels = state
self.init_submodels()
for agent, substate in zip(self.listeners + self.speakers, submodels):
agent.unpickle(substate, RSASubModel)
self.build_aggregate_model()
<mask token>
| <mask token>
class RSAGraphModel(SimpleLasagneModel):
<mask token>
def params(self):
result = []
for listener in self.listeners:
result.extend(listener.params())
for speaker in self.speakers:
result.extend(speaker.params())
return result
def get_train_loss(self, target_vars, params):
for agent in self.speakers:
agent.model.build_sample_vars(len(self.listeners))
for agent in self.listeners:
agent.model.build_sample_vars(len(self.speakers))
monitored = self.get_est_loss(layer_by_layer=self.options.
layer_by_layer)
if self.options.grad_of_est:
est_grad, monitored_grads = self.get_grad_of_est(monitored, params)
else:
est_grad, monitored_grads = self.get_est_grad(params,
layer_by_layer=self.options.layer_by_layer)
monitored.update(monitored_grads)
synth_vars = [v for agent in self.listeners + self.speakers for v in
agent.model.all_synth_vars]
return monitored, est_grad, synth_vars
def get_est_loss(self, layer_by_layer=False):
def kl(agent_p, agent_q, other_idx):
if layer_by_layer:
return agent_q.loss_out(agent_q.model.sample_inputs_others[
other_idx], agent_q.model.sample_target_others[other_idx]
).mean()
else:
return (agent_p.log_joint_emp(agent_p.model.
sample_inputs_self, agent_p.model.sample_target_self) -
agent_q.log_joint_smooth(agent_q.model.
sample_inputs_others[other_idx], agent_q.model.
sample_target_others[other_idx])).mean()
id_tag_log = self.id + ': ' if self.id else ''
id_tag = self.id + '/' if self.id else ''
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || L)')
alpha_losses = [('%salpha_%s' % (id_tag, listener.id), alpha *
listener.loss_out().mean()) for alpha, listener in zip(self.
options.rsa_alpha, self.listeners)]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || S)')
beta_losses = [('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.
loss_out().mean()) for beta, speaker in zip(self.options.
rsa_beta, self.speakers)]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(L || S)')
mu_losses = [('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu *
kl(listener, speaker, j)) for mu, (listener, j, speaker, k) in
zip(self.options.rsa_mu, self.dyads())]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(S || L)')
nu_losses = [('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu *
kl(speaker, listener, k)) for nu, (listener, j, speaker, k) in
zip(self.options.rsa_nu, self.dyads())]
all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses
est_loss = t_sum(loss for tag, loss in all_sublosses)
monitored = OrderedDict([('loss', est_loss)])
if self.options.monitor_sublosses:
monitored.update(all_sublosses)
if self.options.monitor_activations:
for agent in (self.listeners + self.speakers):
for name, layer in get_named_layers(agent.l_out).iteritems():
monitored['activation/' + name] = get_output(layer)
return monitored
<mask token>
<mask token>
def dyads(self):
for j, listener in enumerate(self.listeners):
for k, speaker in enumerate(self.speakers):
yield listener, j, speaker, k
def minibatches(self, inputs, targets, batch_size, shuffle=False):
agents = self.listeners + self.speakers
batches = super(RSAGraphModel, self).minibatches(inputs, targets,
batch_size, shuffle=shuffle)
for dataset_inputs, dataset_targets, _synth in batches:
inputs_batch = []
targets_batch = []
synth_batch = []
filtered = self.filter_arrays(dataset_inputs, dataset_targets)
for agent, (agent_inputs, agent_targets) in zip(agents, filtered):
inputs_batch.extend(agent_inputs)
targets_batch.extend(agent_targets)
input_types = [a.shape for a in agent_inputs]
target_types = [a.shape for a in agent_targets]
if self.options.verbosity >= 8:
print('%s: %s -> %s' % (agent.id, input_types,
target_types))
listener_samples = [(listener.sample_joint_smooth(self.options.
listener_samples) if self.options.listener_sample_smoothed else
listener.sample_joint_emp(self.options.listener_samples)) for
listener in self.listeners]
speaker_samples = [(speaker.sample_joint_smooth(self.options.
speaker_samples) if self.options.speaker_sample_smoothed else
speaker.sample_joint_emp(self.options.listener_samples)) for
speaker in self.speakers]
for listener, samples in zip(self.listeners, listener_samples):
arrays = listener.model.data_to_synth_arrays(listener,
samples, speaker_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (listener.id, synth_types))
for speaker, samples in zip(self.speakers, speaker_samples):
arrays = speaker.model.data_to_synth_arrays(speaker,
samples, listener_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (speaker.id, synth_types))
yield inputs_batch, targets_batch, synth_batch
def filter_arrays(self, inputs, targets):
result = []
input_idx = 0
for agent, target in zip(self.listeners + self.speakers, targets):
assert input_idx + len(agent.model.input_vars) <= len(inputs), (
input_idx, len(agent.model.input_vars), len(inputs))
agent_inputs = inputs[input_idx:input_idx + len(agent.model.
input_vars)]
agent_targets = [target]
result.append((agent_inputs, agent_targets))
input_idx += len(agent.model.input_vars)
return result
class RSALearner(NeuralLearner):
def __init__(self, id=None):
self.get_options()
self.init_submodels(id)
super(RSALearner, self).__init__(id=id)
color_resolution = (self.options.listener_color_resolution if self.
options.listener else self.options.speaker_color_resolution)
self.seq_vec = SequenceVectorizer()
self.color_vec = BucketsVectorizer(color_resolution, hsv=self.
options.speaker_hsv)
def init_submodels(self, id=None):
id_tag = id + '/' if id else ''
self.get_options()
listener_classes = self.options.listener_class
speaker_classes = self.options.speaker_class
if len(listener_classes) != self.options.rsa_listeners:
assert len(listener_classes) == 1, len(listener_classes)
listener_classes = listener_classes * self.options.rsa_listeners
if len(speaker_classes) != self.options.rsa_speakers:
assert len(speaker_classes) == 1, len(speaker_classes)
speaker_classes = speaker_classes * self.options.rsa_speakers
self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (
id_tag, j)) for j in range(self.options.rsa_listeners)]
self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,
k)) for k in range(self.options.rsa_speakers)]
agents = self.listeners if self.options.listener else self.speakers
self.eval_agent = agents[self.options.eval_agent]
def predict(self, eval_instances, verbosity=0):
return self.eval_agent.predict(eval_instances, verbosity=verbosity)
def score(self, eval_instances, verbosity=0):
return self.eval_agent.score(eval_instances, verbosity=verbosity)
def predict_and_score(self, eval_instances, verbosity=0):
return self.eval_agent.predict_and_score(eval_instances, verbosity=
verbosity)
def on_iter_end(self, step, writer):
for agent in (self.speakers + self.listeners):
agent.on_iter_end(step, writer)
def sample_joint_smooth(self, num_samples):
return self.eval_agent.sample_joint_smooth(num_samples)
def _data_to_arrays(self, training_instances, init_vectorizer=False,
test=False, inverted=False):
input_arrays = []
target_arrays = []
if self.options.listener != inverted:
listener_dataset = training_instances
speaker_dataset = [inst.inverted() for inst in training_instances]
else:
listener_dataset = [inst.inverted() for inst in training_instances]
speaker_dataset = training_instances
for listener in self.listeners:
if not test:
listener.dataset = listener_dataset
inputs, targets = listener._data_to_arrays(listener_dataset,
test=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
for speaker in self.speakers:
if not test:
speaker.dataset = speaker_dataset
inputs, targets = speaker._data_to_arrays(speaker_dataset, test
=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
return input_arrays, target_arrays
def _build_model(self):
for agent in (self.listeners + self.speakers):
agent._build_model(RSASubModel)
self.build_aggregate_model()
def train_priors(self, training_instances, listener_data=False):
prior_class = LISTENER_PRIORS[self.options.listener_prior
] if self.options.listener else SPEAKER_PRIORS[self.options.
speaker_prior]
self.prior_emp = prior_class()
self.prior_smooth = prior_class()
self.prior_emp.train(training_instances, listener_data=listener_data)
self.prior_smooth.train(training_instances, listener_data=listener_data
)
for agent in (self.listeners + self.speakers):
agent.train_priors(training_instances, listener_data=listener_data)
def build_aggregate_model(self):
self.model = RSAGraphModel(self.listeners, self.speakers, self.
eval_agent)
self.prior_emp = AggregatePrior(self.listeners, self.speakers,
'prior_emp')
self.prior_smooth = AggregatePrior(self.listeners, self.speakers,
'prior_smooth')
def __getstate__(self):
return self.seq_vec, self.color_vec, [agent.__getstate__() for
agent in self.listeners + self.speakers]
def __setstate__(self, state):
self.seq_vec, self.color_vec, submodels = state
self.init_submodels()
for agent, substate in zip(self.listeners + self.speakers, submodels):
agent.unpickle(substate, RSASubModel)
self.build_aggregate_model()
<mask token>
| <mask token>
class RSAGraphModel(SimpleLasagneModel):
def __init__(self, listeners, speakers, eval_agent, id=None):
self.get_options()
self.listeners = listeners
self.speakers = speakers
self.eval_agent = eval_agent
input_vars = [v for listener in listeners for v in listener.model.
input_vars] + [v for speaker in speakers for v in speaker.model
.input_vars]
target_vars = [listener.model.target_var for listener in listeners] + [
speaker.model.target_var for speaker in speakers]
super(RSAGraphModel, self).__init__(input_vars, target_vars, l_out=
eval_agent.model.l_out, loss=None, optimizer=OPTIMIZERS[self.
options.rsa_optimizer], learning_rate=self.options.
rsa_learning_rate, id=id)
def params(self):
result = []
for listener in self.listeners:
result.extend(listener.params())
for speaker in self.speakers:
result.extend(speaker.params())
return result
def get_train_loss(self, target_vars, params):
for agent in self.speakers:
agent.model.build_sample_vars(len(self.listeners))
for agent in self.listeners:
agent.model.build_sample_vars(len(self.speakers))
monitored = self.get_est_loss(layer_by_layer=self.options.
layer_by_layer)
if self.options.grad_of_est:
est_grad, monitored_grads = self.get_grad_of_est(monitored, params)
else:
est_grad, monitored_grads = self.get_est_grad(params,
layer_by_layer=self.options.layer_by_layer)
monitored.update(monitored_grads)
synth_vars = [v for agent in self.listeners + self.speakers for v in
agent.model.all_synth_vars]
return monitored, est_grad, synth_vars
def get_est_loss(self, layer_by_layer=False):
def kl(agent_p, agent_q, other_idx):
if layer_by_layer:
return agent_q.loss_out(agent_q.model.sample_inputs_others[
other_idx], agent_q.model.sample_target_others[other_idx]
).mean()
else:
return (agent_p.log_joint_emp(agent_p.model.
sample_inputs_self, agent_p.model.sample_target_self) -
agent_q.log_joint_smooth(agent_q.model.
sample_inputs_others[other_idx], agent_q.model.
sample_target_others[other_idx])).mean()
id_tag_log = self.id + ': ' if self.id else ''
id_tag = self.id + '/' if self.id else ''
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || L)')
alpha_losses = [('%salpha_%s' % (id_tag, listener.id), alpha *
listener.loss_out().mean()) for alpha, listener in zip(self.
options.rsa_alpha, self.listeners)]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || S)')
beta_losses = [('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.
loss_out().mean()) for beta, speaker in zip(self.options.
rsa_beta, self.speakers)]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(L || S)')
mu_losses = [('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu *
kl(listener, speaker, j)) for mu, (listener, j, speaker, k) in
zip(self.options.rsa_mu, self.dyads())]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(S || L)')
nu_losses = [('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu *
kl(speaker, listener, k)) for nu, (listener, j, speaker, k) in
zip(self.options.rsa_nu, self.dyads())]
all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses
est_loss = t_sum(loss for tag, loss in all_sublosses)
monitored = OrderedDict([('loss', est_loss)])
if self.options.monitor_sublosses:
monitored.update(all_sublosses)
if self.options.monitor_activations:
for agent in (self.listeners + self.speakers):
for name, layer in get_named_layers(agent.l_out).iteritems():
monitored['activation/' + name] = get_output(layer)
return monitored
<mask token>
def get_grad_of_est(self, monitored, params):
grad_of_est = T.grad(monitored['loss'], params)
monitored_grads = OrderedDict()
if self.options.monitor_grads:
monitored_grads.update([('grad/' + param.name, grad) for param,
grad in zip(params, grad_of_est)])
if self.options.monitor_subgrads:
monitored_grads.update([(tag + '/' + param.name, grad) for tag,
subloss in monitored.iteritems() if tag != 'loss' for param,
grad in zip(params, T.grad(subloss, params,
disconnected_inputs='ignore'))])
return grad_of_est, monitored_grads
def dyads(self):
for j, listener in enumerate(self.listeners):
for k, speaker in enumerate(self.speakers):
yield listener, j, speaker, k
def minibatches(self, inputs, targets, batch_size, shuffle=False):
agents = self.listeners + self.speakers
batches = super(RSAGraphModel, self).minibatches(inputs, targets,
batch_size, shuffle=shuffle)
for dataset_inputs, dataset_targets, _synth in batches:
inputs_batch = []
targets_batch = []
synth_batch = []
filtered = self.filter_arrays(dataset_inputs, dataset_targets)
for agent, (agent_inputs, agent_targets) in zip(agents, filtered):
inputs_batch.extend(agent_inputs)
targets_batch.extend(agent_targets)
input_types = [a.shape for a in agent_inputs]
target_types = [a.shape for a in agent_targets]
if self.options.verbosity >= 8:
print('%s: %s -> %s' % (agent.id, input_types,
target_types))
listener_samples = [(listener.sample_joint_smooth(self.options.
listener_samples) if self.options.listener_sample_smoothed else
listener.sample_joint_emp(self.options.listener_samples)) for
listener in self.listeners]
speaker_samples = [(speaker.sample_joint_smooth(self.options.
speaker_samples) if self.options.speaker_sample_smoothed else
speaker.sample_joint_emp(self.options.listener_samples)) for
speaker in self.speakers]
for listener, samples in zip(self.listeners, listener_samples):
arrays = listener.model.data_to_synth_arrays(listener,
samples, speaker_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (listener.id, synth_types))
for speaker, samples in zip(self.speakers, speaker_samples):
arrays = speaker.model.data_to_synth_arrays(speaker,
samples, listener_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (speaker.id, synth_types))
yield inputs_batch, targets_batch, synth_batch
def filter_arrays(self, inputs, targets):
result = []
input_idx = 0
for agent, target in zip(self.listeners + self.speakers, targets):
assert input_idx + len(agent.model.input_vars) <= len(inputs), (
input_idx, len(agent.model.input_vars), len(inputs))
agent_inputs = inputs[input_idx:input_idx + len(agent.model.
input_vars)]
agent_targets = [target]
result.append((agent_inputs, agent_targets))
input_idx += len(agent.model.input_vars)
return result
class RSALearner(NeuralLearner):
def __init__(self, id=None):
self.get_options()
self.init_submodels(id)
super(RSALearner, self).__init__(id=id)
color_resolution = (self.options.listener_color_resolution if self.
options.listener else self.options.speaker_color_resolution)
self.seq_vec = SequenceVectorizer()
self.color_vec = BucketsVectorizer(color_resolution, hsv=self.
options.speaker_hsv)
def init_submodels(self, id=None):
id_tag = id + '/' if id else ''
self.get_options()
listener_classes = self.options.listener_class
speaker_classes = self.options.speaker_class
if len(listener_classes) != self.options.rsa_listeners:
assert len(listener_classes) == 1, len(listener_classes)
listener_classes = listener_classes * self.options.rsa_listeners
if len(speaker_classes) != self.options.rsa_speakers:
assert len(speaker_classes) == 1, len(speaker_classes)
speaker_classes = speaker_classes * self.options.rsa_speakers
self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (
id_tag, j)) for j in range(self.options.rsa_listeners)]
self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,
k)) for k in range(self.options.rsa_speakers)]
agents = self.listeners if self.options.listener else self.speakers
self.eval_agent = agents[self.options.eval_agent]
def predict(self, eval_instances, verbosity=0):
return self.eval_agent.predict(eval_instances, verbosity=verbosity)
def score(self, eval_instances, verbosity=0):
return self.eval_agent.score(eval_instances, verbosity=verbosity)
def predict_and_score(self, eval_instances, verbosity=0):
return self.eval_agent.predict_and_score(eval_instances, verbosity=
verbosity)
def on_iter_end(self, step, writer):
for agent in (self.speakers + self.listeners):
agent.on_iter_end(step, writer)
def sample_joint_smooth(self, num_samples):
return self.eval_agent.sample_joint_smooth(num_samples)
def _data_to_arrays(self, training_instances, init_vectorizer=False,
test=False, inverted=False):
input_arrays = []
target_arrays = []
if self.options.listener != inverted:
listener_dataset = training_instances
speaker_dataset = [inst.inverted() for inst in training_instances]
else:
listener_dataset = [inst.inverted() for inst in training_instances]
speaker_dataset = training_instances
for listener in self.listeners:
if not test:
listener.dataset = listener_dataset
inputs, targets = listener._data_to_arrays(listener_dataset,
test=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
for speaker in self.speakers:
if not test:
speaker.dataset = speaker_dataset
inputs, targets = speaker._data_to_arrays(speaker_dataset, test
=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
return input_arrays, target_arrays
def _build_model(self):
for agent in (self.listeners + self.speakers):
agent._build_model(RSASubModel)
self.build_aggregate_model()
def train_priors(self, training_instances, listener_data=False):
prior_class = LISTENER_PRIORS[self.options.listener_prior
] if self.options.listener else SPEAKER_PRIORS[self.options.
speaker_prior]
self.prior_emp = prior_class()
self.prior_smooth = prior_class()
self.prior_emp.train(training_instances, listener_data=listener_data)
self.prior_smooth.train(training_instances, listener_data=listener_data
)
for agent in (self.listeners + self.speakers):
agent.train_priors(training_instances, listener_data=listener_data)
def build_aggregate_model(self):
self.model = RSAGraphModel(self.listeners, self.speakers, self.
eval_agent)
self.prior_emp = AggregatePrior(self.listeners, self.speakers,
'prior_emp')
self.prior_smooth = AggregatePrior(self.listeners, self.speakers,
'prior_smooth')
def __getstate__(self):
return self.seq_vec, self.color_vec, [agent.__getstate__() for
agent in self.listeners + self.speakers]
def __setstate__(self, state):
self.seq_vec, self.color_vec, submodels = state
self.init_submodels()
for agent, substate in zip(self.listeners + self.speakers, submodels):
agent.unpickle(substate, RSASubModel)
self.build_aggregate_model()
<mask token>
| <mask token>
parser = config.get_options_parser()
parser.add_argument('--rsa_listeners', type=int, default=1, help=
'Number of listeners to use in RSA cooperative nets graph')
parser.add_argument('--rsa_speakers', type=int, default=1, help=
'Number of speakers to use in RSA cooperative nets graph')
parser.add_argument('--listener_class', default=['Listener'], choices=
LISTENERS.keys(), nargs='+', help=
'The name of the listener model to use in the RSA network.')
parser.add_argument('--speaker_class', default=['Speaker'], choices=
SPEAKERS.keys(), nargs='+', help=
'The name of the speaker model to use in the RSA network.')
parser.add_argument('--eval_agent', type=int, default=0, help=
'Index of the agent (listener/speaker) to use as the primary object of evaluation. Whether this agent is a listener or speaker will be inferred from the --listener flag.'
)
parser.add_argument('--rsa_optimizer', choices=OPTIMIZERS.keys(), default=
'rmsprop', help=
'The optimization (update) algorithm to use for RSA training.')
parser.add_argument('--rsa_learning_rate', type=float, default=0.1, help=
'The learning rate to use for RSA training.')
parser.add_argument('--rsa_alpha', type=float, nargs='*', default=[1.0],
help=
'Weights for the log-likelihood of the dataset according to the listeners. Provide as many values as there are listeners.'
)
parser.add_argument('--rsa_beta', type=float, nargs='*', default=[1.0],
help=
'Weights for the log-likelihood of the dataset according to the speakers. Provide as many values as there are speakers.'
)
parser.add_argument('--rsa_mu', type=float, nargs='*', default=[1.0], help=
'Weights for KL(L_j||S_k). Provide values to fill a rsa_listeners x rsa_speakers matrix, in row-major order (i.e. all speakers for first listener, then all speakers for second listener, etc.).'
)
parser.add_argument('--rsa_nu', type=float, nargs='*', default=[1.0], help=
'Weights for KL(S_k||L_j). Provide values to fill a rsa_listeners x rsa_speakers matrix, in row-major order (i.e. all speakers for first listener, then all speakers for second listener, etc.).'
)
parser.add_argument('--listener_samples', type=int, default=128, help=
'Number of samples to draw from the listener per minibatch.')
parser.add_argument('--speaker_samples', type=int, default=128, help=
'Number of samples to draw from the speaker per minibatch.')
parser.add_argument('--monitor_sublosses', type=config.boolean, default=
False, help=
'If `True`, return sub-losses for monitoring and write them to the TensorBoard events file. This will likely increase compilation time.'
)
parser.add_argument('--monitor_subgrads', type=config.boolean, default=
False, help=
'If `True`, return sub-gradients for monitoring and write them to the TensorBoard events file. This will likely increase compilation time.'
)
parser.add_argument('--grad_of_est', type=config.boolean, default=False,
help=
'If `True`, optimize using the gradient of the estimated loss; otherwise, use the manually-derived estimate of the gradient of the true loss.'
)
parser.add_argument('--layer_by_layer', type=config.boolean, default=False,
help=
'If `True`, train RSA agents layer-by-layer (only use the log-likelihood sub-gradients, equivalent to training each agent on data generated from the other agents); otherwise, use the gradient of the full RSA objective.'
)
parser.add_argument('--listener_sample_smoothed', type=config.boolean,
default=False, help=
'If `True`, take samples from the smoothed utterance prior; otherwise, sample from the empirical utterance prior.'
)
parser.add_argument('--speaker_sample_smoothed', type=config.boolean,
default=False, help=
'If `True`, take samples from the smoothed world prior; otherwise, sample from the empirical world prior.'
)
class AggregatePrior(object):
def __init__(self, listeners, speakers, prior_name='prior_emp'):
self.listeners = listeners
self.speakers = speakers
self.prior_name = prior_name
def train(self, training_instances, listener=False):
for agent in self.listeners:
getattr(agent, self.prior_name).train(training_instances,
listener=listener)
for agent in self.speakers:
getattr(agent, self.prior_name).train(training_instances,
listener=listener)
def apply(self, input_vars):
assert False, "AggregatePrior.apply shouldn't be called; only individual model priors are used in RSA coop nets model"
class RSASubModel(SimpleLasagneModel):
"""
A SimpleLasagneModel for a subcomponent of an RSA graph.
"""
def __init__(self, input_vars, target_vars, l_out, loss, optimizer,
learning_rate=0.001, id=None):
super(RSASubModel, self).__init__(input_vars, target_vars, l_out,
loss, optimizer, learning_rate=learning_rate, id=id)
if len(target_vars) != 1:
raise ValueError(
'target_vars should be a sequence of length 1, instead got %s'
% (target_vars,))
self.target_var = target_vars[0]
def build_sample_vars(self, num_other_agents):
self.sample_inputs_self = [v.type('%s_sample_self' % (v.name,)) for
v in self.input_vars]
self.sample_inputs_others = [[v.type('%s_sample_other%d' % (v.name,
i)) for v in self.input_vars] for i in range(num_other_agents)]
t = self.target_var
self.sample_target_self = t.type('%s_sample_self' % (t.name,))
self.sample_target_others = [t.type('%s_sample_other%d' % (t.name,
i)) for i in range(num_other_agents)]
self.all_synth_vars = self.sample_inputs_self + [self.
sample_target_self] + [v for o_inputs, o_target in zip(self.
sample_inputs_others, self.sample_target_others) for v in
o_inputs + [o_target]]
def data_to_synth_arrays(self, agent, samples_self, samples_others):
def flatten(arrays):
inputs, targets = arrays
return inputs + targets
return [arr for i, samples in enumerate([samples_self] +
samples_others) for arr in flatten(agent._data_to_arrays(
samples, inverted=i != 0))]
class RSAGraphModel(SimpleLasagneModel):
def __init__(self, listeners, speakers, eval_agent, id=None):
self.get_options()
self.listeners = listeners
self.speakers = speakers
self.eval_agent = eval_agent
input_vars = [v for listener in listeners for v in listener.model.
input_vars] + [v for speaker in speakers for v in speaker.model
.input_vars]
target_vars = [listener.model.target_var for listener in listeners] + [
speaker.model.target_var for speaker in speakers]
super(RSAGraphModel, self).__init__(input_vars, target_vars, l_out=
eval_agent.model.l_out, loss=None, optimizer=OPTIMIZERS[self.
options.rsa_optimizer], learning_rate=self.options.
rsa_learning_rate, id=id)
def params(self):
result = []
for listener in self.listeners:
result.extend(listener.params())
for speaker in self.speakers:
result.extend(speaker.params())
return result
def get_train_loss(self, target_vars, params):
for agent in self.speakers:
agent.model.build_sample_vars(len(self.listeners))
for agent in self.listeners:
agent.model.build_sample_vars(len(self.speakers))
monitored = self.get_est_loss(layer_by_layer=self.options.
layer_by_layer)
if self.options.grad_of_est:
est_grad, monitored_grads = self.get_grad_of_est(monitored, params)
else:
est_grad, monitored_grads = self.get_est_grad(params,
layer_by_layer=self.options.layer_by_layer)
monitored.update(monitored_grads)
synth_vars = [v for agent in self.listeners + self.speakers for v in
agent.model.all_synth_vars]
return monitored, est_grad, synth_vars
def get_est_loss(self, layer_by_layer=False):
def kl(agent_p, agent_q, other_idx):
if layer_by_layer:
return agent_q.loss_out(agent_q.model.sample_inputs_others[
other_idx], agent_q.model.sample_target_others[other_idx]
).mean()
else:
return (agent_p.log_joint_emp(agent_p.model.
sample_inputs_self, agent_p.model.sample_target_self) -
agent_q.log_joint_smooth(agent_q.model.
sample_inputs_others[other_idx], agent_q.model.
sample_target_others[other_idx])).mean()
id_tag_log = self.id + ': ' if self.id else ''
id_tag = self.id + '/' if self.id else ''
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || L)')
alpha_losses = [('%salpha_%s' % (id_tag, listener.id), alpha *
listener.loss_out().mean()) for alpha, listener in zip(self.
options.rsa_alpha, self.listeners)]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || S)')
beta_losses = [('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.
loss_out().mean()) for beta, speaker in zip(self.options.
rsa_beta, self.speakers)]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(L || S)')
mu_losses = [('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu *
kl(listener, speaker, j)) for mu, (listener, j, speaker, k) in
zip(self.options.rsa_mu, self.dyads())]
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(S || L)')
nu_losses = [('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu *
kl(speaker, listener, k)) for nu, (listener, j, speaker, k) in
zip(self.options.rsa_nu, self.dyads())]
all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses
est_loss = t_sum(loss for tag, loss in all_sublosses)
monitored = OrderedDict([('loss', est_loss)])
if self.options.monitor_sublosses:
monitored.update(all_sublosses)
if self.options.monitor_activations:
for agent in (self.listeners + self.speakers):
for name, layer in get_named_layers(agent.l_out).iteritems():
monitored['activation/' + name] = get_output(layer)
return monitored
def get_est_grad(self, params, layer_by_layer=False):
def mean_weighted_grad(weights, loss):
return T.Lop(loss, params, weights / T.cast(weights.shape[0],
'float32'), disconnected_inputs='ignore')
def mean_grad(loss):
return T.grad(loss.mean(), params, disconnected_inputs='ignore')
id_tag = self.id + ': ' if self.id else ''
if self.options.verbosity >= 4:
print(id_tag + 'grad: alpha')
all_subgrads = [('grad_alpha/%s' % (listener.id,), mean_grad(alpha *
listener.loss_out())) for alpha, listener in zip(self.options.
rsa_alpha, self.listeners)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: beta')
all_subgrads.extend([('grad_beta/%s' % (speaker.id,), mean_grad(
beta * speaker.loss_out())) for beta, speaker in zip(self.
options.rsa_beta, self.speakers)])
if self.options.verbosity >= 4:
print(id_tag + 'grad: nu co-training')
all_subgrads.extend([('grad_nu_co/%s_%s' % (listener.id, speaker.id
), mean_grad(nu * listener.loss_out(listener.model.
sample_inputs_others[k], listener.model.sample_target_others[k]
))) for nu, (listener, j, speaker, k) in zip(self.options.
rsa_nu, self.dyads())])
if self.options.verbosity >= 4:
print(id_tag + 'grad: mu co-training')
all_subgrads.extend([('grad_mu_co/%s_%s' % (listener.id, speaker.id
), mean_grad(mu * speaker.loss_out(speaker.model.
sample_inputs_others[j], speaker.model.sample_target_others[j])
)) for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu,
self.dyads())])
if not layer_by_layer:
if self.options.verbosity >= 4:
print(id_tag + 'grad: mu regularizer')
all_subgrads.extend([('grad_mu_reg/%s_%s' % (listener.id,
speaker.id), mean_weighted_grad(mu * (1 + listener.
log_joint_emp(listener.model.sample_inputs_self, listener.
model.sample_target_self) - speaker.log_joint_smooth(
speaker.model.sample_inputs_others[j], speaker.model.
sample_target_others[j])), listener.loss_out(listener.model
.sample_inputs_self, listener.model.sample_target_self))) for
mu, (listener, j, speaker, k) in zip(self.options.rsa_mu,
self.dyads())])
if self.options.verbosity >= 4:
print(id_tag + 'grad: nu regularizer')
all_subgrads.extend([('grad_nu_reg/%s_%s' % (listener.id,
speaker.id), mean_weighted_grad(nu * (1 + speaker.
log_joint_emp(speaker.model.sample_inputs_self, speaker.
model.sample_target_self) - listener.log_joint_smooth(
listener.model.sample_inputs_others[k], listener.model.
sample_target_others[k])), speaker.loss_out(speaker.model.
sample_inputs_self, speaker.model.sample_target_self))) for
nu, (listener, j, speaker, k) in zip(self.options.rsa_nu,
self.dyads())])
est_grad = t_sum([grads for tag, grads in all_subgrads], nested=True)
monitored = OrderedDict()
if self.options.monitor_grads:
monitored.update([('grad/' + param.name, grad) for param, grad in
zip(params, est_grad)])
if self.options.monitor_subgrads:
monitored.update([(tag + '/' + param.name, grad) for tag, grads in
all_subgrads for param, grad in zip(params, grads)])
return est_grad, monitored
def get_grad_of_est(self, monitored, params):
grad_of_est = T.grad(monitored['loss'], params)
monitored_grads = OrderedDict()
if self.options.monitor_grads:
monitored_grads.update([('grad/' + param.name, grad) for param,
grad in zip(params, grad_of_est)])
if self.options.monitor_subgrads:
monitored_grads.update([(tag + '/' + param.name, grad) for tag,
subloss in monitored.iteritems() if tag != 'loss' for param,
grad in zip(params, T.grad(subloss, params,
disconnected_inputs='ignore'))])
return grad_of_est, monitored_grads
def dyads(self):
for j, listener in enumerate(self.listeners):
for k, speaker in enumerate(self.speakers):
yield listener, j, speaker, k
def minibatches(self, inputs, targets, batch_size, shuffle=False):
agents = self.listeners + self.speakers
batches = super(RSAGraphModel, self).minibatches(inputs, targets,
batch_size, shuffle=shuffle)
for dataset_inputs, dataset_targets, _synth in batches:
inputs_batch = []
targets_batch = []
synth_batch = []
filtered = self.filter_arrays(dataset_inputs, dataset_targets)
for agent, (agent_inputs, agent_targets) in zip(agents, filtered):
inputs_batch.extend(agent_inputs)
targets_batch.extend(agent_targets)
input_types = [a.shape for a in agent_inputs]
target_types = [a.shape for a in agent_targets]
if self.options.verbosity >= 8:
print('%s: %s -> %s' % (agent.id, input_types,
target_types))
listener_samples = [(listener.sample_joint_smooth(self.options.
listener_samples) if self.options.listener_sample_smoothed else
listener.sample_joint_emp(self.options.listener_samples)) for
listener in self.listeners]
speaker_samples = [(speaker.sample_joint_smooth(self.options.
speaker_samples) if self.options.speaker_sample_smoothed else
speaker.sample_joint_emp(self.options.listener_samples)) for
speaker in self.speakers]
for listener, samples in zip(self.listeners, listener_samples):
arrays = listener.model.data_to_synth_arrays(listener,
samples, speaker_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (listener.id, synth_types))
for speaker, samples in zip(self.speakers, speaker_samples):
arrays = speaker.model.data_to_synth_arrays(speaker,
samples, listener_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (speaker.id, synth_types))
yield inputs_batch, targets_batch, synth_batch
def filter_arrays(self, inputs, targets):
result = []
input_idx = 0
for agent, target in zip(self.listeners + self.speakers, targets):
assert input_idx + len(agent.model.input_vars) <= len(inputs), (
input_idx, len(agent.model.input_vars), len(inputs))
agent_inputs = inputs[input_idx:input_idx + len(agent.model.
input_vars)]
agent_targets = [target]
result.append((agent_inputs, agent_targets))
input_idx += len(agent.model.input_vars)
return result
class RSALearner(NeuralLearner):
def __init__(self, id=None):
self.get_options()
self.init_submodels(id)
super(RSALearner, self).__init__(id=id)
color_resolution = (self.options.listener_color_resolution if self.
options.listener else self.options.speaker_color_resolution)
self.seq_vec = SequenceVectorizer()
self.color_vec = BucketsVectorizer(color_resolution, hsv=self.
options.speaker_hsv)
def init_submodels(self, id=None):
id_tag = id + '/' if id else ''
self.get_options()
listener_classes = self.options.listener_class
speaker_classes = self.options.speaker_class
if len(listener_classes) != self.options.rsa_listeners:
assert len(listener_classes) == 1, len(listener_classes)
listener_classes = listener_classes * self.options.rsa_listeners
if len(speaker_classes) != self.options.rsa_speakers:
assert len(speaker_classes) == 1, len(speaker_classes)
speaker_classes = speaker_classes * self.options.rsa_speakers
self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (
id_tag, j)) for j in range(self.options.rsa_listeners)]
self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag,
k)) for k in range(self.options.rsa_speakers)]
agents = self.listeners if self.options.listener else self.speakers
self.eval_agent = agents[self.options.eval_agent]
def predict(self, eval_instances, verbosity=0):
return self.eval_agent.predict(eval_instances, verbosity=verbosity)
def score(self, eval_instances, verbosity=0):
return self.eval_agent.score(eval_instances, verbosity=verbosity)
def predict_and_score(self, eval_instances, verbosity=0):
return self.eval_agent.predict_and_score(eval_instances, verbosity=
verbosity)
def on_iter_end(self, step, writer):
for agent in (self.speakers + self.listeners):
agent.on_iter_end(step, writer)
def sample_joint_smooth(self, num_samples):
return self.eval_agent.sample_joint_smooth(num_samples)
def _data_to_arrays(self, training_instances, init_vectorizer=False,
test=False, inverted=False):
input_arrays = []
target_arrays = []
if self.options.listener != inverted:
listener_dataset = training_instances
speaker_dataset = [inst.inverted() for inst in training_instances]
else:
listener_dataset = [inst.inverted() for inst in training_instances]
speaker_dataset = training_instances
for listener in self.listeners:
if not test:
listener.dataset = listener_dataset
inputs, targets = listener._data_to_arrays(listener_dataset,
test=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
for speaker in self.speakers:
if not test:
speaker.dataset = speaker_dataset
inputs, targets = speaker._data_to_arrays(speaker_dataset, test
=test, init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
return input_arrays, target_arrays
def _build_model(self):
for agent in (self.listeners + self.speakers):
agent._build_model(RSASubModel)
self.build_aggregate_model()
def train_priors(self, training_instances, listener_data=False):
prior_class = LISTENER_PRIORS[self.options.listener_prior
] if self.options.listener else SPEAKER_PRIORS[self.options.
speaker_prior]
self.prior_emp = prior_class()
self.prior_smooth = prior_class()
self.prior_emp.train(training_instances, listener_data=listener_data)
self.prior_smooth.train(training_instances, listener_data=listener_data
)
for agent in (self.listeners + self.speakers):
agent.train_priors(training_instances, listener_data=listener_data)
def build_aggregate_model(self):
self.model = RSAGraphModel(self.listeners, self.speakers, self.
eval_agent)
self.prior_emp = AggregatePrior(self.listeners, self.speakers,
'prior_emp')
self.prior_smooth = AggregatePrior(self.listeners, self.speakers,
'prior_smooth')
def __getstate__(self):
return self.seq_vec, self.color_vec, [agent.__getstate__() for
agent in self.listeners + self.speakers]
def __setstate__(self, state):
self.seq_vec, self.color_vec, submodels = state
self.init_submodels()
for agent, substate in zip(self.listeners + self.speakers, submodels):
agent.unpickle(substate, RSASubModel)
self.build_aggregate_model()
def t_sum(seq, start=None, nested=False):
"""A version of sum that doesn't start with 0, for constructing
Theano graphs without superfluous TensorConstants.
If `nested` is True, sum expressions embedded within lists,
    elementwise (for use with the output of T.jacobian).
>>> t_sum([1, 2, 3])
6
>>> t_sum(xrange(1, 4), start=4)
10
>>> t_sum([[1, 2], [3, 4], [5, 6]], nested=True)
[9, 12]
>>> t_sum([[1, 2], [3, 4], [5, 6]], start=[-1, -2], nested=True)
[8, 10]
"""
if nested:
if not isinstance(seq, list):
seq = list(seq)
if start:
return [t_sum(subseq, start_elem) for subseq, start_elem in zip
(zip(*seq), start)]
else:
return [t_sum(subseq) for subseq in zip(*seq)]
seq_list = list(seq)
if seq_list:
reduced = reduce(operator.add, seq_list)
if start:
reduced = start + reduced
return reduced
elif start:
return start
else:
return 0
| import operator
import theano.tensor as T
from collections import OrderedDict
from lasagne.layers import get_output
from stanza.research import config
from neural import SimpleLasagneModel, NeuralLearner
from vectorizers import SequenceVectorizer, BucketsVectorizer
from neural import OPTIMIZERS, get_named_layers
from listener import LISTENERS, PRIORS as LISTENER_PRIORS
from speaker import SPEAKERS, PRIORS as SPEAKER_PRIORS
parser = config.get_options_parser()
parser.add_argument('--rsa_listeners', type=int, default=1,
help='Number of listeners to use in RSA cooperative nets graph')
parser.add_argument('--rsa_speakers', type=int, default=1,
help='Number of speakers to use in RSA cooperative nets graph')
parser.add_argument('--listener_class', default=['Listener'], choices=LISTENERS.keys(), nargs='+',
help='The name of the listener model to use in the RSA network.')
parser.add_argument('--speaker_class', default=['Speaker'], choices=SPEAKERS.keys(), nargs='+',
help='The name of the speaker model to use in the RSA network.')
parser.add_argument('--eval_agent', type=int, default=0,
help='Index of the agent (listener/speaker) to use as the primary object '
'of evaluation. Whether this agent is a listener or speaker will be '
'inferred from the --listener flag.')
parser.add_argument('--rsa_optimizer', choices=OPTIMIZERS.keys(), default='rmsprop',
help='The optimization (update) algorithm to use for RSA training.')
parser.add_argument('--rsa_learning_rate', type=float, default=0.1,
help='The learning rate to use for RSA training.')
parser.add_argument('--rsa_alpha', type=float, nargs='*', default=[1.0],
help='Weights for the log-likelihood of the dataset according to the '
'listeners. Provide as many values as there are listeners.')
parser.add_argument('--rsa_beta', type=float, nargs='*', default=[1.0],
help='Weights for the log-likelihood of the dataset according to the '
'speakers. Provide as many values as there are speakers.')
parser.add_argument('--rsa_mu', type=float, nargs='*', default=[1.0],
help='Weights for KL(L_j||S_k). Provide values to fill a '
'rsa_listeners x rsa_speakers matrix, in row-major order '
'(i.e. all speakers for first listener, then all speakers for second '
'listener, etc.).')
parser.add_argument('--rsa_nu', type=float, nargs='*', default=[1.0],
help='Weights for KL(S_k||L_j). Provide values to fill a '
'rsa_listeners x rsa_speakers matrix, in row-major order '
'(i.e. all speakers for first listener, then all speakers for second '
'listener, etc.).')
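# (Added clarification.) The --rsa_mu / --rsa_nu weights are consumed in row-major
# order over the (listener, speaker) dyads. For example, with hypothetical values
# and --rsa_listeners 2 --rsa_speakers 2, passing --rsa_mu 1.0 0.5 2.0 1.5 means
# mu[L0,S0]=1.0, mu[L0,S1]=0.5, mu[L1,S0]=2.0, mu[L1,S1]=1.5.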
parser.add_argument('--listener_samples', type=int, default=128,
help='Number of samples to draw from the listener per minibatch.')
parser.add_argument('--speaker_samples', type=int, default=128,
help='Number of samples to draw from the speaker per minibatch.')
parser.add_argument('--monitor_sublosses', type=config.boolean, default=False,
help='If `True`, return sub-losses for monitoring and write them to the '
'TensorBoard events file. This will likely increase compilation time.')
parser.add_argument('--monitor_subgrads', type=config.boolean, default=False,
help='If `True`, return sub-gradients for monitoring and write them to the '
'TensorBoard events file. This will likely increase compilation time.')
parser.add_argument('--grad_of_est', type=config.boolean, default=False,
help='If `True`, optimize using the gradient of the estimated loss; '
'otherwise, use the manually-derived estimate of the gradient of '
'the true loss.')
parser.add_argument('--layer_by_layer', type=config.boolean, default=False,
help='If `True`, train RSA agents layer-by-layer (only use the log-likelihood '
'sub-gradients, equivalent to training each agent on data generated from '
'the other agents); otherwise, use the gradient of the full RSA '
'objective.')
parser.add_argument('--listener_sample_smoothed', type=config.boolean, default=False,
help='If `True`, take samples from the smoothed utterance prior; otherwise, '
'sample from the empirical utterance prior.')
parser.add_argument('--speaker_sample_smoothed', type=config.boolean, default=False,
help='If `True`, take samples from the smoothed world prior; otherwise, '
'sample from the empirical world prior.')
class AggregatePrior(object):
def __init__(self, listeners, speakers, prior_name='prior_emp'):
self.listeners = listeners
self.speakers = speakers
self.prior_name = prior_name
def train(self, training_instances, listener=False):
for agent in self.listeners:
getattr(agent, self.prior_name).train(training_instances, listener=listener)
for agent in self.speakers:
getattr(agent, self.prior_name).train(training_instances, listener=listener)
def apply(self, input_vars):
assert False, ("AggregatePrior.apply shouldn't be called; "
"only individual model priors are used in RSA coop nets model")
class RSASubModel(SimpleLasagneModel):
'''
A SimpleLasagneModel for a subcomponent of an RSA graph.
'''
def __init__(self, input_vars, target_vars, l_out, loss, optimizer,
learning_rate=0.001, id=None):
super(RSASubModel, self).__init__(input_vars, target_vars, l_out, loss, optimizer,
learning_rate=learning_rate, id=id)
if len(target_vars) != 1:
raise ValueError('target_vars should be a sequence of length 1, instead got %s' %
(target_vars,))
self.target_var = target_vars[0]
def build_sample_vars(self, num_other_agents):
self.sample_inputs_self = [v.type('%s_sample_self' % (v.name,))
for v in self.input_vars]
self.sample_inputs_others = [[v.type('%s_sample_other%d' % (v.name, i))
for v in self.input_vars]
for i in range(num_other_agents)]
t = self.target_var
self.sample_target_self = t.type('%s_sample_self' % (t.name,))
self.sample_target_others = [t.type('%s_sample_other%d' % (t.name, i))
for i in range(num_other_agents)]
self.all_synth_vars = (self.sample_inputs_self +
[self.sample_target_self] +
[v
for o_inputs, o_target in zip(self.sample_inputs_others,
self.sample_target_others)
for v in o_inputs + [o_target]])
def data_to_synth_arrays(self, agent, samples_self, samples_others):
def flatten(arrays):
inputs, targets = arrays
return inputs + targets
return [arr
for i, samples in enumerate([samples_self] + samples_others)
for arr in flatten(agent._data_to_arrays(samples, inverted=(i != 0)))]
class RSAGraphModel(SimpleLasagneModel):
def __init__(self, listeners, speakers, eval_agent, id=None):
self.get_options()
self.listeners = listeners
self.speakers = speakers
self.eval_agent = eval_agent
input_vars = ([v for listener in listeners for v in listener.model.input_vars] +
[v for speaker in speakers for v in speaker.model.input_vars])
target_vars = ([listener.model.target_var for listener in listeners] +
[speaker.model.target_var for speaker in speakers])
super(RSAGraphModel, self).__init__(input_vars, target_vars,
l_out=eval_agent.model.l_out, loss=None,
optimizer=OPTIMIZERS[self.options.rsa_optimizer],
learning_rate=self.options.rsa_learning_rate,
id=id)
def params(self):
result = []
for listener in self.listeners:
result.extend(listener.params())
for speaker in self.speakers:
result.extend(speaker.params())
return result
def get_train_loss(self, target_vars, params):
for agent in self.speakers:
agent.model.build_sample_vars(len(self.listeners))
for agent in self.listeners:
agent.model.build_sample_vars(len(self.speakers))
monitored = self.get_est_loss(layer_by_layer=self.options.layer_by_layer)
if self.options.grad_of_est:
est_grad, monitored_grads = self.get_grad_of_est(monitored, params)
else:
est_grad, monitored_grads = self.get_est_grad(
params, layer_by_layer=self.options.layer_by_layer)
monitored.update(monitored_grads)
synth_vars = [v
for agent in self.listeners + self.speakers
for v in agent.model.all_synth_vars]
return monitored, est_grad, synth_vars
def get_est_loss(self, layer_by_layer=False):
def kl(agent_p, agent_q, other_idx):
if layer_by_layer:
return agent_q.loss_out(agent_q.model.sample_inputs_others[other_idx],
agent_q.model.sample_target_others[other_idx]).mean()
else:
return (
agent_p.log_joint_emp(agent_p.model.sample_inputs_self,
agent_p.model.sample_target_self) -
agent_q.log_joint_smooth(agent_q.model.sample_inputs_others[other_idx],
agent_q.model.sample_target_others[other_idx])
).mean()
id_tag_log = (self.id + ': ') if self.id else ''
id_tag = (self.id + '/') if self.id else ''
# \alpha * KL(dataset || L) = \alpha * log L(dataset) + C
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || L)')
alpha_losses = [
('%salpha_%s' % (id_tag, listener.id), alpha * listener.loss_out().mean())
for alpha, listener in zip(self.options.rsa_alpha, self.listeners)
]
# \beta * KL(dataset || S) = \beta * log S(dataset) + C
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(dataset || S)')
beta_losses = [
('%sbeta_%s' % (id_tag, speaker.id), beta * speaker.loss_out().mean())
for beta, speaker in zip(self.options.rsa_beta, self.speakers)
]
# \mu * KL(L || S)
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(L || S)')
mu_losses = [
('%smu_%s_%s' % (id_tag, listener.id, speaker.id), mu * kl(listener, speaker, j))
for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())
]
# \nu * KL(S || L)
if self.options.verbosity >= 4:
print(id_tag_log + 'loss: KL(S || L)')
nu_losses = [
('%snu_%s_%s' % (id_tag, speaker.id, listener.id), nu * kl(speaker, listener, k))
for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())
]
all_sublosses = alpha_losses + beta_losses + mu_losses + nu_losses
est_loss = t_sum(loss for tag, loss in all_sublosses)
monitored = OrderedDict([('loss', est_loss)])
if self.options.monitor_sublosses:
monitored.update(all_sublosses)
if self.options.monitor_activations:
for agent in self.listeners + self.speakers:
for name, layer in get_named_layers(agent.l_out).iteritems():
monitored['activation/' + name] = get_output(layer)
return monitored
def get_est_grad(self, params, layer_by_layer=False):
def mean_weighted_grad(weights, loss):
# Lop to the rescue! Here I was calling T.jacobian and trying to
# broadcast things and elementwise-multiply through the resulting lists,
# when a function already existed to do all of that for me...
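            # (Added note.) T.Lop(f, wrt, eval_points) left-multiplies the Jacobian of
            # `f` with respect to `wrt` by `eval_points` (i.e. eval_points^T * df/dwrt)
            # without building the full Jacobian, so dividing the weights by the batch
            # size yields the weighted *mean* gradient over the sampled batch.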
return T.Lop(loss, params, weights / T.cast(weights.shape[0], 'float32'),
disconnected_inputs='ignore')
# TODO: control variates?
def mean_grad(loss):
return T.grad(loss.mean(), params, disconnected_inputs='ignore')
id_tag = (self.id + ': ') if self.id else ''
# alpha and beta: train the agents directly against the dataset.
# \alpha_j E_D [-d/d\theta_j log L(c | m; \theta_j)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: alpha')
all_subgrads = [
('grad_alpha/%s' % (listener.id,),
mean_grad(alpha * listener.loss_out()))
for alpha, listener in zip(self.options.rsa_alpha, self.listeners)
]
# \beta_k E_D [-d/d\phi_k log S(m | c; \phi_k)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: beta')
all_subgrads.extend([
('grad_beta/%s' % (speaker.id,),
mean_grad(beta * speaker.loss_out()))
for beta, speaker in zip(self.options.rsa_beta, self.speakers)
])
# The "simple" mu and nu terms: train the agents directly against each other.
# These are still ordinary log-likelihood terms; the complexity comes from
# identifying the right input variables and iterating over the m x n dyads.
# sum_k \nu_jk E_{G_S(\phi_k)} [-d/d\theta_j log L(c | m; \theta_j)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: nu co-training')
all_subgrads.extend([
('grad_nu_co/%s_%s' % (listener.id, speaker.id),
mean_grad(nu * listener.loss_out(listener.model.sample_inputs_others[k],
listener.model.sample_target_others[k])))
for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())
])
# sum_j \nu_jk E_{G_L(\theta_j)} [-d/d\phi_k log S(m | c; \phi_k)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: mu co-training')
all_subgrads.extend([
('grad_mu_co/%s_%s' % (listener.id, speaker.id),
mean_grad(mu * speaker.loss_out(speaker.model.sample_inputs_others[j],
speaker.model.sample_target_others[j])))
for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())
])
# The "hard" mu and nu terms: regularize the agents with maximum entropy and
# accommodating other agents' priors.
#
# Zero out these subgradients if we're doing layer-by-layer training.
if not layer_by_layer:
# sum_k \mu_jk E_{G_L(\theta_j)}
# [(1 + log G_L(c, m; \theta_j) - log H_S(c, m; \phi_k)) *
# d/d\theta_j log L(c | m; \theta_j)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: mu regularizer')
all_subgrads.extend([
('grad_mu_reg/%s_%s' % (listener.id, speaker.id),
mean_weighted_grad(
mu *
(1 + listener.log_joint_emp(listener.model.sample_inputs_self,
listener.model.sample_target_self) -
speaker.log_joint_smooth(speaker.model.sample_inputs_others[j],
speaker.model.sample_target_others[j])),
listener.loss_out(listener.model.sample_inputs_self,
listener.model.sample_target_self)))
for mu, (listener, j, speaker, k) in zip(self.options.rsa_mu, self.dyads())
])
# sum_j \nu_jk E_{G_S(\phi_k)}
# [(1 + log G_S(c, m; \phi_k) - log H_L(c, m; \theta_j)) *
# d/d\phi_k log S(m | c; \phi_k)]
if self.options.verbosity >= 4:
print(id_tag + 'grad: nu regularizer')
all_subgrads.extend([
('grad_nu_reg/%s_%s' % (listener.id, speaker.id),
mean_weighted_grad(
nu *
(1 + speaker.log_joint_emp(speaker.model.sample_inputs_self,
speaker.model.sample_target_self) -
listener.log_joint_smooth(listener.model.sample_inputs_others[k],
listener.model.sample_target_others[k])),
speaker.loss_out(speaker.model.sample_inputs_self,
speaker.model.sample_target_self)))
for nu, (listener, j, speaker, k) in zip(self.options.rsa_nu, self.dyads())
])
est_grad = t_sum([grads for tag, grads in all_subgrads], nested=True)
monitored = OrderedDict()
if self.options.monitor_grads:
monitored.update([
('grad/' + param.name, grad)
for param, grad in zip(params, est_grad)
])
if self.options.monitor_subgrads:
monitored.update([
(tag + '/' + param.name, grad)
for tag, grads in all_subgrads
for param, grad in zip(params, grads)
])
return est_grad, monitored
def get_grad_of_est(self, monitored, params):
grad_of_est = T.grad(monitored['loss'], params)
monitored_grads = OrderedDict()
if self.options.monitor_grads:
monitored_grads.update([
('grad/' + param.name, grad)
for param, grad in zip(params, grad_of_est)
])
if self.options.monitor_subgrads:
monitored_grads.update([
(tag + '/' + param.name, grad)
for tag, subloss in monitored.iteritems() if tag != 'loss'
for param, grad in zip(params, T.grad(subloss, params,
disconnected_inputs='ignore'))
])
return grad_of_est, monitored_grads
def dyads(self):
for j, listener in enumerate(self.listeners):
for k, speaker in enumerate(self.speakers):
yield (listener, j, speaker, k)
def minibatches(self, inputs, targets, batch_size, shuffle=False):
agents = self.listeners + self.speakers
batches = super(RSAGraphModel, self).minibatches(inputs, targets, batch_size,
shuffle=shuffle)
for dataset_inputs, dataset_targets, _synth in batches:
inputs_batch = []
targets_batch = []
synth_batch = []
filtered = self.filter_arrays(dataset_inputs, dataset_targets)
for agent, (agent_inputs, agent_targets) in zip(agents, filtered):
inputs_batch.extend(agent_inputs)
targets_batch.extend(agent_targets)
input_types = [a.shape for a in agent_inputs]
target_types = [a.shape for a in agent_targets]
if self.options.verbosity >= 8:
print('%s: %s -> %s' % (agent.id, input_types, target_types))
listener_samples = [listener.sample_joint_smooth(self.options.listener_samples)
if self.options.listener_sample_smoothed else
listener.sample_joint_emp(self.options.listener_samples)
for listener in self.listeners]
speaker_samples = [speaker.sample_joint_smooth(self.options.speaker_samples)
if self.options.speaker_sample_smoothed else
speaker.sample_joint_emp(self.options.listener_samples)
for speaker in self.speakers]
for listener, samples in zip(self.listeners, listener_samples):
arrays = listener.model.data_to_synth_arrays(listener, samples,
speaker_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (listener.id, synth_types))
for speaker, samples in zip(self.speakers, speaker_samples):
arrays = speaker.model.data_to_synth_arrays(speaker, samples,
listener_samples)
synth_batch.extend(arrays)
synth_types = [a.shape for a in arrays]
if self.options.verbosity >= 8:
print('%s synth: %s' % (speaker.id, synth_types))
yield inputs_batch, targets_batch, synth_batch
def filter_arrays(self, inputs, targets):
result = []
input_idx = 0
for agent, target in zip(self.listeners + self.speakers, targets):
assert input_idx + len(agent.model.input_vars) <= len(inputs), \
(input_idx, len(agent.model.input_vars), len(inputs))
agent_inputs = inputs[input_idx:input_idx + len(agent.model.input_vars)]
agent_targets = [target]
result.append((agent_inputs, agent_targets))
input_idx += len(agent.model.input_vars)
return result
class RSALearner(NeuralLearner):
def __init__(self, id=None):
self.get_options()
self.init_submodels(id)
super(RSALearner, self).__init__(id=id)
color_resolution = (self.options.listener_color_resolution
if self.options.listener else
self.options.speaker_color_resolution)
self.seq_vec = SequenceVectorizer()
self.color_vec = BucketsVectorizer(color_resolution, hsv=self.options.speaker_hsv)
def init_submodels(self, id=None):
id_tag = (id + '/') if id else ''
self.get_options()
listener_classes = self.options.listener_class
speaker_classes = self.options.speaker_class
if len(listener_classes) != self.options.rsa_listeners:
assert len(listener_classes) == 1, len(listener_classes)
listener_classes = listener_classes * self.options.rsa_listeners
if len(speaker_classes) != self.options.rsa_speakers:
assert len(speaker_classes) == 1, len(speaker_classes)
speaker_classes = speaker_classes * self.options.rsa_speakers
self.listeners = [LISTENERS[listener_classes[j]](id='%sL%d' % (id_tag, j))
for j in range(self.options.rsa_listeners)]
self.speakers = [SPEAKERS[speaker_classes[k]](id='%sS%d' % (id_tag, k))
for k in range(self.options.rsa_speakers)]
agents = self.listeners if self.options.listener else self.speakers
self.eval_agent = agents[self.options.eval_agent]
def predict(self, eval_instances, verbosity=0):
return self.eval_agent.predict(eval_instances, verbosity=verbosity)
def score(self, eval_instances, verbosity=0):
return self.eval_agent.score(eval_instances, verbosity=verbosity)
def predict_and_score(self, eval_instances, verbosity=0):
return self.eval_agent.predict_and_score(eval_instances, verbosity=verbosity)
def on_iter_end(self, step, writer):
for agent in self.speakers + self.listeners:
agent.on_iter_end(step, writer)
def sample_joint_smooth(self, num_samples):
return self.eval_agent.sample_joint_smooth(num_samples)
def _data_to_arrays(self, training_instances,
init_vectorizer=False, test=False, inverted=False):
input_arrays = []
target_arrays = []
if self.options.listener != inverted:
listener_dataset = training_instances
speaker_dataset = [inst.inverted() for inst in training_instances]
else:
listener_dataset = [inst.inverted() for inst in training_instances]
speaker_dataset = training_instances
for listener in self.listeners:
if not test:
listener.dataset = listener_dataset
inputs, targets = listener._data_to_arrays(listener_dataset, test=test,
init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
for speaker in self.speakers:
if not test:
speaker.dataset = speaker_dataset
inputs, targets = speaker._data_to_arrays(speaker_dataset, test=test,
init_vectorizer=init_vectorizer)
input_arrays.extend(inputs)
target_arrays.extend(targets)
return input_arrays, target_arrays
def _build_model(self):
for agent in self.listeners + self.speakers:
agent._build_model(RSASubModel)
self.build_aggregate_model()
def train_priors(self, training_instances, listener_data=False):
prior_class = (LISTENER_PRIORS[self.options.listener_prior]
if self.options.listener else
SPEAKER_PRIORS[self.options.speaker_prior])
self.prior_emp = prior_class()
self.prior_smooth = prior_class()
self.prior_emp.train(training_instances, listener_data=listener_data)
self.prior_smooth.train(training_instances, listener_data=listener_data)
for agent in self.listeners + self.speakers:
agent.train_priors(training_instances, listener_data=listener_data)
def build_aggregate_model(self):
self.model = RSAGraphModel(self.listeners, self.speakers, self.eval_agent)
self.prior_emp = AggregatePrior(self.listeners, self.speakers, 'prior_emp')
self.prior_smooth = AggregatePrior(self.listeners, self.speakers, 'prior_smooth')
def __getstate__(self):
return (self.seq_vec, self.color_vec,
[agent.__getstate__() for agent in self.listeners + self.speakers])
def __setstate__(self, state):
self.seq_vec, self.color_vec, submodels = state
self.init_submodels()
for agent, substate in zip(self.listeners + self.speakers, submodels):
agent.unpickle(substate, RSASubModel)
self.build_aggregate_model()
def t_sum(seq, start=None, nested=False):
'''A version of sum that doesn't start with 0, for constructing
Theano graphs without superfluous TensorConstants.
If `nested` is True, sum expressions embedded within lists,
    elementwise (for use with the output of T.jacobian).
>>> t_sum([1, 2, 3])
6
>>> t_sum(xrange(1, 4), start=4)
10
>>> t_sum([[1, 2], [3, 4], [5, 6]], nested=True)
[9, 12]
>>> t_sum([[1, 2], [3, 4], [5, 6]], start=[-1, -2], nested=True)
[8, 10]
'''
if nested:
if not isinstance(seq, list):
seq = list(seq)
if start:
return [t_sum(subseq, start_elem) for subseq, start_elem in zip(zip(*seq), start)]
else:
return [t_sum(subseq) for subseq in zip(*seq)]
seq_list = list(seq)
if seq_list:
reduced = reduce(operator.add, seq_list)
if start:
reduced = start + reduced
return reduced
elif start:
return start
else:
return 0
| [
15,
21,
23,
36,
38
] |
1,359 | d30e2fa4d5b0a0965dad7d69b672b8f4ad137ff4 | <mask token>
def get_file_vocabs(file):
file_vocabs = Counter()
for sent in file.readlines():
voc = Counter()
for word in sent.split():
voc[word] += 1
file_vocabs.update(voc)
return file_vocabs
<mask token>
| <mask token>
def get_file_vocabs(file):
file_vocabs = Counter()
for sent in file.readlines():
voc = Counter()
for word in sent.split():
voc[word] += 1
file_vocabs.update(voc)
return file_vocabs
def get_vocab(dirpath):
vocabs = {}
cvocabs = Counter()
for filename in os.listdir(dirpath):
with open(dirpath + '\\' + filename, 'r', encoding='utf-8') as file:
file_vocabs = get_file_vocabs(file)
cvocabs.update(file_vocabs)
print('Step 1: Process file', filename)
n = len(cvocabs)
if n >= MOST_COMMON:
n = MOST_COMMON
cvocabs = dict(cvocabs.most_common(n))
print('Step 2...')
for i, kk in enumerate(cvocabs.keys()):
vocabs[kk] = i + 1
return vocabs
if __name__ == '__main__':
vocabs = get_vocab(dirpath)
print('Saving...')
with open(savepath, 'w') as file:
file.write(json.dumps(vocabs))
| <mask token>
MOST_COMMON = 120000
savepath = (
'D:\\My Documents\\My Project\\experiment1\\finished\\test_vocabs.json')
dirpath = 'D:\\My Documents\\My Project\\experiment1\\finished\\test'
def get_file_vocabs(file):
file_vocabs = Counter()
for sent in file.readlines():
voc = Counter()
for word in sent.split():
voc[word] += 1
file_vocabs.update(voc)
return file_vocabs
def get_vocab(dirpath):
vocabs = {}
cvocabs = Counter()
for filename in os.listdir(dirpath):
with open(dirpath + '\\' + filename, 'r', encoding='utf-8') as file:
file_vocabs = get_file_vocabs(file)
cvocabs.update(file_vocabs)
print('Step 1: Process file', filename)
n = len(cvocabs)
if n >= MOST_COMMON:
n = MOST_COMMON
cvocabs = dict(cvocabs.most_common(n))
print('Step 2...')
for i, kk in enumerate(cvocabs.keys()):
vocabs[kk] = i + 1
return vocabs
if __name__ == '__main__':
vocabs = get_vocab(dirpath)
print('Saving...')
with open(savepath, 'w') as file:
file.write(json.dumps(vocabs))
| import os
import os.path
import json
from collections import defaultdict, Counter
MOST_COMMON = 120000
savepath = (
'D:\\My Documents\\My Project\\experiment1\\finished\\test_vocabs.json')
dirpath = 'D:\\My Documents\\My Project\\experiment1\\finished\\test'
def get_file_vocabs(file):
file_vocabs = Counter()
for sent in file.readlines():
voc = Counter()
for word in sent.split():
voc[word] += 1
file_vocabs.update(voc)
return file_vocabs
def get_vocab(dirpath):
vocabs = {}
cvocabs = Counter()
for filename in os.listdir(dirpath):
with open(dirpath + '\\' + filename, 'r', encoding='utf-8') as file:
file_vocabs = get_file_vocabs(file)
cvocabs.update(file_vocabs)
print('Step 1: Process file', filename)
n = len(cvocabs)
if n >= MOST_COMMON:
n = MOST_COMMON
cvocabs = dict(cvocabs.most_common(n))
print('Step 2...')
for i, kk in enumerate(cvocabs.keys()):
vocabs[kk] = i + 1
return vocabs
if __name__ == '__main__':
vocabs = get_vocab(dirpath)
print('Saving...')
with open(savepath, 'w') as file:
file.write(json.dumps(vocabs))
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import os.path
import json
from collections import defaultdict, Counter
MOST_COMMON = 120000
savepath = r'D:\My Documents\My Project\experiment1\finished\test_vocabs.json'
dirpath = 'D:\\My Documents\\My Project\\experiment1\\finished\\test'
#dirpath = 'D:\\Corpus\\1-billion-word-language-modeling-benchmark-r13output\\1-billion-word-language-modeling-benchmark-r13output\\training-monolingual.tokenized.shuffled'
#savepath = 'D:\\My Documents\\My Project\\experiment1\\finished\\a.json'
#dirpath = 'D:\\My Documents\\My Project\\experiment1\\finished\\test'
def get_file_vocabs(file):
file_vocabs = Counter()
for sent in file.readlines():
voc = Counter()
for word in sent.split():
voc[word] += 1
file_vocabs.update(voc)
return file_vocabs
def get_vocab(dirpath):
vocabs = {}
cvocabs = Counter()
for filename in os.listdir(dirpath):
with open(dirpath + '\\' + filename, 'r', encoding='utf-8') as file:
file_vocabs = get_file_vocabs(file)
cvocabs.update(file_vocabs)
print('Step 1: Process file', filename)
n = len(cvocabs)
if n >= MOST_COMMON: n = MOST_COMMON
cvocabs = dict(cvocabs.most_common(n))
print('Step 2...')
for i, kk in enumerate(cvocabs.keys()):
vocabs[kk] = i + 1
return vocabs
if __name__ == '__main__':
vocabs = get_vocab(dirpath)
print('Saving...')
with open(savepath, 'w') as file:
file.write(json.dumps(vocabs))
| [
1,
3,
4,
5,
6
] |
1,360 | 16879598a8b1a0b23c5ea6de18f8fb0b0b77201c | <mask token>
def get(path):
return reduce(lambda view, part: view[part], path.split('.'), config).get()
| <mask token>
config.set_file('config.yaml')
def get(path):
return reduce(lambda view, part: view[part], path.split('.'), config).get()
| <mask token>
config = confuse.Configuration('SleepCycleWebhooks')
config.set_file('config.yaml')
def get(path):
return reduce(lambda view, part: view[part], path.split('.'), config).get()
| from functools import reduce
import confuse
config = confuse.Configuration('SleepCycleWebhooks')
config.set_file('config.yaml')
def get(path):
return reduce(lambda view, part: view[part], path.split('.'), config).get()
| null | [
1,
2,
3,
4
] |
1,361 | 267695555e876dc2fe5820dc194490aad9e5e344 | <mask token>
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
<mask token>
| <mask token>
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list:
for j in element_list:
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index += 1
if max_diameter_cluster_value <= 0:
return -1
return max_diameter_cluster_index
<mask token>
| <mask token>
print(dissimilarity_matrix)
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list:
for j in element_list:
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index += 1
if max_diameter_cluster_value <= 0:
return -1
return max_diameter_cluster_index
if __name__ == '__main__':
argv = sys.argv
    num_clusters = int(sys.argv[-1])
current_clusters = [all_elements]
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
while index != -1 and level != num_clusters:
a_clstr, b_clstr = split(current_clusters[index])
del current_clusters[index]
current_clusters.append(a_clstr)
current_clusters.append(b_clstr)
index = max_distance(current_clusters)
level += 1
pbar.update(10)
for i in range(num_clusters):
pd.DataFrame(current_clusters[i], columns=['id']).to_csv(
'%s_cluster_%d.txt' % (sys.argv[1], i), sep='\t')
| import pandas as pd
import numpy as np
import sys
from tqdm import tqdm
import time
from scipy.spatial.distance import pdist, squareform
data = pd.read_csv(sys.argv[1], delimiter='\t')
all_elements = [index for index in data.index]
distance_matrix = pdist(data, metric='euclidean')
dissimilarity_matrix = np.array(squareform(distance_matrix))
print(dissimilarity_matrix)
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0
for i in element_list:
sum_dissm += dissimilarity_matrix[node][i]
if dissimilarity_matrix[node][i] > max_diameter:
max_diameter = dissimilarity_matrix[node][i]
if len(element_list) > 1:
avg = sum_dissm / (len(element_list) - 1)
else:
avg = 0
return avg
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0:
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j]
avg = sum_dissm / len(splinter_list)
return avg
def splinter(main_list, splinter_group):
most_dissm_object_value = -np.inf
most_dissm_object_index = None
for node in main_list:
x = avg_dissim_within_group_element(node, main_list)
y = avg_dissim_across_group_element(node, main_list, splinter_group)
diff = x - y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node
if most_dissm_object_value > 0:
return most_dissm_object_index, 1
else:
return -1, -1
def split(element_list):
main_list = element_list
splinter_group = []
most_dissm_object_index, flag = splinter(main_list, splinter_group)
while flag > 0:
main_list.remove(most_dissm_object_index)
splinter_group.append(most_dissm_object_index)
most_dissm_object_index, flag = splinter(element_list, splinter_group)
return main_list, splinter_group
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list:
for j in element_list:
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index += 1
if max_diameter_cluster_value <= 0:
return -1
return max_diameter_cluster_index
if __name__ == '__main__':
argv = sys.argv
    num_clusters = int(sys.argv[-1])
current_clusters = [all_elements]
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
while index != -1 and level != num_clusters:
a_clstr, b_clstr = split(current_clusters[index])
del current_clusters[index]
current_clusters.append(a_clstr)
current_clusters.append(b_clstr)
index = max_distance(current_clusters)
level += 1
pbar.update(10)
for i in range(num_clusters):
pd.DataFrame(current_clusters[i], columns=['id']).to_csv(
'%s_cluster_%d.txt' % (sys.argv[1], i), sep='\t')
| #library
import pandas as pd
import numpy as np
import sys
from tqdm import tqdm # shows a progress bar for the running process.
import time
from scipy.spatial.distance import pdist, squareform
#0. Data Load
data = pd.read_csv(sys.argv[1], delimiter='\t') # Load train (input text file)
#1. Data Preprocessing
all_elements = [index for index in data.index] # Save index name.
# Make a distance matrix to compute dissimilarity.
distance_matrix = pdist(data, metric='euclidean')
dissimilarity_matrix = np.array(squareform(distance_matrix))
#dissimilarity_matrix = pd.DataFrame(squareform(distance_matrix), columns=all_elements, index=all_elements)
print(dissimilarity_matrix)
#2. Modeling : DIANA Clustering
#2-1. Compute dissimilarity average in ONE Cluster.
def avg_dissim_within_group_element(node, element_list):
max_diameter = -np.inf
sum_dissm = 0 #Set Sum equal zero.
for i in element_list:
        sum_dissm += dissimilarity_matrix[node][i] # While iterating over element_list, accumulate this node's distances.
        if( dissimilarity_matrix[node][i] > max_diameter): # If this distance is larger than the current maximum,
            max_diameter = dissimilarity_matrix[node][i] # it becomes the new max_diameter.
if(len(element_list)>1):
avg = sum_dissm/(len(element_list)-1) # Average of distance matrix.
else:
avg = 0
return avg
# 2-2. Compute dissimilarity average between different Group(e.g. Cluster1 and Cluster2)
# ids in the separated new group = splinter_list
def avg_dissim_across_group_element(node, main_list, splinter_list):
if len(splinter_list) == 0: #there is no spliter group, return zero.
return 0
sum_dissm = 0
for j in splinter_list:
sum_dissm = sum_dissm + dissimilarity_matrix[node][j] #Compute average between Object in splinter group
avg = sum_dissm/(len(splinter_list)) #and all object dissimilarity matrix.
return avg
# 2-3. Cluster Splinter
def splinter(main_list, splinter_group):
    most_dissm_object_value = -np.inf # initialize to minus infinity.
most_dissm_object_index = None
for node in main_list:
        x = avg_dissim_within_group_element(node, main_list) # average dissimilarity of this node within the main group.
        y = avg_dissim_across_group_element(node, main_list, splinter_group) # average dissimilarity of this node to the splinter group.
diff = x - y # difference between X and Y
if diff > most_dissm_object_value:
most_dissm_object_value = diff
most_dissm_object_index = node # save index and value which has largest value between two groups.
    if(most_dissm_object_value>0): # difference is positive: move this object to the splinter group. flag = 1
return (most_dissm_object_index, 1)
else: # difference is minus, flag = -1
return (-1, -1)
# 2-4. Split
def split(element_list):
main_list = element_list
splinter_group = []
(most_dissm_object_index, flag) = splinter(main_list, splinter_group)
while(flag > 0): # Iterate splinter function until a flag become minus.
main_list.remove(most_dissm_object_index) #Delete the most largest dissimilarity average object index in the main list.
splinter_group.append(most_dissm_object_index) # Then, append in the new splinter group.
(most_dissm_object_index, flag) = splinter(element_list, splinter_group)
return (main_list, splinter_group)
# 2-5. look for maximum distance in the current cluster.
def max_distance(cluster_list):
max_diameter_cluster_index = None
max_diameter_cluster_value = -np.inf
index = 0
for element_list in cluster_list:
for i in element_list: #columns
for j in element_list: #rows
#Switch the largest dissimilarity average object(index), value.
if dissimilarity_matrix[i][j] > max_diameter_cluster_value:
max_diameter_cluster_value = dissimilarity_matrix[i][j]
max_diameter_cluster_index = index
index +=1
if(max_diameter_cluster_value <= 0):
return -1
return max_diameter_cluster_index
# main
if __name__ == '__main__':
# Save arguments list
argv = sys.argv
# Set the number of cluster.
    num_clusters = int(sys.argv[-1])
current_clusters = ([all_elements])
print(current_clusters)
level = 1
index = 0
with tqdm(total=100) as pbar:
        while((index!=-1) and (level!=num_clusters)): # Proceed until the index equals -1 or the requested number of clusters is reached.
(a_clstr, b_clstr) = split(current_clusters[index])
del current_clusters[index] # Delete current cluster.
current_clusters.append(a_clstr) #original cluster
current_clusters.append(b_clstr) #splinter cluster
index = max_distance(current_clusters)
level +=1
pbar.update(10)
for i in range(num_clusters): # Save the results.
pd.DataFrame(current_clusters[i], columns=['id']).to_csv("%s_cluster_%d.txt" %(sys.argv[1], i), sep='\t')
| [
4,
5,
6,
8,
9
] |
1,362 | 61f2fbed184ff6f842ba9527456da453844f8dc6 | # DATA TYPES (DATA TİPLERİ)
# STRINGS (CHARACTER SEQUENCES)
# Quotation marks are used to define a string. If we are writing a string
# that spans several lines, triple quotes are used:
print("""An example
of a
multi-line
string""")
# Output:
# An example
# of a
# multi-line
# string
print('Single quotes: used for single-line strings.')
# Output: Single quotes: used for single-line strings.
print("Double quotes: likewise used for single-line sentences.")
# Output: Double quotes: likewise used for single-line sentences.
# The reason different quote types exist is to keep the apostrophe that marks
# possessive proper names from being read as the end of the string:
print("Türkiye'nin başkenti Ankara'dır.")
# Output: Türkiye'nin başkenti Ankara'dır.  ("The capital of Turkey is Ankara.")
# To get the output above I used double quotes (""), because with single
# quotes the print call below would be broken:
# print('Türkiye'nin başkenti Ankara'dır.')  # Python's highlighting would
# flag these values as invalid.
# If we start with double quotes and the inner marks are single quotes, Python
# does not see the single quotes in between (it started with double quotes) and
# treats them as string content. Likewise, had I started with triple quotes, it
# would ignore double quotes and treat them as string content as well.
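# (Added illustration, not part of the original notes.) One more example of the
# last point: a triple-quoted string can contain both single and double quotes,
# and a backslash can escape a quote that matches the surrounding delimiter.
print("""Both 'single' and "double" quotes fit inside triple quotes.""")
print('Escaping also works: Türkiye\'nin başkenti Ankara\'dır.')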
| null | null | null | null | [
0
] |
1,363 | 2b87b8571664989e78790bd9df23eee9cbd44035 | <mask token>
| <mask token>
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
return msg.text
<mask token>
| <mask token>
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
return msg.text
itchat.auto_login()
itchat.run()
| import itchat
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
return msg.text
itchat.auto_login()
itchat.run()
| # @Time : 2019/12/12 15:54
# @Author : Libuda
# @FileName: 远程服务器文件监控.py  (remote server file monitoring)
# @Software: PyCharm
import itchat
@itchat.msg_register(itchat.content.TEXT)
def text_reply(msg):
return msg.text
itchat.auto_login()
itchat.run()
| [
0,
1,
2,
3,
4
] |
1,364 | 9c3f6c368c764918da5cce44da574b7c041fa414 | class Node:
<mask token>
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
        key: node_id.
        location: node's position, represented as a 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
<mask token>
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
<mask token>
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
| class Node:
<mask token>
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
        key: node_id.
        location: node's position, represented as a 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
<mask token>
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
| class Node:
<mask token>
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
        key: node_id.
        location: node's position, represented as a 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
def get_location(self) ->tuple:
"""
Return this node location as a 3DPoint (x, y, z).
:return: this node location
"""
return self.__location
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
| class Node:
"""
    This class represents a node (vertex).
"""
def __init__(self, k: int=None, loc: tuple=None, **kwargs):
"""
        Each node contains a few fields:
        key: node_id.
        location: node's position, represented as a 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) ->None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) ->dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) ->int:
"""
Return this node key.
:return: key
"""
return self.__key
def get_location(self) ->tuple:
"""
Return this node location as a 3DPoint (x, y, z).
:return: this node location
"""
return self.__location
def set_location(self, location: tuple) ->None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {'pos': loc_as_str[1:-1], 'id': self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {'src': int(self.get_key()), 'w': float(v), 'dest': int(k)
}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) ->str:
return 'Node: id: ' + str(self.__key) + ' neighbors: ' + str(self.
__ni_out)
def __eq__(self, o: object) ->bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.
__location) and self.__ni_in.__eq__(other.__ni_in
) and self.__ni_out.__eq__(other.__ni_out)
| class Node:
"""
This class represent a node (vertex).
"""
def __init__(self, k: int = None, loc: tuple = None, **kwargs):
"""
Each node contain dew fields:
key: node_id.
location: node's position represent as 3DPoint.
ni_out: a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
ni_in: a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight)
"""
self.__key = k
self.__location = loc
self.__ni_out = {}
self.__ni_in = {}
def add_neighbor_out(self, neighbor_id: int, weight: float) -> None:
"""
Add "edge" that connected from this node (node_id ---> neighbor_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_out[neighbor_id] = weight
def add_neighbor_in(self, neighbor_id: int, weight: float) -> None:
"""
Add "edge" that connected to this node (neighbor_id ---> node_id).
:param neighbor_id: dest node key
:param weight: edge's weight
"""
self.__ni_in[neighbor_id] = weight
def get_connections_out(self) -> dict:
"""
Return a dictionary that holds all the "edges" that connected from this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_out
def get_connections_in(self) -> dict:
"""
Return a dictionary that holds all the "edges" that connected to this node,
each edge is represented using a pair (key, edge weight).
:return: dictionary (key, edge weight).
"""
return self.__ni_in
def get_key(self) -> int:
"""
Return this node key.
:return: key
"""
return self.__key
def get_location(self) -> tuple:
"""
Return this node location as a 3DPoint (x, y, z).
:return: this node location
"""
return self.__location
def set_location(self, location: tuple) -> None:
"""
Allows to add location to this node.
This method used for load and plot graphs that their nodes have no position.
:param location: the new position of this node
"""
self.__location = location
def as_dict_node(self):
"""
Return the node as dictionary {"pos": "x", "y", "z", "id": key}
:return: the node as dictionary
"""
loc_as_str = str(self.get_location())
m_dict = {"pos": loc_as_str[1:-1], "id": self.get_key()}
return m_dict
def as_dict_edge(self):
"""
Return the edge as dictionary {"src": src node_id, "w": edge weight, "dest": dest node_id}
:return: the edge as dictionary
"""
l_list = []
for k, v in self.get_connections_out().items():
m_dict = {"src": int(self.get_key()), "w": float(v), "dest": int(k)}
l_list.append(m_dict)
return l_list
def __repr__(self):
return str([self.get_key()])
def __str__(self) -> str:
return "Node: id: " + str(self.__key) + ' neighbors: ' + str(self.__ni_out)
def __eq__(self, o: object) -> bool:
if self is o:
return True
if o is None or self.__class__ is not o.__class__:
return False
other = o
return self.__key == other.__key and self.__location.__eq__(other.__location) and self.__ni_in.__eq__(
other.__ni_in) and self.__ni_out.__eq__(other.__ni_out) | [
12,
13,
14,
15,
16
] |
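A minimal usage sketch for the Node class in the sample above. It assumes the class definition is in scope; the keys, edge weight and (x, y, z) locations are made-up illustration values.

# Build two nodes and connect them with a weighted directed edge.
n0 = Node(k=0, loc=(1.0, 2.0, 0.0))
n1 = Node(k=1, loc=(4.0, 6.0, 0.0))

n0.add_neighbor_out(n1.get_key(), 2.5)   # edge 0 ---> 1 with weight 2.5
n1.add_neighbor_in(n0.get_key(), 2.5)    # mirror entry on the destination node

print(n0.as_dict_node())   # {'pos': '1.0, 2.0, 0.0', 'id': 0}
print(n0.as_dict_edge())   # [{'src': 0, 'w': 2.5, 'dest': 1}]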
1,365 | 21d261dec6668a24030f37b7dcb87c0132e63528 | <mask token>
class EditUserProfileView(LoginRequiredMixin, UpdateView):
model = Profile
form_class = UserProfileForm
template_name = 'profile.html'
| <mask token>
@login_required
def home(request):
return render(request, 'home.html')
<mask token>
class EditUserProfileView(LoginRequiredMixin, UpdateView):
model = Profile
form_class = UserProfileForm
template_name = 'profile.html'
| <mask token>
@login_required
def home(request):
return render(request, 'home.html')
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
user.refresh_from_db()
user.profile.birth_date = form.cleaned_data.get('birth_date')
user.save()
raw_password = form.cleaned_data.get('password1')
return redirect('login')
else:
form = SignUpForm()
return render(request, 'signup.html', {'form': form})
class EditUserProfileView(LoginRequiredMixin, UpdateView):
model = Profile
form_class = UserProfileForm
template_name = 'profile.html'
| from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from mysite.core.forms import SignUpForm, UserProfileForm
from django.views.generic import UpdateView
from .models import Profile
from django.contrib.auth.mixins import LoginRequiredMixin
@login_required
def home(request):
return render(request, 'home.html')
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
user.refresh_from_db()
user.profile.birth_date = form.cleaned_data.get('birth_date')
user.save()
raw_password = form.cleaned_data.get('password1')
return redirect('login')
else:
form = SignUpForm()
return render(request, 'signup.html', {'form': form})
class EditUserProfileView(LoginRequiredMixin, UpdateView):
model = Profile
form_class = UserProfileForm
template_name = 'profile.html'
| from django.contrib.auth.decorators import login_required
from django.contrib.auth import login, authenticate
from django.shortcuts import render, redirect
from mysite.core.forms import SignUpForm,UserProfileForm
from django.views.generic import UpdateView
from .models import Profile
from django.contrib.auth.mixins import LoginRequiredMixin
@login_required
def home(request):
return render(request, 'home.html')
def signup(request):
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
user = form.save()
user.refresh_from_db() # load the profile instance created by the signal
user.profile.birth_date = form.cleaned_data.get('birth_date')
user.save()
raw_password = form.cleaned_data.get('password1')
# user = authenticate(username=user.username, password=raw_password)
# login(request, user)
return redirect('login')
else:
form = SignUpForm()
return render(request, 'signup.html', {'form': form})
class EditUserProfileView(LoginRequiredMixin,UpdateView):
model = Profile
form_class = UserProfileForm
template_name = "profile.html"
| [
2,
3,
4,
5,
6
] |
1,366 | c8565e1b5659dd0908aabf91e07738a798dc3232 | <mask token>
| <mask token>
regressor.fit(X, y)
<mask token>
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.scatter(6.5, y_pred, color='green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
| <mask token>
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, dataset.shape[1] - 1].values
<mask token>
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, y)
y_pred = regressor.predict(np.reshape([6.5], (-1, 1)))
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.scatter(6.5, y_pred, color='green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, dataset.shape[1] - 1].values
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state=0)
regressor.fit(X, y)
y_pred = regressor.predict(np.reshape([6.5], (-1, 1)))
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.scatter(6.5, y_pred, color='green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, dataset.shape[1]-1].values
#Fitting the Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)
#Predicting a new result
y_pred = regressor.predict(np.reshape([6.5], (-1, 1)))
#Visualizing the results
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.scatter(6.5, y_pred, color = 'green')
plt.title('Salary vs Title')
plt.xlabel('Title')
plt.ylabel('Salary')
plt.show() | [
0,
1,
2,
3,
4
] |
1,367 | 53cd9d5a79e97bb1af69446a82c747248c3cc298 | <mask token>
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
<mask token>
| <mask token>
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def get_host(environ, use_x_forwarded_for=False):
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('HTTP_HOST'):
rv = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('SERVER_NAME'):
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((
'https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
else:
rv = 'unknown'
return rv
| <mask token>
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def get_host(environ, use_x_forwarded_for=False):
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('HTTP_HOST'):
rv = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('SERVER_NAME'):
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((
'https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
else:
rv = 'unknown'
return rv
| <mask token>
from sentry_sdk._compat import iteritems
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
def _get_headers(environ):
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE',
'HTTP_CONTENT_LENGTH'):
yield key[5:].replace('_', '-').title(), value
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield key.replace('_', '-').title(), value
def get_host(environ, use_x_forwarded_for=False):
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and 'HTTP_X_FORWARDED_HOST' in environ:
rv = environ['HTTP_X_FORWARDED_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('HTTP_HOST'):
rv = environ['HTTP_HOST']
if environ['wsgi.url_scheme'] == 'http' and rv.endswith(':80'):
rv = rv[:-3]
elif environ['wsgi.url_scheme'] == 'https' and rv.endswith(':443'):
rv = rv[:-4]
elif environ.get('SERVER_NAME'):
rv = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not in ((
'https', '443'), ('http', '80')):
rv += ':' + environ['SERVER_PORT']
else:
rv = 'unknown'
return rv
| """
Copyright (c) 2007 by the Pallets team.
Some rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
"""
from sentry_sdk._compat import iteritems
from sentry_sdk._types import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Dict
from typing import Iterator
from typing import Tuple
#
# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
# https://github.com/pallets/werkzeug/blob/0.14.1/werkzeug/datastructures.py#L1361
#
# We need this function because Django does not give us a "pure" http header
# dict. So we might as well use it for all WSGI integrations.
#
def _get_headers(environ):
# type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
"""
Returns only proper HTTP headers.
"""
for key, value in iteritems(environ):
key = str(key)
if key.startswith("HTTP_") and key not in (
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
):
yield key[5:].replace("_", "-").title(), value
elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
yield key.replace("_", "-").title(), value
#
# `get_host` comes from `werkzeug.wsgi.get_host`
# https://github.com/pallets/werkzeug/blob/1.0.1/src/werkzeug/wsgi.py#L145
#
def get_host(environ, use_x_forwarded_for=False):
# type: (Dict[str, str], bool) -> str
"""
Return the host for the given WSGI environment.
"""
if use_x_forwarded_for and "HTTP_X_FORWARDED_HOST" in environ:
rv = environ["HTTP_X_FORWARDED_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("HTTP_HOST"):
rv = environ["HTTP_HOST"]
if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
rv = rv[:-3]
elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
rv = rv[:-4]
elif environ.get("SERVER_NAME"):
rv = environ["SERVER_NAME"]
if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
("https", "443"),
("http", "80"),
):
rv += ":" + environ["SERVER_PORT"]
else:
# In spite of the WSGI spec, SERVER_NAME might not be present.
rv = "unknown"
return rv
| [
1,
2,
3,
4,
5
] |
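A small sketch of how _get_headers and get_host from the sample above behave; it assumes both functions (and the sentry_sdk import they rely on) are available, and the environ values are made up.

environ = {
    "wsgi.url_scheme": "https",
    "SERVER_NAME": "backend.local",
    "SERVER_PORT": "8443",
    "HTTP_HOST": "example.com:443",
    "HTTP_X_FORWARDED_HOST": "proxy.example.com",
    "HTTP_USER_AGENT": "curl/8.0",
    "CONTENT_TYPE": "application/json",
}

print(dict(_get_headers(environ)))
# {'Host': 'example.com:443', 'X-Forwarded-Host': 'proxy.example.com',
#  'User-Agent': 'curl/8.0', 'Content-Type': 'application/json'}

print(get_host(environ))                            # 'example.com'  (the ':443' suffix is stripped)
print(get_host(environ, use_x_forwarded_for=True))  # 'proxy.example.com'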
1,368 | ee5e970f32b1d601f9dc3ab37a5028ce7ff8a32e | <mask token>
| <mask token>
print(message)
| message = 'Hello Python World '
print(message)
 | # message is the variable being defined
message = 'Hello Python World '
print(message) | null | [
0,
1,
2,
3
] |
1,369 | 8d1067a9bb0629276ef27de91f63cf2370a44e24 | <mask token>
class Protocol:
<mask token>
<mask token>
<mask token>
@abstractmethod
def execute(self, command):
""""execute command method"""
class LocalProtocol(Protocol):
"""simple protocol for using bots within app"""
def __init__(self, command_executor):
self._command_executor = command_executor
def execute(self, command):
if not self._command_executor.has_executor(command.name):
return Protocol.FAIL
try:
result = self._command_executor.execute(command)
except:
result = Protocol.FAIL
return result
<mask token>
| <mask token>
class Protocol:
<mask token>
__metaclass__ = ABCMeta
FAIL = 'Failed'
@abstractmethod
def execute(self, command):
""""execute command method"""
class LocalProtocol(Protocol):
"""simple protocol for using bots within app"""
def __init__(self, command_executor):
self._command_executor = command_executor
def execute(self, command):
if not self._command_executor.has_executor(command.name):
return Protocol.FAIL
try:
result = self._command_executor.execute(command)
except:
result = Protocol.FAIL
return result
<mask token>
| <mask token>
class Protocol:
"""base protocol class"""
__metaclass__ = ABCMeta
FAIL = 'Failed'
@abstractmethod
def execute(self, command):
""""execute command method"""
class LocalProtocol(Protocol):
"""simple protocol for using bots within app"""
def __init__(self, command_executor):
self._command_executor = command_executor
def execute(self, command):
if not self._command_executor.has_executor(command.name):
return Protocol.FAIL
try:
result = self._command_executor.execute(command)
except:
result = Protocol.FAIL
return result
<mask token>
| from abc import ABCMeta, abstractmethod
__author__ = 'Alexiy'
class Protocol:
"""base protocol class"""
__metaclass__ = ABCMeta
FAIL = 'Failed'
@abstractmethod
def execute(self, command):
""""execute command method"""
class LocalProtocol(Protocol):
"""simple protocol for using bots within app"""
def __init__(self, command_executor):
self._command_executor = command_executor
def execute(self, command):
if not self._command_executor.has_executor(command.name):
return Protocol.FAIL
try:
result = self._command_executor.execute(command)
except:
result = Protocol.FAIL
return result
Protocol.register(LocalProtocol)
| null | [
6,
7,
8,
11
] |
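A sketch of the executor/command shapes that LocalProtocol appears to expect, inferred from the sample above; the Command tuple and EchoExecutor are hypothetical stand-ins, not part of the original code, and LocalProtocol/Protocol are assumed to be in scope.

from collections import namedtuple

Command = namedtuple('Command', ['name', 'args'])

class EchoExecutor:
    """Toy executor: only knows how to run an 'echo' command."""
    def has_executor(self, name):
        return name == 'echo'

    def execute(self, command):
        return ' '.join(command.args)

protocol = LocalProtocol(EchoExecutor())
print(protocol.execute(Command('echo', ['hello', 'bot'])))  # 'hello bot'
print(protocol.execute(Command('reboot', [])))              # 'Failed'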
1,370 | 1ffdc2845bc503c0a30407de444a152f8cc68d57 | <mask token>
| <mask token>
path.append('D:/Github/astrophy-research/mylib')
path.append('D:/Github/astrophy-research/multi_shear_detect')
path.append('%s/work/mylib' % my_home)
<mask token>
if rank == 0:
nbytes = 2 * signal_num * itemsize
else:
nbytes = 0
<mask token>
print(rank, signal_est)
comm.Barrier()
if rank == 0:
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
| <mask token>
my_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]
<mask token>
path.append('D:/Github/astrophy-research/mylib')
path.append('D:/Github/astrophy-research/multi_shear_detect')
path.append('%s/work/mylib' % my_home)
<mask token>
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
source_num = int(argv[1]) * 10000
sigma_1 = float(argv[2])
sigma_2 = float(argv[3])
signal_num = numprocs
signals = numpy.linspace(-0.05, 0.05, signal_num)
itemsize = MPI.DOUBLE.Get_size()
if rank == 0:
nbytes = 2 * signal_num * itemsize
else:
nbytes = 0
win1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf1, itemsize = win1.Shared_query(0)
result = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))
fq = Fourier_Quad(12, 123)
n = numpy.ones((source_num,))
source = numpy.random.normal(signals[rank], sigma_1, source_num
) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)
signal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]
result[:, rank] = signal_est
print(rank, signal_est)
comm.Barrier()
if rank == 0:
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
| import os
my_home = os.popen('echo $MYWORK_DIR').readlines()[0][:-1]
import numpy
from sys import path, argv
path.append('D:/Github/astrophy-research/mylib')
path.append('D:/Github/astrophy-research/multi_shear_detect')
path.append('%s/work/mylib' % my_home)
from Fourier_Quad import Fourier_Quad
import tool_box
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
source_num = int(argv[1]) * 10000
sigma_1 = float(argv[2])
sigma_2 = float(argv[3])
signal_num = numprocs
signals = numpy.linspace(-0.05, 0.05, signal_num)
itemsize = MPI.DOUBLE.Get_size()
if rank == 0:
nbytes = 2 * signal_num * itemsize
else:
nbytes = 0
win1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf1, itemsize = win1.Shared_query(0)
result = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num))
fq = Fourier_Quad(12, 123)
n = numpy.ones((source_num,))
source = numpy.random.normal(signals[rank], sigma_1, source_num
) + numpy.random.normal(-signals[rank] / 100, sigma_2, source_num)
signal_est = fq.find_shear(source, n, 8, scale=100, left=-0.08, right=0.08)[:2]
result[:, rank] = signal_est
print(rank, signal_est)
comm.Barrier()
if rank == 0:
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
| import os
my_home = os.popen("echo $MYWORK_DIR").readlines()[0][:-1]
import numpy
from sys import path, argv
path.append("D:/Github/astrophy-research/mylib")
path.append("D:/Github/astrophy-research/multi_shear_detect")
path.append('%s/work/mylib' % my_home)
from Fourier_Quad import Fourier_Quad
# import h5py
# from plot_tool import Image_Plot
import tool_box
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()
source_num = int(argv[1])*10000
sigma_1 = float(argv[2])
sigma_2 = float(argv[3])
signal_num = numprocs
signals = numpy.linspace(-0.05, 0.05, signal_num)
itemsize = MPI.DOUBLE.Get_size()
if rank == 0:
# bytes for 10 double elements
nbytes = 2*signal_num*itemsize
else:
nbytes = 0
# on rank 0 of comm, create the contiguous shared block
win1 = MPI.Win.Allocate_shared(nbytes, itemsize, comm=comm)
buf1, itemsize = win1.Shared_query(0)
result = numpy.ndarray(buffer=buf1, dtype='d', shape=(2, signal_num)) # array filled with zero
fq = Fourier_Quad(12,123)
n = numpy.ones((source_num, ))
# for i in range(signal_num):
source = numpy.random.normal(signals[rank], sigma_1, source_num) + numpy.random.normal(-signals[rank]/100, sigma_2, source_num)
signal_est = fq.find_shear(source, n, 8,scale=100, left=-0.08, right=0.08)[:2]
result[:, rank] = signal_est
print(rank, signal_est)
comm.Barrier()
if rank == 0:
# result[2] = signals
print(signals)
print(result)
mc = numpy.array(tool_box.data_fit(signals, result[0], result[1]))
mc[0] = mc[0] - 1
print(mc)
# img = Image_Plot()
# img.subplots(1,1)
# img.axs[0][0].errorbar(signals, result[0], result[1])
# img.axs[0][0].plot([-0.06,0.06],[-0.06, 0.06])
# img.show_img() | [
0,
1,
2,
3,
4
] |
1,371 | 7e5cf782692d9cfb2718b2efcc83efa2ecb815cd | <mask token>
| <mask token>
try:
from psycopg2 import connect
except:
pass
| <mask token>
import pgnumpy
import cpgnumpy
from pgnumpy import connect
from pgnumpy import PgNumpy
from pgnumpy import PgInput
from pgnumpy import ArrayWriter
from pgnumpy import ArrayStringifier
from pgnumpy import array2table
from pgnumpy import test
from pgnumpy import test_simple
try:
from psycopg2 import connect
except:
pass
| """
Package:
pgnumpy
Description
A class and a set of functions for interacting with a PostgreSql database.
A C++ extension module allows returning results as a NumPy array. Numpy
arrays can also be written to tables.
The workhorse class is called PgNumpy
This class has limited functionality compared to the full Python database
api specification. It can execute arbitrary queries and extract results
into numpy arrays. However, cursors are not yet supported. For getting
results, only the fetchall() command is available, as the goal is always to
extract all rows into a single numpy structure rather than work row by row.
    More generic DB-API compliant packages like psycopg are more suitable when
more flexible operations are needed.
Classes:
PgNumpy:
The class used in all database interactions. This class represents a
database connection and facilitates executing queries and extracting
results. See docs for pgnumpy.PgNumpy for more details.
PgInput:
A class for writing input files for use in a COPY into the database.
ArrayWriter:
Write arrays to a file for input to postgres. This slower version can
be used if recfile is not available.
ArrayStringifier:
Make a string from an array, possibly with brackets indicating
dimensions.
Convenience Functions:
connect:
Create a database connection, returning a PgNumpy object. If conninfo
is None or "" then the "default" connection based on the PGUSER and
PGDATABASE environment variables is used.
array2table:
Write array with fields (a structure) to a postgres table. If the
table does not yet exist it is created with column definitions based on
the input array. If it does exist the data are appended as new rows in
the table.
"""
import pgnumpy
import cpgnumpy
from pgnumpy import connect
from pgnumpy import PgNumpy
from pgnumpy import PgInput
from pgnumpy import ArrayWriter
from pgnumpy import ArrayStringifier
from pgnumpy import array2table
#from pgnumpy import tables
#from pgnumpy import table_exists
#from pgnumpy import describe
from pgnumpy import test
from pgnumpy import test_simple
#from pgnumpy import obliterate
#from pgnumpy import compare_arrays
# attempt to import the connect method from psycopg2
try:
from psycopg2 import connect
except:
pass
| null | [
0,
1,
2,
3
] |
1,372 | 34f98d4a6a15c9a7b42f237cab204b736dc97136 | <mask token>
| {'name': 'Clarico CMS Blocks', 'category': 'Website', 'version': '1.0',
'summary': '13 CMS Building Blocks', 'description': '', 'depends': [
'snippet_style_1', 'snippet_style_2', 'snippet_style_3',
'snippet_style_4', 'snippet_style_5', 'snippet_style_6',
'snippet_style_7', 'snippet_style_8', 'snippet_style_9',
'snippet_style_10', 'snippet_style_11', 'snippet_style_12',
'snippet_style_13'], 'author': 'Emipro Technologies Pvt. Ltd.',
'website': 'http://www.emiprotechnologies.com', 'installable': True}
| {
# Theme information
'name' : 'Clarico CMS Blocks',
'category' : 'Website',
'version' : '1.0',
'summary': '13 CMS Building Blocks',
'description': """""",
# Dependencies
'depends': [
'snippet_style_1',
'snippet_style_2',
'snippet_style_3',
'snippet_style_4',
'snippet_style_5',
'snippet_style_6',
'snippet_style_7',
'snippet_style_8',
'snippet_style_9',
'snippet_style_10',
'snippet_style_11',
'snippet_style_12',
'snippet_style_13',
],
# Author
'author': 'Emipro Technologies Pvt. Ltd.',
'website': 'http://www.emiprotechnologies.com',
# Technical
'installable': True,
}
| null | null | [
0,
1,
2
] |
1,373 | c27c2df1830f066ca4f973c46967722869090d05 | <mask token>
| <mask token>
class Config(object):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class Config(object):
from_train_file = 'data/dev.en'
to_train_file = 'data/dev.vi'
_PAD = b'_PAD'
_GO = b'_GO'
_EOS = b'_EOS'
_UNK = b'_UNK'
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
batch_size = 64
max_epochs = 1
early_stopping = 2
dropout = 0.9
lr = 0.5
l2 = 0.001
learning_rate_decay = 0.99
batch_size = 32
size = 1024
num_layers = 3
from_vocab_size = 10000
to_vocab_size = 10000
data_dir = 'data/'
dev_dir = 'data/'
max_train_data_size = 200
steps_per_checkpoint = 5
forward_only = False
buckets = [(10, 50)]
num_samples = 512
encode_layers = 3
encode_num_steps = 10
encode_hidden_size = 50
decode_layers = 3
encode_num_steps = 10
decode_hidden_size = 50
dtype = tf.float32
| import tensorflow as tf
class Config(object):
from_train_file = 'data/dev.en'
to_train_file = 'data/dev.vi'
_PAD = b'_PAD'
_GO = b'_GO'
_EOS = b'_EOS'
_UNK = b'_UNK'
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
batch_size = 64
max_epochs = 1
early_stopping = 2
dropout = 0.9
lr = 0.5
l2 = 0.001
learning_rate_decay = 0.99
batch_size = 32
size = 1024
num_layers = 3
from_vocab_size = 10000
to_vocab_size = 10000
data_dir = 'data/'
dev_dir = 'data/'
max_train_data_size = 200
steps_per_checkpoint = 5
forward_only = False
buckets = [(10, 50)]
num_samples = 512
encode_layers = 3
encode_num_steps = 10
encode_hidden_size = 50
decode_layers = 3
encode_num_steps = 10
decode_hidden_size = 50
dtype = tf.float32
| import tensorflow as tf
class Config(object):
# Source and Target files
from_train_file='data/dev.en'
to_train_file='data/dev.vi'
# Special characters and ID's
_PAD = b"_PAD"
_GO = b"_GO"
_EOS = b"_EOS"
_UNK = b"_UNK"
_START_VOCAB = [_PAD, _GO, _EOS, _UNK]
PAD_ID = 0
GO_ID = 1
EOS_ID = 2
UNK_ID = 3
# NMT hyperparameters
batch_size = 64
max_epochs = 1
early_stopping = 2
dropout = 0.9
lr = 0.5
l2 = 0.001
learning_rate_decay = 0.99
batch_size = 32
size = 1024
num_layers = 3
from_vocab_size = 10000
to_vocab_size = 10000
data_dir = "data/"
dev_dir = "data/"
max_train_data_size = 200
steps_per_checkpoint = 5
forward_only = False
# Buckets
buckets = [(10,50)]
# Other config variables
num_samples = 512
# Encoding parameters
encode_layers = 3
encode_num_steps = 10
encode_hidden_size = 50
    # Decoding parameters
decode_layers = 3
encode_num_steps = 10
decode_hidden_size = 50
dtype = tf.float32
| [
0,
1,
2,
3,
4
] |
1,374 | 153e7e66e2b796d011b78aed102d30e37bb0b80f | <mask token>
| <mask token>
def main():
env = gym.make('Pendulum-v0')
log_dir = 'log/pendulum'
agent = DDPG(env, sigma=0.2, num_episodes=250, buffer_size=1000000,
batch_size=64, tau=0.001, batch_norm=False, merge_layer=0)
agent.train()
agent.eval_all(log_dir + '/models', num_eps=10)
<mask token>
| <mask token>
def main():
env = gym.make('Pendulum-v0')
log_dir = 'log/pendulum'
agent = DDPG(env, sigma=0.2, num_episodes=250, buffer_size=1000000,
batch_size=64, tau=0.001, batch_norm=False, merge_layer=0)
agent.train()
agent.eval_all(log_dir + '/models', num_eps=10)
if __name__ == '__main__':
main()
| import gym
from ddpg import DDPG
def main():
env = gym.make('Pendulum-v0')
log_dir = 'log/pendulum'
agent = DDPG(env, sigma=0.2, num_episodes=250, buffer_size=1000000,
batch_size=64, tau=0.001, batch_norm=False, merge_layer=0)
agent.train()
agent.eval_all(log_dir + '/models', num_eps=10)
if __name__ == '__main__':
main()
| import gym
from ddpg import DDPG
def main():
#env = gym.make('LunarLanderContinuous-v2')
#log_dir = 'log/lander'
env = gym.make('Pendulum-v0')
log_dir = 'log/pendulum'
# paper settings
# agent = DDPG(env, sigma=0.2, num_episodes=1000, buffer_size=1000000, batch_size=64,
# tau=1e-3, batch_norm=True, merge_layer=2)
# did not work unless I merged action into critic at first layer
    # worked better without batchnorm
agent = DDPG(env, sigma=0.2, num_episodes=250, buffer_size=1000000, batch_size=64,
tau=1e-3, batch_norm=False, merge_layer=0)
agent.train()
agent.eval_all(log_dir+'/models', num_eps=10)
if __name__ == '__main__':
main() | [
0,
1,
2,
3,
4
] |
1,375 | 917a291c7b62dee392d7411c3e039949d74d7af8 | <mask token>
class Nest:
<mask token>
<mask token>
<mask token>
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
| <mask token>
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
self.__position = np.random.uniform(self.__lower_boundary, self.
__upper_boundary, 2)
self.__value = self.__function(self.__position)
<mask token>
<mask token>
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
| <mask token>
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
self.__position = np.random.uniform(self.__lower_boundary, self.
__upper_boundary, 2)
self.__value = self.__function(self.__position)
<mask token>
@property
def value(self) ->float:
return self.__value
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
| <mask token>
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
self.__position = np.random.uniform(self.__lower_boundary, self.
__upper_boundary, 2)
self.__value = self.__function(self.__position)
@property
def position(self) ->Tuple[float, float]:
return self.__position
@property
def value(self) ->float:
return self.__value
def update_pos(self, new_position: Tuple[float, float]) ->None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
| # ------------------------------------------------------------------------------------------------------
# Copyright (c) Leo Hanisch. All rights reserved.
# Licensed under the BSD 3-Clause License. See LICENSE.txt in the project root for license information.
# ------------------------------------------------------------------------------------------------------
from typing import Tuple
import numpy as np
class Nest:
def __init__(self, function, lower_boundary, upper_boundary):
self.__function = function
self.__lower_boundary = lower_boundary
self.__upper_boundary = upper_boundary
# Randomly create a new nest position
self.__position = np.random.uniform(self.__lower_boundary, self.__upper_boundary, 2)
self.__value = self.__function(self.__position)
@property
def position(self) -> Tuple[float, float]:
return self.__position
@property
def value(self) -> float:
return self.__value
def update_pos(self, new_position: Tuple[float, float]) -> None:
"""
If the new position's value is better than the old one, update the nests position and value.
Arguments:
new_position {Tuple[float, float]} -- The new position
"""
new_value = self.__function(new_position)
if new_value < self.__value:
self.__value = new_value
self.__position = new_position
| [
2,
3,
4,
5,
7
] |
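A minimal sketch of how a Nest from the sample above might be exercised, using a simple sphere function as the objective; the function, seed and boundaries are made-up values, and the Nest class is assumed to be in scope.

import numpy as np

def sphere(position):
    # Objective to minimise: f(x, y) = x^2 + y^2
    return float(np.sum(np.asarray(position) ** 2))

np.random.seed(42)
nest = Nest(sphere, lower_boundary=-5.0, upper_boundary=5.0)
print(nest.position, nest.value)

# A better candidate position replaces the stored one; a worse one is ignored.
nest.update_pos((0.1, -0.2))
print(nest.position, nest.value)   # value becomes 0.05, or stays put if it was already lower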
1,376 | 6455741bbda42b9d84428545ddd50a5d1b54a7ba | <mask token>
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
<mask token>
| <mask token>
root.title('ChadBotX')
<mask token>
def pick_file():
newfilename = filedialog.askopenfilename(initialdir='/', title=
'Select file', filetypes=(('Byte files', '*.txt'), ('all files',
'*.*')))
filename.set(newfilename)
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
<mask token>
label.pack()
ttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text='File name:').pack()
<mask token>
ttk.Label(root, text='Port:').pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,
2), padx=(10, 10))
ttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)
)
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop()
| <mask token>
root = tkinter.Tk()
root.title('ChadBotX')
MODE_RECORD = 1
MODE_PLAYBACK = 2
portname = tkinter.StringVar(root, '')
filename = tkinter.StringVar(root, '')
mode = tkinter.IntVar(root, 0)
action_button = tkinter.StringVar(root, 'Start')
def pick_file():
newfilename = filedialog.askopenfilename(initialdir='/', title=
'Select file', filetypes=(('Byte files', '*.txt'), ('all files',
'*.*')))
filename.set(newfilename)
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
image = Image.open('./chad.png')
photo = ImageTk.PhotoImage(image)
label = tkinter.Label(image=photo)
label.image = photo
label.pack()
ttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text='File name:').pack()
entry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))
ttk.Label(root, text='Port:').pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,
2), padx=(10, 10))
ttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)
)
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop()
| import tkinter
from tkinter import ttk, filedialog, messagebox
import serial.tools.list_ports
from PIL import ImageTk, Image
from read_bytes import read
root = tkinter.Tk()
root.title('ChadBotX')
MODE_RECORD = 1
MODE_PLAYBACK = 2
portname = tkinter.StringVar(root, '')
filename = tkinter.StringVar(root, '')
mode = tkinter.IntVar(root, 0)
action_button = tkinter.StringVar(root, 'Start')
def pick_file():
newfilename = filedialog.askopenfilename(initialdir='/', title=
'Select file', filetypes=(('Byte files', '*.txt'), ('all files',
'*.*')))
filename.set(newfilename)
def get_ports():
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if not opt_mode or not opt_filename or not opt_mode:
return messagebox.showwarning('Error', 'Invalid input')
if opt_mode == MODE_PLAYBACK:
read(opt_filename, opt_port)
elif opt_mode == MODE_RECORD:
print('record ' + opt_filename + ' ' + opt_port)
action_button.set('Stop')
image = Image.open('./chad.png')
photo = ImageTk.PhotoImage(image)
label = tkinter.Label(image=photo)
label.image = photo
label.pack()
ttk.Button(root, text='Choose file', command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text='File name:').pack()
entry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))
ttk.Label(root, text='Port:').pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0,
2), padx=(10, 10))
ttk.Radiobutton(root, text='Record', variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text='Playback', variable=mode, value=2).pack(pady=(2, 5)
)
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop()
| import tkinter
from tkinter import ttk, filedialog, messagebox
import serial.tools.list_ports
from PIL import ImageTk, Image
from read_bytes import read
root = tkinter.Tk()
root.title('ChadBotX')
# Define constants for mode selection
MODE_RECORD = 1
MODE_PLAYBACK = 2
# Define gui state
portname = tkinter.StringVar(root, "")
filename = tkinter.StringVar(root, "")
mode = tkinter.IntVar(root, 0)
action_button = tkinter.StringVar(root, "Start")
def pick_file():
    # Open file picker and return name of file selected
newfilename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("Byte files","*.txt"),("all files","*.*")))
# tkinter.StringVar(root, filename)
filename.set(newfilename)
def get_ports():
# Get list of com ports
# https://pythonhosted.org/pyserial/tools.html
ports = serial.tools.list_ports.comports()
ports_str = []
for port in ports:
ports_str.append(port.device)
return ports_str
def start():
opt_mode = mode.get()
opt_filename = filename.get()
opt_port = portname.get()
if (not opt_mode or not opt_filename or not opt_mode):
return messagebox.showwarning("Error", "Invalid input")
if (opt_mode == MODE_PLAYBACK):
read(opt_filename, opt_port)
elif (opt_mode == MODE_RECORD):
print("record " + opt_filename + " " + opt_port)
action_button.set('Stop')
# Add widgets to window
image = Image.open("./chad.png")
photo = ImageTk.PhotoImage(image)
label = tkinter.Label(image=photo)
label.image = photo
label.pack()
ttk.Button(root, text="Choose file", command=pick_file).pack(pady=(10, 7))
ttk.Label(root, text="File name:").pack()
entry = ttk.Entry(root, textvariable=filename).pack(pady=(0, 2))
ttk.Label(root, text="Port:").pack()
ttk.Combobox(root, textvariable=portname, values=get_ports()).pack(pady=(0, 2), padx=(10, 10))
ttk.Radiobutton(root, text="Record", variable=mode, value=1).pack(pady=(5, 2))
ttk.Radiobutton(root, text="Playback", variable=mode, value=2).pack(pady=(2, 5))
ttk.Button(root, textvariable=action_button, command=start).pack(pady=(2, 10))
root.mainloop() | [
2,
4,
5,
6,
7
] |
1,377 | e280b003c95681ed4a887b0939077efeac9deefe | <mask token>
| <mask token>
def sorting_l2(mat):
mat_l2 = norma_l2(mat)
mat_sort_index = np.argsort(mat_l2)
mat_sort_l2 = mat[mat_sort_index, :]
return mat_sort_l2[::-1]
| import numpy as np
from Ejercicio1 import norma_l2
def sorting_l2(mat):
mat_l2 = norma_l2(mat)
mat_sort_index = np.argsort(mat_l2)
mat_sort_l2 = mat[mat_sort_index, :]
return mat_sort_l2[::-1]
| null | null | [
0,
1,
2
] |
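For context on the row above: sorting_l2 orders the rows of a matrix by descending L2 norm. A self-contained sketch of the same steps follows; it assumes Ejercicio1.norma_l2 returns one L2 norm per row, which is not shown in the sample.

import numpy as np

mat = np.array([[3.0, 4.0],    # L2 norm 5
                [1.0, 0.0],    # L2 norm 1
                [6.0, 8.0]])   # L2 norm 10

norms = np.linalg.norm(mat, axis=1)        # what norma_l2 presumably computes
ordered = mat[np.argsort(norms), :][::-1]  # same argsort-then-reverse steps as sorting_l2
print(ordered)
# [[6. 8.]
#  [3. 4.]
#  [1. 0.]]   rows ordered by descending norm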
1,378 | 7aba77137b96071101078c38c1c9397bf837d92a | <mask token>
| <mask token>
a.index(333)
print(a)
| a = [66.25, 333, 333, 1, 1234.5]
a.index(333)
print(a)
| null | null | [
0,
1,
2
] |
1,379 | a2e77298059104b403555af95430d7995f8a697b | <mask token>
class LoginViewWebApp(FlaskView):
<mask token>
def __init__(self):
self.user_controller = UserController()
@route('/register', methods=['GET', 'POST'])
def register_user(self):
if request.method == 'GET':
return render_template('register.html')
elif request.method == 'POST':
app.logger.info('Got post')
app.logger.info(request.form)
username, password, email = request.form['username'], request.form[
'password'], request.form['email']
ok, error = self.user_controller.create_user(username, password,
email)
if ok:
return '', 200
else:
return 'User already registered', 432
<mask token>
@route('/logout', methods=['GET'])
def logout(self):
logout_user()
return '', 200
| <mask token>
class LoginViewWebApp(FlaskView):
<mask token>
def __init__(self):
self.user_controller = UserController()
@route('/register', methods=['GET', 'POST'])
def register_user(self):
if request.method == 'GET':
return render_template('register.html')
elif request.method == 'POST':
app.logger.info('Got post')
app.logger.info(request.form)
username, password, email = request.form['username'], request.form[
'password'], request.form['email']
ok, error = self.user_controller.create_user(username, password,
email)
if ok:
return '', 200
else:
return 'User already registered', 432
@route('/login', methods=['GET', 'POST'])
def login(self):
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = self.user_controller.get_user_w_password(username, password)
if user is None:
return 'Invalid credentials', 432
else:
login_user(user)
return '', 200
@route('/logout', methods=['GET'])
def logout(self):
logout_user()
return '', 200
| <mask token>
class LoginViewWebApp(FlaskView):
route_base = '/'
def __init__(self):
self.user_controller = UserController()
@route('/register', methods=['GET', 'POST'])
def register_user(self):
if request.method == 'GET':
return render_template('register.html')
elif request.method == 'POST':
app.logger.info('Got post')
app.logger.info(request.form)
username, password, email = request.form['username'], request.form[
'password'], request.form['email']
ok, error = self.user_controller.create_user(username, password,
email)
if ok:
return '', 200
else:
return 'User already registered', 432
@route('/login', methods=['GET', 'POST'])
def login(self):
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = self.user_controller.get_user_w_password(username, password)
if user is None:
return 'Invalid credentials', 432
else:
login_user(user)
return '', 200
@route('/logout', methods=['GET'])
def logout(self):
logout_user()
return '', 200
| import flask
from flask.ext.classy import FlaskView, route, request
from annotator_supreme.controllers.user_controller import UserController
from annotator_supreme.views import view_tools
from annotator_supreme.views import error_views
from flask import render_template, flash, redirect, url_for
from annotator_supreme import app
from flask.ext.login import login_user, logout_user
import json
class LoginViewWebApp(FlaskView):
route_base = '/'
def __init__(self):
self.user_controller = UserController()
@route('/register', methods=['GET', 'POST'])
def register_user(self):
if request.method == 'GET':
return render_template('register.html')
elif request.method == 'POST':
app.logger.info('Got post')
app.logger.info(request.form)
username, password, email = request.form['username'], request.form[
'password'], request.form['email']
ok, error = self.user_controller.create_user(username, password,
email)
if ok:
return '', 200
else:
return 'User already registered', 432
@route('/login', methods=['GET', 'POST'])
def login(self):
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = self.user_controller.get_user_w_password(username, password)
if user is None:
return 'Invalid credentials', 432
else:
login_user(user)
return '', 200
@route('/logout', methods=['GET'])
def logout(self):
logout_user()
return '', 200
| import flask
from flask.ext.classy import FlaskView, route, request
from annotator_supreme.controllers.user_controller import UserController
from annotator_supreme.views import view_tools
from annotator_supreme.views import error_views
from flask import render_template, flash, redirect, url_for
from annotator_supreme import app
from flask.ext.login import login_user, logout_user
import json
class LoginViewWebApp(FlaskView):
route_base = '/'
def __init__(self):
self.user_controller = UserController()
@route('/register' , methods=['GET','POST'])
def register_user(self):
if request.method == 'GET':
return render_template('register.html')
elif request.method == 'POST':
app.logger.info("Got post")
app.logger.info(request.form)
username, password, email = request.form['username'] , request.form['password'], request.form['email']
ok, error = self.user_controller.create_user(username, password, email)
if ok:
return "", 200
else:
return "User already registered", 432
@route('/login',methods=['GET','POST'])
def login(self):
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = self.user_controller.get_user_w_password(username, password)
if user is None:
return "Invalid credentials", 432
else:
login_user(user)
return "", 200
@route('/logout', methods=['GET'])
def logout(self):
logout_user()
return "", 200
| [
4,
5,
6,
7,
8
] |
1,380 | f0ac2e66cc7fe9730c77a8feb77a74e26986a3f8 | import pygame
class MenuManager():
def __init__(self, manager):
print "Menu manager created. Continue? [y/n]"
self.manager = manager
self.paused = False
self.intro_done = False
self.menus = []
self.menus.append(Pause_menu(self))
self.menus.append(Start_screen(self))
def get_paused(self):
return self.paused
def set_paused(self, pause):
self.paused = pause
def set_intro_done(self, startup):
self.intro_done = startup
def get_intro_done(self):
return self.intro_done
def set_active(self, menu_index):
self.menus[menu_index].set_active()
def unset_active(self, menu_index):
self.menus[menu_index].unset_active()
def exit_game(self):
self.manager.exit_game()
def pass_event(self, event):
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
self.unset_active(1)
self.paused = not self.paused
def draw(self, screen):
if self.paused and self.menus[1].is_active() == False:
self.set_active(0)
else:
self.unset_active(0)
for menu in self.menus:
if menu.is_active():
menu.draw(screen)
class Button():
def __init__(self, pos, size, color, font, font_size, font_color, image=None, text=None):
self.pos = pos
self.size = size
self.rect = pygame.Rect(self.pos, self.size)
self.color = color
self.d_color = 40
if self.color[0]>235 or self.color[1]>235 or self.color[2]>235:
self.hover_color = (self.color[0]-self.d_color, self.color[1]-self.d_color, self.color[2]-self.d_color)
else:
self.hover_color = (self.color[0]+self.d_color, self.color[1]+self.d_color, self.color[2]+self.d_color)
self.font = pygame.font.SysFont(font, font_size)
self.font_color = font_color
self.font.set_bold(True)
if image != None: self.image = pygame.image.load("Sprites/"+image+".png")
else: self.image = None
if text != None:
self.text = self.font.render(text, True, self.font_color)
def draw(self, screen):
draw_pos = (screen.get_width()/2+self.pos[0]-self.size[0]/2, screen.get_height()/2+self.pos[1])
if self.image != None:
screen.blit(self.image, draw_pos)
else:
self.rect = pygame.Rect(draw_pos, self.size)
if self.rect.collidepoint(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]):
draw_color = self.hover_color
else:
draw_color = self.color
pygame.draw.rect(screen, draw_color, self.rect)
screen.blit(self.text, (self.rect.x+self.rect.w/2-self.text.get_width()/2, self.rect.y+self.rect.h/2-self.text.get_height()/2))
pygame.draw.rect(screen, (0,0,0), self.rect, 1)
def is_clicked(self):
if self.rect.collidepoint(pygame.mouse.get_pos()):
return True
else:
return False
class Pause_menu():
def __init__(self, manager):
self.manager = manager
self.buttons = []
self.buttons.append(Button((-100, 30), (100,50), (255,255,255), "Arial", 20, (255,0,0), text="Continue"))
self.buttons.append(Button((100, 30), (120,50), (255,255,255), "Arial", 20, (255,0,0), text="Exit game"))
self.active = False
def draw(self, screen):
for button in self.buttons:
self.check_clicked()
button.draw(screen)
def is_active(self):
return self.active
def set_active(self):
self.active = True
def unset_active(self):
self.active = False
def check_clicked(self):
for button_i in range(len(self.buttons)):
if pygame.mouse.get_pressed()[0] == True and self.buttons[button_i].is_clicked():
if button_i == 0:
self.manager.set_paused(False)
self.manager.unset_active(1)
elif button_i == 1:
print "Exit button pressed. Goodbye"
self.manager.exit_game()
class Start_screen():
def __init__(self, manager):
self.manager = manager
self.active = False
self.image = pygame.image.load("Files/Start_screen.png")
self.buttons = []
self.buttons.append(Button((-100, 150), (130,50), (255,255,255), "Arial", 20, (255,0,0), text="Start"))
self.buttons.append(Button((100, 150), (190,50), (255,255,255), "Arial", 20, (255,0,0), text="Exit game [ESC]"))
def draw(self, screen):
draw_pos = (screen.get_width()/2-self.image.get_width()/2, 20)
self.check_clicked()
for button in self.buttons:
button.draw(screen)
screen.blit(self.image, draw_pos)
def is_active(self):
return self.active
def set_active(self):
self.active = True
def unset_active(self):
self.active = False
def check_clicked(self):
for button_i in range(len(self.buttons)):
if pygame.mouse.get_pressed()[0] == True:
if self.buttons[button_i].is_clicked():
if button_i == 0:
self.manager.set_intro_done(True)
self.manager.unset_active(1)
self.manager.manager.get_universe().set_can_shoot(True)
elif button_i == 1:
print "Exit button pressed. Goodbye"
self.manager.exit_game() | null | null | null | null | [
0
] |
1,381 | 0659df48bb150582917e333a7a25d2d25395dfda | <mask token>
| <mask token>
class face_verifier:
<mask token>
def verify_person(self, f1, f2):
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7:
return 0
elif sim > 0.5:
return 1
else:
return 2
| <mask token>
class face_verifier:
def __init__(self, net_depth=50, drop_ratio=0.6, net_mode='ir_se',
device='cuda'):
self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)
save_path = 'face_recognition/model_ir_se50.pth'
self.model.load_state_dict(torch.load(save_path))
self.model.eval()
def verify_person(self, f1, f2):
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7:
return 0
elif sim > 0.5:
return 1
else:
return 2
| from face_recognition.model import Backbone
import torch
import numpy
class face_verifier:
def __init__(self, net_depth=50, drop_ratio=0.6, net_mode='ir_se',
device='cuda'):
self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)
save_path = 'face_recognition/model_ir_se50.pth'
self.model.load_state_dict(torch.load(save_path))
self.model.eval()
def verify_person(self, f1, f2):
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7:
return 0
elif sim > 0.5:
return 1
else:
return 2
| from face_recognition.model import Backbone
import torch
import numpy
class face_verifier():
def __init__(self, net_depth=50, drop_ratio=0.6, net_mode="ir_se", device="cuda"):
# create model
self.model = Backbone(net_depth, drop_ratio, net_mode).to(device)
save_path = "face_recognition/model_ir_se50.pth"
# load model
self.model.load_state_dict(torch.load(save_path))
self.model.eval()
def verify_person(self, f1, f2):
# 0: same / 1: ambiguous / 2: different
batch_tensor = torch.cat([f1, f2], 0)
output_feat = self.model(batch_tensor.cuda())
sim = torch.nn.CosineSimilarity(dim=0)
sim = sim(output_feat[0], output_feat[1]).data.cpu().numpy()
if sim > 0.7: # same
return 0
elif sim > 0.5: # ambiguous
return 1
else:
return 2
| [
0,
2,
3,
4,
5
] |
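The verify_person decision in the sample above reduces to cosine similarity between two embeddings with two thresholds. A standalone sketch of just that rule, with made-up feature vectors instead of real ir_se50 embeddings (no model weights or GPU required):

import torch

def similarity_label(feat_a, feat_b):
    # Same thresholds as verify_person: >0.7 same, >0.5 ambiguous, else different.
    sim = torch.nn.CosineSimilarity(dim=0)(feat_a, feat_b).item()
    if sim > 0.7:
        return 0  # same person
    elif sim > 0.5:
        return 1  # ambiguous
    return 2      # different people

a = torch.tensor([0.9, 0.1, 0.4])
b = torch.tensor([0.8, 0.2, 0.5])
c = torch.tensor([-0.7, 0.6, 0.1])
print(similarity_label(a, b))  # 0 (vectors point the same way)
print(similarity_label(a, c))  # 2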
1,382 | dccdca65cce2959b07657636e23e7c9ab8a4f96c | <mask token>
class MoneyFst(GraphFst):
<mask token>
def __init__(self, decimal: GraphFst, deterministic: bool=True):
super().__init__(name='money', kind='verbalize', deterministic=
deterministic)
maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
maj_masc = maj_plural_masc | maj_singular_masc
maj_fem = maj_plural_fem | maj_singular_fem
min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
min_masc = min_plural_masc | min_singular_masc
min_fem = min_plural_fem | min_singular_fem
fractional_part = pynutil.delete('fractional_part: "'
) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
integer_part = pynutil.delete('integer_part: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
optional_add_and = pynini.closure(pynutil.insert(pynini.union(
'con ', 'y ')), 0, 1)
graph_integer_masc = integer_part + NEMO_SPACE + maj_masc
graph_integer_fem = shift_cardinal_gender(integer_part
) + NEMO_SPACE + maj_fem
graph_integer = graph_integer_fem | graph_integer_masc
graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +
pynini.union(optional_add_and + strip_cardinal_apocope(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +
pynini.union(optional_add_and + shift_cardinal_gender(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor = (graph_integer_with_minor_fem |
graph_integer_with_minor_masc)
graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc
graph_decimal_fem = decimal.graph_fem
graph_decimal_fem |= decimal.numbers_only_quantity
graph_decimal_fem += NEMO_SPACE + maj_fem
graph_decimal = graph_decimal_fem | graph_decimal_masc
graph_decimal = pynini.cdrewrite(pynutil.insert(' de'),
'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA
) @ graph_decimal
graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +
delete_preserve_order)
graph_minor_fem = shift_cardinal_gender(fractional_part
) + NEMO_SPACE + min_fem + delete_preserve_order
graph_minor = graph_minor_fem | graph_minor_masc
graph = (graph_integer | graph_integer_with_minor | graph_decimal |
graph_minor)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| <mask token>
class MoneyFst(GraphFst):
"""
Finite state transducer for verbalizing money, e.g.
money { currency_maj: "euro" integer_part: "un"} -> "un euro"
money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques"
money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique"
Args:
decimal: GraphFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, decimal: GraphFst, deterministic: bool=True):
super().__init__(name='money', kind='verbalize', deterministic=
deterministic)
maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
maj_masc = maj_plural_masc | maj_singular_masc
maj_fem = maj_plural_fem | maj_singular_fem
min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
min_masc = min_plural_masc | min_singular_masc
min_fem = min_plural_fem | min_singular_fem
fractional_part = pynutil.delete('fractional_part: "'
) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
integer_part = pynutil.delete('integer_part: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
optional_add_and = pynini.closure(pynutil.insert(pynini.union(
'con ', 'y ')), 0, 1)
graph_integer_masc = integer_part + NEMO_SPACE + maj_masc
graph_integer_fem = shift_cardinal_gender(integer_part
) + NEMO_SPACE + maj_fem
graph_integer = graph_integer_fem | graph_integer_masc
graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +
pynini.union(optional_add_and + strip_cardinal_apocope(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +
pynini.union(optional_add_and + shift_cardinal_gender(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor = (graph_integer_with_minor_fem |
graph_integer_with_minor_masc)
graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc
graph_decimal_fem = decimal.graph_fem
graph_decimal_fem |= decimal.numbers_only_quantity
graph_decimal_fem += NEMO_SPACE + maj_fem
graph_decimal = graph_decimal_fem | graph_decimal_masc
graph_decimal = pynini.cdrewrite(pynutil.insert(' de'),
'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA
) @ graph_decimal
graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +
delete_preserve_order)
graph_minor_fem = shift_cardinal_gender(fractional_part
) + NEMO_SPACE + min_fem + delete_preserve_order
graph_minor = graph_minor_fem | graph_minor_masc
graph = (graph_integer | graph_integer_with_minor | graph_decimal |
graph_minor)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| <mask token>
try:
import pynini
from pynini.lib import pynutil
fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')
)
masc = pynini.string_file(get_abs_path(
'data/money/currency_plural_masc.tsv'))
fem_singular = pynini.project(fem, 'input')
masc_singular = pynini.project(masc, 'input')
fem_plural = pynini.project(fem, 'output')
masc_plural = pynini.project(masc, 'output')
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
fem_plural = None
masc_plural = None
fem_singular = None
masc_singular = None
PYNINI_AVAILABLE = False
class MoneyFst(GraphFst):
"""
Finite state transducer for verbalizing money, e.g.
money { currency_maj: "euro" integer_part: "un"} -> "un euro"
money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques"
money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique"
Args:
decimal: GraphFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, decimal: GraphFst, deterministic: bool=True):
super().__init__(name='money', kind='verbalize', deterministic=
deterministic)
maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
maj_masc = maj_plural_masc | maj_singular_masc
maj_fem = maj_plural_fem | maj_singular_fem
min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
min_masc = min_plural_masc | min_singular_masc
min_fem = min_plural_fem | min_singular_fem
fractional_part = pynutil.delete('fractional_part: "'
) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
integer_part = pynutil.delete('integer_part: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
optional_add_and = pynini.closure(pynutil.insert(pynini.union(
'con ', 'y ')), 0, 1)
graph_integer_masc = integer_part + NEMO_SPACE + maj_masc
graph_integer_fem = shift_cardinal_gender(integer_part
) + NEMO_SPACE + maj_fem
graph_integer = graph_integer_fem | graph_integer_masc
graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +
pynini.union(optional_add_and + strip_cardinal_apocope(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +
pynini.union(optional_add_and + shift_cardinal_gender(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor = (graph_integer_with_minor_fem |
graph_integer_with_minor_masc)
graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc
graph_decimal_fem = decimal.graph_fem
graph_decimal_fem |= decimal.numbers_only_quantity
graph_decimal_fem += NEMO_SPACE + maj_fem
graph_decimal = graph_decimal_fem | graph_decimal_masc
graph_decimal = pynini.cdrewrite(pynutil.insert(' de'),
'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA
) @ graph_decimal
graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +
delete_preserve_order)
graph_minor_fem = shift_cardinal_gender(fractional_part
) + NEMO_SPACE + min_fem + delete_preserve_order
graph_minor = graph_minor_fem | graph_minor_masc
graph = (graph_integer | graph_integer_with_minor | graph_decimal |
graph_minor)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst, delete_preserve_order
from nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope
from nemo_text_processing.text_normalization.es.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')
)
masc = pynini.string_file(get_abs_path(
'data/money/currency_plural_masc.tsv'))
fem_singular = pynini.project(fem, 'input')
masc_singular = pynini.project(masc, 'input')
fem_plural = pynini.project(fem, 'output')
masc_plural = pynini.project(masc, 'output')
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
fem_plural = None
masc_plural = None
fem_singular = None
masc_singular = None
PYNINI_AVAILABLE = False
class MoneyFst(GraphFst):
"""
Finite state transducer for verbalizing money, e.g.
money { currency_maj: "euro" integer_part: "un"} -> "un euro"
money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques"
money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique"
Args:
decimal: GraphFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, decimal: GraphFst, deterministic: bool=True):
super().__init__(name='money', kind='verbalize', deterministic=
deterministic)
maj_singular_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
maj_singular_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
maj_plural_masc = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
maj_plural_fem = pynutil.delete('currency_maj: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
maj_masc = maj_plural_masc | maj_singular_masc
maj_fem = maj_plural_fem | maj_singular_fem
min_singular_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('"')
min_singular_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('"')
min_plural_masc = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('"')
min_plural_fem = pynutil.delete('currency_min: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('"')
min_masc = min_plural_masc | min_singular_masc
min_fem = min_plural_fem | min_singular_fem
fractional_part = pynutil.delete('fractional_part: "'
) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
integer_part = pynutil.delete('integer_part: "') + pynini.closure(
NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
optional_add_and = pynini.closure(pynutil.insert(pynini.union(
'con ', 'y ')), 0, 1)
graph_integer_masc = integer_part + NEMO_SPACE + maj_masc
graph_integer_fem = shift_cardinal_gender(integer_part
) + NEMO_SPACE + maj_fem
graph_integer = graph_integer_fem | graph_integer_masc
graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +
pynini.union(optional_add_and + strip_cardinal_apocope(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +
pynini.union(optional_add_and + shift_cardinal_gender(
fractional_part), optional_add_and + fractional_part +
NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)
graph_integer_with_minor = (graph_integer_with_minor_fem |
graph_integer_with_minor_masc)
graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc
graph_decimal_fem = decimal.graph_fem
graph_decimal_fem |= decimal.numbers_only_quantity
graph_decimal_fem += NEMO_SPACE + maj_fem
graph_decimal = graph_decimal_fem | graph_decimal_masc
graph_decimal = pynini.cdrewrite(pynutil.insert(' de'),
'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA
) @ graph_decimal
graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +
delete_preserve_order)
graph_minor_fem = shift_cardinal_gender(fractional_part
) + NEMO_SPACE + min_fem + delete_preserve_order
graph_minor = graph_minor_fem | graph_minor_masc
graph = (graph_integer | graph_integer_with_minor | graph_decimal |
graph_minor)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| # Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_preserve_order,
)
from nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope
from nemo_text_processing.text_normalization.es.utils import get_abs_path
try:
import pynini
from pynini.lib import pynutil
fem = pynini.string_file((get_abs_path("data/money/currency_plural_fem.tsv")))
masc = pynini.string_file((get_abs_path("data/money/currency_plural_masc.tsv")))
fem_singular = pynini.project(fem, "input")
masc_singular = pynini.project(masc, "input")
fem_plural = pynini.project(fem, "output")
masc_plural = pynini.project(masc, "output")
PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
fem_plural = None
masc_plural = None
fem_singular = None
masc_singular = None
PYNINI_AVAILABLE = False
class MoneyFst(GraphFst):
"""
Finite state transducer for verbalizing money, e.g.
money { currency_maj: "euro" integer_part: "un"} -> "un euro"
money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta"
money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques"
money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique"
Args:
decimal: GraphFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, decimal: GraphFst, deterministic: bool = True):
super().__init__(name="money", kind="verbalize", deterministic=deterministic)
maj_singular_masc = (
pynutil.delete("currency_maj: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)
+ pynutil.delete("\"")
)
maj_singular_fem = (
pynutil.delete("currency_maj: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)
+ pynutil.delete("\"")
)
maj_plural_masc = (
pynutil.delete("currency_maj: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)
+ pynutil.delete("\"")
)
maj_plural_fem = (
pynutil.delete("currency_maj: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)
+ pynutil.delete("\"")
)
maj_masc = maj_plural_masc | maj_singular_masc # Tagger kept quantity resolution stable
maj_fem = maj_plural_fem | maj_singular_fem
min_singular_masc = (
pynutil.delete("currency_min: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)
+ pynutil.delete("\"")
)
min_singular_fem = (
pynutil.delete("currency_min: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)
+ pynutil.delete("\"")
)
min_plural_masc = (
pynutil.delete("currency_min: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)
+ pynutil.delete("\"")
)
min_plural_fem = (
pynutil.delete("currency_min: \"")
+ (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)
+ pynutil.delete("\"")
)
min_masc = min_plural_masc | min_singular_masc
min_fem = min_plural_fem | min_singular_fem
fractional_part = (
pynutil.delete("fractional_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
)
integer_part = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
optional_add_and = pynini.closure(pynutil.insert(pynini.union("con ", "y ")), 0, 1)
# *** currency_maj
graph_integer_masc = integer_part + NEMO_SPACE + maj_masc
graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem
graph_integer = graph_integer_fem | graph_integer_masc
# *** currency_maj + (***) | ((con) *** current_min)
graph_integer_with_minor_masc = (
graph_integer_masc
+ NEMO_SPACE
+ pynini.union(
optional_add_and + strip_cardinal_apocope(fractional_part),
(optional_add_and + fractional_part + NEMO_SPACE + min_masc),
(optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),
) # Could be minor currency that is different gender
+ delete_preserve_order
)
graph_integer_with_minor_fem = (
graph_integer_fem
+ NEMO_SPACE
+ pynini.union(
optional_add_and + shift_cardinal_gender(fractional_part),
(optional_add_and + fractional_part + NEMO_SPACE + min_masc),
(optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),
) # Could be minor currency that is different gender
+ delete_preserve_order
)
graph_integer_with_minor = graph_integer_with_minor_fem | graph_integer_with_minor_masc
## *** coma *** currency_maj
graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc
graph_decimal_fem = decimal.graph_fem
graph_decimal_fem |= decimal.numbers_only_quantity # can still have "x billions" with fem currency
graph_decimal_fem += NEMO_SPACE + maj_fem
graph_decimal = graph_decimal_fem | graph_decimal_masc
graph_decimal = (
pynini.cdrewrite(
pynutil.insert(" de"), "quantity: \"" + pynini.closure(NEMO_NOT_QUOTE, 1), "\"", NEMO_SIGMA
)
@ graph_decimal
) # formally it's millones/billones de ***
# *** current_min
graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order
graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order
graph_minor = graph_minor_fem | graph_minor_masc
graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| [
2,
3,
4,
5,
6
] |
1,383 | ebbc79d6582f7d6139e0dcec6333b679bb86c63c | <mask token>
| class Solution(object):
<mask token>
| class Solution(object):
def findPaths(self, m, n, N, i, j):
"""
:type m: int
:type n: int
:type N: int
:type i: int
:type j: int
:rtype: int
"""
MOD = 10 ** 9 + 7
dz = zip((1, 0, -1, 0), (0, 1, 0, -1))
dp = [([0] * n) for x in range(m)]
dp[i][j] = 1
ans = 0
for _ in range(N):
ndp = [([0] * n) for x in range(m)]
for x in range(m):
for y in range(n):
for dx, dy in dz:
nx, ny = x + dx, y + dy
if 0 <= nx < m and 0 <= ny < n:
ndp[nx][ny] = (ndp[nx][ny] + dp[x][y]) % MOD
else:
ans = (ans + dp[x][y]) % MOD
dp = ndp
return ans
| class Solution(object):
def findPaths(self, m, n, N, i, j):
"""
:type m: int
:type n: int
:type N: int
:type i: int
:type j: int
:rtype: int
"""
MOD = 10 ** 9 + 7
dz = zip((1,0,-1,0),(0,1,0,-1))
dp = [[0]* n for x in range(m)]
dp[i][j] = 1
ans = 0
for _ in range(N):
ndp = [[0] * n for x in range(m)]
for x in range(m):
for y in range(n):
for dx,dy in dz:
nx,ny = x + dx, y+dy
if 0 <= nx < m and 0 <= ny <n:
ndp[nx][ny]= (ndp[nx][ny]+dp[x][y])%MOD
else:
ans = (ans + dp[x][y])% MOD
dp = ndp
return ans
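    # Quick sanity check (illustrative only, not part of the original submission):
    # a 2x2 grid with at most 2 moves starting from (0, 0) has 6 ways to move the
    # ball out of bounds, so Solution().findPaths(2, 2, 2, 0, 0) should return 6.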
| null | [
0,
1,
2,
3
] |
1,384 | 2da6debb1f9ae2c966a17fdfb3b668160a3ef8d7 | <mask token>
| <mask token>
if real_fdragon50 == input:
print('Hello!')
else:
print('Who are you')
| <mask token>
input = 11
real_fdragon50 = 11
if real_fdragon50 == input:
print('Hello!')
else:
print('Who are you')
| '''
# conditional statement example
#fdragon50
#2016
'''
# Comments are for help/notes and for recording unused (possibly reusable later) code
# Multi-line strings can be written between ''' ''', but using them as comments is not recommended
# Anything that starts with # is not part of a string itself; it is simply an ignored section
# Write comments so that anyone can understand them, and keep them concise
# Even better is code that anyone can understand on its own
# Aim to write readable code..
# conditional statement example
#fdragon50
#2016
input = 11
real_fdragon50 = 11
#real_k8805 = "ab"
if real_fdragon50 == input:
print("Hello!")
#elif real_k8805 == input:
# print("Hello!")
else:
print("Who are you")
| null | [
0,
1,
2,
3
] |
1,385 | deb8ee1d6327a6406244147a819821e8d2b2890e | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Invoice', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('created_on', models.DateTimeField(
verbose_name='Created on', unique=True, editable=False)), (
'payment_no', models.PositiveIntegerField(verbose_name='Payment on',
unique=True, editable=False)), ('payment_info', models.CharField(
verbose_name='Payment Info', max_length=128, editable=False)), (
'user', models.ForeignKey(editable=False, to=settings.
AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':
'invoice', 'verbose_name_plural': 'invoices'}), migrations.
CreateModel(name='Payment', fields=[('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=
True)), ('created_on', models.DateTimeField(auto_now_add=True,
verbose_name='Created on')), ('amount', models.DecimalField(
verbose_name='Amount', max_digits=9, decimal_places=2)), (
'payment_no', models.PositiveIntegerField(unique=True, verbose_name
='Payment no')), ('mode', models.PositiveSmallIntegerField(
verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (
'sys_invs_no', models.PositiveIntegerField(verbose_name=
b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(
verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.
DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',
models.CharField(max_length=13, verbose_name='Payer purse')), (
'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'
)), ('paymer_number', models.CharField(max_length=30, verbose_name=
'Paymer number', blank=True)), ('paymer_email', models.EmailField(
max_length=254, verbose_name='Paymer email', blank=True)), (
'telepat_phonenumber', models.CharField(max_length=30, verbose_name
='Phone number', blank=True)), ('telepat_orderid', models.CharField
(max_length=30, verbose_name='Order id', blank=True)), (
'payment_creditdays', models.PositiveIntegerField(null=True,
verbose_name='Credit days', blank=True)), ('invoice', models.
OneToOneField(related_name='payment', null=True, blank=True, to=
'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={
'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),
migrations.CreateModel(name='Purse', fields=[('id', models.
AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)), ('purse', models.CharField(unique=True,
max_length=13, verbose_name='Purse')), ('purse_type', models.
CharField(default=b'B', unique=True, max_length=1, verbose_name=
'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',
b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',
b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',
b'WMZ')])), ('secret_key', models.CharField(max_length=50,
verbose_name='Secret key'))], options={'verbose_name': 'purse',
'verbose_name_plural': 'purses'}), migrations.AddField(model_name=
'payment', name='payee_purse', field=models.ForeignKey(related_name
='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))
]
| from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]
operations = [migrations.CreateModel(name='Invoice', fields=[('id',
models.AutoField(verbose_name='ID', serialize=False, auto_created=
True, primary_key=True)), ('created_on', models.DateTimeField(
verbose_name='Created on', unique=True, editable=False)), (
'payment_no', models.PositiveIntegerField(verbose_name='Payment on',
unique=True, editable=False)), ('payment_info', models.CharField(
verbose_name='Payment Info', max_length=128, editable=False)), (
'user', models.ForeignKey(editable=False, to=settings.
AUTH_USER_MODEL, verbose_name='User'))], options={'verbose_name':
'invoice', 'verbose_name_plural': 'invoices'}), migrations.
CreateModel(name='Payment', fields=[('id', models.AutoField(
verbose_name='ID', serialize=False, auto_created=True, primary_key=
True)), ('created_on', models.DateTimeField(auto_now_add=True,
verbose_name='Created on')), ('amount', models.DecimalField(
verbose_name='Amount', max_digits=9, decimal_places=2)), (
'payment_no', models.PositiveIntegerField(unique=True, verbose_name
='Payment no')), ('mode', models.PositiveSmallIntegerField(
verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])), (
'sys_invs_no', models.PositiveIntegerField(verbose_name=
b'LMI_SYS_INVS_NO')), ('sys_trans_no', models.PositiveIntegerField(
verbose_name=b'LMI_SYS_TRANS_NO')), ('sys_trans_date', models.
DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')), ('payer_purse',
models.CharField(max_length=13, verbose_name='Payer purse')), (
'payer_wm', models.CharField(max_length=12, verbose_name='Payer WM'
)), ('paymer_number', models.CharField(max_length=30, verbose_name=
'Paymer number', blank=True)), ('paymer_email', models.EmailField(
max_length=254, verbose_name='Paymer email', blank=True)), (
'telepat_phonenumber', models.CharField(max_length=30, verbose_name
='Phone number', blank=True)), ('telepat_orderid', models.CharField
(max_length=30, verbose_name='Order id', blank=True)), (
'payment_creditdays', models.PositiveIntegerField(null=True,
verbose_name='Credit days', blank=True)), ('invoice', models.
OneToOneField(related_name='payment', null=True, blank=True, to=
'webmoney_merchant.Invoice', verbose_name='Invoice'))], options={
'verbose_name': 'payment', 'verbose_name_plural': 'payments'}),
migrations.CreateModel(name='Purse', fields=[('id', models.
AutoField(verbose_name='ID', serialize=False, auto_created=True,
primary_key=True)), ('purse', models.CharField(unique=True,
max_length=13, verbose_name='Purse')), ('purse_type', models.
CharField(default=b'B', unique=True, max_length=1, verbose_name=
'Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D',
b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R',
b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z',
b'WMZ')])), ('secret_key', models.CharField(max_length=50,
verbose_name='Secret key'))], options={'verbose_name': 'purse',
'verbose_name_plural': 'purses'}), migrations.AddField(model_name=
'payment', name='payee_purse', field=models.ForeignKey(related_name
='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'))
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Invoice',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(verbose_name='Created on', unique=True, editable=False)),
('payment_no', models.PositiveIntegerField(verbose_name='Payment on', unique=True, editable=False)),
('payment_info', models.CharField(verbose_name='Payment Info', max_length=128, editable=False)),
('user', models.ForeignKey(editable=False, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'invoice',
'verbose_name_plural': 'invoices',
},
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
('amount', models.DecimalField(verbose_name='Amount', max_digits=9, decimal_places=2)),
('payment_no', models.PositiveIntegerField(unique=True, verbose_name='Payment no')),
('mode', models.PositiveSmallIntegerField(verbose_name='Mode', choices=[(0, b'REAL'), (1, b'TEST')])),
('sys_invs_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_INVS_NO')),
('sys_trans_no', models.PositiveIntegerField(verbose_name=b'LMI_SYS_TRANS_NO')),
('sys_trans_date', models.DateTimeField(verbose_name=b'LMI_SYS_TRANS_DATE')),
('payer_purse', models.CharField(max_length=13, verbose_name='Payer purse')),
('payer_wm', models.CharField(max_length=12, verbose_name='Payer WM')),
('paymer_number', models.CharField(max_length=30, verbose_name='Paymer number', blank=True)),
('paymer_email', models.EmailField(max_length=254, verbose_name='Paymer email', blank=True)),
('telepat_phonenumber', models.CharField(max_length=30, verbose_name='Phone number', blank=True)),
('telepat_orderid', models.CharField(max_length=30, verbose_name='Order id', blank=True)),
('payment_creditdays', models.PositiveIntegerField(null=True, verbose_name='Credit days', blank=True)),
('invoice', models.OneToOneField(related_name='payment', null=True, blank=True, to='webmoney_merchant.Invoice', verbose_name='Invoice')),
],
options={
'verbose_name': 'payment',
'verbose_name_plural': 'payments',
},
),
migrations.CreateModel(
name='Purse',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('purse', models.CharField(unique=True, max_length=13, verbose_name='Purse')),
('purse_type', models.CharField(default=b'B', unique=True, max_length=1, verbose_name='Purse type', choices=[(b'B', b'WMB'), (b'C', b'WMC'), (b'D', b'WMD'), (b'E', b'WME'), (b'G', b'WMG'), (b'K', b'WMK'), (b'R', b'WMR'), (b'U', b'WMU'), (b'X', b'WMX'), (b'Y', b'WMY'), (b'Z', b'WMZ')])),
('secret_key', models.CharField(max_length=50, verbose_name='Secret key')),
],
options={
'verbose_name': 'purse',
'verbose_name_plural': 'purses',
},
),
migrations.AddField(
model_name='payment',
name='payee_purse',
field=models.ForeignKey(related_name='payments', verbose_name='Payee purse', to='webmoney_merchant.Purse'),
),
]
| [
0,
1,
2,
3,
4
] |
1,386 | d64140466e62b78506d0f200f451649023697a3b | <mask token>
def deps_remote():
for step in INSTALL_STEPS:
run(step)
| <mask token>
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
| <mask token>
INSTALL_STEPS = [
'yes | sudo apt-get install libmysqlclient-dev\t python-dev python-mysqldb python-virtualenv'
, 'virtualenv --no-site-packages env',
'. env/bin/activate;pip install -r requirements.txt']
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
| from fabric.api import local, run
INSTALL_STEPS = [
'yes | sudo apt-get install libmysqlclient-dev\t python-dev python-mysqldb python-virtualenv'
, 'virtualenv --no-site-packages env',
'. env/bin/activate;pip install -r requirements.txt']
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
| from fabric.api import local,run
INSTALL_STEPS = ['yes | sudo apt-get install libmysqlclient-dev python-dev python-mysqldb python-virtualenv',
'virtualenv --no-site-packages env',
'. env/bin/activate;pip install -r requirements.txt']
def deps_local():
for step in INSTALL_STEPS:
local(step)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
| [
1,
2,
3,
4,
5
] |
1,387 | 8356bc92a3a8b561d55bf5f2d9aeb0da89b730ca | # -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
from matplotlib import colors
import numpy as np
import sys
max_value = int(sys.argv[1])
file1 = open(sys.argv[2])
file2 = open(sys.argv[3])
file3 = open(sys.argv[4])
histogram = np.zeros(max_value, dtype=int).tolist()
highest_value = 0.0
sample_size = 0.5
sample = []
for i,line in enumerate(file1.readlines()):
values = line.strip().split(",")
for j, value in enumerate(values):
if(j == 0):
histogram[int(value.split("[")[1])] += 1
elif(j == len(values) - 1):
histogram[int(value.split("]")[0])] += 1
else:
histogram[int(value)] += 1
for i,line in enumerate(file2.readlines()):
values = line.strip().split(",")
for j, value in enumerate(values):
if(j == 0):
histogram[int(value.split("[")[1])] += 1
elif(j == len(values) - 1):
histogram[int(value.split("]")[0])] += 1
else:
histogram[int(value)] += 1
for i,line in enumerate(file3.readlines()):
values = line.strip().split(",")
for j, value in enumerate(values):
if(j == 0):
histogram[int(value.split("[")[1])] += 1
elif(j == len(values) - 1):
histogram[int(value.split("]")[0])] += 1
else:
histogram[int(value)] += 1
for i in range(len(histogram)):
histogram[i] = histogram[i] / 3.0
for value in histogram:
if(value > highest_value):
highest_value = float(value)
print highest_value
for i,value in enumerate(histogram):
if(value >= (highest_value - (highest_value * sample_size))):
sample.append(i)
sample_file = open(sys.argv[1].split("_mean.")[0] + ".sample", "w")
for value in sample:
sample_file.write(str(value) + " ")
sample_file.close()
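# Worked example of the sampling rule above: if the averaged peak is highest_value = 40
# and sample_size = 0.5, the cutoff is 40 - 40 * 0.5 = 20, so every individual selected
# at least 20 times on average across the three input files ends up in the .sample file.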
objects = []
for i in range(max_value):
objects.append(str(i))
y_pos = np.arange(len(objects))
ibar = plt.bar(y_pos, histogram, align='center', alpha=0.5)
for i,element in enumerate(ibar):
norm = colors.Normalize(0.0, 1.0)
color = plt.cm.winter(norm(histogram[i]/highest_value))
element.set_color(color)
#plt.xticks(y_pos, objects)
plt.xlabel('Individuo')
plt.ylabel('Vezes Selecionado')
plt.title('GASIR - Genetic Algorithm for SIR Model')
plt.savefig(sys.argv[1].split(".")[0] + "_mean.svg", format="svg")
#plt.show()
| null | null | null | null | [
0
] |
1,388 | aaebd9eba8a5c51c64baaf60224720b87a6364e1 | <mask token>
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
<mask token>
| <mask token>
def sort_data(data_list):
x_list = []
y_list = []
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array = np.array(x_list)
y_array = np.array(y_list)
return x_array, y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output = func(*args, **kw)
time_cost = time.time() - local_time
print('{} run time is {}'.format(func.__name__, time_cost))
with open('./cluster/tmp.csv', 'a+') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__, output, time_cost])
return output, time_cost
return wrapper
@print_run_time
def kmeans(train_x, train_y, num_cluster=5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)
return fmi
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
@print_run_time
def AC(train_x, train_y, num_cluster=5):
ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
ac.fit(train_x)
predicted_labels = ac.fit_predict(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)
return fmi
@print_run_time
def S_C(train_x, train_y, num_cluster=5):
sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)
return fmi
@print_run_time
def MBK(train_x, train_y, num_cluster=5):
mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)
return fmi
<mask token>
| <mask token>
sys.path.append('./feature/')
<mask token>
def sort_data(data_list):
x_list = []
y_list = []
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array = np.array(x_list)
y_array = np.array(y_list)
return x_array, y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output = func(*args, **kw)
time_cost = time.time() - local_time
print('{} run time is {}'.format(func.__name__, time_cost))
with open('./cluster/tmp.csv', 'a+') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__, output, time_cost])
return output, time_cost
return wrapper
@print_run_time
def kmeans(train_x, train_y, num_cluster=5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)
return fmi
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
@print_run_time
def AC(train_x, train_y, num_cluster=5):
ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
ac.fit(train_x)
predicted_labels = ac.fit_predict(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)
return fmi
@print_run_time
def S_C(train_x, train_y, num_cluster=5):
sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)
return fmi
@print_run_time
def MBK(train_x, train_y, num_cluster=5):
mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)
return fmi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',
help='')
parser.add_argument('-t', '--task', type=int, default=0, help='')
parser.add_argument('-f', '--feature_type', type=int, default=0, help='')
parser.add_argument('-l', '--label_type', type=int, default=2, help='')
parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')
parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3
), help='')
parser.add_argument('-m', '--method', type=str, default='all', choices=
['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',
'all'], help='')
parser.add_argument('-sp', '--save_path', type=str, default=
'./cluster/result.csv', help='')
args = parser.parse_args()
training_set, validation_set, test_set = fe.get_datasets(dataset=args.
dataset, task=args.task, feature_type=args.feature_type, label_type
=args.label_type, shuffle=args.shuffle, proportion=args.proportion)
train_x, train_y = sort_data(training_set)
val_x, val_y = sort_data(validation_set)
with open('./cluster/tmp.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['method', 'index', 'time_cost'])
if args.method == 'kmeans':
acc = kmeans(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'dbscan':
acc = dbscan(train_x, train_y)
elif args.method == 'AC':
acc = AC(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'AP':
acc = AP(train_x, train_y)
elif args.method == 'meanshift':
acc = meanshift(train_x, train_y)
elif args.method == 'S_C':
acc = S_C(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'FA':
acc = FA(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'MBK':
acc = MBK(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'all':
acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))
acc_ac = AC(train_x, train_y, len(np.unique(train_y)))
acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))
acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))
acc_db = dbscan(train_x, train_y)
tmp_path = os.path.abspath('./cluster/tmp.csv')
os.rename('./cluster/tmp.csv', args.save_path)
| import numpy as np
import sklearn.cluster as cluster
import os
import time
import argparse
import csv
from sklearn import metrics
import sys
sys.path.append('./feature/')
import feature_extraction as fe
def sort_data(data_list):
x_list = []
y_list = []
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array = np.array(x_list)
y_array = np.array(y_list)
return x_array, y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output = func(*args, **kw)
time_cost = time.time() - local_time
print('{} run time is {}'.format(func.__name__, time_cost))
with open('./cluster/tmp.csv', 'a+') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__, output, time_cost])
return output, time_cost
return wrapper
@print_run_time
def kmeans(train_x, train_y, num_cluster=5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, km_cluster.labels_)
return fmi
@print_run_time
def dbscan(train_x, train_y):
db = cluster.DBSCAN(eps=0.2, min_samples=3)
db.fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, db.labels_)
return fmi
@print_run_time
def AC(train_x, train_y, num_cluster=5):
ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
ac.fit(train_x)
predicted_labels = ac.fit_predict(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, ac.labels_)
return fmi
@print_run_time
def S_C(train_x, train_y, num_cluster=5):
sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, sc.labels_)
return fmi
@print_run_time
def MBK(train_x, train_y, num_cluster=5):
mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
fmi = metrics.fowlkes_mallows_score(train_y, mbk.labels_)
return fmi
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-d', '--dataset', type=str, default='cit-HepPh',
help='')
parser.add_argument('-t', '--task', type=int, default=0, help='')
parser.add_argument('-f', '--feature_type', type=int, default=0, help='')
parser.add_argument('-l', '--label_type', type=int, default=2, help='')
parser.add_argument('-s', '--shuffle', type=bool, default=True, help='')
parser.add_argument('-p', '--proportion', type=tuple, default=(0.7, 0.3
), help='')
parser.add_argument('-m', '--method', type=str, default='all', choices=
['kmeans', 'dbscan', 'AC', 'AP', 'meanshift', 'S_C', 'FA', 'MBK',
'all'], help='')
parser.add_argument('-sp', '--save_path', type=str, default=
'./cluster/result.csv', help='')
args = parser.parse_args()
training_set, validation_set, test_set = fe.get_datasets(dataset=args.
dataset, task=args.task, feature_type=args.feature_type, label_type
=args.label_type, shuffle=args.shuffle, proportion=args.proportion)
train_x, train_y = sort_data(training_set)
val_x, val_y = sort_data(validation_set)
with open('./cluster/tmp.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['method', 'index', 'time_cost'])
if args.method == 'kmeans':
acc = kmeans(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'dbscan':
acc = dbscan(train_x, train_y)
elif args.method == 'AC':
acc = AC(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'AP':
acc = AP(train_x, train_y)
elif args.method == 'meanshift':
acc = meanshift(train_x, train_y)
elif args.method == 'S_C':
acc = S_C(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'FA':
acc = FA(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'MBK':
acc = MBK(train_x, train_y, len(np.unique(train_y)))
elif args.method == 'all':
acc_k = kmeans(train_x, train_y, len(np.unique(train_y)))
acc_ac = AC(train_x, train_y, len(np.unique(train_y)))
acc_sc = S_C(train_x, train_y, len(np.unique(train_y)))
acc_mbk = MBK(train_x, train_y, len(np.unique(train_y)))
acc_db = dbscan(train_x, train_y)
tmp_path = os.path.abspath('./cluster/tmp.csv')
os.rename('./cluster/tmp.csv', args.save_path)
 | # Clustering algorithms:
# kmeans
# Density clustering: DBSCAN
# Hierarchical clustering: AgglomerativeClustering
# Spectral clustering: SpectralClustering
# Mini-batch kmeans: MiniBatchKMeans
# Evaluation metric: FMI (Fowlkes-Mallows index)
# Excluded: feature clustering: FeatureAgglomeration # affinity propagation (AP) clustering: affinity_propagation # mean-shift: MeanShift
import numpy as np
import sklearn.cluster as cluster
import os
import time
import argparse
import csv
from sklearn import metrics
import sys
sys.path.append('./feature/')
import feature_extraction as fe
def sort_data(data_list):
x_list=[]
y_list=[]
for data in data_list:
x_list.append(data[0])
y_list.append(data[1])
x_array=np.array(x_list)
y_array=np.array(y_list)
return x_array,y_array
def print_run_time(func):
def wrapper(*args, **kw):
local_time = time.time()
output=func(*args, **kw)
time_cost=time.time() - local_time
print('{} run time is {}'.format(func.__name__,time_cost))
with open("./cluster/tmp.csv","a+") as csvfile:
writer = csv.writer(csvfile)
writer.writerow([func.__name__,output,time_cost])
return output,time_cost
return wrapper
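# Effect of the decorator defined above: a wrapped call returns a (result, time_cost)
# tuple and appends [function_name, result, time_cost] to ./cluster/tmp.csv, e.g.
#   fmi, seconds = kmeans(train_x, train_y, num_cluster=5)   # illustrative call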
@print_run_time
def kmeans (train_x,train_y,num_cluster = 5):
km_cluster = cluster.KMeans(n_clusters=num_cluster)
km_cluster.fit(train_x)
    # FMI score: compare against the ground-truth labels
    fmi = metrics.fowlkes_mallows_score(train_y,km_cluster.labels_)
    # print("FMI score for kmeans: %f" % (fmi))
return fmi
@print_run_time
def dbscan(train_x,train_y):
    # Density-based clustering
    db = cluster.DBSCAN(eps=0.2,min_samples=3)
    db.fit(train_x)
    # FMI score: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,db.labels_)
return fmi
@print_run_time
def AC(train_x,train_y,num_cluster = 5):
    # Hierarchical (agglomerative) clustering
    ac = cluster.AgglomerativeClustering(n_clusters=num_cluster)
    ac.fit(train_x)
    predicted_labels = ac.fit_predict(train_x)
    # # compute the ARI index
    # ARI = (metrics.adjusted_rand_score(train_y, predicted_labels))
    # FMI score: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,ac.labels_)
return fmi
# @print_run_time  # decorator for the commented-out AP() below (kept commented so S_C below is not wrapped twice)
# def AP(train_x,train_y):
#     # Affinity propagation (AP) clustering
#     ap = cluster.affinity_propagation(preference=-50).fit(train_x)
#     # FMI score: compare against the ground-truth labels
#     fmi = metrics.fowlkes_mallows_score(train_y,ap.labels_)
#     return fmi
# @print_run_time
# def meanshift(train_x,train_y):
#     # Mean-shift clustering
#     ms = cluster.MeanShift(bandwidth=2).fit(train_x)
#     # FMI score: compare against the ground-truth labels
#     fmi = metrics.fowlkes_mallows_score(train_y,ms.labels_)
#     return fmi
@print_run_time
def S_C(train_x,train_y,num_cluster = 5):
    # Spectral clustering
    sc = cluster.SpectralClustering(n_clusters=num_cluster).fit(train_x)
    # FMI score: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,sc.labels_)
return fmi
# @print_run_time
# def FA(train_x,train_y,num_cluster = 5):
#     # Feature agglomeration (clusters features, not samples)
#     fa = cluster.FeatureAgglomeration(n_clusters=num_cluster).fit(train_x)
#     # FMI score: compare against the ground-truth labels
# fmi = metrics.fowlkes_mallows_score(train_y,fa.labels_)
# return fmi
@print_run_time
def MBK(train_x,train_y,num_cluster = 5):
    # Mini-batch kmeans
    mbk = cluster.MiniBatchKMeans(n_clusters=num_cluster).fit(train_x)
    # FMI score: compare against the ground-truth labels
fmi = metrics.fowlkes_mallows_score(train_y,mbk.labels_)
return fmi
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument("-d", "--dataset", type=str, default="cit-HepPh", help="")
parser.add_argument("-t", "--task", type=int, default=0, help="")
parser.add_argument("-f", "--feature_type", type=int, default=0, help="")
parser.add_argument("-l", "--label_type", type=int, default=2, help="")
parser.add_argument("-s", "--shuffle", type=bool, default=True, help="")
parser.add_argument("-p", "--proportion", type=tuple, default=(0.7, 0.3), help="")
parser.add_argument("-m", "--method", type=str, default='all',choices=['kmeans','dbscan','AC','AP','meanshift','S_C','FA','MBK','all'], help="")
parser.add_argument("-sp", "--save_path", type=str, default='./cluster/result.csv', help="")
args = parser.parse_args()
training_set, validation_set, test_set = fe.get_datasets(dataset=args.dataset, task=args.task,
feature_type=args.feature_type, label_type=args.label_type,
shuffle=args.shuffle, proportion=args.proportion)
train_x,train_y=sort_data(training_set)
val_x,val_y=sort_data(validation_set)
with open("./cluster/tmp.csv","w") as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['method','index','time_cost'])
if args.method=='kmeans':
acc = kmeans(train_x,train_y,len(np.unique(train_y)))
elif args.method=='dbscan':
acc = dbscan(train_x,train_y)
elif args.method=='AC':
acc = AC(train_x,train_y,len(np.unique(train_y)))
elif args.method=='AP':
acc = AP(train_x,train_y)
elif args.method=='meanshift':
acc = meanshift(train_x,train_y)
elif args.method=='S_C':
acc = S_C(train_x,train_y,len(np.unique(train_y)))
elif args.method=='FA':
acc = FA(train_x,train_y,len(np.unique(train_y)))
elif args.method=='MBK':
acc = MBK(train_x,train_y,len(np.unique(train_y)))
elif args.method=='all':
acc_k = kmeans(train_x,train_y,len(np.unique(train_y)))
acc_ac = AC(train_x,train_y,len(np.unique(train_y)))
acc_sc = S_C(train_x,train_y,len(np.unique(train_y)))
# acc_fa = FA(train_x,train_y,len(np.unique(train_y))) ValueError: Found input variables with inconsistent numbers of samples: [7414, 24684]
acc_mbk = MBK(train_x,train_y,len(np.unique(train_y)))
acc_db = dbscan(train_x,train_y)
# acc_ap = AP(train_x,train_y) affinity_propagation() missing 1 required positional argument: 'S'
# acc_ms = meanshift(train_x,train_y) timesout
tmp_path=os.path.abspath('./cluster/tmp.csv')
os.rename('./cluster/tmp.csv',args.save_path)
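    # Illustrative invocation (the script filename is an assumption; the flags are the
    # argparse options defined above):
    #   python cluster.py -d cit-HepPh -m all -sp ./cluster/result.csv
    # With -m all this benchmarks KMeans, AgglomerativeClustering, SpectralClustering,
    # MiniBatchKMeans and DBSCAN, then renames tmp.csv to the requested result file.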
| [
1,
7,
8,
9,
10
] |
1,389 | eb81b0e41743e1785b82e88f6a618dc91eba73e5 | <mask token>
def process_frame(img):
global vid_data
img = cv2.resize(img, (w, h))
cv2.imshow('Frame', img)
cv2.waitKey(1)
vid_data = np.append(vid_data, img, axis=0)
<mask token>
| <mask token>
def process_frame(img):
global vid_data
img = cv2.resize(img, (w, h))
cv2.imshow('Frame', img)
cv2.waitKey(1)
vid_data = np.append(vid_data, img, axis=0)
<mask token>
while vid.isOpened():
ret, frame = vid.read()
if ret:
process_frame(frame)
n = n + 1
"""
cv2.imshow('Frame', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
"""
else:
break
vid.release()
cv2.destroyAllWindows()
print(vid_data.shape)
<mask token>
print(vid_data.shape)
np.savetxt('trackmania_vid_data2D_360x640.csv', vid_data, delimiter=',')
| <mask token>
vid = cv2.VideoCapture('trackmania_test_vid.mp4')
w = 1280 // 2
h = 720 // 2
vid_data = np.empty((360, 640, 3))
def process_frame(img):
global vid_data
img = cv2.resize(img, (w, h))
cv2.imshow('Frame', img)
cv2.waitKey(1)
vid_data = np.append(vid_data, img, axis=0)
n = 0
while vid.isOpened():
ret, frame = vid.read()
if ret:
process_frame(frame)
n = n + 1
"""
cv2.imshow('Frame', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
"""
else:
break
vid.release()
cv2.destroyAllWindows()
print(vid_data.shape)
vid_data = vid_data.reshape((vid_data.shape[0], -1))
print(vid_data.shape)
np.savetxt('trackmania_vid_data2D_360x640.csv', vid_data, delimiter=',')
| import numpy as np
import cv2
vid = cv2.VideoCapture('trackmania_test_vid.mp4')
w = 1280 // 2
h = 720 // 2
vid_data = np.empty((360, 640, 3))
def process_frame(img):
global vid_data
img = cv2.resize(img, (w, h))
cv2.imshow('Frame', img)
cv2.waitKey(1)
vid_data = np.append(vid_data, img, axis=0)
n = 0
while vid.isOpened():
ret, frame = vid.read()
if ret:
process_frame(frame)
n = n + 1
"""
cv2.imshow('Frame', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
"""
else:
break
vid.release()
cv2.destroyAllWindows()
print(vid_data.shape)
vid_data = vid_data.reshape((vid_data.shape[0], -1))
print(vid_data.shape)
np.savetxt('trackmania_vid_data2D_360x640.csv', vid_data, delimiter=',')
| #train a neural network from input video feed
import numpy as np
import cv2
vid = cv2.VideoCapture('trackmania_test_vid.mp4')
w = 1280//2
h = 720//2
vid_data = np.empty((360, 640, 3))
#print(vid_data.shape)
def process_frame(img):
global vid_data
img = cv2.resize(img, (w, h))
cv2.imshow('Frame', img)
cv2.waitKey(1)
vid_data = np.append(vid_data, img, axis=0)
#print(img.shape)
# Read until video is completed
n = 0
while vid.isOpened():
# Capture frame-by-frame
ret, frame = vid.read()
if ret:
#print("frame = {}".format(frame.shape))
process_frame(frame)
n = n + 1
'''
cv2.imshow('Frame', frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
break
'''
else:
break
# When everything done, release the video capture object
vid.release()
# Closes all the frames
cv2.destroyAllWindows()
print(vid_data.shape)
vid_data = vid_data.reshape((vid_data.shape[0], -1))
print(vid_data.shape)
# n = 1340
#print('No. of frames = {}'.format(n))
np.savetxt("trackmania_vid_data2D_360x640.csv", vid_data, delimiter=",")
#50580,320,3 ---> 281,180,320,3
#101160,640,3 ---> 281,360,640,3
| [
1,
2,
3,
4,
5
] |
1,390 | b2d3ebe4b1ce8f6f0fde8495fb90542080b810ce | <mask token>
class _TimeIT(object):
<mask token>
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
This disadvantage is that GC may be an important component of the performance of the function being measured.
If so, GC can be re-enabled as the with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
- avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:
if there where only a very low number of loops - one might want to increase the `run_sec` and rerun it
- two_best_loop_sec: time in seconds for the two fastest of all loops
- two_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
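    # Hypothetical direct use (normally _TimeIT is driven by SpeedIT's higher-level
    # benchmark runner; the argument values here are only an illustration):
    #   t = _TimeIT(my_func, [], {}, [], check_too_fast=True, run_sec=1.0,
    #               name='my_func', perf_counter_reference_time=ref_time)
    #   result = t.benchmark_it(with_gc=False)   # dict of loop count and per-loop timings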
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
<mask token>
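# Hedged sketch (reader-added; the function name and the 2e-07 reference time
# below are made up for illustration). With run_sec=None the class above does
# not time anything - benchmark_it() simply returns the generated inner-function
# source, which is useful for inspecting the exact `func code block` that would
# be looped over. The ::SPEEDIT:: / **SPEEDIT** comment tags limit timing to the
# enclosed block; the target function itself must not contain a return statement.
def _example_tagged(data_):
    # ::SPEEDIT::
    data_.sort()
    # **SPEEDIT**

#src = _TimeIT(_example_tagged, [[3, 1, 2]], {}, [], True, None,
#              '_example_tagged', 2e-07).benchmark_it(with_gc=False)
#print(src)  # prints the generated source instead of timing it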
| <mask token>
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
The disadvantage is that GC may be an important component of the performance of the function being measured.
If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
- avg_loop_sec: average loop time in seconds: this should mostly be used as the measured time:
if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
- two_best_loop_sec: time in seconds for the two fastest of all loops
- two_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True,
output_in_sec=False, benchmarkit__with_gc=False,
benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',
benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
.. warning:: no multiline string or indented code line
use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
output_in_sec (int): if true the output is kept in seconds, if false it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
benchmarkit__check_too_fast (bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
str: ready to print or write to file: table format conforms to reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()',
'At least one function must be defined in `func_dict`: <{}>'.
format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()',
'<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'
.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()',
'<benchmarkit__repeat> must be greater than <0> We got: <{}>'.
format(benchmarkit__repeat))
all_final_lines = []
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'
, '', ''])
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments,
func_keyword_arguments, setup_line_list,
benchmarkit__check_too_fast, benchmarkit__run_sec, name,
perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(
func_name), '', benchmark_result, '', ''])
else:
title_line = (
'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '
.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_,
func_positional_arguments, func_keyword_arguments,
setup_line_list, benchmarkit__check_too_fast,
benchmarkit__run_sec, name, perf_counter_reference_time
).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'best_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'avg_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
header_mapping = [('name', 'name'), ('rank-{}'.format(
benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (
'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (
'best_loop', 'best_loop_sec'), ('second_best_loop',
'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'), (
'all_loops time', 'all_loops_time_sec')]
all_final_lines.extend(get_table_rst_formatted_lines(table,
header_mapping, title_line))
all_final_lines.extend(['', ''])
return '\n'.join(all_final_lines)
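# Hedged usage sketch (reader-added, not part of the original module; the
# function name and arguments below are invented for illustration). Each
# func_dict value is (function, positional_args, keyword_args). The whole body
# is timed because it contains no ::SPEEDIT:: / **SPEEDIT** tags, and it must
# not contain a return statement. Note that `shuffle` is resolved through the
# setup_line_list import when the generated inner() runs, not at definition time.
def _example_sort_copy(data_):
    shuffle(data_)
    result = sorted(data_)
    del result

#example_table = speedit_benchmark(
#    {'sort_copy': (_example_sort_copy, [list(range(50))], {})},
#    setup_line_list=['from random import shuffle'],
#    benchmarkit__run_sec=0.5,
#    benchmarkit__repeat=1)
#print(example_table)  # reStructuredText comparison table, ranked by best loop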
| <mask token>
def _helper_get_perf_counter_reference_time():
""" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times
Returns:
float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times
"""
_result_time = 99999999999.0
for y_ in range(50):
for x_ in range(3000):
temp_start = perf_counter()
temp_time = perf_counter() - temp_start
if temp_time < _result_time:
_result_time = temp_time
return _result_time * 2
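# Illustrative note (reader-added): the value computed above is the
# `Reference-Time` used by the generated code's check_too_fast guard; the
# numbers below are hypothetical and hardware dependent.
#_ref = _helper_get_perf_counter_reference_time()  # e.g. roughly 1e-07 .. 5e-07 sec
#_measured = 5e-08                                 # a hypothetical, implausibly fast block time
#if _measured < _ref:
#    pass  # with check_too_fast=True the generated inner() raises "too fast to measure"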
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
The disadvantage is that GC may be an important component of the performance of the function being measured.
If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
- avg_loop_sec: average loop time in seconds: this should mostly be used as the measured time:
if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
- two_best_loop_sec: time in seconds for the two fastest of all loops
- two_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True,
output_in_sec=False, benchmarkit__with_gc=False,
benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',
benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
.. warning:: no multiline string or indented code line
use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
output_in_sec (int): if true the output is kept in seconds, if false it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
benchmarkit__check_too_fast (bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
str: ready to print or write to file: table format conforms to reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()',
'At least one function must be defined in `func_dict`: <{}>'.
format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()',
'<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'
.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()',
'<benchmarkit__repeat> must be greater than <0> We got: <{}>'.
format(benchmarkit__repeat))
all_final_lines = []
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'
, '', ''])
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments,
func_keyword_arguments, setup_line_list,
benchmarkit__check_too_fast, benchmarkit__run_sec, name,
perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(
func_name), '', benchmark_result, '', ''])
else:
title_line = (
'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '
.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_,
func_positional_arguments, func_keyword_arguments,
setup_line_list, benchmarkit__check_too_fast,
benchmarkit__run_sec, name, perf_counter_reference_time
).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'best_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'avg_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
header_mapping = [('name', 'name'), ('rank-{}'.format(
benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (
'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (
'best_loop', 'best_loop_sec'), ('second_best_loop',
'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'), (
'all_loops time', 'all_loops_time_sec')]
all_final_lines.extend(get_table_rst_formatted_lines(table,
header_mapping, title_line))
all_final_lines.extend(['', ''])
return '\n'.join(all_final_lines)
| <mask token>
import gc
from inspect import signature, getsourcelines
from operator import itemgetter
from time import perf_counter
from SpeedIT.ProjectErr import Err
from SpeedIT.Utils import format_time, get_table_rst_formatted_lines
def _helper_get_perf_counter_reference_time():
""" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times
Returns:
float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times
"""
_result_time = 99999999999.0
for y_ in range(50):
for x_ in range(3000):
temp_start = perf_counter()
temp_time = perf_counter() - temp_start
if temp_time < _result_time:
_result_time = temp_time
return _result_time * 2
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list,
check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, '__name__', self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if (self.run_sec is not None and self.run_sec != -1 and self.
run_sec < 0.1):
raise Err('_TimeIT.__init__()',
'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'
.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', 'exec')
exec(_code, globals(), _ns)
self.inner = _ns['inner']
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.
format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
The disadvantage is that GC may be an important component of the performance of the function being measured.
If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
- avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:
if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
- two_best_loop_sec: time in seconds for the two fastest of all loops
- two_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#':
if not ('::SPEEDIT::' in stripped_codeline or
'**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(
stripped_codeline)
indent_ = codebody_indent - func_def_indent
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3
] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
if line_orig:
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#':
if ('::SPEEDIT::' in stripped_line or '**SPEEDIT**' in
stripped_line):
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: ERROR: indentation must be a multiple of the second function line: <{}>
seems we encountered a wrong indented line: line_indentation: <{}>
{}"""
.format(self.orig_func_name, indent_,
line_indentation, line_orig))
line_indentation_level = int((line_indentation -
func_def_indent) / indent_) + 1
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if (start_tag_block_speedit !=
end_tag_block_speedit):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an END-TAG <**SPEEDIT**>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if (end_tag_block_speedit !=
start_tag_block_speedit - 1):
raise Err('_TimeIT.get_final_inner_function',
"""<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>
Expected an START-TAG <::SPEEDIT::>:
{}"""
.format(self.orig_func_name,
has_block_speedit, line_orig))
adjusted_func_code_line.append(' ' *
line_indentation_level +
'_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(' ' *
line_indentation_level +
'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'
.format(_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
else:
adjusted_func_code_line.append(' ' *
line_indentation_level + stripped_line)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n '
+ ' _start_block_stripped_line: <{}>'.format(
_start_block_stripped_line) +
'".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
else:
adjusted_func_code_line.insert(0,
' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added'
)
adjusted_func_code_line.append(
' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added'
)
if self.check_too_fast:
adjusted_func_code_line.append(
' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'
.format(self.orig_func_name) +
' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added'
)
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
raise Err('_TimeIT.get_final_inner_function()',
'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'
.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL:
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else:
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append(' ' * 2 + parameter_line)
elif value.kind == value.VAR_KEYWORD:
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append(' ' * 2 + parameter_line)
else:
continue
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.
orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'', ' _speeit_prefix__run_sec = {}'.format(self.run_sec), '',
' # ==================== START SETUP LINES ==================== #'
, '']
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = ['',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times'
, ' _speeit_prefix__check_reference_time = {}'.format(self.
perf_counter_reference_time), ' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:', ' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,'
, ' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', ' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True', ' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()'
, ' while True:', ' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0', '',
' # ==================== START CODE BLOCK ==================== #'
, '']
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = ['',
' # ==================== END CODE BLOCK ==================== #'
, '',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:'
,
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec'
,
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time'
,
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:'
,
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec'
,
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time'
, ' if _speeit_prefix__run_once:', ' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:'
, ' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops'
,
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {', ' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,'
, ' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec'
, ' }', '']
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True,
output_in_sec=False, benchmarkit__with_gc=False,
benchmarkit__check_too_fast=True, benchmarkit__rank_by='best',
benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
.. warning:: no multiline string or indented code line
use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
output_in_sec (int): if true the output is kept in seconds; if false it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
benchmarkit__check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
str: ready to print or write to file: table format is conform with reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()',
'At least one function must be defined in `func_dict`: <{}>'.
format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()',
'<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'
.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()',
'<benchmarkit__repeat> must be greater than <0> We got: <{}>'.
format(benchmarkit__repeat))
all_final_lines = []
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================'
, '', ''])
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments,
func_keyword_arguments, setup_line_list,
benchmarkit__check_too_fast, benchmarkit__run_sec, name,
perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(
func_name), '', benchmark_result, '', ''])
else:
title_line = (
'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '
.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments,
func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, '__name__', function_)
else:
name = func_name
benchmark_result = _TimeIT(function_,
func_positional_arguments, func_keyword_arguments,
setup_line_list, benchmarkit__check_too_fast,
benchmarkit__run_sec, name, perf_counter_reference_time
).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'best_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format(dict_[
'avg_loop_sec'] / compare_reference * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_[
'best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(
dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_[
'worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(
dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_
['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_[
'avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_[
'best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_[
'second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_[
'worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_[
'second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_[
'all_loops_time_sec'])
header_mapping = [('name', 'name'), ('rank-{}'.format(
benchmarkit__rank_by), 'rank'), ('compare %', 'compare'), (
'num. loops', 'loops'), ('avg_loop', 'avg_loop_sec'), (
'best_loop', 'best_loop_sec'), ('second_best_loop',
'second_best_loop_sec'), ('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'), (
'all_loops time', 'all_loops_time_sec')]
all_final_lines.extend(get_table_rst_formatted_lines(table,
header_mapping, title_line))
all_final_lines.extend(['', ''])
return '\n'.join(all_final_lines)
| """ Benchmark module: can also compare multiple functions
"""
import gc
from inspect import (
signature,
getsourcelines
)
from operator import itemgetter
from time import perf_counter
from SpeedIT.ProjectErr import Err
from SpeedIT.Utils import (
format_time,
get_table_rst_formatted_lines
)
def _helper_get_perf_counter_reference_time():
""" Helper: Returns 2 times: the smallest difference of calling perf_counter() immediately after each other a couple of times
Returns:
float: 2 times the smallest difference of calling perf_counter() immediately after each other a couple of times
"""
_result_time = 99999999999.0
for y_ in range(50):
for x_ in range(3000):
temp_start = perf_counter()
temp_time = perf_counter() - temp_start
if temp_time < _result_time:
_result_time = temp_time
return _result_time * 2
class _TimeIT(object):
""" Class for timing execution speed of function code.
Partially based on code from python timeit.py
This does not execute the original function but generates a new function which executes only the code body of 'func': `func code block`
This avoids calling into the function itself
Args:
func (function):
.. warning:: the `func` function may not have any return statements: but any inner function can have one
OK
.. code-block:: python
def example_formal_func_inner(data_):
shuffle(data_)
def fninner(x):
return x[1]
result = sorted(data_.items(), key=fninner)
del result
NOT OK
.. code-block:: python
def example_pep265(data_):
shuffle(data_)
result = sorted(data_.items(), key=itemgetter(1))
return result
func_positional_arguments (list): positional arguments for the function
func_keyword_arguments (dict): any keyword arguments for the function
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
this part is executed once before the actual `func code block` enters the loop
.. warning:: no multiline string or indented code line
check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
run_sec (float or -1 or None): seconds the `func code block` will be executed (looped over)
- if run_sec is -1: then the generated function source code is only run once
- if run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
name (str): the name used for the output `name` part
perf_counter_reference_time (float): passed on see: _helper_get_perf_counter_reference_time()
"""
def __init__(self, func, args_list, kwargs_dict, setup_line_list, check_too_fast, run_sec, name, perf_counter_reference_time):
""" Constructor. See class doc string.
"""
self.func = func
self.orig_func_name = getattr(self.func, "__name__", self.func)
self.args_list = args_list.copy()
self.kwargs_dict = kwargs_dict.copy()
self.setup_line_list = setup_line_list
self.check_too_fast = check_too_fast
self.run_sec = run_sec
self.name = name
self.perf_counter_reference_time = perf_counter_reference_time
if callable(self.func):
_ns = {}
self.src = self.__get_final_inner_function()
if self.run_sec is not None and self.run_sec != -1 and self.run_sec < 0.1:
raise Err('_TimeIT.__init__()', 'run_sec: <{:.1f}> must be at least <0.1 second> or <-1 to run it once> or <None to print the `func code block`>'.format(self.run_sec))
_code = compile(self.src, 'benchmarkit-src', "exec")
exec(_code, globals(), _ns)
self.inner = _ns["inner"]
else:
raise ValueError('<func>: is not a `callable` type: <{}>'.format(self.func))
def benchmark_it(self, with_gc):
""" Returns timing result for the `func code block`
.. note::
By default, timeit() temporarily turns off garbage collection during the timing.
The advantage of this approach is that it makes independent timings more comparable.
The disadvantage is that GC may be an important component of the performance of the function being measured.
If so, GC can be re-enabled by passing with_gc=True
Returns:
dict: benchmark result: dict keys: loops, all_loops_time_sec, avg_loop_sec, best_loop_sec, worst_loop_sec
- loops: how many times the `func code block` was executed (looped over)
- all_loops_time_sec: the total time in seconds for all loops:
only loop times are counted not other times: depending on the `func code block` this can be about 25% of the total runtime
- avg_loop_sec: average loop time in seconds: this should be mostly used as measure time:
if there were only a very low number of loops - one might want to increase the `run_sec` and rerun it
- two_best_loop_sec: time in seconds for the two fastest of all loops
- two_worst_loop_sec: time in seconds for the two slowest of all loops
Raises:
SpeedIT.Err: example if `run_sec` is not <-1 run once>, <None only print> but less than 0.1
"""
if self.run_sec is None:
benchmark_result = self.src
elif with_gc:
gc_old = gc.isenabled()
gc.enable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if not gc_old:
gc.disable()
else:
gc_old = gc.isenabled()
gc.disable()
try:
benchmark_result = self.inner()
benchmark_result['name'] = self.name
finally:
if gc_old:
gc.enable()
return benchmark_result
def __get_final_inner_function(self):
""" Returns a string of an generated inner function with the code body from: func
Tries to generate a function with the 'code-body' from the passed on func as well as the args_list, kwargs_dict
.. warnings:: the `func` function may not have any return statements: but any inner function can have one
Returns:
str: generated inner function
Raises:
SpeedIT.Err: example if an indentation is encountered which is not a multiple of the first found indentation
"""
has_block_speedit = False
_start_block_stripped_line = ''
start_tag_block_speedit = 0
end_tag_block_speedit = 0
func_line, lnum = getsourcelines(self.func)
sig = signature(self.func)
indent_ = None
func_def_indent = len(func_line[0]) - len(func_line[0].lstrip())
func_body = func_line[1:]
search_docstring = False
# PREPARE: remove docstring and get final indentation
first_none_docstring_idx = 0
for idx, line_orig in enumerate(func_body):
rstripped_line = line_orig.rstrip()
if rstripped_line:
stripped_codeline = rstripped_line.lstrip()
if stripped_codeline[0] == '#': # remove comment lines
if not ('::SPEEDIT::' in stripped_codeline or '**SPEEDIT**' in stripped_codeline):
continue
if search_docstring:
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3] == "'''":
search_docstring = False
continue
else:
codebody_indent = len(rstripped_line) - len(stripped_codeline)
indent_ = codebody_indent - func_def_indent
# Check if we have a docstring
if stripped_codeline[0:3] == '"""' or stripped_codeline[0:3] == "'''":
search_docstring = True
continue
first_none_docstring_idx = idx
break
# do the func code body
adjusted_func_code_line = []
for line_orig in func_body[first_none_docstring_idx:]:
# remove empty
if line_orig:
# get indentation check it is a multiple of indent_
rstrip_line = line_orig.rstrip()
if rstrip_line:
stripped_line = rstrip_line.lstrip()
if stripped_line[0] == '#': # remove comment lines: keep any with ::SPEEDIT::
if '::SPEEDIT::' in stripped_line or '**SPEEDIT**' in stripped_line:
has_block_speedit = True
else:
continue
line_indentation = len(rstrip_line) - len(stripped_line)
if line_indentation % indent_ != 0:
raise Err('_TimeIT.get_final_inner_function', '<{}>: ERROR: indentation must be a multiple of the second function line: <{}>\n seems we encountered a wrong indented line: line_indentation: <{}>\n {}'.format(self.orig_func_name, indent_, line_indentation, line_orig))
line_indentation_level = int((line_indentation - func_def_indent) / indent_) + 1 # need one extra level
if has_block_speedit:
if '::SPEEDIT::' in stripped_line:
if start_tag_block_speedit != end_tag_block_speedit:
# expected END Tag
raise Err('_TimeIT.get_final_inner_function', '<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an END-TAG <**SPEEDIT**>: \n {}'.format(self.orig_func_name, has_block_speedit, line_orig))
adjusted_func_code_line.append((' ' * line_indentation_level) + '_speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added')
start_tag_block_speedit += 1
_start_block_stripped_line = stripped_line
elif '**SPEEDIT**' in stripped_line:
if end_tag_block_speedit != start_tag_block_speedit - 1:
# expected START TAG
raise Err('_TimeIT.get_final_inner_function', '<{}>: FUNCTION INNER TAG ERROR: has_block_speedit: <{}>\n Expected an START-TAG <::SPEEDIT::>: \n {}'.format(self.orig_func_name, has_block_speedit, line_orig))
# Do this inner result
adjusted_func_code_line.append((' ' * line_indentation_level) + '_speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')
if self.check_too_fast:
adjusted_func_code_line.append((' ' * line_indentation_level) + 'if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n ' + ' _start_block_stripped_line: <{}>'.format(_start_block_stripped_line) + '".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')
end_tag_block_speedit += 1
else:
adjusted_func_code_line.append((' ' * line_indentation_level) + stripped_line)
else:
adjusted_func_code_line.append((' ' * line_indentation_level) + stripped_line)
# CHECK: LAST END TAG
# e.g. if a function body ends with an END-TAG this is not returned by: inspect.getsourcelines(self.func)
if has_block_speedit:
if start_tag_block_speedit != end_tag_block_speedit:
# Do the last inner result: ADDING an END-TAG
adjusted_func_code_line.append(' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')
if self.check_too_fast:
adjusted_func_code_line.append(' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>\\n ' + ' _start_block_stripped_line: <{}>'.format(_start_block_stripped_line) + '".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')
# add the normal perf_counter time lines
else:
adjusted_func_code_line.insert(0, ' _speeit_prefix__stmt_inner_start = _speeit_prefix__perf_counter() # ::SPEEDIT::START internally added')
adjusted_func_code_line.append(' _speeit_prefix__result_time += _speeit_prefix__perf_counter() - _speeit_prefix__stmt_inner_start # **SPEEDIT**END internally added')
if self.check_too_fast:
adjusted_func_code_line.append(' if _speeit_prefix__result_time < _speeit_prefix__check_reference_time: raise Exception("in function: <{}>'.format(self.orig_func_name) + ' code block: too fast to measure:\\n code part: _speeit_prefix__result_time: <{:.11f}> 2 times _smallest_perf_counter_time: <{:.11f}>".format(_speeit_prefix__result_time, _speeit_prefix__check_reference_time)) # SPEEDIT: internally added')
# Do the arguments
final_param_line = []
for param, value in sig.parameters.items():
if value.kind == value.POSITIONAL_OR_KEYWORD:
# check if we have a keyword
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else: # use the positional
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append((' ' * 2) + parameter_line)
elif value.kind == value.POSITIONAL_ONLY:
value_to_set = self.args_list.pop(0)
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append((' ' * 2) + parameter_line)
# TODO: From docs: 3.4 Python has no explicit syntax for defining positional-only parameters, but many built-in and extension module functions (especially those that accept only one or two parameters) accept them.
raise Err('_TimeIT.get_final_inner_function()', 'POSITIONAL_ONLY !! not sure what to do .. check in future if needed: param: <{}> value.kind: <{}>'.format(param, value.kind))
elif value.kind == value.VAR_POSITIONAL: # do the remaining POSITIONAL arguments
parameter_line = '{} = {}'.format(param, self.args_list)
final_param_line.append((' ' * 2) + parameter_line)
elif value.kind == value.KEYWORD_ONLY:
if param in self.kwargs_dict:
value_to_set = self.kwargs_dict.pop(param)
else: # use the default
value_to_set = value.default
if isinstance(value_to_set, str):
parameter_line = '{} = "{}"'.format(param, value_to_set)
else:
parameter_line = '{} = {}'.format(param, value_to_set)
final_param_line.append((' ' * 2) + parameter_line)
elif value.kind == value.VAR_KEYWORD: # do the remaining KEYWORD arguments
parameter_line = '{} = {}'.format(param, self.kwargs_dict)
final_param_line.append((' ' * 2) + parameter_line)
else:
continue
# do self.setup_line_list
final_setup_lines = []
for setup_line in self.setup_line_list:
setup_line = setup_line.strip()
if setup_line:
final_setup_lines.append(' ' + setup_line)
final_inner_function_lines = [
'def inner(): # orig function name: <{}>'.format(self.orig_func_name),
' from time import perf_counter as _speeit_prefix__perf_counter',
'',
' _speeit_prefix__run_sec = {}'.format(self.run_sec),
'',
' # ==================== START SETUP LINES ==================== #',
'',
]
final_inner_function_lines.extend(final_setup_lines)
inner_function_lines_part2 = [
'',
' # ==================== END SETUP LINES ==================== #',
'',
' # The smallest difference of calling _speeit_prefix__perf_counter() immediately after each other a couple of times',
' _speeit_prefix__check_reference_time = {}'.format(self.perf_counter_reference_time),
' _speeit_prefix__loops = 0',
' _speeit_prefix__all_loops_time_sec = 0.0',
' _speeit_prefix__avg_loop_sec = 0.0',
' _speeit_prefix__best_loop_sec = 99999999999.0',
' _speeit_prefix__second_best_loop_sec = 99999999999.0',
' _speeit_prefix__worst_loop_sec = 0.0',
' _speeit_prefix__second_worst_loop_sec = 0.0',
' if _speeit_prefix__run_sec is None:',
' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,',
' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec',
' }',
' elif _speeit_prefix__run_sec == -1:',
' # only run it once',
' _speeit_prefix__run_once = True',
' else:',
' _speeit_prefix__run_once = False',
' _speeit_prefix__main_start_time = _speeit_prefix__perf_counter()',
' while True:',
' _speeit_prefix__loops += 1',
' _speeit_prefix__result_time = 0',
'',
' # ==================== START CODE BLOCK ==================== #',
'',
]
final_inner_function_lines.extend(inner_function_lines_part2)
final_inner_function_lines.extend(final_param_line)
final_inner_function_lines.extend(adjusted_func_code_line)
inner_function_lines_rest = [
'',
' # ==================== END CODE BLOCK ==================== #',
'',
' _speeit_prefix__all_loops_time_sec += _speeit_prefix__result_time',
' if _speeit_prefix__result_time <= _speeit_prefix__best_loop_sec:',
' _speeit_prefix__second_best_loop_sec = _speeit_prefix__best_loop_sec',
' _speeit_prefix__best_loop_sec = _speeit_prefix__result_time',
' if _speeit_prefix__result_time >= _speeit_prefix__worst_loop_sec:',
' _speeit_prefix__second_worst_loop_sec = _speeit_prefix__worst_loop_sec',
' _speeit_prefix__worst_loop_sec = _speeit_prefix__result_time',
' if _speeit_prefix__run_once:',
' break',
' # check if we have to get out',
' if _speeit_prefix__perf_counter() - _speeit_prefix__main_start_time >= _speeit_prefix__run_sec:',
' break',
' _speeit_prefix__avg_loop_sec = _speeit_prefix__all_loops_time_sec / _speeit_prefix__loops',
' if _speeit_prefix__second_best_loop_sec == 99999999999.0:',
' _speeit_prefix__second_best_loop_sec = -1.0',
' if _speeit_prefix__second_worst_loop_sec == 0.0:',
' _speeit_prefix__second_worst_loop_sec = -1.0',
' return {',
' "loops": _speeit_prefix__loops,',
' "all_loops_time_sec": _speeit_prefix__all_loops_time_sec,',
' "avg_loop_sec": _speeit_prefix__avg_loop_sec,',
' "best_loop_sec": _speeit_prefix__best_loop_sec,',
' "second_best_loop_sec": _speeit_prefix__second_best_loop_sec,',
' "worst_loop_sec": _speeit_prefix__worst_loop_sec,',
' "second_worst_loop_sec": _speeit_prefix__second_worst_loop_sec',
' }',
''
]
final_inner_function_lines.extend(inner_function_lines_rest)
return '\n'.join(final_inner_function_lines)
def speedit_benchmark(func_dict, setup_line_list, use_func_name=True, output_in_sec=False, benchmarkit__with_gc=False, benchmarkit__check_too_fast=True, benchmarkit__rank_by='best', benchmarkit__run_sec=1, benchmarkit__repeat=3):
""" Returns one txt string for the ready comparison table: format is conform with reStructuredText
Usage:
.. code-block:: python
func_dict = {
'function_f1': (function_f1, [act_one_hamlet], {}),
'function_f2': (function_f2, [act_one_hamlet], {}),
'function_f3': (function_f3, [act_one_hamlet], {}),
}
setup_line_list = [
'from random import shuffle',
'from os.path import abspath, dirname, join',
'MY_CONSTANT = 15'
]
benchmark_result = BenchmarkIT.speedit_benchmark(func_dict, setup_line_list, benchmarkit__run_sec=1.0, output_in_sec=True, use_func_name=True, benchmarkit__with_gc=False, benchmarkit__repeat=3)
Args:
func_dict (dict): mapping function names to functions
value format: tuple (function, list_of_positional_arguments, dictionary_of_keyword_arguments)
setup_line_list (list): of strings with import lines needed by the functions, any global data etc.
.. warning:: no multiline string or indented code line
use_func_name (bool): if True the function name will be used in the output `name`; if False the `func_dict key` will be used in the output `name`
output_in_sec (int): if true the output is kept in seconds; if false it is transformed to:
second (s)
millisecond (ms) One thousandth of one second
microsecond (µs) One millionth of one second
nanosecond (ns) One billionth of one second
benchmarkit__with_gc (bool): if True gc is kept on during timing: if False: turns off garbage collection during the timing
benchmarkit__check_too_fast(bool): if True and a code block is timed faster than a `Reference-Time` an Exception is raised.
- Reference-Time: the smallest difference of calling perf_counter() immediately after each other a couple of times
.. seealso:: _helper_get_perf_counter_reference_time()
benchmarkit__rank_by (str): `best` or `average`
benchmarkit__run_sec (float or -1 or None): the number of loops per run is scaled to approximately fit the benchmarkit__run_sec
- if benchmarkit__run_sec is -1: then the generated function source code is only run once
- if benchmarkit__run_sec is None: then the generated function source code is only printed
this is mainly useful to see the exact final `func code block` which will be timed.
benchmarkit__repeat (int): how often everything is repeated
This is a convenience variable that calls the whole setup repeatedly
Returns:
str: ready to print or write to file: table format is conform with reStructuredText
Raises:
SpeedIT.Err
"""
if not func_dict:
raise Err('speedit_benchmark()', 'At least one function must be defined in `func_dict`: <{}>'.format(func_dict))
if benchmarkit__rank_by != 'best' and benchmarkit__rank_by != 'average':
raise Err('speedit_benchmark()', '<benchmarkit__rank_by> must be one of: <best, average> We got: <{}>'.format(benchmarkit__rank_by))
if benchmarkit__repeat < 1:
raise Err('speedit_benchmark()', '<benchmarkit__repeat> must be greater than <0> We got: <{}>'.format(benchmarkit__repeat))
all_final_lines = []
# get once the perf_counter_reference_time
perf_counter_reference_time = _helper_get_perf_counter_reference_time()
if benchmarkit__run_sec is None:
all_final_lines.extend([
'================ RUN SECONDS: benchmarkit__run_sec was defined as: None (benchmarkit__run_sec=None) ================',
'',
''
])
# Run all only once and get the code
for func_name, (function_, func_positional_arguments, func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, "__name__", function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments, func_keyword_arguments, setup_line_list, benchmarkit__check_too_fast, benchmarkit__run_sec, name, perf_counter_reference_time).benchmark_it(benchmarkit__with_gc)
all_final_lines.extend([
'===================== function name: <{}>'.format(func_name),
'',
benchmark_result,
'',
'',
])
else:
title_line = 'SpeedIT: `BenchmarkIT` for: <{}> functions. benchmarkit__with_gc: <{}> benchmarkit__run_sec: <{}> '.format(len(func_dict), benchmarkit__with_gc, benchmarkit__run_sec)
for repeat_all in range(benchmarkit__repeat):
table = []
for func_name, (function_, func_positional_arguments, func_keyword_arguments) in sorted(func_dict.items()):
if use_func_name:
name = getattr(function_, "__name__", function_)
else:
name = func_name
benchmark_result = _TimeIT(function_, func_positional_arguments, func_keyword_arguments, setup_line_list, benchmarkit__check_too_fast, benchmarkit__run_sec, name, perf_counter_reference_time).benchmark_it(with_gc=benchmarkit__with_gc)
table.append(benchmark_result)
if benchmarkit__rank_by == 'best':
table = sorted(table, key=itemgetter('best_loop_sec'))
compare_reference = table[0]['best_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format((dict_['best_loop_sec'] / compare_reference) * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_['best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_['worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_['best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_['worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_['all_loops_time_sec'])
elif benchmarkit__rank_by == 'average':
table = sorted(table, key=itemgetter('avg_loop_sec'))
compare_reference = table[0]['avg_loop_sec']
for idx, dict_ in enumerate(table):
dict_['compare'] = '{:,.3f}'.format((dict_['avg_loop_sec'] / compare_reference) * 100.0)
dict_['rank'] = '{:,}'.format(idx + 1)
dict_['loops'] = '{:,}'.format(dict_['loops'])
if output_in_sec:
dict_['avg_loop_sec'] = '{:.11f}'.format(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = '{:.11f}'.format(dict_['best_loop_sec'])
if dict_['second_best_loop_sec'] == -1.0:
dict_['second_best_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_best_loop_sec'] = '{:.11f}'.format(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = '{:.11f}'.format(dict_['worst_loop_sec'])
if dict_['second_worst_loop_sec'] == -1.0:
dict_['second_worst_loop_sec'] = 'NOT-MEASURED'
else:
dict_['second_worst_loop_sec'] = '{:.11f}'.format(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = '{:.11f}'.format(dict_['all_loops_time_sec'])
else:
dict_['avg_loop_sec'] = format_time(dict_['avg_loop_sec'])
dict_['best_loop_sec'] = format_time(dict_['best_loop_sec'])
dict_['second_best_loop_sec'] = format_time(dict_['second_best_loop_sec'])
dict_['worst_loop_sec'] = format_time(dict_['worst_loop_sec'])
dict_['second_worst_loop_sec'] = format_time(dict_['second_worst_loop_sec'])
dict_['all_loops_time_sec'] = format_time(dict_['all_loops_time_sec'])
header_mapping = [
('name', 'name'),
('rank-{}'.format(benchmarkit__rank_by), 'rank'),
('compare %', 'compare'),
('num. loops', 'loops'),
('avg_loop', 'avg_loop_sec'),
('best_loop', 'best_loop_sec'),
('second_best_loop', 'second_best_loop_sec'),
('worst_loop', 'worst_loop_sec'),
('second_worst_loop', 'second_worst_loop_sec'),
('all_loops time', 'all_loops_time_sec')
]
all_final_lines.extend(get_table_rst_formatted_lines(table, header_mapping, title_line))
all_final_lines.extend([
'',
'',
])
return '\n'.join(all_final_lines)
| [
4,
6,
7,
8,
9
] |
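Usage sketch for the SpeedIT `BenchmarkIT` module stored in the row above. This is illustrative only and not part of the dataset row: the import path `from SpeedIT import BenchmarkIT`, the sample data, and the function names are assumptions modelled on the module's own docstring, and the benchmarked functions deliberately avoid `return` statements as the `_TimeIT` docstring requires.

# illustrative sketch -- assumed import path and sample data
from operator import itemgetter
from SpeedIT import BenchmarkIT  # assumed: the module above is importable as SpeedIT.BenchmarkIT

def sort_pep265(data_):
    # ::SPEEDIT::  (only the block between the tags is timed)
    result = sorted(data_.items(), key=itemgetter(1))
    # **SPEEDIT**
    del result  # benchmarked functions must not use return statements

def sort_lambda(data_):
    result = sorted(data_.items(), key=lambda item: item[1])
    del result

sample = dict(enumerate('the rest is silence ' * 40))  # arguments need a literal repr, since they are inlined into the generated source
func_dict = {
    'pep265': (sort_pep265, [sample], {}),
    'lambda': (sort_lambda, [sample], {}),
}
setup_line_list = ['from operator import itemgetter']
print(BenchmarkIT.speedit_benchmark(func_dict, setup_line_list,
                                    benchmarkit__run_sec=0.5, benchmarkit__repeat=1))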
1,391 | 4e30f0a9b420123c28858aad2a71040dcc952829 | <mask token>
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
<mask token>
<mask token>
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = 'watching ' + log.yellow(self.watchpath)
for match in self.globs:
msg += ' for [' + log.yellow(match) + ']'
msg += '...'
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
<mask token>
| <mask token>
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
def __init__(self, globs=None, cmds=None, path=None, queue=None):
super(MachopWatchCommand, self).__init__()
recreate = globs, cmds, path, queue
self._safe_process(queue=queue, cfgpath=path, init=recreate)
self.globs = globs if globs else []
self.actions = cmds if cmds else []
self.watchpath = path
self.queue = queue
self.hashmap = {}
self.log = None
<mask token>
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = 'watching ' + log.yellow(self.watchpath)
for match in self.globs:
msg += ' for [' + log.yellow(match) + ']'
msg += '...'
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
<mask token>
| <mask token>
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
def __init__(self, globs=None, cmds=None, path=None, queue=None):
super(MachopWatchCommand, self).__init__()
recreate = globs, cmds, path, queue
self._safe_process(queue=queue, cfgpath=path, init=recreate)
self.globs = globs if globs else []
self.actions = cmds if cmds else []
self.watchpath = path
self.queue = queue
self.hashmap = {}
self.log = None
def set_queue(self, queue):
self.queue = queue
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = 'watching ' + log.yellow(self.watchpath)
for match in self.globs:
msg += ' for [' + log.yellow(match) + ']'
msg += '...'
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
def has_changed(self, key):
hasher = hashlib.md5()
with open(key, 'rb') as modfile:
hasher.update(modfile.read())
xhash = hasher.hexdigest()
if self.hashmap.get(key, '') != xhash:
self.hashmap[key] = xhash
return True
return False
| import fnmatch
import hashlib
from .mplog import MachopLog
from .utils import MachopProcess, wait_for_interrupt
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
def __init__(self, globs=None, cmds=None, path=None, queue=None):
super(MachopWatchCommand, self).__init__()
recreate = globs, cmds, path, queue
self._safe_process(queue=queue, cfgpath=path, init=recreate)
self.globs = globs if globs else []
self.actions = cmds if cmds else []
self.watchpath = path
self.queue = queue
self.hashmap = {}
self.log = None
def set_queue(self, queue):
self.queue = queue
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = 'watching ' + log.yellow(self.watchpath)
for match in self.globs:
msg += ' for [' + log.yellow(match) + ']'
msg += '...'
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
def has_changed(self, key):
hasher = hashlib.md5()
with open(key, 'rb') as modfile:
hasher.update(modfile.read())
xhash = hasher.hexdigest()
if self.hashmap.get(key, '') != xhash:
self.hashmap[key] = xhash
return True
return False
|
import fnmatch
import hashlib
from .mplog import MachopLog
from .utils import MachopProcess, wait_for_interrupt
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
class MachopWatchCommand(MachopProcess):
class MachopHandler(PatternMatchingEventHandler):
""" watcher for a file system event """
def on_modified(self, event):
if event.is_directory:
return
source = event.src_path
self._watcher.modified(source)
def __init__(self, globs=None, cmds=None, path=None, queue=None):
super(MachopWatchCommand, self).__init__()
recreate = (globs, cmds, path, queue)
self._safe_process(queue=queue, cfgpath=path, init=recreate)
self.globs = globs if globs else []
self.actions = cmds if cmds else []
self.watchpath = path
self.queue = queue
self.hashmap = {}
self.log = None
def set_queue(self, queue):
self.queue = queue
def modified(self, eventsrc):
"""
@@@ needs proper event handling for actions!!!
"""
if not self.has_changed(eventsrc):
return
matched = False
for pattern in self.globs:
if fnmatch.fnmatch(eventsrc, pattern):
matched = True
break
if matched:
for action in self.actions:
action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))
self.announce()
def announce(self, nl=False):
log = self.log
msg = "watching " + log.yellow(self.watchpath)
for match in self.globs:
msg += " for [" + log.yellow(match) + "]"
msg += "..."
if nl:
msg += '\n'
log.out(msg)
def run(self):
self.log = MachopLog(self.queue, 'watch')
self.handler = self.MachopHandler(patterns=self.globs)
self.handler._watcher = self
self.observer = Observer()
self.observer.schedule(self.handler, self.watchpath, recursive=True)
self.observer.start()
self.announce(True)
wait_for_interrupt(self.observer)
self.observer.stop()
self.observer.join(3)
def has_changed(self, key):
hasher = hashlib.md5()
with open(key, 'rb') as modfile:
hasher.update(modfile.read())
xhash = hasher.hexdigest()
if self.hashmap.get(key, "") != xhash:
self.hashmap[key] = xhash
return True
return False
| [
4,
5,
7,
8,
9
] |
1,392 | d19310a45a684a7bbb456555a954439df8ae92b6 | <mask token>
| <mask token>
def download_subreddit(sub):
reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ', client_secret=
'0W_86zufGFCJlSE4lK3CwF_0UEQEQw', username='MarshallBranin',
password='#Marshall2', user_agent=
'macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')
reddit.read_only = True
for submission in praw.reddit.Subreddit(reddit, display_name=f'{sub}').new(
limit=None):
url = str(submission.url)
if url.endswith('jpg') or url.endswith('jpeg') or url.endswith('png'):
urllib.request.urlretrieve(url, 'instagram/INSTAGRAM.jpg')
break
| import urllib.request
import praw
from praw import reddit
from praw.models.listing.mixins import submission
def download_subreddit(sub):
reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ', client_secret=
'0W_86zufGFCJlSE4lK3CwF_0UEQEQw', username='MarshallBranin',
password='#Marshall2', user_agent=
'macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')
reddit.read_only = True
for submission in praw.reddit.Subreddit(reddit, display_name=f'{sub}').new(
limit=None):
url = str(submission.url)
if url.endswith('jpg') or url.endswith('jpeg') or url.endswith('png'):
urllib.request.urlretrieve(url, 'instagram/INSTAGRAM.jpg')
break
| import urllib.request
import praw
from praw import reddit
from praw.models.listing.mixins import submission
def download_subreddit(sub):
reddit = praw.Reddit(client_id='oFOYuOd31vUb4UstBWDhnQ',
client_secret='0W_86zufGFCJlSE4lK3CwF_0UEQEQw',
username='MarshallBranin',
password='#Marshall2',
user_agent='macos:com.example.text_app:v1.0.0 (by /u/MarshallBranin)')
reddit.read_only=True
# Iterate through top submissions
for submission in praw.reddit.Subreddit(reddit, display_name=f"{sub}").new(limit=None):
# Get the link of the submission
url = str(submission.url)
# Check if the link is an image
if url.endswith("jpg") or url.endswith("jpeg") or url.endswith("png"):
# Retrieve the image and save it in current folder
urllib.request.urlretrieve(url, "instagram/INSTAGRAM.jpg")
break
| null | [
0,
1,
2,
3
] |
1,393 | f5b74ca95cb368d70139b5d36e3c8d553b8c5393 | <mask token>
| <mask token>
print('Max: {}'.format(max_value))
print('Max: {}'.format(max_value1))
print('Max: {}'.format(max_value2))
print('Max: {}'.format(max_value3))
| max_integer = __import__('9-max_integer').max_integer
my_list = [1, 90, 2, 13, 34, 5, -13, 3]
my_list1 = []
my_list2 = [1, 90, 2, 13, 34, 100, -13, 3]
max_value = max_integer(my_list)
max_value1 = max_integer(my_list1)
max_value2 = max_integer(my_list2)
max_value3 = max_integer()
print('Max: {}'.format(max_value))
print('Max: {}'.format(max_value1))
print('Max: {}'.format(max_value2))
print('Max: {}'.format(max_value3))
| #!/usr/bin/python3
max_integer = __import__('9-max_integer').max_integer
my_list = [1, 90, 2, 13, 34, 5, -13, 3]
my_list1 = []
my_list2 = [1, 90, 2, 13, 34, 100, -13, 3]
max_value = max_integer(my_list)
max_value1 = max_integer(my_list1)
max_value2 = max_integer(my_list2)
max_value3 = max_integer()
print("Max: {}".format(max_value))
print("Max: {}".format(max_value1))
print("Max: {}".format(max_value2))
print("Max: {}".format(max_value3))
| null | [
0,
1,
2,
3
] |
1,394 | 09660cfcff7d5da0339da201cb18b6f63bec2df9 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('shop', '0032_product_sex')]
operations = [migrations.AddField(model_name='product', name=
'price_ret_sale', field=models.IntegerField(default=0, verbose_name
='Розничная цена, с учетом скидки')), migrations.AddField(
model_name='product', name='size_5xl', field=models.IntegerField(
default=0, verbose_name='5XL размер')), migrations.AddField(
model_name='product', name='size_6xl', field=models.IntegerField(
default=0, verbose_name='6XL размер')), migrations.AlterField(
model_name='product', name='price_opt_2', field=models.IntegerField
(default=0, verbose_name='- 3% от 30000')), migrations.AlterField(
model_name='product', name='price_opt_3', field=models.IntegerField
(default=0, verbose_name='- 7% от 70000')), migrations.AlterField(
model_name='product', name='price_opt_4', field=models.IntegerField
(default=0, verbose_name='- 11% от 110000')), migrations.AlterField
(model_name='product', name='sex', field=models.CharField(choices=[
('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (
'Унисекс', 'Unisex')], default='Мужское', max_length=10))]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('shop', '0032_product_sex')]
operations = [migrations.AddField(model_name='product', name=
'price_ret_sale', field=models.IntegerField(default=0, verbose_name
='Розничная цена, с учетом скидки')), migrations.AddField(
model_name='product', name='size_5xl', field=models.IntegerField(
default=0, verbose_name='5XL размер')), migrations.AddField(
model_name='product', name='size_6xl', field=models.IntegerField(
default=0, verbose_name='6XL размер')), migrations.AlterField(
model_name='product', name='price_opt_2', field=models.IntegerField
(default=0, verbose_name='- 3% от 30000')), migrations.AlterField(
model_name='product', name='price_opt_3', field=models.IntegerField
(default=0, verbose_name='- 7% от 70000')), migrations.AlterField(
model_name='product', name='price_opt_4', field=models.IntegerField
(default=0, verbose_name='- 11% от 110000')), migrations.AlterField
(model_name='product', name='sex', field=models.CharField(choices=[
('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), (
'Унисекс', 'Unisex')], default='Мужское', max_length=10))]
| # Generated by Django 3.1.6 on 2021-07-17 10:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0032_product_sex'),
]
operations = [
migrations.AddField(
model_name='product',
name='price_ret_sale',
field=models.IntegerField(default=0, verbose_name='Розничная цена, с учетом скидки'),
),
migrations.AddField(
model_name='product',
name='size_5xl',
field=models.IntegerField(default=0, verbose_name='5XL размер'),
),
migrations.AddField(
model_name='product',
name='size_6xl',
field=models.IntegerField(default=0, verbose_name='6XL размер'),
),
migrations.AlterField(
model_name='product',
name='price_opt_2',
field=models.IntegerField(default=0, verbose_name='- 3% от 30000'),
),
migrations.AlterField(
model_name='product',
name='price_opt_3',
field=models.IntegerField(default=0, verbose_name='- 7% от 70000'),
),
migrations.AlterField(
model_name='product',
name='price_opt_4',
field=models.IntegerField(default=0, verbose_name='- 11% от 110000'),
),
migrations.AlterField(
model_name='product',
name='sex',
field=models.CharField(choices=[('Мужское', 'Male'), ('Женское', 'Female'), ('Детское', 'Kids'), ('Унисекс', 'Unisex')], default='Мужское', max_length=10),
),
]
| [
0,
1,
2,
3,
4
] |
1,395 | 7404dd324d54bb072e56985716bbae746b4dd219 | <mask token>
| <mask token>
print(jsondata)
| <mask token>
r = requests.get('http://pythonspot.com/')
jsondata = str(r.headers).replace("'", '"')
print(jsondata)
| import requests
import json
r = requests.get('http://pythonspot.com/')
jsondata = str(r.headers).replace("'", '"')
print(jsondata)
| import requests
import json
r = requests.get('http://pythonspot.com/')
jsondata = str(r.headers).replace("'", '"')
print(jsondata)
#headerObj = json.loads(jsondata)
#ERROR >> json.decoder.JSONDecodeError: Expecting ',' delimiter: line 1 column 556 (char 555)
#print(headerObj)["server"]
#print(headerObj)['content-length']
#print(headerObj)['content-encoding']
#print(headerObj)['content-type']
#print(headerObj)['date']
#print(headerObj)['x-powered-by']
## I could not the problem. | [
0,
1,
2,
3,
4
] |
1,396 | 90f1fd45d58c7e6f275a33cd9c693ff584b2df47 | <mask token>
| def print99():
"""
打印99乘法口诀表
:return:
"""
for i in range(1, 10):
for j in range(1, i + 1):
print('%dX%d=%2s ' % (j, i, i * j))
print('\n')
<mask token>
| def print99():
"""
打印99乘法口诀表
:return:
"""
for i in range(1, 10):
for j in range(1, i + 1):
print('%dX%d=%2s ' % (j, i, i * j))
print('\n')
print99()
| #-*- coding: utf-8 -*-
def print99():
"""
打印99乘法口诀表
:return:
"""
for i in range(1,10):
for j in range(1, i+1):
print('%dX%d=%2s ' %(j,i,i*j))
print('\n')
print99()
| null | [
0,
1,
2,
3
] |
1,397 | 1e9afe6435285da6c6efb678177587d7ba5a01b2 | import tornado.httpserver
import tornado.websocket
import tornado.ioloop
import tornado.web
import tornado.options
import serial
import time
from datetime import timedelta
import cv2
import time
from datetime import datetime
#for webcam users
camera=cv2.VideoCapture(0)
#for picam users
#import picam
#camera=picam.OpenCVCapture()
#if you prefer to change the resolution of the image otherwise comment below 2 lines
ret = camera.set(3,320) #width
ret = camera.set(4,240) #height
#ret=camera.set(10,0.6)
face_cascade = cv2.CascadeClassifier('/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml')
clients = []
f=open("/home/pi/visitor_project/register.txt","a")
class WSHandler(tornado.websocket.WebSocketHandler):
def check_origin(self, origin):
return True
def open(self):
print 'A Client Is Connected'
clients.append(self)
def on_message(self, message):
print 'Incoming status', message
#a=message.split("!")
if message=='who':
count=0
list1=[]
a=""
f=open("/home/pi/visitor_project/register.txt","r")
for line in f.readlines():
if len(line) != 1 :
list1.append(line)
#count=count+1
f.close()
a=''.join(map(str,list1))
self.write_message(a)
def on_close(self):
print 'Client Closed the Connecttion '
clients.remove(self)
def send_message_to_clients(msg):
for client in clients:
client.write_message(msg)
def function_second():
ret, image=camera.read()
# gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
# faces = face_cascade.detectMultiScale(gray, 1.3, 4)
faces = face_cascade.detectMultiScale(gray,
scaleFactor=1.3,
minNeighbors=3,
minSize=(30,30),
flags=cv2.CASCADE_SCALE_IMAGE)
print "Found "+str(len(faces))+" face(s)"
#Draw a rectangle around every found face
for (x,y,w,h) in faces:
cv2.rectangle(image,(x,y),(x+w,y+h),(255,0,0),2)
if len(faces)>=1:
send_message_to_clients(str(len(faces))+" Visitors")
cv2.imwrite('/home/pi/visitor_project/result.jpg',image)
gt=datetime.now().strftime('%Y-%m-%d- %H:%M:%S - ')
m="log-"+gt+str(len(faces))+" Visitors"
f.write("\n"+m)
tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=1),
function_second)
if __name__ == "__main__":
tornado.options.parse_command_line()
application=tornado.web.Application(handlers=[
(r"/ws",WSHandler),
(r'/visitor_project/(.*)',tornado.web.StaticFileHandler,{'path':'/home/pi/visitor_project'})
])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(3030)
tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=1),
function_second)
tornado.ioloop.IOLoop.instance().start()
| null | null | null | null | [
0
] |
1,398 | d9b6efce92e30267a9f992c4fea698fe14e0c3e4 | <mask token>
def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):
"""Add an existing vertex of the mesh to an existing face.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
key : hashable
The identifier of the vertex.
fkey : hashable
The identifier of the face.
v : hashable
The identifier of the vertex before which the new vertex should be added.
Notes
-----
The algorithm is merely there for convenience.
It does not check if the resulting mesh is still valid.
Examples
--------
Consider the following points and one face definition and the resulting mesh.
>>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]
>>> faces = [[0, 1, 2, 3]]
>>> mesh = Mesh.from_vertices_and_faces(points, faces)
>>> mesh.number_of_vertices()
5
>>> mesh.number_of_faces()
1
>>> mesh.face_degree(0)
4
>>> mesh.vertex_degree(4)
0
To add the isolated vertex to the single mesh face
>>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)
>>> mesh.face_degree(0)
5
>>> mesh.vertex_degree(4)
2
"""
vertices = mesh.face_vertices(fkey)
i = vertices.index(v)
u = vertices[i - 1]
vertices.insert(key, i - 1)
mesh.halfedge[u][key] = fkey
mesh.halfedge[key][v] = fkey
if u not in mesh.halfedge[key]:
mesh.halfedge[key][u] = None
if key not in mesh.halfedge[v]:
mesh.halfedge[v][key] = None
del mesh.halfedge[u][v]
if u in mesh.halfedge[v]:
del mesh.halfedge[v][u]
if (u, v) in mesh.edgedata:
del mesh.edgedata[u, v]
if (v, u) in mesh.edgedata:
del mesh.edgedata[v, u]
<mask token>
| <mask token>
def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):
"""Add an existing vertex of the mesh to an existing face.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
key : hashable
The identifier of the vertex.
fkey : hashable
The identifier of the face.
v : hashable
The identifier of the vertex before which the new vertex should be added.
Notes
-----
The algorithm is merely there for convenience.
It does not check if the resulting mesh is still valid.
Examples
--------
Consider the following points and one face definition and the resulting mesh.
>>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]
>>> faces = [[0, 1, 2, 3]]
>>> mesh = Mesh.from_vertices_and_faces(points, faces)
>>> mesh.number_of_vertices()
5
>>> mesh.number_of_faces()
1
>>> mesh.face_degree(0)
4
>>> mesh.vertex_degree(4)
0
To add the isolated vertex to the single mesh face
>>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)
>>> mesh.face_degree(0)
5
>>> mesh.vertex_degree(4)
2
"""
vertices = mesh.face_vertices(fkey)
i = vertices.index(v)
u = vertices[i - 1]
vertices.insert(key, i - 1)
mesh.halfedge[u][key] = fkey
mesh.halfedge[key][v] = fkey
if u not in mesh.halfedge[key]:
mesh.halfedge[key][u] = None
if key not in mesh.halfedge[v]:
mesh.halfedge[v][key] = None
del mesh.halfedge[u][v]
if u in mesh.halfedge[v]:
del mesh.halfedge[v][u]
if (u, v) in mesh.edgedata:
del mesh.edgedata[u, v]
if (v, u) in mesh.edgedata:
del mesh.edgedata[v, u]
if __name__ == '__main__':
pass
| <mask token>
__all__ = ['mesh_add_vertex_to_face_edge']
def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):
"""Add an existing vertex of the mesh to an existing face.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
key : hashable
The identifier of the vertex.
fkey : hashable
The identifier of the face.
v : hashable
The identifier of the vertex before which the new vertex should be added.
Notes
-----
The algorithm is merely there for convenience.
It does not check if the resulting mesh is still valid.
Examples
--------
Consider the following points and one face definition and the resulting mesh.
>>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]
>>> faces = [[0, 1, 2, 3]]
>>> mesh = Mesh.from_vertices_and_faces(points, faces)
>>> mesh.number_of_vertices()
5
>>> mesh.number_of_faces()
1
>>> mesh.face_degree(0)
4
>>> mesh.vertex_degree(4)
0
To add the isolated vertex to the single mesh face
>>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)
>>> mesh.face_degree(0)
5
>>> mesh.vertex_degree(4)
2
"""
vertices = mesh.face_vertices(fkey)
i = vertices.index(v)
u = vertices[i - 1]
vertices.insert(key, i - 1)
mesh.halfedge[u][key] = fkey
mesh.halfedge[key][v] = fkey
if u not in mesh.halfedge[key]:
mesh.halfedge[key][u] = None
if key not in mesh.halfedge[v]:
mesh.halfedge[v][key] = None
del mesh.halfedge[u][v]
if u in mesh.halfedge[v]:
del mesh.halfedge[v][u]
if (u, v) in mesh.edgedata:
del mesh.edgedata[u, v]
if (v, u) in mesh.edgedata:
del mesh.edgedata[v, u]
if __name__ == '__main__':
pass
| from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = ['mesh_add_vertex_to_face_edge']
def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):
"""Add an existing vertex of the mesh to an existing face.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
key : hashable
The identifier of the vertex.
fkey : hashable
The identifier of the face.
v : hashable
The identifier of the vertex before which the new vertex should be added.
Notes
-----
The algorithm is merely there for convenience.
It does not check if the resulting mesh is still valid.
Examples
--------
Consider the following points and one face definition and the resulting mesh.
>>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]
>>> faces = [[0, 1, 2, 3]]
>>> mesh = Mesh.from_vertices_and_faces(points, faces)
>>> mesh.number_of_vertices()
5
>>> mesh.number_of_faces()
1
>>> mesh.face_degree(0)
4
>>> mesh.vertex_degree(4)
0
To add the isolated vertex to the single mesh face
>>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)
>>> mesh.face_degree(0)
5
>>> mesh.vertex_degree(4)
2
"""
vertices = mesh.face_vertices(fkey)
i = vertices.index(v)
u = vertices[i - 1]
vertices.insert(key, i - 1)
mesh.halfedge[u][key] = fkey
mesh.halfedge[key][v] = fkey
if u not in mesh.halfedge[key]:
mesh.halfedge[key][u] = None
if key not in mesh.halfedge[v]:
mesh.halfedge[v][key] = None
del mesh.halfedge[u][v]
if u in mesh.halfedge[v]:
del mesh.halfedge[v][u]
if (u, v) in mesh.edgedata:
del mesh.edgedata[u, v]
if (v, u) in mesh.edgedata:
del mesh.edgedata[v, u]
if __name__ == '__main__':
pass
| from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = [
'mesh_add_vertex_to_face_edge'
]
def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):
"""Add an existing vertex of the mesh to an existing face.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
key : hashable
The identifier of the vertex.
fkey : hashable
The identifier of the face.
v : hashable
The identifier of the vertex before which the new vertex should be added.
Notes
-----
The algorithm is merely there for convenience.
It does not check if the resulting mesh is still valid.
Examples
--------
Consider the following points and one face definition and the resulting mesh.
>>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]
>>> faces = [[0, 1, 2, 3]]
>>> mesh = Mesh.from_vertices_and_faces(points, faces)
>>> mesh.number_of_vertices()
5
>>> mesh.number_of_faces()
1
>>> mesh.face_degree(0)
4
>>> mesh.vertex_degree(4)
0
To add the isolated vertex to the single mesh face
>>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)
>>> mesh.face_degree(0)
5
>>> mesh.vertex_degree(4)
2
"""
vertices = mesh.face_vertices(fkey)
i = vertices.index(v)
u = vertices[i - 1]
vertices.insert(key, i - 1)
mesh.halfedge[u][key] = fkey
mesh.halfedge[key][v] = fkey
if u not in mesh.halfedge[key]:
mesh.halfedge[key][u] = None
if key not in mesh.halfedge[v]:
mesh.halfedge[v][key] = None
del mesh.halfedge[u][v]
if u in mesh.halfedge[v]:
del mesh.halfedge[v][u]
if (u, v) in mesh.edgedata:
del mesh.edgedata[u, v]
if (v, u) in mesh.edgedata:
del mesh.edgedata[v, u]
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
| [
1,
2,
3,
4,
5
] |
1,399 | 27d9e6a868cfc18780ec9615e8dbc3b5ea2fd0c3 | <mask token>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
<mask token>
| <mask token>
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
if __name__ == '__main__':
app.run()
| <mask token>
app = Flask(__name__)
EMAIL_ADDRESS = os.environ.get('EMAIL_USER')
EMAIL_PASSWORD = os.environ.get('EMAIL_PASS')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
if __name__ == '__main__':
app.run()
| from flask import Flask, request, render_template, redirect
import os
import smtplib
from email.message import EmailMessage
app = Flask(__name__)
EMAIL_ADDRESS = os.environ.get('EMAIL_USER')
EMAIL_PASSWORD = os.environ.get('EMAIL_PASS')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/submit', methods=['POST'])
def submit():
if request.method == 'POST':
name = request.form['name']
email = request.form['email']
subject = request.form['subject']
message = request.form['message']
msg = EmailMessage()
msg['From'] = email
msg['To'] = EMAIL_ADDRESS
msg['Subject'] = subject
msg.set_content(message)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
smtp.send_message(msg)
return render_template('success.html')
return render_template('index.html')
if __name__ == '__main__':
app.run()
| null | [
2,
3,
4,
5
] |