repo_name (string, 7-94 chars) | repo_path (string, 4-237 chars) | repo_head_hexsha (string, 40 chars) | content (string, 10-680k chars) | apis (string, 2-840k chars)
---|---|---|---|---|
Hacky-DH/pytorch | test/jit/test_backend_nnapi.py | 80dc4be615854570aa39a7e36495897d8a040ecc | import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
def test_compile_spec_santiy(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
| [((13, 0, 13, 33), 'sys.path.append', 'sys.path.append', ({(13, 16, 13, 32): 'pytorch_test_dir'}, {}), '(pytorch_test_dir)', False, 'import sys\n'), ((34, 1, 34, 60), 'unittest.skipIf', 'unittest.skipIf', ({(34, 17, 34, 31): 'TEST_WITH_ASAN', (34, 33, 34, 59): '"""Unresolved bug with ASAN"""'}, {}), "(TEST_WITH_ASAN, 'Unresolved bug with ASAN')", False, 'import unittest\n'), ((12, 51, 12, 77), 'os.path.realpath', 'os.path.realpath', ({(12, 68, 12, 76): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((40, 17, 40, 33), 'torch.nn.PReLU', 'torch.nn.PReLU', ({}, {}), '()', False, 'import torch\n'), ((45, 8, 45, 46), 'torch.set_default_dtype', 'torch.set_default_dtype', ({(45, 32, 45, 45): 'torch.float32'}, {}), '(torch.float32)', False, 'import torch\n'), ((53, 15, 53, 77), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', ({(53, 40, 53, 47): '"""nnapi"""', (53, 49, 53, 62): 'traced_module', (53, 64, 53, 76): 'compile_spec'}, {}), "('nnapi', traced_module, compile_spec)", False, 'import torch\n'), ((58, 17, 58, 33), 'torch.nn.PReLU', 'torch.nn.PReLU', ({}, {}), '()', False, 'import torch\n'), ((59, 17, 59, 46), 'torch.jit.trace', 'torch.jit.trace', ({(59, 33, 59, 39): 'module', (59, 41, 59, 45): 'args'}, {}), '(module, args)', False, 'import torch\n'), ((69, 17, 69, 33), 'torch.nn.PReLU', 'torch.nn.PReLU', ({}, {}), '()', False, 'import torch\n'), ((70, 17, 70, 46), 'torch.jit.trace', 'torch.jit.trace', ({(70, 33, 70, 39): 'module', (70, 41, 70, 45): 'args'}, {}), '(module, args)', False, 'import torch\n'), ((113, 8, 113, 51), 'torch.set_default_dtype', 'torch.set_default_dtype', ({(113, 32, 113, 50): 'self.default_dtype'}, {}), '(self.default_dtype)', False, 'import torch\n'), ((32, 21, 32, 45), 'os.path.exists', 'os.path.exists', ({(32, 36, 32, 44): 'lib_path'}, {}), '(lib_path)', False, 'import os\n'), ((81, 12, 81, 67), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', ({(81, 37, 81, 44): '"""nnapi"""', (81, 46, 81, 52): 'traced', (81, 54, 81, 66): 'compile_spec'}, {}), "('nnapi', traced, compile_spec)", False, 'import torch\n'), ((89, 12, 89, 67), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', ({(89, 37, 89, 44): '"""nnapi"""', (89, 46, 89, 52): 'traced', (89, 54, 89, 66): 'compile_spec'}, {}), "('nnapi', traced, compile_spec)", False, 'import torch\n'), ((97, 12, 97, 67), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', ({(97, 37, 97, 44): '"""nnapi"""', (97, 46, 97, 52): 'traced', (97, 54, 97, 66): 'compile_spec'}, {}), "('nnapi', traced, compile_spec)", False, 'import torch\n'), ((104, 12, 104, 67), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', ({(104, 37, 104, 44): '"""nnapi"""', (104, 46, 104, 52): 'traced', (104, 54, 104, 66): 'compile_spec'}, {}), "('nnapi', traced, compile_spec)", False, 'import torch\n'), ((109, 12, 109, 67), 'torch._C._jit_to_backend', 'torch._C._jit_to_backend', ({(109, 37, 109, 44): '"""nnapi"""', (109, 46, 109, 52): 'traced', (109, 54, 109, 66): 'compile_spec'}, {}), "('nnapi', traced, compile_spec)", False, 'import torch\n'), ((30, 13, 30, 27), 'pathlib.Path', 'Path', ({(30, 18, 30, 26): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n'), ((57, 15, 57, 53), 'torch.tensor', 'torch.tensor', ({(57, 28, 57, 52): '[[1.0, -1.0, 2.0, -2.0]]'}, {}), '([[1.0, -1.0, 2.0, -2.0]])', False, 'import torch\n'), ((68, 15, 68, 53), 'torch.tensor', 'torch.tensor', ({(68, 28, 68, 52): '[[1.0, -1.0, 2.0, -2.0]]'}, {}), '([[1.0, -1.0, 2.0, -2.0]])', False, 'import torch\n')] |
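For context, a minimal standalone sketch of the delegate-lowering flow exercised by the test file above. It is only a sketch: it assumes a PyTorch build that produced the NNAPI delegate library at `build/lib/libnnapi_backend.so` (the same `lib_path` the test computes), and it reuses the PReLU example and compile-spec format taken directly from the test.

```python
import torch

# Assumes the NNAPI delegate library was built alongside PyTorch;
# the path mirrors the lib_path computed in the test file above.
torch.ops.load_library("build/lib/libnnapi_backend.so")

args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
traced = torch.jit.trace(torch.nn.PReLU(), args)

# method_compile_spec format expected by the nnapi backend (see the test above).
compile_spec = {"forward": {"inputs": args}}
lowered = torch._C._jit_to_backend("nnapi", traced, compile_spec)
```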
syxu828/Graph2Seq-0.1 | main/configure.py | 36e38f755c0ee390735e49121259151da54bcc1c | train_data_path = "../data/no_cycle/train.data"
dev_data_path = "../data/no_cycle/dev.data"
test_data_path = "../data/no_cycle/test.data"
word_idx_file_path = "../data/word.idx"
word_embedding_dim = 100
train_batch_size = 32
dev_batch_size = 500
test_batch_size = 500
l2_lambda = 0.000001
learning_rate = 0.001
epochs = 100
encoder_hidden_dim = 200
num_layers_decode = 1
word_size_max = 1
dropout = 0.0
path_embed_method = "lstm" # cnn or lstm or bi-lstm
unknown_word = "<unk>"
PAD = "<PAD>"
GO = "<GO>"
EOS = "<EOS>"
deal_unknown_words = True
seq_max_len = 11
decoder_type = "greedy" # greedy, beam
beam_width = 4
attention = True
num_layers = 1 # 1 or 2
# the following are for the graph encoding method
weight_decay = 0.0000
sample_size_per_layer = 4
sample_layer_size = 4
hidden_layer_dim = 100
feature_max_len = 1
feature_encode_type = "uni"
# graph_encode_method = "max-pooling" # "lstm" or "max-pooling"
graph_encode_direction = "bi" # "single" or "bi"
concat = True
encoder = "gated_gcn" # "gated_gcn" "gcn" "seq"
lstm_in_gcn = "none" # before, after, none
| [] |
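A short hedged sketch of how a training script might consume the module-level settings above; only the constant names come from `configure.py`, the surrounding script is illustrative.

```python
# Hypothetical consumer of the settings defined in configure.py.
import configure as conf

print("training with lr=%s, batch_size=%s, encoder=%s, beam_width=%s"
      % (conf.learning_rate, conf.train_batch_size, conf.encoder, conf.beam_width))
```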
andreasbayer/AEGUIFit | dataControlWidget.py | 6a1e31091b74d648d007c75c9fef6efae4086860 | from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox
from InftyDoubleSpinBox import InftyDoubleSpinBox
from PyQt5.QtCore import pyqtSignal, Qt
import helplib as hl
import numpy as np
class dataControlWidget(QGroupBox):
showErrorBars_changed = pyqtSignal(bool)
ignoreFirstPoint_changed = pyqtSignal(bool)
data_changed = pyqtSignal(bool, bool)
data_shift = pyqtSignal(np.float64)
load_fits = pyqtSignal(list)
load_view = pyqtSignal(str)
load_meta = pyqtSignal(str)
fit_on_startup = pyqtSignal()
SHOW_ERROR_BARS = "Show error bars"
SHOW_ERROR_BARS_NOT_LOADED = "Show error bars (could not be calculated)"
def __init__(self):
QWidget.__init__(self)
self.setTitle('Data Settings')
self.__lblEnergyShift = QLabel("Energy Shift:")
self.__dsbEnergyShift = InftyDoubleSpinBox()
self.__dsbEnergyShift.editingFinished.connect(self.__energyShiftChanged)
self.__dsbEnergyShift.setSingleStep(0.01)
self.__chkShowErrorBars = QCheckBox(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.stateChanged.connect(self.__chkShowErrorBars_changed)
self.__chkIgnoreFirstPoint = QCheckBox('Ignore first data point.')
self.__chkIgnoreFirstPoint.stateChanged.connect(self.__chkIgnoreFirstPoint_changed)
self.__mainLayout = QGridLayout()
self.setLayout(self.__mainLayout)
self.__mainLayout.setAlignment(Qt.AlignTop)
self.__mainLayout.addWidget(self.__lblEnergyShift, 0, 0)
self.__mainLayout.addWidget(self.__dsbEnergyShift, 0, 1)
self.__mainLayout.addWidget(self.__chkShowErrorBars, 1, 0, 1, 2)
self.__mainLayout.addWidget(self.__chkIgnoreFirstPoint, 2, 0, 1, 2)
self.__chkIgnoreFirstPoint.setVisible(False)
self.reset(False)
def reset(self, enable):
self.__data = None
self.__all_data = None
self.__stdErrors = None
self.__chkShowErrorBars.setCheckable(True)
self.__chkShowErrorBars.setChecked(False)
self.__chkShowErrorBars.setEnabled(False)
self.__chkIgnoreFirstPoint.setCheckable(True)
self.__chkIgnoreFirstPoint.setChecked(False)
self.__chkIgnoreFirstPoint.setEnabled(False)
self.setEnergyShift(0.0)
self.__prevShift = 0.0
self.setEnabled(enable)
def __chkShowErrorBars_changed(self, state):
self.__chkShowErrorBars.setCheckState(state)
self.showErrorBars_changed.emit(self.getShowErrorBars())
def __chkIgnoreFirstPoint_changed(self, state):
self.__chkIgnoreFirstPoint.setCheckState(state)
self.ignoreFirstPoint_changed.emit(self.getIgnoreFirstPoint())
def __energyShiftChanged(self):
self.cause_shift()
def cause_shift(self):
energyShift = self.__dsbEnergyShift.value()
increment = energyShift - self.__prevShift
self.__prevShift = energyShift
self.data_shift.emit(increment)
self.data_changed.emit(self.getShowErrorBars(), self.getIgnoreFirstPoint())
# def setData(self, data):
# self.__data = data
def getData(self):
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__data[first_point:,]
def getEnergyShift(self):
return (self.__dsbEnergyShift.value())
def setEnergyShift(self, value):
#increment = self.__dsbEnergyShift.value() - value
increment = value - self.__dsbEnergyShift.value()
self.__dsbEnergyShift.setValue(value)
#self.__shiftData(increment)
#self.data_shift.emit(increment)
def __shiftData(self, increment):
try:
if self.__data is not None:
for set in self.__data:
set[0] += increment
except Exception as e:
print(e)
def getStdErrors(self):
if self.__stdErrors is not None:
first_point = 0
if self.getIgnoreFirstPoint():
first_point = 1
return self.__stdErrors[first_point:]
else:
return None
def getMax_Energy(self):
if self.getData() is not None:
return self.getData()[-1][0]
else:
return None
def getMin_Energy(self):
if self.getData() is not None:
return self.getData()[0][0]
else:
return None
def getShowErrorBars(self):
return self.__chkShowErrorBars.isChecked()
def setShowErrorBars(self, value):
self.__chkShowErrorBars.setChecked(value)
def getIgnoreFirstPoint(self):
return self.__chkIgnoreFirstPoint.isChecked()
def setIgnoreFirstPoint(self, value):
self.__chkIgnoreFirstPoint.setChecked(value)
def hasStdErrors(self):
return self.__stdErrors is not None
def loadFile(self, fileName, id_string):
self.__all_data, self.__stdErrors, (fit_strings, view_string, data_string, meta_string), id_found =\
hl.readFileForFitsDataAndStdErrorAndMetaData(fileName, id_string)
#we need a copy to not save any altered data!
self.__data = (self.__all_data[:, 0:2]).copy()
if len(self.__data) <= 1:
raise Exception("Not enough data in file!")
if self.hasStdErrors():
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS)
else:
self.__chkShowErrorBars.setText(self.SHOW_ERROR_BARS_NOT_LOADED)
self.__chkShowErrorBars.setEnabled(self.hasStdErrors())
self.__chkShowErrorBars.setChecked(self.hasStdErrors())
self.__chkIgnoreFirstPoint.setEnabled(True)
self.data_changed.emit(self.hasStdErrors(), self.getIgnoreFirstPoint())
self.load_fits.emit(fit_strings)
self.load_view.emit(view_string)
self.load_meta.emit(meta_string)
self.load_from_data_string(data_string)
self.cause_shift()
self.fit_on_startup.emit()
return id_found
def load_from_data_string(self, data_string):
if data_string is not None:
split_string = data_string.split('\v')
for i in range(0, len(split_string)):
item = split_string[i].split('=')
if len(item) == 2:
if (item[0] == 'egs'):
self.setEnergyShift(np.float64(item[1]))
elif item[0] == 'seb':
if item[1] == '1' or item[1] == 'True':
self.setShowErrorBars(True)
elif item[1] == '0' or item[1] == 'False':
self.setShowErrorBars(False)
elif item[0] == 'ifd':
if item[1] == '1' or item[1] == 'True':
self.setIgnoreFirstPoint(True)
elif item[1] == '0' or item[1] == 'False':
self.setIgnoreFirstPoint(False)
def get_data_string(self):
return 'egs=' + str(self.getEnergyShift()) + '\vseb=' + str(self.getShowErrorBars()) +\
'\vifd=' + str(self.getIgnoreFirstPoint())
def saveFile(self, fileName, id_string, fit_strings, view_string, data_string, meta_string):
hl.saveFilewithMetaData(id_string, fileName, self.__all_data, (fit_strings, view_string, data_string, meta_string))
| [((8, 28, 8, 44), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({(8, 39, 8, 43): 'bool'}, {}), '(bool)', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((9, 31, 9, 47), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({(9, 42, 9, 46): 'bool'}, {}), '(bool)', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((10, 19, 10, 41), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({(10, 30, 10, 34): 'bool', (10, 36, 10, 40): 'bool'}, {}), '(bool, bool)', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((11, 17, 11, 39), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({(11, 28, 11, 38): 'np.float64'}, {}), '(np.float64)', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((12, 16, 12, 32), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({(12, 27, 12, 31): 'list'}, {}), '(list)', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((13, 16, 13, 31), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({(13, 27, 13, 30): 'str'}, {}), '(str)', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((14, 16, 14, 31), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({(14, 27, 14, 30): 'str'}, {}), '(str)', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((15, 21, 15, 33), 'PyQt5.QtCore.pyqtSignal', 'pyqtSignal', ({}, {}), '()', False, 'from PyQt5.QtCore import pyqtSignal, Qt\n'), ((21, 8, 21, 30), 'PyQt5.QtWidgets.QWidget.__init__', 'QWidget.__init__', ({(21, 25, 21, 29): 'self'}, {}), '(self)', False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((25, 32, 25, 55), 'PyQt5.QtWidgets.QLabel', 'QLabel', ({(25, 39, 25, 54): '"""Energy Shift:"""'}, {}), "('Energy Shift:')", False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((26, 32, 26, 52), 'InftyDoubleSpinBox.InftyDoubleSpinBox', 'InftyDoubleSpinBox', ({}, {}), '()', False, 'from InftyDoubleSpinBox import InftyDoubleSpinBox\n'), ((30, 34, 30, 76), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', ({(30, 44, 30, 75): 'self.SHOW_ERROR_BARS_NOT_LOADED'}, {}), '(self.SHOW_ERROR_BARS_NOT_LOADED)', False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((33, 37, 33, 74), 'PyQt5.QtWidgets.QCheckBox', 'QCheckBox', ({(33, 47, 33, 73): '"""Ignore first data point."""'}, {}), "('Ignore first data point.')", False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((36, 28, 36, 41), 'PyQt5.QtWidgets.QGridLayout', 'QGridLayout', ({}, {}), '()', False, 'from PyQt5.QtWidgets import QLabel, QWidget, QGridLayout, QCheckBox, QGroupBox\n'), ((160, 12, 160, 77), 'helplib.readFileForFitsDataAndStdErrorAndMetaData', 'hl.readFileForFitsDataAndStdErrorAndMetaData', ({(160, 57, 160, 65): 'fileName', (160, 67, 160, 76): 'id_string'}, {}), '(fileName, id_string)', True, 'import helplib as hl\n'), ((216, 8, 216, 123), 'helplib.saveFilewithMetaData', 'hl.saveFilewithMetaData', ({(216, 32, 216, 41): 'id_string', (216, 43, 216, 51): 'fileName', (216, 53, 216, 68): 'self.__all_data', (216, 70, 216, 122): '(fit_strings, view_string, data_string, meta_string)'}, {}), '(id_string, fileName, self.__all_data, (fit_strings,\n view_string, data_string, meta_string))', True, 'import helplib as hl\n'), ((199, 44, 199, 63), 'numpy.float64', 'np.float64', ({(199, 55, 199, 62): 'item[1]'}, {}), '(item[1])', True, 'import numpy as np\n')] |
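A minimal usage sketch for the widget above, assuming PyQt5 is installed and that `dataControlWidget.py` and its `InftyDoubleSpinBox` dependency are importable. The class, signal, and method names come from the file itself; the host window and the print callbacks are hypothetical.

```python
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from dataControlWidget import dataControlWidget

app = QApplication(sys.argv)

ctrl = dataControlWidget()
# React to the signals the widget emits (signatures taken from the class above).
ctrl.data_changed.connect(lambda has_err, ignore_first: print("data changed", has_err, ignore_first))
ctrl.data_shift.connect(lambda increment: print("energy shift incremented by", increment))

win = QMainWindow()
win.setCentralWidget(ctrl)
win.show()
sys.exit(app.exec_())
```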
strawlab/flyvr | src/freemovr_engine/calib/acquire.py | 335892cae740e53e82e07b526e1ba53fbd34b0ce | import roslib
roslib.load_manifest('sensor_msgs')
roslib.load_manifest('dynamic_reconfigure')
import rospy
import sensor_msgs.msg
import dynamic_reconfigure.srv
import dynamic_reconfigure.encoding
import numpy as np
import time
import os.path
import queue
class CameraHandler(object):
def __init__(self,topic_prefix='',debug=False,enable_dynamic_reconfigure=False):
self.topic_prefix=topic_prefix
self.debug = debug
rospy.Subscriber( '%s/image_raw'%self.topic_prefix, sensor_msgs.msg.Image,
self.get_image_callback)
self.pipeline_max_latency = 0.2
self.last_image = None
self.im_queue = None
self.recon = None
if enable_dynamic_reconfigure:
self.recon = rospy.ServiceProxy('%s/set_parameters'%self.topic_prefix, dynamic_reconfigure.srv.Reconfigure)
self.recon_cache = {}
def reconfigure(self, **params):
if self.recon is not None:
changed = {}
for k,v in list(params.items()):
if k in self.recon_cache:
if self.recon_cache[k] != v:
changed[k] = v
else:
changed[k] = v
if changed:
msg = dynamic_reconfigure.encoding.encode_config(params)
self.recon_cache.update(changed)
self.recon(msg)
if self.im_queue is not None:
#clear the queue so we get a new image with the new settings
while True:
try:
self.im_queue.get_nowait()
except queue.Empty:
break
def set_im_queue(self,q):
self.im_queue = q
def get_image_callback(self,msg):
if self.im_queue is None:
return
try:
if self.debug:
print("%s got image: %f" % (self.topic_prefix, msg.header.stamp.to_sec()))
self.im_queue.put_nowait((self.topic_prefix,msg))
except queue.Full:
if self.debug:
print(self.topic_prefix,"full")
class _Runner(object):
def __init__(self,cam_handlers,ros_latency=0.2,queue_depth=20):
self.cam_handlers = cam_handlers
self.im_queue = queue.Queue(len(cam_handlers)*queue_depth)
for ch in self.cam_handlers:
ch.set_im_queue(self.im_queue)
self.ros_latency = ros_latency
self.max_cam_latency = max( [ch.pipeline_max_latency for ch in self.cam_handlers ])
self._result = {}
@property
def result(self):
return self._result
@property
def result_as_nparray(self):
res = {}
for cam in self._result:
nimgs = len(self._result[cam])
tmpres = [0]*nimgs
for i in range(nimgs):
msg = self._result[cam][i]
shape = (msg.height, msg.width)
imarr = np.fromstring(msg.data,dtype=np.uint8)
imarr.shape = (msg.height, msg.width)
tmpres[i] = imarr
#sad to use dstack here, IMO res[cam][:,:,i] = imarr
#should have worked.
res[cam] = np.dstack(tmpres)
return res
def cycle_duration( self, dur ):
tstart = time.time()
while (time.time() - tstart) < dur:
time.sleep(0.05) # wait 50 msec
def clear_queue(self):
q = self.im_queue
while 1:
try:
q.get_nowait()
except queue.Empty:
break
def _is_done(self,rdict,n_per_camera,verbose=False):
done=True
for topic_prefix in list(rdict.keys()):
if verbose:
rospy.loginfo(' _is_done() has %d frames for %r'%(len(rdict[topic_prefix]), topic_prefix))
if len(rdict[topic_prefix]) < n_per_camera:
done=False
return done
class SimultaneousCameraRunner(_Runner):
def __init__(self,cam_handlers,**kwargs):
_Runner.__init__(self, cam_handlers,**kwargs)
def get_images(self,n_per_camera, pre_func=None, pre_func_args=[], post_func=None, post_func_args=[], verbose=False):
self._result.clear()
for ch in self.cam_handlers:
self._result[ch.topic_prefix] = []
#clear the queue
self.clear_queue()
if pre_func: pre_func(*pre_func_args)
t_latest = time.time() + (self.ros_latency + self.max_cam_latency)*n_per_camera
#wait for the images to arrive
while not self._is_done(self._result,n_per_camera,verbose=verbose):
try:
topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
except queue.Empty:
continue
t_image = msg.header.stamp.to_sec()
if t_image > t_latest:
rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
self._result[topic_prefix].append( msg )
if post_func: post_func(*post_func_args)
class SequentialCameraRunner(_Runner):
def __init__(self,cam_handlers,**kwargs):
_Runner.__init__(self, cam_handlers,**kwargs)
self.wait_duration = kwargs.get("wait_duration", 0.1)
self.check_earliest = False
self.check_latest = False
def get_images(self,n_per_camera,verbose=False):
self._result.clear()
for ch in self.cam_handlers:
self._result[ch.topic_prefix] = []
t_earliest = time.time()
self.clear_queue()
t_latest = t_earliest + (self.ros_latency + self.max_cam_latency)
while not self._is_done(self._result,n_per_camera,verbose=verbose):
try:
topic_prefix, msg = self.im_queue.get(1,10.0) # block, 10 second timeout
except queue.Empty:
continue
t_image = msg.header.stamp.to_sec()
if self.check_latest and t_image > t_latest:
rospy.logwarn("image from %s at t=%f was too slow (by %f)" % (topic_prefix, t_image, t_image - t_latest))
if self.check_earliest and t_image < t_earliest:
rospy.logwarn("image from %s at t=%f was too early (by %f)" % (topic_prefix, t_image, t_earliest - t_image))
continue
self._result[topic_prefix].append( msg )
| [((2, 0, 2, 35), 'roslib.load_manifest', 'roslib.load_manifest', ({(2, 21, 2, 34): '"""sensor_msgs"""'}, {}), "('sensor_msgs')", False, 'import roslib\n'), ((3, 0, 3, 43), 'roslib.load_manifest', 'roslib.load_manifest', ({(3, 21, 3, 42): '"""dynamic_reconfigure"""'}, {}), "('dynamic_reconfigure')", False, 'import roslib\n'), ((19, 8, 20, 50), 'rospy.Subscriber', 'rospy.Subscriber', ({(19, 26, 19, 58): "('%s/image_raw' % self.topic_prefix)", (19, 60, 19, 81): 'sensor_msgs.msg.Image', (20, 26, 20, 49): 'self.get_image_callback'}, {}), "('%s/image_raw' % self.topic_prefix, sensor_msgs.msg.Image,\n self.get_image_callback)", False, 'import rospy\n'), ((98, 17, 98, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((159, 21, 159, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((27, 25, 27, 119), 'rospy.ServiceProxy', 'rospy.ServiceProxy', ({(27, 44, 27, 81): "'%s/set_parameters' % self.topic_prefix", (27, 83, 27, 118): 'dynamic_reconfigure.srv.Reconfigure'}, {}), "('%s/set_parameters' % self.topic_prefix,\n dynamic_reconfigure.srv.Reconfigure)", False, 'import rospy\n'), ((94, 23, 94, 40), 'numpy.dstack', 'np.dstack', ({(94, 33, 94, 39): 'tmpres'}, {}), '(tmpres)', True, 'import numpy as np\n'), ((100, 12, 100, 28), 'time.sleep', 'time.sleep', ({(100, 23, 100, 27): '(0.05)'}, {}), '(0.05)', False, 'import time\n'), ((132, 19, 132, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((89, 24, 89, 62), 'numpy.fromstring', 'np.fromstring', (), '', True, 'import numpy as np\n'), ((99, 15, 99, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((142, 16, 142, 121), 'rospy.logwarn', 'rospy.logwarn', ({(142, 30, 142, 120): "('image from %s at t=%f was too slow (by %f)' % (topic_prefix, t_image, \n t_image - t_latest))"}, {}), "('image from %s at t=%f was too slow (by %f)' % (topic_prefix,\n t_image, t_image - t_latest))", False, 'import rospy\n'), ((171, 16, 171, 121), 'rospy.logwarn', 'rospy.logwarn', ({(171, 30, 171, 120): "('image from %s at t=%f was too slow (by %f)' % (topic_prefix, t_image, \n t_image - t_latest))"}, {}), "('image from %s at t=%f was too slow (by %f)' % (topic_prefix,\n t_image, t_image - t_latest))", False, 'import rospy\n'), ((173, 16, 173, 124), 'rospy.logwarn', 'rospy.logwarn', ({(173, 30, 173, 123): "('image from %s at t=%f was too early (by %f)' % (topic_prefix, t_image, \n t_earliest - t_image))"}, {}), "('image from %s at t=%f was too early (by %f)' % (topic_prefix,\n t_image, t_earliest - t_image))", False, 'import rospy\n')] |
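A brief sketch of how the classes above are typically wired together. It assumes a running ROS master and camera drivers publishing on `<prefix>/image_raw`; the class and method names come from the file itself, while the node name and topic prefixes are hypothetical.

```python
import rospy
from freemovr_engine.calib.acquire import CameraHandler, SimultaneousCameraRunner

# Assumes a ROS master is running and cameras publish on <prefix>/image_raw.
rospy.init_node('acquire_example')

handlers = [CameraHandler(topic_prefix=p) for p in ('/camera1', '/camera2')]
runner = SimultaneousCameraRunner(handlers)

runner.get_images(5)                    # block until 5 frames per camera arrive
for topic, stack in runner.result_as_nparray.items():
    print(topic, stack.shape)           # (height, width, 5)
```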
nixli/hfta | examples/hfht/pointnet_classification.py | 76274b5ee0e32732da20b153a3cc6550510d8a78 | import argparse
import logging
import numpy as np
import os
import pandas as pd
import random
import subprocess
from pathlib import Path
from hyperopt import hp
from hyperopt.pyll.stochastic import sample
from hfta.hfht import (tune_hyperparameters, attach_common_args,
rearrange_algorithm_kwargs, handle_integers,
generate_fusible_param_flags, generate_nonfusible_param)
from hfta.workflow import extract_logging_level
from hfta.hfht.utils import fuse_dicts
def main(args):
random.seed(args.seed)
np.random.seed(args.seed)
rng_state = np.random.RandomState(seed=args.seed)
fusibles = {
'lr': hp.uniform('lr', 0.0001, 0.01),
'beta1': hp.uniform('beta1', 0.001, 0.999),
'beta2': hp.uniform('beta2', 0.001, 0.999),
'weight_decay': hp.uniform('weight_decay', 0.0, 0.5),
'gamma': hp.uniform('gamma', 0.1, 0.9),
'step_size': hp.choice('step_size', (5, 10, 20, 40)),
}
nonfusibles = {
'batch_size': hp.choice('batch_size', (8, 16, 32)),
'feature_transform': hp.choice('feature_transform', (True, False)),
}
def _run(results_dir, epochs, iters_per_epoch, params, env_vars=None):
# Build the cmd.
cmd = [
'python',
'train_classification.py',
'--epochs',
str(epochs),
'--iters-per-epoch',
str(iters_per_epoch),
'--dataset',
args.dataset,
'--dataset_type',
args.dataset_type,
'--num_points',
str(args.num_points),
'--device',
args.device,
'--eval',
'--seed',
str(args.seed),
'--batch_size',
str(generate_nonfusible_param(params, 'batch_size')),
]
if results_dir is not None:
cmd.extend(['--outf', results_dir])
if generate_nonfusible_param(params, 'feature_transform'):
cmd.append('--feature_transform')
cmd.extend(
generate_fusible_param_flags(
params,
['lr', 'beta1', 'beta2', 'weight_decay', 'gamma', 'step_size'],
))
if args.mode == 'hfta':
cmd.append('--hfta')
if args.amp:
cmd.append('--amp')
# Launch the training process.
succeeded = True
try:
logging.info('--> Running cmd = {}'.format(cmd))
subprocess.run(
cmd,
stdout=subprocess.DEVNULL if results_dir is None else open(
os.path.join(results_dir, 'stdout.txt'),
'w',
),
stderr=subprocess.DEVNULL if results_dir is None else open(
os.path.join(results_dir, 'stderr.txt'),
'w',
),
check=True,
cwd=os.path.join(
os.path.abspath(os.path.expanduser(os.path.dirname(__file__))),
'../pointnet/'),
env=env_vars,
)
except subprocess.CalledProcessError as e:
logging.error(e)
succeeded = False
return succeeded
def try_params(ids, epochs, params, env_vars=None):
""" Running the training process for pointnet classification task.
Args:
ids: Either a single int ID (for serial), or a list of IDs (for HFTA).
epochs: number of epochs to run.
params: maps hyperparameter name to its value(s). For HFTA, the values are
provided as a list.
env_vars: optional, dict(str, str) that includes extra environment that
needs to be forwarded to the subprocess call
Returns:
result(s): A single result dict for serial or a list of result dicts for
HFTA in the same order as ids.
early_stop(s): Whether the training process early stopped. A single bool
for serial or a list of bools for HFTA in the same order as ids.
"""
epochs = int(round(epochs))
ids_str = (','.join([str(i) for i in ids]) if isinstance(
ids,
(list, tuple),
) else str(ids))
# Allocate result dir.
results_dir = os.path.join(args.outdir, ids_str)
Path(results_dir).mkdir(parents=True, exist_ok=True)
# Run training.
succeeded = _run(
results_dir,
epochs,
args.iters_per_epoch,
params,
env_vars=env_vars,
)
if not succeeded:
raise RuntimeError('_run failed!')
# Gather the results.
results_frame = pd.read_csv(os.path.join(results_dir, 'eval.csv'))
if isinstance(ids, (list, tuple)):
results = [{'acc': acc} for acc in results_frame['acc'].tolist()]
assert len(results) == len(ids)
return results, [False] * len(ids)
else:
return {'acc': results_frame['acc'][0]}, False
def dry_run(
B=None,
nonfusibles_kvs=None,
epochs=None,
iters_per_epoch=None,
env_vars=None,
):
params = [{
**handle_integers(sample(fusibles, rng=rng_state)),
**nonfusibles_kvs
} for _ in range(max(B, 1))]
if B > 0:
params = fuse_dicts(params)
else:
params = params[0]
return _run(None, epochs, iters_per_epoch, params, env_vars=env_vars)
tune_hyperparameters(
space={
**fusibles,
**nonfusibles
},
try_params_callback=try_params,
dry_run_callback=dry_run,
mode=args.mode,
algorithm=args.algorithm,
nonfusibles=nonfusibles.keys(),
dry_run_repeats=args.dry_run_repeats,
dry_run_epochs=args.dry_run_epochs,
dry_run_iters_per_epoch=args.dry_run_iters_per_epoch,
metric='acc',
goal='max',
algorithm_configs={
'hyperband': args.hyperband_kwargs,
'random': args.random_kwargs,
},
seed=args.seed,
outdir=args.outdir,
)
def attach_args(parser=argparse.ArgumentParser()):
parser.add_argument(
'--workers',
type=int,
help='number of data loading workers',
default=4,
)
parser.add_argument(
'--iters-per-epoch',
type=int,
default=int(1e9),
help='number of epochs to train for',
)
parser.add_argument('--dataset', type=str, required=True, help="dataset path")
parser.add_argument(
'--dataset-type',
type=str,
default='shapenet',
help="dataset type shapenet|modelnet40",
)
parser.add_argument(
'--num-points',
type=int,
default=2500,
help='num of points for dataset',
)
parser.add_argument(
'--device',
type=str,
default='cuda',
choices=['cpu', 'cuda', 'xla'],
help="the device where this test is running",
)
parser.add_argument(
'--amp',
default=False,
action='store_true',
help='Enable AMP; only used when --device is cuda',
)
parser = attach_common_args(parser)
return parser
if __name__ == '__main__':
args = attach_args().parse_args()
rearrange_algorithm_kwargs(args)
logging.basicConfig(level=extract_logging_level(args))
args.outdir = os.path.abspath(os.path.expanduser(args.outdir))
args.dataset = os.path.abspath(os.path.expanduser(args.dataset))
main(args)
| [((21, 2, 21, 24), 'random.seed', 'random.seed', ({(21, 14, 21, 23): 'args.seed'}, {}), '(args.seed)', False, 'import random\n'), ((22, 2, 22, 27), 'numpy.random.seed', 'np.random.seed', ({(22, 17, 22, 26): 'args.seed'}, {}), '(args.seed)', True, 'import numpy as np\n'), ((23, 14, 23, 51), 'numpy.random.RandomState', 'np.random.RandomState', (), '', True, 'import numpy as np\n'), ((185, 23, 185, 48), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((224, 11, 224, 37), 'hfta.hfht.attach_common_args', 'attach_common_args', ({(224, 30, 224, 36): 'parser'}, {}), '(parser)', False, 'from hfta.hfht import tune_hyperparameters, attach_common_args, rearrange_algorithm_kwargs, handle_integers, generate_fusible_param_flags, generate_nonfusible_param\n'), ((230, 2, 230, 34), 'hfta.hfht.rearrange_algorithm_kwargs', 'rearrange_algorithm_kwargs', ({(230, 29, 230, 33): 'args'}, {}), '(args)', False, 'from hfta.hfht import tune_hyperparameters, attach_common_args, rearrange_algorithm_kwargs, handle_integers, generate_fusible_param_flags, generate_nonfusible_param\n'), ((26, 12, 26, 42), 'hyperopt.hp.uniform', 'hp.uniform', ({(26, 23, 26, 27): '"""lr"""', (26, 29, 26, 35): '(0.0001)', (26, 37, 26, 41): '(0.01)'}, {}), "('lr', 0.0001, 0.01)", False, 'from hyperopt import hp\n'), ((27, 15, 27, 48), 'hyperopt.hp.uniform', 'hp.uniform', ({(27, 26, 27, 33): '"""beta1"""', (27, 35, 27, 40): '(0.001)', (27, 42, 27, 47): '(0.999)'}, {}), "('beta1', 0.001, 0.999)", False, 'from hyperopt import hp\n'), ((28, 15, 28, 48), 'hyperopt.hp.uniform', 'hp.uniform', ({(28, 26, 28, 33): '"""beta2"""', (28, 35, 28, 40): '(0.001)', (28, 42, 28, 47): '(0.999)'}, {}), "('beta2', 0.001, 0.999)", False, 'from hyperopt import hp\n'), ((29, 22, 29, 58), 'hyperopt.hp.uniform', 'hp.uniform', ({(29, 33, 29, 47): '"""weight_decay"""', (29, 49, 29, 52): '(0.0)', (29, 54, 29, 57): '(0.5)'}, {}), "('weight_decay', 0.0, 0.5)", False, 'from hyperopt import hp\n'), ((30, 15, 30, 44), 'hyperopt.hp.uniform', 'hp.uniform', ({(30, 26, 30, 33): '"""gamma"""', (30, 35, 30, 38): '(0.1)', (30, 40, 30, 43): '(0.9)'}, {}), "('gamma', 0.1, 0.9)", False, 'from hyperopt import hp\n'), ((31, 19, 31, 58), 'hyperopt.hp.choice', 'hp.choice', ({(31, 29, 31, 40): '"""step_size"""', (31, 42, 31, 57): '(5, 10, 20, 40)'}, {}), "('step_size', (5, 10, 20, 40))", False, 'from hyperopt import hp\n'), ((34, 20, 34, 56), 'hyperopt.hp.choice', 'hp.choice', ({(34, 30, 34, 42): '"""batch_size"""', (34, 44, 34, 55): '(8, 16, 32)'}, {}), "('batch_size', (8, 16, 32))", False, 'from hyperopt import hp\n'), ((35, 27, 35, 72), 'hyperopt.hp.choice', 'hp.choice', ({(35, 37, 35, 56): '"""feature_transform"""', (35, 58, 35, 71): '(True, False)'}, {}), "('feature_transform', (True, False))", False, 'from hyperopt import hp\n'), ((63, 7, 63, 61), 'hfta.hfht.generate_nonfusible_param', 'generate_nonfusible_param', ({(63, 33, 63, 39): 'params', (63, 41, 63, 60): '"""feature_transform"""'}, {}), "(params, 'feature_transform')", False, 'from hfta.hfht import tune_hyperparameters, attach_common_args, rearrange_algorithm_kwargs, handle_integers, generate_fusible_param_flags, generate_nonfusible_param\n'), ((123, 18, 123, 52), 'os.path.join', 'os.path.join', ({(123, 31, 123, 42): 'args.outdir', (123, 44, 123, 51): 'ids_str'}, {}), '(args.outdir, ids_str)', False, 'import os\n'), ((232, 32, 232, 63), 'os.path.expanduser', 'os.path.expanduser', ({(232, 51, 232, 62): 'args.outdir'}, {}), '(args.outdir)', False, 'import os\n'), ((233, 33, 233, 
65), 'os.path.expanduser', 'os.path.expanduser', ({(233, 52, 233, 64): 'args.dataset'}, {}), '(args.dataset)', False, 'import os\n'), ((66, 8, 69, 9), 'hfta.hfht.generate_fusible_param_flags', 'generate_fusible_param_flags', ({(67, 12, 67, 18): 'params', (68, 12, 68, 74): "['lr', 'beta1', 'beta2', 'weight_decay', 'gamma', 'step_size']"}, {}), "(params, ['lr', 'beta1', 'beta2',\n 'weight_decay', 'gamma', 'step_size'])", False, 'from hfta.hfht import tune_hyperparameters, attach_common_args, rearrange_algorithm_kwargs, handle_integers, generate_fusible_param_flags, generate_nonfusible_param\n'), ((136, 32, 136, 69), 'os.path.join', 'os.path.join', ({(136, 45, 136, 56): 'results_dir', (136, 58, 136, 68): '"""eval.csv"""'}, {}), "(results_dir, 'eval.csv')", False, 'import os\n'), ((156, 15, 156, 33), 'hfta.hfht.utils.fuse_dicts', 'fuse_dicts', ({(156, 26, 156, 32): 'params'}, {}), '(params)', False, 'from hfta.hfht.utils import fuse_dicts\n'), ((231, 28, 231, 55), 'hfta.workflow.extract_logging_level', 'extract_logging_level', ({(231, 50, 231, 54): 'args'}, {}), '(args)', False, 'from hfta.workflow import extract_logging_level\n'), ((59, 12, 59, 59), 'hfta.hfht.generate_nonfusible_param', 'generate_nonfusible_param', ({(59, 38, 59, 44): 'params', (59, 46, 59, 58): '"""batch_size"""'}, {}), "(params, 'batch_size')", False, 'from hfta.hfht import tune_hyperparameters, attach_common_args, rearrange_algorithm_kwargs, handle_integers, generate_fusible_param_flags, generate_nonfusible_param\n'), ((96, 6, 96, 22), 'logging.error', 'logging.error', ({(96, 20, 96, 21): 'e'}, {}), '(e)', False, 'import logging\n'), ((124, 4, 124, 21), 'pathlib.Path', 'Path', ({(124, 9, 124, 20): 'results_dir'}, {}), '(results_dir)', False, 'from pathlib import Path\n'), ((152, 26, 152, 57), 'hyperopt.pyll.stochastic.sample', 'sample', (), '', False, 'from hyperopt.pyll.stochastic import sample\n'), ((82, 14, 82, 53), 'os.path.join', 'os.path.join', ({(82, 27, 82, 38): 'results_dir', (82, 40, 82, 52): '"""stdout.txt"""'}, {}), "(results_dir, 'stdout.txt')", False, 'import os\n'), ((86, 14, 86, 53), 'os.path.join', 'os.path.join', ({(86, 27, 86, 38): 'results_dir', (86, 40, 86, 52): '"""stderr.txt"""'}, {}), "(results_dir, 'stderr.txt')", False, 'import os\n'), ((91, 49, 91, 74), 'os.path.dirname', 'os.path.dirname', ({(91, 65, 91, 73): '__file__'}, {}), '(__file__)', False, 'import os\n')] |
invinst/CPDBv2_backend | cpdb/trr/migrations/0002_alter_trr_subject_id_type.py | b4e96d620ff7a437500f525f7e911651e4a18ef9 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-06 04:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('trr', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='trr',
            name='subject_id',
            field=models.PositiveIntegerField(null=True),
        ),
    ]
| [((18, 18, 18, 56), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n')] |
Ostrokrzew/standalone-linux-io-tracer | tests/utils/dut.py | 5fcbe7f0c7b027d9e5fdfb4c6e9d553c6fa617b6 | #
# Copyright(c) 2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from core.test_run_utils import TestRun
from utils.installer import install_iotrace, check_if_installed
from utils.iotrace import IotracePlugin
from utils.misc import kill_all_io
from test_tools.fio.fio import Fio
def dut_prepare(reinstall: bool):
if not check_if_installed() or reinstall:
TestRun.LOGGER.info("Installing iotrace:")
install_iotrace()
else:
TestRun.LOGGER.info("iotrace is already installed by previous test")
# Call it after installing iotrace because we need iotrace
# to get valid paths
dut_cleanup()
fio = Fio()
if not fio.is_installed():
TestRun.LOGGER.info("Installing fio")
fio.install()
TestRun.LOGGER.info("Killing all IO")
kill_all_io()
def dut_cleanup():
iotrace: IotracePlugin = TestRun.plugins['iotrace']
TestRun.LOGGER.info("Stopping fuzzing")
TestRun.executor.run(f'{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean')
output = TestRun.executor.run('pgrep iotrace')
if output.stdout != "":
TestRun.executor.run(f'kill -9 {output.stdout}')
TestRun.LOGGER.info("Removing existing traces")
trace_repository_path: str = iotrace.get_trace_repository_path()
TestRun.executor.run_expect_success(f'rm -rf {trace_repository_path}/kernel')
| [((24, 10, 24, 15), 'test_tools.fio.fio.Fio', 'Fio', ({}, {}), '()', False, 'from test_tools.fio.fio import Fio\n'), ((29, 4, 29, 41), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', ({(29, 24, 29, 40): '"""Killing all IO"""'}, {}), "('Killing all IO')", False, 'from core.test_run_utils import TestRun\n'), ((30, 4, 30, 17), 'utils.misc.kill_all_io', 'kill_all_io', ({}, {}), '()', False, 'from utils.misc import kill_all_io\n'), ((36, 4, 36, 43), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', ({(36, 24, 36, 42): '"""Stopping fuzzing"""'}, {}), "('Stopping fuzzing')", False, 'from core.test_run_utils import TestRun\n'), ((37, 4, 37, 112), 'core.test_run_utils.TestRun.executor.run', 'TestRun.executor.run', ({(37, 25, 37, 111): 'f"""{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean"""'}, {}), "(\n f'{iotrace.working_dir}/standalone-linux-io-tracer/tests/security/fuzzy/fuzz.sh clean'\n )", False, 'from core.test_run_utils import TestRun\n'), ((39, 13, 39, 50), 'core.test_run_utils.TestRun.executor.run', 'TestRun.executor.run', ({(39, 34, 39, 49): '"""pgrep iotrace"""'}, {}), "('pgrep iotrace')", False, 'from core.test_run_utils import TestRun\n'), ((43, 4, 43, 51), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', ({(43, 24, 43, 50): '"""Removing existing traces"""'}, {}), "('Removing existing traces')", False, 'from core.test_run_utils import TestRun\n'), ((45, 4, 45, 81), 'core.test_run_utils.TestRun.executor.run_expect_success', 'TestRun.executor.run_expect_success', ({(45, 40, 45, 80): 'f"""rm -rf {trace_repository_path}/kernel"""'}, {}), "(f'rm -rf {trace_repository_path}/kernel')", False, 'from core.test_run_utils import TestRun\n'), ((15, 8, 15, 50), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', ({(15, 28, 15, 49): '"""Installing iotrace:"""'}, {}), "('Installing iotrace:')", False, 'from core.test_run_utils import TestRun\n'), ((16, 8, 16, 25), 'utils.installer.install_iotrace', 'install_iotrace', ({}, {}), '()', False, 'from utils.installer import install_iotrace, check_if_installed\n'), ((18, 8, 18, 76), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', ({(18, 28, 18, 75): '"""iotrace is already installed by previous test"""'}, {}), "('iotrace is already installed by previous test')", False, 'from core.test_run_utils import TestRun\n'), ((26, 8, 26, 45), 'core.test_run_utils.TestRun.LOGGER.info', 'TestRun.LOGGER.info', ({(26, 28, 26, 44): '"""Installing fio"""'}, {}), "('Installing fio')", False, 'from core.test_run_utils import TestRun\n'), ((41, 8, 41, 56), 'core.test_run_utils.TestRun.executor.run', 'TestRun.executor.run', ({(41, 29, 41, 55): 'f"""kill -9 {output.stdout}"""'}, {}), "(f'kill -9 {output.stdout}')", False, 'from core.test_run_utils import TestRun\n'), ((14, 11, 14, 31), 'utils.installer.check_if_installed', 'check_if_installed', ({}, {}), '()', False, 'from utils.installer import install_iotrace, check_if_installed\n')] |
Drew8521/MusiQ | game_service.py | e52671c7dcc4f54f6cbb829486a733a9179575b1 | from models import Song
from random import choice
def random_song(genre):
    # Fetch every song in the requested genre and pick one at random.
    results = Song.query().filter(Song.genre == genre).fetch()
    song = choice(results)
    # Return a plain dict; locals renamed so nothing shadows the function name.
    return {
        "title": song.song,
        "album": song.album,
        "artist": song.artist.lower(),
        "genre": genre,
    }
| [((7, 12, 7, 27), 'random.choice', 'choice', ({(7, 19, 7, 26): 'results'}, {}), '(results)', False, 'from random import choice\n'), ((5, 14, 5, 26), 'models.Song.query', 'Song.query', ({}, {}), '()', False, 'from models import Song\n')] |
janbodnar/Python-Course | stdlib/csv/custom_dialect.py | 51705ab5a2adef52bcdb99a800e94c0d67144a38 | #!/usr/bin/python
# custom_dialect.py
import csv
csv.register_dialect("hashes", delimiter="#")
f = open('items3.csv', 'w')

with f:
    writer = csv.writer(f, dialect="hashes")
    writer.writerow(("pencils", 2))
    writer.writerow(("plates", 1))
    writer.writerow(("books", 4))
| [((7, 0, 7, 45), 'csv.register_dialect', 'csv.register_dialect', (), '', False, 'import csv\n'), ((13, 13, 13, 44), 'csv.writer', 'csv.writer', (), '', False, 'import csv\n')] |
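To complement the writer above, a short read-back sketch using the same registered dialect; this part is not in the original file.

```python
# Read items3.csv back using the same '#'-delimited dialect.
import csv

csv.register_dialect("hashes", delimiter="#")

with open('items3.csv', 'r') as f:
    reader = csv.reader(f, dialect="hashes")
    for row in reader:
        print(row)   # e.g. ['pencils', '2']
```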
zorache/ServiceX_App | servicex/web/forms.py | 4479afa0f019bbdcd35812691e78abba442c9d37 | from typing import Optional
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField, SubmitField
from wtforms.validators import DataRequired, Length, Email
from servicex.models import UserModel
class ProfileForm(FlaskForm):
    name = StringField('Full Name', validators=[DataRequired(), Length(0, 120)])
    email = StringField('Email', validators=[DataRequired(), Email()])
    institution = StringField('Institution', validators=[DataRequired()])
    experiment = SelectField('Experiment', validators=[DataRequired()],
                             choices=[("ATLAS", "ATLAS"), ("CMS", "CMS")],
                             default="ATLAS")
    submit = SubmitField('Save Profile')

    def __init__(self, user: Optional[UserModel] = None):
        super().__init__()
        if user:
            self.name.data = user.name
            self.email.data = user.email
            self.institution.data = user.institution
            self.experiment.data = user.experiment
| [((17, 13, 17, 40), 'wtforms.SubmitField', 'SubmitField', ({(17, 25, 17, 39): '"""Save Profile"""'}, {}), "('Save Profile')", False, 'from wtforms import StringField, SelectField, SubmitField\n'), ((11, 48, 11, 62), 'wtforms.validators.DataRequired', 'DataRequired', ({}, {}), '()', False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((11, 64, 11, 78), 'wtforms.validators.Length', 'Length', ({(11, 71, 11, 72): '0', (11, 74, 11, 77): '120'}, {}), '(0, 120)', False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((12, 45, 12, 59), 'wtforms.validators.DataRequired', 'DataRequired', ({}, {}), '()', False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((12, 61, 12, 68), 'wtforms.validators.Email', 'Email', ({}, {}), '()', False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((13, 57, 13, 71), 'wtforms.validators.DataRequired', 'DataRequired', ({}, {}), '()', False, 'from wtforms.validators import DataRequired, Length, Email\n'), ((14, 55, 14, 69), 'wtforms.validators.DataRequired', 'DataRequired', ({}, {}), '()', False, 'from wtforms.validators import DataRequired, Length, Email\n')] |
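A hedged sketch of how the form above is typically used inside a Flask view. It assumes an application with SECRET_KEY/CSRF configured and a `UserModel` instance for the current user; the route, template name, and save logic are hypothetical.

```python
# Hypothetical Flask view built around ProfileForm.
from flask import redirect, render_template, url_for

def edit_profile(user):
    form = ProfileForm()                    # binds submitted form data, if any
    if form.validate_on_submit():
        user.name = form.name.data
        user.email = form.email.data
        user.institution = form.institution.data
        user.experiment = form.experiment.data
        return redirect(url_for('profile'))   # 'profile' endpoint is hypothetical
    # On GET, pre-fill the fields from the stored user record.
    return render_template('profile.html', form=ProfileForm(user))
```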
vijaykumawat256/Prompt-Summarization | data/studio21_generated/interview/1657/starter_code.py | 614f5911e2acd2933440d909de2b4f86653dc214 | def string_func(s, n):
    # Starter stub: the solution body is intentionally left to the exercise.
    pass
| [] |
linye931025/FPN_Tensorflow-master | libs/export_pbs/exportPb.py | e972496a798e9d77a74ddc6062d46b152d072ce7 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division
import os, sys
import tensorflow as tf
import tf_slim as slim
from tensorflow.python.tools import freeze_graph
sys.path.append('../../')
from data.io.image_preprocess import short_side_resize_for_inference_data
from libs.configs import cfgs
from libs.networks import build_whole_network
CKPT_PATH = '/home/yjr/PycharmProjects/Faster-RCNN_Tensorflow/output/trained_weights/FasterRCNN_20180517/voc_200000model.ckpt'
OUT_DIR = '../../output/Pbs'
PB_NAME = 'FasterRCNN_Res101_Pascal.pb'
def build_detection_graph():
# 1. preprocess img
img_plac = tf.placeholder(dtype=tf.uint8, shape=[None, None, 3],
name='input_img') # is RGB. not GBR
raw_shape = tf.shape(img_plac)
raw_h, raw_w = tf.to_float(raw_shape[0]), tf.to_float(raw_shape[1])
img_batch = tf.cast(img_plac, tf.float32)
img_batch = short_side_resize_for_inference_data(img_tensor=img_batch,
target_shortside_len=cfgs.IMG_SHORT_SIDE_LEN,
length_limitation=cfgs.IMG_MAX_LENGTH)
img_batch = img_batch - tf.constant(cfgs.PIXEL_MEAN)
img_batch = tf.expand_dims(img_batch, axis=0) # [1, None, None, 3]
det_net = build_whole_network.DetectionNetwork(base_network_name=cfgs.NET_NAME,
is_training=False)
detected_boxes, detection_scores, detection_category = det_net.build_whole_detection_network(
input_img_batch=img_batch,
gtboxes_batch=None)
xmin, ymin, xmax, ymax = detected_boxes[:, 0], detected_boxes[:, 1], \
detected_boxes[:, 2], detected_boxes[:, 3]
resized_shape = tf.shape(img_batch)
resized_h, resized_w = tf.to_float(resized_shape[1]), tf.to_float(resized_shape[2])
xmin = xmin * raw_w / resized_w
xmax = xmax * raw_w / resized_w
ymin = ymin * raw_h / resized_h
ymax = ymax * raw_h / resized_h
boxes = tf.transpose(tf.stack([xmin, ymin, xmax, ymax]))
dets = tf.concat([tf.reshape(detection_category, [-1, 1]),
tf.reshape(detection_scores, [-1, 1]),
boxes], axis=1, name='DetResults')
return dets
def export_frozenPB():
tf.reset_default_graph()
dets = build_detection_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
print("we have restred the weights from =====>>\n", CKPT_PATH)
saver.restore(sess, CKPT_PATH)
tf.train.write_graph(sess.graph_def, OUT_DIR, PB_NAME)
freeze_graph.freeze_graph(input_graph=os.path.join(OUT_DIR, PB_NAME),
input_saver='',
input_binary=False,
input_checkpoint=CKPT_PATH,
output_node_names="DetResults",
restore_op_name="save/restore_all",
filename_tensor_name='save/Const:0',
output_graph=os.path.join(OUT_DIR, PB_NAME.replace('.pb', '_Frozen.pb')),
clear_devices=False,
initializer_nodes='')
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = ''
export_frozenPB()
| [((10, 0, 10, 25), 'sys.path.append', 'sys.path.append', ({(10, 16, 10, 24): '"""../../"""'}, {}), "('../../')", False, 'import os, sys\n'), ((22, 15, 23, 47), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((24, 16, 24, 34), 'tensorflow.shape', 'tf.shape', ({(24, 25, 24, 33): 'img_plac'}, {}), '(img_plac)', True, 'import tensorflow as tf\n'), ((27, 16, 27, 45), 'tensorflow.cast', 'tf.cast', ({(27, 24, 27, 32): 'img_plac', (27, 34, 27, 44): 'tf.float32'}, {}), '(img_plac, tf.float32)', True, 'import tensorflow as tf\n'), ((28, 16, 30, 91), 'data.io.image_preprocess.short_side_resize_for_inference_data', 'short_side_resize_for_inference_data', (), '', False, 'from data.io.image_preprocess import short_side_resize_for_inference_data\n'), ((32, 16, 32, 49), 'tensorflow.expand_dims', 'tf.expand_dims', (), '', True, 'import tensorflow as tf\n'), ((34, 14, 35, 69), 'libs.networks.build_whole_network.DetectionNetwork', 'build_whole_network.DetectionNetwork', (), '', False, 'from libs.networks import build_whole_network\n'), ((44, 20, 44, 39), 'tensorflow.shape', 'tf.shape', ({(44, 29, 44, 38): 'img_batch'}, {}), '(img_batch)', True, 'import tensorflow as tf\n'), ((63, 4, 63, 28), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((67, 12, 67, 28), 'tensorflow.train.Saver', 'tf.train.Saver', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((25, 19, 25, 44), 'tensorflow.to_float', 'tf.to_float', ({(25, 31, 25, 43): 'raw_shape[0]'}, {}), '(raw_shape[0])', True, 'import tensorflow as tf\n'), ((25, 46, 25, 71), 'tensorflow.to_float', 'tf.to_float', ({(25, 58, 25, 70): 'raw_shape[1]'}, {}), '(raw_shape[1])', True, 'import tensorflow as tf\n'), ((31, 28, 31, 56), 'tensorflow.constant', 'tf.constant', ({(31, 40, 31, 55): 'cfgs.PIXEL_MEAN'}, {}), '(cfgs.PIXEL_MEAN)', True, 'import tensorflow as tf\n'), ((45, 27, 45, 56), 'tensorflow.to_float', 'tf.to_float', ({(45, 39, 45, 55): 'resized_shape[1]'}, {}), '(resized_shape[1])', True, 'import tensorflow as tf\n'), ((45, 58, 45, 87), 'tensorflow.to_float', 'tf.to_float', ({(45, 70, 45, 86): 'resized_shape[2]'}, {}), '(resized_shape[2])', True, 'import tensorflow as tf\n'), ((53, 25, 53, 59), 'tensorflow.stack', 'tf.stack', ({(53, 34, 53, 58): '[xmin, ymin, xmax, ymax]'}, {}), '([xmin, ymin, xmax, ymax])', True, 'import tensorflow as tf\n'), ((69, 9, 69, 21), 'tensorflow.Session', 'tf.Session', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((73, 8, 73, 62), 'tensorflow.train.write_graph', 'tf.train.write_graph', ({(73, 29, 73, 43): 'sess.graph_def', (73, 45, 73, 52): 'OUT_DIR', (73, 54, 73, 61): 'PB_NAME'}, {}), '(sess.graph_def, OUT_DIR, PB_NAME)', True, 'import tensorflow as tf\n'), ((54, 22, 54, 61), 'tensorflow.reshape', 'tf.reshape', ({(54, 33, 54, 51): 'detection_category', (54, 53, 54, 60): '[-1, 1]'}, {}), '(detection_category, [-1, 1])', True, 'import tensorflow as tf\n'), ((55, 21, 55, 58), 'tensorflow.reshape', 'tf.reshape', ({(55, 32, 55, 48): 'detection_scores', (55, 50, 55, 57): '[-1, 1]'}, {}), '(detection_scores, [-1, 1])', True, 'import tensorflow as tf\n'), ((74, 46, 74, 76), 'os.path.join', 'os.path.join', ({(74, 59, 74, 66): 'OUT_DIR', (74, 68, 74, 75): 'PB_NAME'}, {}), '(OUT_DIR, PB_NAME)', False, 'import os, sys\n')] |
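For completeness, a hedged sketch of loading the frozen graph produced by `export_frozenPB()` for inference, assuming the same TensorFlow 1.x API the file above relies on. The output node name `DetResults`, the placeholder name `input_img`, and the frozen-PB path all come from the code above; feeding an actual image is left to the caller.

```python
import tensorflow as tf

# Load the frozen detection graph written by export_frozenPB().
graph_def = tf.GraphDef()
with tf.gfile.GFile('../../output/Pbs/FasterRCNN_Res101_Pascal_Frozen.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    img_plac = graph.get_tensor_by_name('input_img:0')
    dets = graph.get_tensor_by_name('DetResults:0')
    # `image` should be an RGB uint8 array of shape [H, W, 3]:
    # detections = sess.run(dets, feed_dict={img_plac: image})
```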
smilefreak/NaDNAP | ngadnap/command_templates/adapter_removal.py | 18354778dd896bc0ab3456ca7dbb9d194c1ebf4d | """
Adapter Removal templates
"""
# AdapterRemoval
#
# {0}: executable
# {1}: fastq1 abs
# {2}: fastq2 abs
# {3}: fastq1
# {4}: fastq2
# {5}: minimum length
# {6}: mismatch_rate
# {7}: min base uality
# {8}: min merge_length
__ADAPTER_REMOVAL__="""
{0} --collapse --file1 {1} --file2 {2} --outputstats {3}.stats --trimns --outputcollapsed {3}.collapsed --minlength {5} --output1 {3}.p1 --output2 {4}.p2 --mm {6} --minquality {7} --minalignmentlength {8} --trimqualities
"""
import os
from ngadnap.dependency_graph.graph import CommandNode
def adapter_removal(config, args, fq1, fq2):
    fq1o = os.path.abspath(fq1)
    fq2o = os.path.abspath(fq2)
    cmd = __ADAPTER_REMOVAL__.format(config['adapter_removal']['executable'], fq1o, fq2o, fq1, fq2, args.adapt_min_length, args.adapt_mismatch_rate, args.adapt_min_qual, args.adapt_alignment_length)
    job_id = fq1 + ".adapter_removal"
    return CommandNode(cmd, job_id, None, args.temp_directory)
| [((26, 11, 26, 31), 'os.path.abspath', 'os.path.abspath', ({(26, 27, 26, 30): 'fq1'}, {}), '(fq1)', False, 'import os\n'), ((27, 11, 27, 31), 'os.path.abspath', 'os.path.abspath', ({(27, 27, 27, 30): 'fq2'}, {}), '(fq2)', False, 'import os\n'), ((30, 11, 30, 62), 'ngadnap.dependency_graph.graph.CommandNode', 'CommandNode', ({(30, 23, 30, 26): 'cmd', (30, 28, 30, 34): 'job_id', (30, 36, 30, 40): 'None', (30, 42, 30, 61): 'args.temp_directory'}, {}), '(cmd, job_id, None, args.temp_directory)', False, 'from ngadnap.dependency_graph.graph import CommandNode\n')] |
AllenJSebastian/tripleo-common | undercloud_heat_plugins/immutable_resources.py | d510a30266e002e90c358e69cb720bfdfa736134 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from heat.engine.resources.openstack.neutron import net
from heat.engine.resources.openstack.neutron import port
from heat.engine.resources.openstack.neutron import subnet
def _copy_schema_immutable(schema):
new_schema = copy.deepcopy(schema)
if not schema.update_allowed:
new_schema.immutable = True
return new_schema
class ImmutableNet(net.Net):
'''Ensure an existing net doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in net.Net.properties_schema.items()
}
class ImmutablePort(port.Port):
'''Ensure an existing port doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in port.Port.properties_schema.items()
}
class ImmutableSubnet(subnet.Subnet):
'''Ensure an existing subnet doesn't change.'''
properties_schema = {
k: _copy_schema_immutable(v)
for k, v in subnet.Subnet.properties_schema.items()
}
def resource_mapping():
return {
'OS::Neutron::Net': ImmutableNet,
'OS::Neutron::Port': ImmutablePort,
'OS::Neutron::Subnet': ImmutableSubnet,
}
| [((23, 17, 23, 38), 'copy.deepcopy', 'copy.deepcopy', ({(23, 31, 23, 37): 'schema'}, {}), '(schema)', False, 'import copy\n'), ((34, 20, 34, 53), 'heat.engine.resources.openstack.neutron.net.Net.properties_schema.items', 'net.Net.properties_schema.items', ({}, {}), '()', False, 'from heat.engine.resources.openstack.neutron import net\n'), ((43, 20, 43, 55), 'heat.engine.resources.openstack.neutron.port.Port.properties_schema.items', 'port.Port.properties_schema.items', ({}, {}), '()', False, 'from heat.engine.resources.openstack.neutron import port\n'), ((52, 20, 52, 59), 'heat.engine.resources.openstack.neutron.subnet.Subnet.properties_schema.items', 'subnet.Subnet.properties_schema.items', ({}, {}), '()', False, 'from heat.engine.resources.openstack.neutron import subnet\n')] |
jmcph4/lm5 | lm5/input.py | cd6f480ad70a3769090eab6ac3f3d47378a965de | from copy import deepcopy  # needed for the defensive copy in __init__

class Input(object):
    def __init__(self, type, data):
        self.__type = type
        self.__data = deepcopy(data)  # keep an independent copy of the payload

    def __repr__(self):
        return repr(self.__data)

    def __str__(self):
        return str(self.__type) + str(self.__data)
| [] |
DataKnower/dk-portia | slybot/setup.py | 24579c0160167af2442117975bf7d6a714b4d7d5 | from os.path import join, abspath, dirname, exists
from slybot import __version__
from setuptools import setup, find_packages
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command.sdist import sdist
def build_js():
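    # Combine the splash_utils JS helpers into slybot/splash-script-combined.js,
    # which is shipped via package_data and rebuilt by the command classes below.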
root = abspath(dirname(__file__))
base_path = abspath(join(root, '..', 'splash_utils'))
if not exists(base_path):
base_path = abspath(join(root, '..', 'slyd', 'splash_utils'))
files = ('waitAsync.js', 'perform_actions.js')
fdata = []
for fname in files:
with open(join(base_path, fname)) as f:
fdata.append(f.read())
js_file = abspath(join(root, 'slybot', 'splash-script-combined.js'))
with open(js_file, 'w') as f:
f.write(';(function(){\n%s\n})();' % '\n'.join(fdata))
class bdist_egg_command(bdist_egg):
def run(self):
build_js()
bdist_egg.run(self)
class sdist_command(sdist):
def run(self):
build_js()
sdist.run(self)
install_requires = ['Scrapy', 'scrapely', 'loginform', 'lxml', 'jsonschema',
'dateparser', 'scrapyjs', 'page_finder', 'six']
extras = {
'tests': ['nose', 'nose-timer'],
'clustering': ['page_clustering']
}
setup(name='slybot',
version=__version__,
license='BSD',
description='Slybot crawler',
author='Scrapy project',
author_email='[email protected]',
url='http://github.com/scrapinghub/portia',
packages=find_packages(exclude=('tests', 'tests.*')),
platforms=['Any'],
scripts=['bin/slybot', 'bin/portiacrawl'],
install_requires=install_requires,
extras_require=extras,
package_data={'': ['slybot/splash-script-combined.js']},
include_package_data=True,
cmdclass={
'bdist_egg': bdist_egg_command,
'sdist': sdist_command
},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
])
| [((9, 19, 9, 36), 'os.path.dirname', 'dirname', ({(9, 27, 9, 35): '__file__'}, {}), '(__file__)', False, 'from os.path import join, abspath, dirname, exists\n'), ((10, 24, 10, 56), 'os.path.join', 'join', ({(10, 29, 10, 33): 'root', (10, 35, 10, 39): '""".."""', (10, 41, 10, 55): '"""splash_utils"""'}, {}), "(root, '..', 'splash_utils')", False, 'from os.path import join, abspath, dirname, exists\n'), ((11, 11, 11, 28), 'os.path.exists', 'exists', ({(11, 18, 11, 27): 'base_path'}, {}), '(base_path)', False, 'from os.path import join, abspath, dirname, exists\n'), ((19, 22, 19, 71), 'os.path.join', 'join', ({(19, 27, 19, 31): 'root', (19, 33, 19, 41): '"""slybot"""', (19, 43, 19, 70): '"""splash-script-combined.js"""'}, {}), "(root, 'slybot', 'splash-script-combined.js')", False, 'from os.path import join, abspath, dirname, exists\n'), ((27, 8, 27, 27), 'setuptools.command.bdist_egg.bdist_egg.run', 'bdist_egg.run', ({(27, 22, 27, 26): 'self'}, {}), '(self)', False, 'from setuptools.command.bdist_egg import bdist_egg\n'), ((33, 8, 33, 23), 'setuptools.command.sdist.sdist.run', 'sdist.run', ({(33, 18, 33, 22): 'self'}, {}), '(self)', False, 'from setuptools.command.sdist import sdist\n'), ((51, 15, 51, 58), 'setuptools.find_packages', 'find_packages', (), '', False, 'from setuptools import setup, find_packages\n'), ((12, 28, 12, 68), 'os.path.join', 'join', ({(12, 33, 12, 37): 'root', (12, 39, 12, 43): '""".."""', (12, 45, 12, 51): '"""slyd"""', (12, 53, 12, 67): '"""splash_utils"""'}, {}), "(root, '..', 'slyd', 'splash_utils')", False, 'from os.path import join, abspath, dirname, exists\n'), ((17, 18, 17, 40), 'os.path.join', 'join', ({(17, 23, 17, 32): 'base_path', (17, 34, 17, 39): 'fname'}, {}), '(base_path, fname)', False, 'from os.path import join, abspath, dirname, exists\n')] |
huhuhang/yolov3 | yolov3.py | 6c254b3f453c394046381e1c00cb0908b8f97b3a | import torch
import torch.nn as nn
from .yolo_layer import *
from .yolov3_base import *
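# YOLOv3 head: a Darknet backbone plus three YoloLayer detectors at strides 32/16/8,
# each fed by progressively upsampled and concatenated backbone feature maps.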
class Yolov3(Yolov3Base):
def __init__(self, num_classes=80):
super().__init__()
self.backbone = Darknet([1,2,8,8,4])
anchors_per_region = 3
self.yolo_0_pre = Yolov3UpsamplePrep([512, 1024], 1024, anchors_per_region*(5+num_classes))
self.yolo_0 = YoloLayer(anchors=[(116., 90.), (156., 198.), (373., 326.)], stride=32, num_classes=num_classes)
self.yolo_1_c = ConvBN(512, 256, 1)
self.yolo_1_prep = Yolov3UpsamplePrep([256, 512], 512+256, anchors_per_region*(5+num_classes))
self.yolo_1 = YoloLayer(anchors=[(30., 61.), (62., 45.), (59., 119.)], stride=16, num_classes=num_classes)
self.yolo_2_c = ConvBN(256, 128, 1)
self.yolo_2_prep = Yolov3UpsamplePrep([128, 256], 256+128, anchors_per_region*(5+num_classes))
self.yolo_2 = YoloLayer(anchors=[(10., 13.), (16., 30.), (33., 23.)], stride=8, num_classes=num_classes)
def get_loss_layers(self):
return [self.yolo_0, self.yolo_1, self.yolo_2]
def forward_yolo(self, xb):
x, y0 = self.yolo_0_pre(xb[-1])
x = self.yolo_1_c(x)
x = nn.Upsample(scale_factor=2, mode='nearest')(x)
x = torch.cat([x, xb[-2]], 1)
x, y1 = self.yolo_1_prep(x)
x = self.yolo_2_c(x)
x = nn.Upsample(scale_factor=2, mode='nearest')(x)
x = torch.cat([x, xb[-3]], 1)
x, y2 = self.yolo_2_prep(x)
return [y0, y1, y2]
###################################################################
## Backbone and helper modules
class DarknetBlock(nn.Module):
def __init__(self, ch_in):
super().__init__()
ch_hid = ch_in//2
self.conv1 = ConvBN(ch_in, ch_hid, kernel_size=1, stride=1, padding=0)
self.conv2 = ConvBN(ch_hid, ch_in, kernel_size=3, stride=1, padding=1)
def forward(self, x): return self.conv2(self.conv1(x)) + x
class Darknet(nn.Module):
def __init__(self, num_blocks, start_nf=32):
super().__init__()
nf = start_nf
self.base = ConvBN(3, nf, kernel_size=3, stride=1) #, padding=1)
self.layers = []
for i, nb in enumerate(num_blocks):
# dn_layer = make_group_layer(nf, nb, stride=(1 if i==-1 else 2))
dn_layer = self.make_group_layer(nf, nb, stride=2)
self.add_module(f"darknet_{i}", dn_layer)
self.layers.append(dn_layer)
nf *= 2
def make_group_layer(self, ch_in, num_blocks, stride=2):
layers = [ConvBN(ch_in, ch_in*2, stride=stride)]
for i in range(num_blocks): layers.append(DarknetBlock(ch_in*2))
return nn.Sequential(*layers)
def forward(self, x):
y = [self.base(x)]
for l in self.layers:
y.append(l(y[-1]))
return y
class Yolov3UpsamplePrep(nn.Module):
def __init__(self, filters_list, in_filters, out_filters):
super().__init__()
self.branch = nn.ModuleList([
ConvBN(in_filters, filters_list[0], 1),
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
ConvBN(filters_list[1], filters_list[0], kernel_size=1),
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
ConvBN(filters_list[1], filters_list[0], kernel_size=1),])
self.for_yolo = nn.ModuleList([
ConvBN(filters_list[0], filters_list[1], kernel_size=3),
nn.Conv2d(filters_list[1], out_filters, kernel_size=1, stride=1,
padding=0, bias=True)])
def forward(self, x):
for m in self.branch: x = m(x)
branch_out = x
for m in self.for_yolo: x = m(x)
return branch_out, x
| [((32, 12, 32, 37), 'torch.cat', 'torch.cat', ({(32, 22, 32, 33): '[x, xb[-2]]', (32, 35, 32, 36): '1'}, {}), '([x, xb[-2]], 1)', False, 'import torch\n'), ((37, 12, 37, 37), 'torch.cat', 'torch.cat', ({(37, 22, 37, 33): '[x, xb[-3]]', (37, 35, 37, 36): '1'}, {}), '([x, xb[-3]], 1)', False, 'import torch\n'), ((72, 15, 72, 37), 'torch.nn.Sequential', 'nn.Sequential', ({(72, 29, 72, 36): '*layers'}, {}), '(*layers)', True, 'import torch.nn as nn\n'), ((31, 12, 31, 55), 'torch.nn.Upsample', 'nn.Upsample', (), '', True, 'import torch.nn as nn\n'), ((36, 12, 36, 55), 'torch.nn.Upsample', 'nn.Upsample', (), '', True, 'import torch.nn as nn\n'), ((92, 24, 93, 56), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n')] |
CyrilLeMat/modelkit | tests/assets/test_driver_errors.py | 2150ffe78ebb00e3302dac36ccb09e66becd5130 | import os
import pytest
from modelkit.assets import errors
from tests.conftest import skip_unless
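# Shared check: asking a driver to download a missing object must raise
# ObjectDoesNotExistError and must not leave a partial file on disk.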
def _perform_driver_error_object_not_found(driver):
with pytest.raises(errors.ObjectDoesNotExistError):
driver.download_object("someasset", "somedestination")
assert not os.path.isfile("somedestination")
def test_local_driver(local_assetsmanager):
local_driver = local_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(local_driver)
@skip_unless("ENABLE_GCS_TEST", "True")
def test_gcs_driver(gcs_assetsmanager):
gcs_driver = gcs_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(gcs_driver)
@skip_unless("ENABLE_S3_TEST", "True")
def test_s3_driver(s3_assetsmanager):
s3_driver = s3_assetsmanager.remote_assets_store.driver
_perform_driver_error_object_not_found(s3_driver)
| [((20, 1, 20, 39), 'tests.conftest.skip_unless', 'skip_unless', ({(20, 13, 20, 30): '"""ENABLE_GCS_TEST"""', (20, 32, 20, 38): '"""True"""'}, {}), "('ENABLE_GCS_TEST', 'True')", False, 'from tests.conftest import skip_unless\n'), ((26, 1, 26, 38), 'tests.conftest.skip_unless', 'skip_unless', ({(26, 13, 26, 29): '"""ENABLE_S3_TEST"""', (26, 31, 26, 37): '"""True"""'}, {}), "('ENABLE_S3_TEST', 'True')", False, 'from tests.conftest import skip_unless\n'), ((10, 9, 10, 54), 'pytest.raises', 'pytest.raises', ({(10, 23, 10, 53): 'errors.ObjectDoesNotExistError'}, {}), '(errors.ObjectDoesNotExistError)', False, 'import pytest\n'), ((12, 15, 12, 48), 'os.path.isfile', 'os.path.isfile', ({(12, 30, 12, 47): '"""somedestination"""'}, {}), "('somedestination')", False, 'import os\n')] |
Jarquevious/makewiki | wiki/tests.py | a945da5ab7704042ef9d740987e23da19ec87267 | from django.test import TestCase
from django.contrib.auth.models import User
from wiki.models import Page
# Create your tests here.
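# NOTE: the helpers below are written as TestCase-style methods (they take `self`
# and use self.client / self.assert*), so they are meant to live on a django.test.TestCase.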
def test_detail_page(self):
""" Test to see if slug generated when saving a Page."""
# Create a user and save to the database
user = User.objects.create()
user.save()
# Create a page and save to the database
page = Page(title="My Detail Test Page", content="details_test", author=user)
page.save()
# Slug is generated matches with what we expect
slug = page.slug
response = self.client.get(f'/{slug}/')
self.assertEqual(response.status_code, 200)
info = self.client.get('/')
self.assertContains(info, 'makewiki', html=True)
def test_edit_page(self):
"""Test edit page."""
# Test data that will be displayed on the screen
user = User.objects.create()
user.save()
page = Page.objects.create(title="My Test Page", content="edit_test", author=user)
page.save()
# Make a GET request to the MakeWiki homepage that will get a response back
post_data = {
'title': 'Who',
'content': 'Are you?',
'author': user.id,
}
response = self.client.post('/form/', data=post_data)
# Check if response is 200
self.assertEqual(response.status_code, 200)
# Check the number of pages passed to the template matches the number of pages in the database
end = self.client.get('/')
result = end.context['pages']
self.assertQuerysetEqual(result, ['<Page: My Test Page>', '<Page: Test>'], ordered=False)
def test_page_creation(self):
# Create user object and save it
user = User.objects.create()
user.save()
# Create a page
page = Page.objects.create(title="The Test Page", content="edit_test", author=user)
page.save()
post_data = {
'title': 'COVID19',
'content': 'Mass Testing is Underway',
'author': user.id
}
response = self.client.post('/form/', data = post_data)
self.assertEqual(response.status_code, 302)
page_object = Page.objects.get(title='COVID19')
self.assertEqual(page_object.content, 'Mass Testing is Underway') | [((11, 11, 11, 32), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ({}, {}), '()', False, 'from django.contrib.auth.models import User\n'), ((15, 11, 15, 81), 'wiki.models.Page', 'Page', (), '', False, 'from wiki.models import Page\n'), ((32, 11, 32, 32), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ({}, {}), '()', False, 'from django.contrib.auth.models import User\n'), ((35, 11, 35, 86), 'wiki.models.Page.objects.create', 'Page.objects.create', (), '', False, 'from wiki.models import Page\n'), ((59, 11, 59, 32), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ({}, {}), '()', False, 'from django.contrib.auth.models import User\n'), ((63, 11, 63, 87), 'wiki.models.Page.objects.create', 'Page.objects.create', (), '', False, 'from wiki.models import Page\n'), ((76, 18, 76, 51), 'wiki.models.Page.objects.get', 'Page.objects.get', (), '', False, 'from wiki.models import Page\n')] |
AJB0211/BanditSim | BanditSim/__init__.py | 5426486b40c35492049b09f9b57eb18ad5d6ce63 | from .multiarmedbandit import MultiArmedBandit
from .eps_greedy_constant_stepsize import EpsilonGreedyConstantStepsize
from .greedy_constant_stepsize import GreedyConstantStepsize
from .epsilon_greedy_average_step import EpsilonGreedyAverageStep
from .greedy_average_step import GreedyAverageStep
from .greedy_bayes_update import GreedyBayesianUpdate
from .eps_greedy_bayes_update import EpsilonGreedyBayesianUpdate
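# Package namespace: the multi-armed bandit environment plus greedy / epsilon-greedy
# agents in constant-stepsize, sample-average and Bayesian-update variants.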
| [] |
txf626/django | tests/queries/test_query.py | 95bda03f2da15172cf342f13ba8a77c007b63fbb | from datetime import datetime
from django.core.exceptions import FieldError
from django.db.models import CharField, F, Q
from django.db.models.expressions import SimpleCol
from django.db.models.fields.related_lookups import RelatedIsNull
from django.db.models.functions import Lower
from django.db.models.lookups import Exact, GreaterThan, IsNull, LessThan
from django.db.models.sql.query import Query
from django.db.models.sql.where import OR
from django.test import TestCase
from django.test.utils import register_lookup
from .models import Author, Item, ObjectC, Ranking
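# These tests call Query.build_where() directly and inspect the resulting lookup
# tree: the lookup class, lhs target field and rhs value of each child node.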
class TestQuery(TestCase):
def test_simple_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_complex_query(self):
query = Query(Author)
where = query.build_where(Q(num__gt=2) | Q(num__lt=0))
self.assertEqual(where.connector, OR)
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertEqual(lookup.rhs, 2)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
lookup = where.children[1]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.rhs, 0)
self.assertEqual(lookup.lhs.target, Author._meta.get_field('num'))
def test_multiple_fields(self):
query = Query(Item)
where = query.build_where(Q(modified__gt=F('created')))
lookup = where.children[0]
self.assertIsInstance(lookup, GreaterThan)
self.assertIsInstance(lookup.rhs, SimpleCol)
self.assertIsInstance(lookup.lhs, SimpleCol)
self.assertEqual(lookup.rhs.target, Item._meta.get_field('created'))
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_transform(self):
query = Query(Author)
with register_lookup(CharField, Lower):
where = query.build_where(~Q(name__lower='foo'))
lookup = where.children[0]
self.assertIsInstance(lookup, Exact)
self.assertIsInstance(lookup.lhs, Lower)
self.assertIsInstance(lookup.lhs.lhs, SimpleCol)
self.assertEqual(lookup.lhs.lhs.target, Author._meta.get_field('name'))
def test_negated_nullable(self):
query = Query(Item)
where = query.build_where(~Q(modified__lt=datetime(2017, 1, 1)))
self.assertTrue(where.negated)
lookup = where.children[0]
self.assertIsInstance(lookup, LessThan)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
lookup = where.children[1]
self.assertIsInstance(lookup, IsNull)
self.assertEqual(lookup.lhs.target, Item._meta.get_field('modified'))
def test_foreign_key(self):
query = Query(Item)
msg = 'Joined field references are not permitted in this query'
with self.assertRaisesMessage(FieldError, msg):
query.build_where(Q(creator__num__gt=2))
def test_foreign_key_f(self):
query = Query(Ranking)
with self.assertRaises(FieldError):
query.build_where(Q(rank__gt=F('author__num')))
def test_foreign_key_exclusive(self):
query = Query(ObjectC)
where = query.build_where(Q(objecta=None) | Q(objectb=None))
a_isnull = where.children[0]
self.assertIsInstance(a_isnull, RelatedIsNull)
self.assertIsInstance(a_isnull.lhs, SimpleCol)
self.assertEqual(a_isnull.lhs.target, ObjectC._meta.get_field('objecta'))
b_isnull = where.children[1]
self.assertIsInstance(b_isnull, RelatedIsNull)
self.assertIsInstance(b_isnull.lhs, SimpleCol)
self.assertEqual(b_isnull.lhs.target, ObjectC._meta.get_field('objectb'))
| [((19, 16, 19, 29), 'django.db.models.sql.query.Query', 'Query', ({(19, 22, 19, 28): 'Author'}, {}), '(Author)', False, 'from django.db.models.sql.query import Query\n'), ((27, 16, 27, 29), 'django.db.models.sql.query.Query', 'Query', ({(27, 22, 27, 28): 'Author'}, {}), '(Author)', False, 'from django.db.models.sql.query import Query\n'), ((42, 16, 42, 27), 'django.db.models.sql.query.Query', 'Query', ({(42, 22, 42, 26): 'Item'}, {}), '(Item)', False, 'from django.db.models.sql.query import Query\n'), ((52, 16, 52, 29), 'django.db.models.sql.query.Query', 'Query', ({(52, 22, 52, 28): 'Author'}, {}), '(Author)', False, 'from django.db.models.sql.query import Query\n'), ((62, 16, 62, 27), 'django.db.models.sql.query.Query', 'Query', ({(62, 22, 62, 26): 'Item'}, {}), '(Item)', False, 'from django.db.models.sql.query import Query\n'), ((73, 16, 73, 27), 'django.db.models.sql.query.Query', 'Query', ({(73, 22, 73, 26): 'Item'}, {}), '(Item)', False, 'from django.db.models.sql.query import Query\n'), ((79, 16, 79, 30), 'django.db.models.sql.query.Query', 'Query', ({(79, 22, 79, 29): 'Ranking'}, {}), '(Ranking)', False, 'from django.db.models.sql.query import Query\n'), ((84, 16, 84, 30), 'django.db.models.sql.query.Query', 'Query', ({(84, 22, 84, 29): 'ObjectC'}, {}), '(ObjectC)', False, 'from django.db.models.sql.query import Query\n'), ((20, 34, 20, 46), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import CharField, F, Q\n'), ((53, 13, 53, 46), 'django.test.utils.register_lookup', 'register_lookup', ({(53, 29, 53, 38): 'CharField', (53, 40, 53, 45): 'Lower'}, {}), '(CharField, Lower)', False, 'from django.test.utils import register_lookup\n'), ((28, 34, 28, 46), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import CharField, F, Q\n'), ((28, 49, 28, 61), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import CharField, F, Q\n'), ((76, 30, 76, 51), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import CharField, F, Q\n'), ((85, 34, 85, 49), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import CharField, F, Q\n'), ((85, 52, 85, 67), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import CharField, F, Q\n'), ((43, 49, 43, 61), 'django.db.models.F', 'F', ({(43, 51, 43, 60): '"""created"""'}, {}), "('created')", False, 'from django.db.models import CharField, F, Q\n'), ((54, 39, 54, 59), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import CharField, F, Q\n'), ((63, 50, 63, 70), 'datetime.datetime', 'datetime', ({(63, 59, 63, 63): '2017', (63, 65, 63, 66): '1', (63, 68, 63, 69): '1'}, {}), '(2017, 1, 1)', False, 'from datetime import datetime\n'), ((81, 41, 81, 57), 'django.db.models.F', 'F', ({(81, 43, 81, 56): '"""author__num"""'}, {}), "('author__num')", False, 'from django.db.models import CharField, F, Q\n')] |
ewanlee/mackrl | src/matrix_game/matrix_game.py | 6dd505aa09830f16c35a022f67e255db935c807e | # This notebook implements a proof-of-principle for
# Multi-Agent Common Knowledge Reinforcement Learning (MACKRL)
# The entire notebook can be executed online, no need to download anything
# http://pytorch.org/
from itertools import chain
import torch
import torch.nn.functional as F
from torch.multiprocessing import Pool, set_start_method, freeze_support
try:
set_start_method('spawn')
except RuntimeError:
pass
from torch.nn import init
from torch.optim import Adam, SGD
import numpy as np
import matplotlib.pyplot as plt
use_cuda = False
payoff_values = []
payoff_values.append(torch.tensor([ # payoff values
[5, 0, 0, 2, 0],
[0, 1, 2, 4, 2],
[0, 0, 0, 2, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
], dtype=torch.float32) * 0.2)
payoff_values.append(
torch.tensor([ # payoff values
[0, 0, 1, 0, 5],
[0, 0, 2, 0, 0],
[1, 2, 4, 2, 1],
[0, 0, 2, 0, 0],
[0, 0, 1, 0, 0],
], dtype=torch.float32) * 0.2)
n_agents = 2
n_actions = len(payoff_values[0])
n_states_dec = 5
n_states_joint = 3
n_mix_hidden = 3
p_observation = 0.5
p_ck_noise = [0.0]
# Number of gradient steps
t_max = 202
# We'll be using a high learning rate, since we have exact gradients
lr = 0.05 # DEBUG: 0.05 if exact gradients!
optim = 'adam'
# You can reduce this number if you are short on time. (Eg. n_trials = 20)
#n_trials = 100 # 30
n_trials = 20 #15 #100
std_val = 1.0
# Settings supported by the training loop: MACKRL, JAL (joint-action learner, which
# always uses common knowledge) and IAC (independent actor-critic, which always uses
# decentralised action selection). Only the labels listed below are actually run.
labels = ["IAC", "JAL"]
p_vec = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
final_res = []
# # Pair-Controller with 3 input state (no CK, CK & Matrix ID = 0, CK & Matrix ID = 1), n_actions^2 actions for
# # joint action + 1 action for delegation to the independent agents.
# theta_joint = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True), std=0.1)
# Produce marginalised policy: pi_pc[0] * pi^a * pi^b + p(u^ab)
def p_joint_all(pi_pc, pi_dec):
p_joint = pi_pc[1:].view(n_actions, n_actions).clone()
pi_a_pi_b = torch.ger(pi_dec[0], pi_dec[1])
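    # torch.ger is the outer product: pi_a_pi_b[u_a, u_b] = pi^a(u_a) * pi^b(u_b).
    # pi_pc[0] is the pair controller's "delegate" weight on this decentralised part.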
p_joint = pi_pc[0] * pi_a_pi_b + p_joint
return p_joint
def p_joint_all_noise_alt(pi_pcs, pi_dec, p_ck_noise, ck_state):
p_none = (1-p_ck_noise) ** 2 # both unnoised
p_both = (p_ck_noise) ** 2 # both noised
p_one = (1-p_ck_noise) * p_ck_noise # exactly one noised
p_marg_ag0_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag0_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=0)
p_marg_ag1_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_marg_ag1_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone().sum(dim=1)
p_joint_ck0 = pi_pcs[0][1:].view(n_actions, n_actions).clone()
p_joint_ck1 = pi_pcs[1][1:].view(n_actions, n_actions).clone()
p_joint_ck2 = pi_pcs[2][1:].view(n_actions, n_actions).clone()
p_d_ck0 = pi_pcs[0][0]
p_d_ck1 = pi_pcs[1][0]
p_d_ck2 = pi_pcs[2][0]
def make_joint(p1, p2, mode="interval"):
"""
1. Pick uniform random variable between [0,1]
2. Do multinomial sampling through contiguous, ordered bucketing for both p1, p2
"""
p1 = p1.clone().view(-1)
p2 = p2.clone().view(-1)
p_final = p1.clone().zero_()
if mode == "interval":
for i in range(p1.shape[0]):
# calculate overlap between the probability distributions
low1 = torch.sum(p1[:i])
high1 = low1 + p1[i]
low2 = torch.sum(p2[:i])
high2 = low2 + p2[i]
if low1 >= low2 and high2 > low1:
p_final[i] = torch.min(high1, high2) - low1
pass
elif low2 >= low1 and high1 > low2:
p_final[i] = torch.min(high1, high2) - low2
else:
p_final[i] = 0
return p_final.clone().view(n_actions, n_actions)
if ck_state == 0:
p_joint = p_joint_ck0 + p_d_ck0 * torch.ger(pi_dec[0], pi_dec[1])
return p_joint # always delegate
elif ck_state == 1:
p_joint = p_none * p_joint_ck1 + \
p_both * p_joint_ck2 + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
(p_one * p_d_ck1 * p_d_ck2
+ p_one * p_d_ck2 * p_d_ck1
+ p_both * p_d_ck2
+ p_none * p_d_ck1) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1])
return p_joint
elif ck_state == 2:
p_joint = p_none * p_joint_ck2 + \
p_both * p_joint_ck1 + \
p_one * make_joint(p_joint_ck2, p_joint_ck1) + \
p_one * make_joint(p_joint_ck1, p_joint_ck2) + \
(p_one * p_d_ck2 * p_d_ck1
+ p_one * p_d_ck1 * p_d_ck2
+ p_both * p_d_ck1
+ p_none * p_d_ck2) * torch.ger(pi_dec[0], pi_dec[1]) \
+ p_one * p_d_ck2 * (1 - p_d_ck1) * torch.ger(pi_dec[0], p_marg_ag1_ck1) \
+ p_one * (1 - p_d_ck1) * p_d_ck2 * torch.ger(p_marg_ag0_ck1, pi_dec[1]) \
+ p_one * p_d_ck1 * (1 - p_d_ck2) * torch.ger(pi_dec[0], p_marg_ag1_ck2) \
+ p_one * (1 - p_d_ck2) * p_d_ck1 * torch.ger(p_marg_ag0_ck2, pi_dec[1])
return p_joint
pass
def get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise=0):
if test:
beta = 100
else:
beta = 1
actions = []
pi_dec = []
# common_knowledge decides whether ck_state is informative
if common_knowledge == 0:
ck_state = 0
else:
ck_state = int(observations[0] + 1)
if p_ck_noise == 0:
pol_vals = theta_joint[ck_state, :].clone()
# logits get masked out for independent learner and joint-action-learner
# independent learner has a pair controller that always delegates
if run == 'JAL':
pol_vals[0] = -10 ** 10
elif run == 'IAC':
pol_vals[1:] = -10 ** 10
# apply temperature to set testing
pi_pc = F.softmax(pol_vals * beta, -1)
# calcuate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pc, pi_dec
else:
pol_vals = theta_joint.clone()
pi_pcs = []
for i in range(n_states_joint):
if run == 'JAL':
pol_vals[i][0] = -10 ** 10
elif run == 'IAC':
pol_vals[i][1:] = -10 ** 10
# apply temperature to set testing
pi_pcs.append(F.softmax(pol_vals[i] * beta, -1))
# calcuate decentralised policies
for i in range(n_agents):
dec_state = int(observations[i])
pi = F.softmax(thetas_dec[i][dec_state] * beta, -1)
pi_dec.append(pi)
return pi_pcs, pi_dec, ck_state
def get_state(common_knowledge, obs_0, obs_1, matrix_id):
receives_obs = [obs_0, obs_1]
if common_knowledge == 1:
observations = np.repeat(matrix_id, 2)
else:
observations = np.ones((n_agents)) * 2 #
for ag in range(n_agents):
if receives_obs[ag]:
observations[ag] += matrix_id + 1
return common_knowledge, observations, matrix_id
# Calculate the expected return: sum_{\tau} P(\tau | pi) R(\tau)
def expected_return(p_common, p_observation, thetas, run, test, p_ck_noise=0):
thetas_dec = thetas["dec"]
theta_joint = thetas["joint"]
# Probability of CK
p_common_val = [1 - p_common, p_common]
# Probability of observation given no CK)
p_obs_val = [1 - p_observation, p_observation]
# Matrices are chosen 50 / 50
p_matrix = [0.5, 0.5]
# p_matrix = [1.0, 0.0] # DEBUG!
# Initialise expected return
ret_val = 0
for ck in [0, 1]:
for matrix_id in [0, 1]:
for obs_0 in [0, 1]:
for obs_1 in [0, 1]:
p_state = p_common_val[ck] * p_obs_val[obs_0] * p_obs_val[obs_1] * p_matrix[matrix_id]
common_knowledge, observations, matrix_id = get_state(ck, obs_0, obs_1, matrix_id)
# Get final probabilities for joint actions
if p_ck_noise==0:
pi_pc, pi_dec = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint)
p_joint_val = p_joint_all(pi_pc, pi_dec)
else:
pol_vals, pi_dec, ck_state = get_policies(common_knowledge, observations, run, test, thetas_dec, theta_joint, p_ck_noise)
p_joint_val = p_joint_all_noise_alt(pol_vals, pi_dec, p_ck_noise, ck_state)
# Expected return is just the elementwise product of rewards and action probabilities
expected_ret = (p_joint_val * payoff_values[matrix_id]).sum()
# Add return from given state
ret_val = ret_val + p_state * expected_ret
return ret_val
def _proc(args):
p_common, p_observation, run, p_ck_noise, t_max, n_trials = args
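    # One worker job: run `n_trials` independent trainings for a single value of
    # P(common knowledge) and log train/test expected return at ~100 checkpoints.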
results = []
for nt in range(n_trials):
print("Run: {} P_CK_NOISE: {} P_common: {} #Trial: {}".format(run, p_ck_noise, p_common, nt))
results_log = np.zeros((t_max // (t_max // 100),))
results_log_test = np.zeros((t_max // (t_max // 100),))
thetas = {}
thetas["dec"] = [init.normal_(torch.zeros(n_states_dec, n_actions, requires_grad=True), std=std_val) for i in
range(n_agents)]
thetas["joint"] = init.normal_(torch.zeros(n_states_joint, n_actions ** 2 + 1, requires_grad=True),
std=std_val)
params = chain(*[_v if isinstance(_v, (list, tuple)) else [_v] for _v in thetas.values()])
params = list(params)
if use_cuda:
for param in params:
param = param.to("cuda")
if optim == 'sgd':
optimizer = SGD(params, lr=lr)
else:
optimizer = Adam(params, lr=lr)
for i in range(t_max):
if run in ['MACKRL',
'JAL',
'IAC']:
loss = - expected_return(p_common, p_observation, thetas, run, False, p_ck_noise)
r_s = -loss.data.numpy()
optimizer.zero_grad()
loss.backward()
optimizer.step()
if i % (t_max // 100) == 0:
if run in ['MACKRL',
'JAL',
'IAC']:
r_test = expected_return(p_common, p_observation, thetas, run, True, p_ck_noise)
results_log_test[i // (t_max // 100)] = r_test
results_log[i // (t_max // 100)] = r_s
results.append((results_log_test, results_log))
return results
def main():
use_mp = True
if use_mp:
pool = Pool(processes=2)
# Well be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = pool.map(_proc, [ (pc, p_observation, run, pnoise, t_max, n_trials) for pc in p_vec ])
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
pool.close()
pool.join()
else:
# Well be appending results to these lists
run_results = []
for run in labels:
noise_results = []
for pnoise in p_ck_noise:
print("Run: {} P_CK_NOISE: {}".format(run, pnoise))
results = [_proc((pc, p_observation, run, pnoise, t_max, n_trials)) for pc in p_vec ]
noise_results.append(results)
run_results.append(noise_results)
for p_common_id, p_common in enumerate(p_vec):
all_res = []
all_res_test = []
for run_id, run in enumerate(labels):
for pnoise_id, pnoise in enumerate(p_ck_noise):
try:
results = run_results[run_id][pnoise_id][p_common_id]
except Exception as e:
pass
all_res_test.append(np.stack([r[0] for r in results], axis=1))
all_res.append(np.stack([r[1] for r in results], axis=1))
final_res.append([all_res_test, all_res])
import pickle
import uuid
import os
res_dict = {}
res_dict["final_res"] = final_res
res_dict["labels"] = labels
res_dict["p_ck_noise"] = p_ck_noise
res_dict["p_vec"] = p_vec
if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles")):
os.makedirs(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles"))
pickle.dump(res_dict, open(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"pickles",
"final_res_{}.p".format(uuid.uuid4().hex[:4])), "wb"))
plt.figure(figsize=(5, 5))
color = ['b', 'r','g', 'c', 'm', 'y', 'k','b', 'r','g', 'c', 'm', 'y', 'k']
titles = ['Test', 'Train Performance']
for pl in [0,1]:
ax = plt.subplot(1, 1, 1)
for i in range(len(labels)):
for pck, pcknoise in enumerate(p_ck_noise):
mean_vals = []
min_vals = []
max_vals = []
for j, p in enumerate( p_vec ):
vals = final_res[j][pl]
this_mean = np.mean( vals[i*len(p_ck_noise) + pck], 1)[-1]
std = np.std(vals[i], 1)[-1]/0.5
low = this_mean-std / (n_trials)**0.5
high = this_mean + std / (n_trials)**0.5
mean_vals.append( this_mean )
min_vals.append( low )
max_vals.append( high )
plt.plot(p_vec,
mean_vals,
color[(i*len(p_ck_noise) + pck) % len(color)],
label = "{} p_ck_noise: {}".format(labels[i], pcknoise))
plt.fill_between(p_vec,
min_vals,
max_vals,
facecolor=color[i],
alpha=0.3)
plt.xlabel('P(common knowledge)')
plt.ylabel('Expected Return')
plt.ylim([0.0, 1.01])
plt.xlim([-0.01, 1.01])
ax.set_facecolor((1.0, 1.0, 1.0))
ax.grid(color='k', linestyle='-', linewidth=1)
ax.set_title(titles[pl])
plt.legend()
plt.xticks([0, 0.5, 1])
plt.yticks([0.5, 0.75, 1])
plt.savefig("MACKRL {}.pdf".format(titles[pl]))
plt.show(block=False)
if __name__ == "__main__":
freeze_support()
main() | [((12, 5, 12, 30), 'torch.multiprocessing.set_start_method', 'set_start_method', ({(12, 22, 12, 29): '"""spawn"""'}, {}), "('spawn')", False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((75, 16, 75, 47), 'torch.ger', 'torch.ger', ({(75, 26, 75, 35): 'pi_dec[0]', (75, 37, 75, 46): 'pi_dec[1]'}, {}), '(pi_dec[0], pi_dec[1])', False, 'import torch\n'), ((385, 4, 385, 30), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((429, 4, 429, 20), 'torch.multiprocessing.freeze_support', 'freeze_support', ({}, {}), '()', False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((25, 21, 31, 23), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((33, 4, 39, 27), 'torch.tensor', 'torch.tensor', (), '', False, 'import torch\n'), ((184, 16, 184, 46), 'torch.nn.functional.softmax', 'F.softmax', ({(184, 26, 184, 41): 'pol_vals * beta', (184, 43, 184, 45): '-1'}, {}), '(pol_vals * beta, -1)', True, 'import torch.nn.functional as F\n'), ((218, 23, 218, 46), 'numpy.repeat', 'np.repeat', ({(218, 33, 218, 42): 'matrix_id', (218, 44, 218, 45): '2'}, {}), '(matrix_id, 2)', True, 'import numpy as np\n'), ((272, 22, 272, 58), 'numpy.zeros', 'np.zeros', ({(272, 31, 272, 57): '(t_max // (t_max // 100),)'}, {}), '((t_max // (t_max // 100),))', True, 'import numpy as np\n'), ((273, 27, 273, 63), 'numpy.zeros', 'np.zeros', ({(273, 36, 273, 62): '(t_max // (t_max // 100),)'}, {}), '((t_max // (t_max // 100),))', True, 'import numpy as np\n'), ((318, 15, 318, 32), 'torch.multiprocessing.Pool', 'Pool', (), '', False, 'from torch.multiprocessing import Pool, set_start_method, freeze_support\n'), ((390, 13, 390, 33), 'matplotlib.pyplot.subplot', 'plt.subplot', ({(390, 25, 390, 26): '1', (390, 28, 390, 29): '1', (390, 31, 390, 32): '1'}, {}), '(1, 1, 1)', True, 'import matplotlib.pyplot as plt\n'), ((415, 8, 415, 41), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(415, 19, 415, 40): '"""P(common knowledge)"""'}, {}), "('P(common knowledge)')", True, 'import matplotlib.pyplot as plt\n'), ((416, 8, 416, 37), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(416, 19, 416, 36): '"""Expected Return"""'}, {}), "('Expected Return')", True, 'import matplotlib.pyplot as plt\n'), ((417, 8, 417, 29), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(417, 17, 417, 28): '[0.0, 1.01]'}, {}), '([0.0, 1.01])', True, 'import matplotlib.pyplot as plt\n'), ((418, 8, 418, 31), 'matplotlib.pyplot.xlim', 'plt.xlim', ({(418, 17, 418, 30): '[-0.01, 1.01]'}, {}), '([-0.01, 1.01])', True, 'import matplotlib.pyplot as plt\n'), ((422, 8, 422, 20), 'matplotlib.pyplot.legend', 'plt.legend', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((423, 8, 423, 31), 'matplotlib.pyplot.xticks', 'plt.xticks', ({(423, 19, 423, 30): '[0, 0.5, 1]'}, {}), '([0, 0.5, 1])', True, 'import matplotlib.pyplot as plt\n'), ((424, 8, 424, 34), 'matplotlib.pyplot.yticks', 'plt.yticks', ({(424, 19, 424, 33): '[0.5, 0.75, 1]'}, {}), '([0.5, 0.75, 1])', True, 'import matplotlib.pyplot as plt\n'), ((426, 8, 426, 29), 'matplotlib.pyplot.show', 'plt.show', (), '', True, 'import matplotlib.pyplot as plt\n'), ((189, 17, 189, 63), 'torch.nn.functional.softmax', 'F.softmax', ({(189, 27, 189, 58): 'thetas_dec[i][dec_state] * beta', (189, 60, 189, 62): '-1'}, {}), '(thetas_dec[i][dec_state] * beta, -1)', True, 'import torch.nn.functional as F\n'), ((209, 17, 209, 63), 'torch.nn.functional.softmax', 'F.softmax', ({(209, 27, 209, 58): 'thetas_dec[i][dec_state] * 
beta', (209, 60, 209, 62): '-1'}, {}), '(thetas_dec[i][dec_state] * beta, -1)', True, 'import torch.nn.functional as F\n'), ((220, 23, 220, 42), 'numpy.ones', 'np.ones', ({(220, 32, 220, 40): 'n_agents'}, {}), '(n_agents)', True, 'import numpy as np\n'), ((278, 39, 278, 106), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((289, 24, 289, 42), 'torch.optim.SGD', 'SGD', (), '', False, 'from torch.optim import Adam, SGD\n'), ((291, 24, 291, 43), 'torch.optim.Adam', 'Adam', (), '', False, 'from torch.optim import Adam, SGD\n'), ((109, 23, 109, 40), 'torch.sum', 'torch.sum', ({(109, 33, 109, 39): 'p1[:i]'}, {}), '(p1[:i])', False, 'import torch\n'), ((111, 23, 111, 40), 'torch.sum', 'torch.sum', ({(111, 33, 111, 39): 'p2[:i]'}, {}), '(p2[:i])', False, 'import torch\n'), ((124, 42, 124, 73), 'torch.ger', 'torch.ger', ({(124, 52, 124, 61): 'pi_dec[0]', (124, 63, 124, 72): 'pi_dec[1]'}, {}), '(pi_dec[0], pi_dec[1])', False, 'import torch\n'), ((204, 26, 204, 59), 'torch.nn.functional.softmax', 'F.softmax', ({(204, 36, 204, 54): '(pol_vals[i] * beta)', (204, 56, 204, 58): '(-1)'}, {}), '(pol_vals[i] * beta, -1)', True, 'import torch.nn.functional as F\n'), ((276, 38, 276, 94), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((409, 16, 413, 43), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (), '', True, 'import matplotlib.pyplot as plt\n'), ((138, 54, 138, 90), 'torch.ger', 'torch.ger', ({(138, 64, 138, 78): 'p_marg_ag0_ck1', (138, 80, 138, 89): 'pi_dec[1]'}, {}), '(p_marg_ag0_ck1, pi_dec[1])', False, 'import torch\n'), ((378, 55, 378, 80), 'os.path.abspath', 'os.path.abspath', ({(378, 71, 378, 79): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((380, 49, 380, 74), 'os.path.abspath', 'os.path.abspath', ({(380, 65, 380, 73): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((382, 60, 382, 85), 'os.path.abspath', 'os.path.abspath', ({(382, 76, 382, 84): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((114, 33, 114, 56), 'torch.min', 'torch.min', ({(114, 43, 114, 48): 'high1', (114, 50, 114, 55): 'high2'}, {}), '(high1, high2)', False, 'import torch\n'), ((137, 54, 137, 90), 'torch.ger', 'torch.ger', ({(137, 64, 137, 73): 'pi_dec[0]', (137, 75, 137, 89): 'p_marg_ag1_ck1'}, {}), '(pi_dec[0], p_marg_ag1_ck1)', False, 'import torch\n'), ((152, 54, 152, 90), 'torch.ger', 'torch.ger', ({(152, 64, 152, 78): 'p_marg_ag0_ck2', (152, 80, 152, 89): 'pi_dec[1]'}, {}), '(p_marg_ag0_ck2, pi_dec[1])', False, 'import torch\n'), ((339, 40, 339, 81), 'numpy.stack', 'np.stack', (), '', True, 'import numpy as np\n'), ((340, 35, 340, 76), 'numpy.stack', 'np.stack', (), '', True, 'import numpy as np\n'), ((366, 40, 366, 81), 'numpy.stack', 'np.stack', (), '', True, 'import numpy as np\n'), ((367, 35, 367, 76), 'numpy.stack', 'np.stack', (), '', True, 'import numpy as np\n'), ((117, 33, 117, 56), 'torch.min', 'torch.min', ({(117, 43, 117, 48): 'high1', (117, 50, 117, 55): 'high2'}, {}), '(high1, high2)', False, 'import torch\n'), ((136, 54, 136, 90), 'torch.ger', 'torch.ger', ({(136, 64, 136, 78): 'p_marg_ag0_ck2', (136, 80, 136, 89): 'pi_dec[1]'}, {}), '(p_marg_ag0_ck2, pi_dec[1])', False, 'import torch\n'), ((151, 54, 151, 90), 'torch.ger', 'torch.ger', ({(151, 64, 151, 73): 'pi_dec[0]', (151, 75, 151, 89): 'p_marg_ag1_ck2'}, {}), '(pi_dec[0], p_marg_ag1_ck2)', False, 'import torch\n'), ((384, 69, 384, 81), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((399, 26, 399, 44), 'numpy.std', 'np.std', ({(399, 33, 399, 40): 'vals[i]', (399, 
42, 399, 43): '(1)'}, {}), '(vals[i], 1)', True, 'import numpy as np\n'), ((135, 54, 135, 90), 'torch.ger', 'torch.ger', ({(135, 64, 135, 73): 'pi_dec[0]', (135, 75, 135, 89): 'p_marg_ag1_ck2'}, {}), '(pi_dec[0], p_marg_ag1_ck2)', False, 'import torch\n'), ((150, 54, 150, 90), 'torch.ger', 'torch.ger', ({(150, 64, 150, 78): 'p_marg_ag0_ck1', (150, 80, 150, 89): 'pi_dec[1]'}, {}), '(p_marg_ag0_ck1, pi_dec[1])', False, 'import torch\n'), ((134, 41, 134, 72), 'torch.ger', 'torch.ger', ({(134, 51, 134, 60): 'pi_dec[0]', (134, 62, 134, 71): 'pi_dec[1]'}, {}), '(pi_dec[0], pi_dec[1])', False, 'import torch\n'), ((149, 54, 149, 90), 'torch.ger', 'torch.ger', ({(149, 64, 149, 73): 'pi_dec[0]', (149, 75, 149, 89): 'p_marg_ag1_ck1'}, {}), '(pi_dec[0], p_marg_ag1_ck1)', False, 'import torch\n'), ((148, 41, 148, 72), 'torch.ger', 'torch.ger', ({(148, 51, 148, 60): 'pi_dec[0]', (148, 62, 148, 71): 'pi_dec[1]'}, {}), '(pi_dec[0], pi_dec[1])', False, 'import torch\n')] |
wilsonsuen/av-testing | testcases/school_bus.py | a6967b4cb4e4ad6b10d041ffd3dc62188fccad81 | import sys
import os
import glob
import json
from robot import rebot
from robot.api import TestSuite
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
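# Build the Robot Framework suite programmatically: one test per JSON test-data
# file, each using the Setup/Start/Validate/Teardown keywords from lib/simulation.py.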
if __name__ == "__main__":
main_suite = TestSuite('School Bus Scenario')
main_suite.resource.imports.library('lib/simulation.py')
testcase_paths = glob.glob('data/testdata/04_school_bus/*.json')
testcase_paths.sort()
for testcase_path in testcase_paths[110:113]:
with open(testcase_path) as f:
testdata = json.load(f)
tags = list(testdata['testcase']['context'].values()) +\
list(testdata['testcase']['input'].values())
school_bus_test = main_suite.tests.create(testdata['testcase']['name'], tags=tags)
school_bus_test.setup.config(name='Setup Scenario', args=[testcase_path])
school_bus_test.body.create_keyword('Start Simulation')
school_bus_test.body.create_keyword('Validate Result')
school_bus_test.teardown.config(name='Test Case Teardown')
main_suite.run(output='results/04_school_bus/output.xml')
rebot('results/04_school_bus/output.xml',
log="results/04_school_bus/log.html",
report="results/04_school_bus/report.html")
"""
rebot --tagstatcombine "8:00AMANDSunny:8AM and Sunny(C1)" --tagstatcombine "8:00AMANDCloudy:8AM and Cloudy(C2)" --tagstatcombine "8:00AMANDRainning:8AM and Rainning(C3)" --tagstatcombine "8:00AMANDFoggy:8AM and Foggy(C4)" --tagstatcombine "12:00PMANDSunny:12PM and Sunny(C5)" --tagstatcombine "12:00PMANDCloudy:12PM and Cloudy(C6)" --tagstatcombine "12:00PMANDRainning:12PM and Rainning(C7)" --tagstatcombine "12:00PMANDFoggy:12PM and Foggy(C8)" --tagstatcombine "3:00PMANDSunny:3PM and Sunny(C9)" --tagstatcombine "3:00PMANDCloudy:3PM and Cloudy(C10)" --tagstatcombine "3:00PMANDRainning:3PM and Rainning(C11)" --tagstatcombine "3:00PMANDFoggy:3PM and Foggy(C12)" --tagstatcombine "5:00PMANDSunny:5PM and Sunny(C13)" --tagstatcombine "5:00PMANDCloudy:5PM and Cloudy(C14)" --tagstatcombine "5:00PMANDRainning:5PM and Ranining(C15)" --tagstatcombine "5:00PMANDFoggy:5PM and Foggy(C16)" --tagstatcombine "7:00PMANDSunny:7PM and Sunny(C17)" --tagstatcombine "7:00PMANDCloudy:7PM and Cloudy(C18)" --tagstatcombine "7:00PMANDRainning:7PM and Rainning(C19)" --tagstatcombine "7:00PMANDFoggy:7PM and Foggy(C20)" --tagstatcombine MovingANDBackward_lane:Moving\ and\ Backward\ lane\(I12\) --tagstatcombine MovingANDForward_lane:Moving\ and\ Forward\ lane\(I9\) --tagstatcombine LoadingANDBackward_lane:Loading\ and\ Backward\ lane\(I6\) --tagstatcombine LoadingANDForward_lane:Loading\ and\ Forward\ lane\(I3\) --tagstatcombine StopANDBackward_lane:Stop\ and\ Backward\ lane\(I18\) --tagstatcombine StopANDForward_lane:Stop\ and\ Forward\ lane\(I15\) --tagstatexclude Forward_lane --tagstatexclude Backward_lane --tagstatexclude Moving --tagstatexclude Loading --tagstatexclude Stop --tagstatexclude 8\:00AM --tagstatexclude 12\:00PM --tagstatexclude 3\:00PM --tagstatexclude 5\:00PM --tagstatexclude 7\:00PM --tagstatexclude Sunny --tagstatexclude Foggy --tagstatexclude Rainning --tagstatexclude Cloudy -r combined_report.html -l combined_log.html output.xml
""" | [((11, 17, 11, 49), 'robot.api.TestSuite', 'TestSuite', ({(11, 27, 11, 48): '"""School Bus Scenario"""'}, {}), "('School Bus Scenario')", False, 'from robot.api import TestSuite\n'), ((14, 21, 14, 68), 'glob.glob', 'glob.glob', ({(14, 31, 14, 67): '"""data/testdata/04_school_bus/*.json"""'}, {}), "('data/testdata/04_school_bus/*.json')", False, 'import glob\n'), ((29, 4, 31, 53), 'robot.rebot', 'rebot', (), '', False, 'from robot import rebot\n'), ((7, 29, 7, 54), 'os.path.dirname', 'os.path.dirname', ({(7, 45, 7, 53): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((19, 23, 19, 35), 'json.load', 'json.load', ({(19, 33, 19, 34): 'f'}, {}), '(f)', False, 'import json\n')] |
adrn/astropy-tools | pr_consistency/2.find_pr_branches.py | c26a5e4cdf8735976375dd2b77de797a7723bcd9 | # The purpose of this script is to check all the maintenance branches of the
# given repository, and find which pull requests are included in which
# branches. The output is a JSON file that contains for each pull request the
# list of all branches in which it is included. We look specifically for the
# message "Merge pull request #xxxx " in commit messages, so this is not
# completely foolproof, but seems to work for now.
import os
import sys
import json
import re
import subprocess
import tempfile
from collections import defaultdict
from astropy.utils.console import color_print
from common import get_branches
if sys.argv[1:]:
REPOSITORY_NAME = sys.argv[1]
else:
REPOSITORY_NAME = 'astropy/astropy'
print("The repository this script currently works with is '{}'.\n"
.format(REPOSITORY_NAME))
REPOSITORY = f'git://github.com/{REPOSITORY_NAME}.git'
NAME = os.path.basename(REPOSITORY_NAME)
DIRTOCLONEIN = tempfile.mkdtemp() # set this to a non-temp directory to retain the clone between runs
ORIGIN = 'origin' # set this to None to not fetch anything but rather use the directory as-is.
STARTDIR = os.path.abspath('.')
# The branches we are interested in
BRANCHES = get_branches(REPOSITORY_NAME)
# Read in a list of all the PRs
with open(f'merged_pull_requests_{NAME}.json') as merged:
merged_prs = json.load(merged)
# Set up a dictionary where each key will be a PR and each value will be a list
# of branches in which the PR is present
pr_branches = defaultdict(list)
try:
# Set up repository
color_print(f'Cloning {REPOSITORY}', 'green')
os.chdir(DIRTOCLONEIN)
if os.path.isdir(NAME):
# already exists... assume its the right thing
color_print('"{}" directory already exists - assuming it is an already '
'existing clone'.format(NAME), 'yellow')
os.chdir(NAME)
if ORIGIN:
subprocess.call(f'git fetch {ORIGIN}', shell=True)
else:
subprocess.call(f'git clone {REPOSITORY}', shell=True)
os.chdir(NAME)
# Loop over branches and find all PRs in the branch
for branch in BRANCHES:
# Change branch
color_print(f'Switching to branch {branch}', 'green')
subprocess.call('git reset --hard', shell=True)
subprocess.call('git clean -fxd', shell=True)
subprocess.call(f'git checkout {branch}', shell=True)
if ORIGIN:
subprocess.call(f'git reset --hard {ORIGIN}/{branch}', shell=True)
# Extract log:
log = subprocess.check_output('git log', shell=True).decode('utf-8')
# Check for the presence of the PR in the log
for pr in (re.findall(r'Merge pull request #(\d+) ', log) +
re.findall(r'Backport PR #(\d+):', log)):
pr_branches[pr].append(branch)
finally:
os.chdir(STARTDIR)
with open(f'pull_requests_branches_{NAME}.json', 'w') as f:
json.dump(pr_branches, f, sort_keys=True, indent=2)
| [((30, 7, 30, 40), 'os.path.basename', 'os.path.basename', ({(30, 24, 30, 39): 'REPOSITORY_NAME'}, {}), '(REPOSITORY_NAME)', False, 'import os\n'), ((32, 15, 32, 33), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ({}, {}), '()', False, 'import tempfile\n'), ((35, 11, 35, 31), 'os.path.abspath', 'os.path.abspath', ({(35, 27, 35, 30): '"""."""'}, {}), "('.')", False, 'import os\n'), ((38, 11, 38, 40), 'common.get_branches', 'get_branches', ({(38, 24, 38, 39): 'REPOSITORY_NAME'}, {}), '(REPOSITORY_NAME)', False, 'from common import get_branches\n'), ((46, 14, 46, 31), 'collections.defaultdict', 'defaultdict', ({(46, 26, 46, 30): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((42, 17, 42, 34), 'json.load', 'json.load', ({(42, 27, 42, 33): 'merged'}, {}), '(merged)', False, 'import json\n'), ((51, 4, 51, 49), 'astropy.utils.console.color_print', 'color_print', ({(51, 16, 51, 39): 'f"""Cloning {REPOSITORY}"""', (51, 41, 51, 48): '"""green"""'}, {}), "(f'Cloning {REPOSITORY}', 'green')", False, 'from astropy.utils.console import color_print\n'), ((52, 4, 52, 26), 'os.chdir', 'os.chdir', ({(52, 13, 52, 25): 'DIRTOCLONEIN'}, {}), '(DIRTOCLONEIN)', False, 'import os\n'), ((53, 7, 53, 26), 'os.path.isdir', 'os.path.isdir', ({(53, 21, 53, 25): 'NAME'}, {}), '(NAME)', False, 'import os\n'), ((84, 4, 84, 22), 'os.chdir', 'os.chdir', ({(84, 13, 84, 21): 'STARTDIR'}, {}), '(STARTDIR)', False, 'import os\n'), ((87, 4, 87, 55), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((57, 8, 57, 22), 'os.chdir', 'os.chdir', ({(57, 17, 57, 21): 'NAME'}, {}), '(NAME)', False, 'import os\n'), ((61, 8, 61, 62), 'subprocess.call', 'subprocess.call', (), '', False, 'import subprocess\n'), ((62, 8, 62, 22), 'os.chdir', 'os.chdir', ({(62, 17, 62, 21): 'NAME'}, {}), '(NAME)', False, 'import os\n'), ((68, 8, 68, 61), 'astropy.utils.console.color_print', 'color_print', ({(68, 20, 68, 51): 'f"""Switching to branch {branch}"""', (68, 53, 68, 60): '"""green"""'}, {}), "(f'Switching to branch {branch}', 'green')", False, 'from astropy.utils.console import color_print\n'), ((69, 8, 69, 55), 'subprocess.call', 'subprocess.call', (), '', False, 'import subprocess\n'), ((70, 8, 70, 53), 'subprocess.call', 'subprocess.call', (), '', False, 'import subprocess\n'), ((71, 8, 71, 61), 'subprocess.call', 'subprocess.call', (), '', False, 'import subprocess\n'), ((59, 12, 59, 62), 'subprocess.call', 'subprocess.call', (), '', False, 'import subprocess\n'), ((73, 12, 73, 78), 'subprocess.call', 'subprocess.call', (), '', False, 'import subprocess\n'), ((79, 19, 79, 65), 're.findall', 're.findall', ({(79, 30, 79, 59): '"""Merge pull request #(\\\\d+) """', (79, 61, 79, 64): 'log'}, {}), "('Merge pull request #(\\\\d+) ', log)", False, 'import re\n'), ((80, 19, 80, 58), 're.findall', 're.findall', ({(80, 30, 80, 52): '"""Backport PR #(\\\\d+):"""', (80, 54, 80, 57): 'log'}, {}), "('Backport PR #(\\\\d+):', log)", False, 'import re\n'), ((76, 14, 76, 60), 'subprocess.check_output', 'subprocess.check_output', (), '', False, 'import subprocess\n')] |
pedMatias/matias_hfo | agents/solo_q_agents/q_agent_test/aux.py | 6d88e1043a1455f5c1f6cc11b9380869772f4176 | from datetime import datetime as dt
import os
import numpy as np
import settings
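# Helpers for Q-table training runs: mkdir() creates a timestamped model directory
# under settings.MODELS_DIR, and save_model() persists the table with numpy.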
def mkdir():
now = dt.now().replace(second=0, microsecond=0)
name_dir = "q_agent_train_" + now.strftime("%Y-%m-%d_%H:%M:%S")
path = os.path.join(settings.MODELS_DIR, name_dir)
try:
os.mkdir(path)
except FileExistsError:
name_dir += "_2"
path = os.path.join(settings.MODELS_DIR, name_dir)
os.mkdir(path)
return path
def save_model(q_table: np.ndarray, directory: str, file_name: str):
file_path = os.path.join(directory, file_name)
np.save(file_path, q_table)
| [((12, 11, 12, 54), 'os.path.join', 'os.path.join', ({(12, 24, 12, 43): 'settings.MODELS_DIR', (12, 45, 12, 53): 'name_dir'}, {}), '(settings.MODELS_DIR, name_dir)', False, 'import os\n'), ((23, 16, 23, 50), 'os.path.join', 'os.path.join', ({(23, 29, 23, 38): 'directory', (23, 40, 23, 49): 'file_name'}, {}), '(directory, file_name)', False, 'import os\n'), ((24, 4, 24, 31), 'numpy.save', 'np.save', ({(24, 12, 24, 21): 'file_path', (24, 23, 24, 30): 'q_table'}, {}), '(file_path, q_table)', True, 'import numpy as np\n'), ((14, 8, 14, 22), 'os.mkdir', 'os.mkdir', ({(14, 17, 14, 21): 'path'}, {}), '(path)', False, 'import os\n'), ((10, 10, 10, 18), 'datetime.datetime.now', 'dt.now', ({}, {}), '()', True, 'from datetime import datetime as dt\n'), ((17, 15, 17, 58), 'os.path.join', 'os.path.join', ({(17, 28, 17, 47): 'settings.MODELS_DIR', (17, 49, 17, 57): 'name_dir'}, {}), '(settings.MODELS_DIR, name_dir)', False, 'import os\n'), ((18, 8, 18, 22), 'os.mkdir', 'os.mkdir', ({(18, 17, 18, 21): 'path'}, {}), '(path)', False, 'import os\n')] |
AXFS-H/Windows10Debloater | Python38/Lib/site-packages/PyInstaller/hooks/hook-PyQt4.py | ab5f8a8a8fb065bb40b7ddbd1df75563d8b8d13e | #-----------------------------------------------------------------------------
# Copyright (c) 2013-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
import os
from PyInstaller.utils.hooks import qt_menu_nib_dir
from PyInstaller.compat import getsitepackages, is_darwin, is_win
# On Windows system PATH has to be extended to point to the PyQt4 directory.
# The PySide directory contains Qt dlls. We need to avoid including different
# version of Qt libraries when there is installed another application (e.g. QtCreator)
if is_win:
from PyInstaller.utils.win32.winutils import extend_system_path
extend_system_path([os.path.join(x, 'PyQt4') for x in getsitepackages()])
hiddenimports = ['sip']
# For Qt to work on Mac OS X it is necessary to include directory qt_menu.nib.
# This directory contains some resource files necessary to run PyQt or PySide
# app.
if is_darwin:
datas = [
(qt_menu_nib_dir('PyQt4'), 'qt_menu.nib'),
]
| [((24, 24, 24, 48), 'os.path.join', 'os.path.join', ({(24, 37, 24, 38): 'x', (24, 40, 24, 47): '"""PyQt4"""'}, {}), "(x, 'PyQt4')", False, 'import os\n'), ((35, 9, 35, 33), 'PyInstaller.utils.hooks.qt_menu_nib_dir', 'qt_menu_nib_dir', ({(35, 25, 35, 32): '"""PyQt4"""'}, {}), "('PyQt4')", False, 'from PyInstaller.utils.hooks import qt_menu_nib_dir\n'), ((24, 58, 24, 75), 'PyInstaller.compat.getsitepackages', 'getsitepackages', ({}, {}), '()', False, 'from PyInstaller.compat import getsitepackages, is_darwin, is_win\n')] |
ig248/timeserio | timeserio/utils/functools.py | afc2a953a83e763418d417059493ef13a17d349c | import inspect
def get_default_args(func):
"""Get default arguments of a function.
"""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
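# Example: get_default_args(lambda a, b=2, c=3: None) returns {'b': 2, 'c': 3}.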
| [((7, 16, 7, 39), 'inspect.signature', 'inspect.signature', ({(7, 34, 7, 38): 'func'}, {}), '(func)', False, 'import inspect\n')] |
PauloAlexSilva/Python | Sec_10_expr_lambdas_fun_integradas/a_lambdas.py | 690913cdcfd8bde52d9ddd15e3c838e6aef27730 | """
Using Lambdas
Known as lambda expressions, or simply lambdas, these are functions without a name,
i.e. anonymous functions.
# A regular Python function
def funcao(x):
return 3 * x + 1
print(funcao(4))
print(funcao(7))
# The equivalent lambda expression
lambda x: 3 * x + 1
# How do we use a lambda expression?
calc = lambda x: 3 * x + 1
print(calc(4))
print(calc(7))
# Lambda expressions can take multiple inputs
nome_compelto = lambda nome, sobrenome: nome.strip().title() + ' ' + sobrenome.strip().title()
print(nome_compelto(' paulo', ' SILVA '))
print(nome_compelto(' MARIA ', ' albertina '))
# Python functions can take zero or several inputs; the same goes for lambdas
hello = lambda: 'Hello World!'
uma = lambda x: 3 * x + 1
duas = lambda x, y: (x * y) ** 0.5
tres = lambda x, y, z: 3 / (1 / x + 1 / 7 + 1 / z)
# n = lambda x1, x2, ..., xn: <expression>
print(hello())
print(uma(6))
print(duas(5, 7))
print(tres(3, 6, 9))
# NOTE: passing more arguments than the expected parameters raises a TypeError
# Example
autores = ['Paulo Silva', 'Maria Albertina', 'Luis Marques Nunes', 'Carlos Nunes',
'Ana S. Leitão', 'Inês Garcia', 'Claudia Sofia', 'I. L. Antunes',
'Américo Silva']
print(autores)
# ['Paulo Silva', 'Maria Albertina', 'Luis Marques Nunes', 'Carlos Nunes',
# 'Ana S. Leitão', 'Inês Garcia', 'Claudia Sofia', 'I. L. Antunes', 'Américo Silva']
# Sort by surname
autores.sort(key=lambda sobrenome: sobrenome.split(' ')[-1].lower())
print(autores)
# ['Maria Albertina', 'I. L. Antunes', 'Inês Garcia', 'Ana S. Leitão',
# 'Luis Marques Nunes', 'Carlos Nunes', 'Paulo Silva', 'Américo Silva', 'Claudia Sofia']
"""
# Quadratic function
# f(x) = a * x ** 2 + b * x + c
# Defining the generator function
def geradora_funcao_quadratica(a, b, c):
"""
    Returns the function f(x) = a * x ** 2 + b * x + c
"""
return lambda x: a * x ** 2 + b * x + c
teste = geradora_funcao_quadratica(2, 3, -5)
print(teste(0))
print(teste(1))
print(teste(2))
print(geradora_funcao_quadratica(3, 0, 1)(2))
| [] |
EduotavioFonseca/ProgramasPython | ex085.py | 8e0ef5f6f4239d1fe52321f8795b6573f6ff5130 | # List inside a dictionary (goal tally per match for a player; user prompts are in Portuguese)
campeonato = dict()
gol = []
aux = 0
campeonato['Jogador'] = str(input('Digite o nome do jogador: '))
print()
partidas = int(input('Quantas partidas ele jogou? '))
print()
for i in range(0, partidas):
aux = int(input(f'Quantos gols na partida {i + 1}? '))
gol.append(aux)
print()
campeonato['Gols'] = gol[:]
campeonato['Total'] = sum(gol)
print('=' * 55)
print()
print(campeonato)
print()
print('=' * 55)
print()
for k, v in campeonato.items():
print(f'O campo {k} tem o valor: {v}')
print()
print('=' * 55)
print(f'O jogador {campeonato["Jogador"]} jogou {partidas} partidas.')
print()
for i in range(0, partidas):
print(f'Na partida {i + 1} ele fez {gol[i]} gol(s).')
print()
print(f'No total ele fez {campeonato["Total"]} gols.')
print('=' * 55)
| [] |
kjetil-lye/ismo_heat | heat/initial_data.py | 09776b740a0543e270417af653d2a047c94f1b50 | import numpy
class InitialDataControlSine:
def __init__(self, coefficients):
self.coefficients = coefficients
def __call__(self, x):
u = numpy.zeros_like(x)
for k, coefficient in enumerate(self.coefficients):
u += coefficient * numpy.sin(k * numpy.pi * x)
return u
def exact_solution(self, x, t, q=1):
return sum(coefficient * numpy.exp(-q * (k * numpy.pi) ** 2 * t) * numpy.sin(
k * numpy.pi * x) for k, coefficient in enumerate(self.coefficients))
| [((9, 12, 9, 31), 'numpy.zeros_like', 'numpy.zeros_like', ({(9, 29, 9, 30): 'x'}, {}), '(x)', False, 'import numpy\n'), ((12, 32, 12, 59), 'numpy.sin', 'numpy.sin', ({(12, 42, 12, 58): '(k * numpy.pi * x)'}, {}), '(k * numpy.pi * x)', False, 'import numpy\n'), ((17, 75, 18, 29), 'numpy.sin', 'numpy.sin', ({(18, 12, 18, 28): '(k * numpy.pi * x)'}, {}), '(k * numpy.pi * x)', False, 'import numpy\n'), ((17, 33, 17, 72), 'numpy.exp', 'numpy.exp', ({(17, 43, 17, 71): '(-q * (k * numpy.pi) ** 2 * t)'}, {}), '(-q * (k * numpy.pi) ** 2 * t)', False, 'import numpy\n')] |
john18/uccross.github.io | explore/scripts/get_repos_creationhistory.py | 72cd88c7310ab1503467fba27add2338cf57d8f7 | import helpers
import json
import re
datfilepath = "../github-data/labRepos_CreationHistory.json"
allData = {}
# Check for and read existing data file
allData = helpers.read_existing(datfilepath)
# Read repo info data file (to use as repo list)
dataObj = helpers.read_json("../github-data/labReposInfo.json")
# Populate repo list
repolist = []
print("Getting internal repos ...")
repolist = sorted(dataObj["data"].keys())
print("Repo list complete. Found %d repos." % (len(repolist)))
# Read pretty GraphQL query
query_in = helpers.read_gql("../queries/repo-CreationDate.gql")
# Rest endpoint query
query_commits_in = "/repos/OWNNAME/REPONAME/commits?until=CREATETIME&per_page=100"
query_commits_in2 = "/repos/OWNNAME/REPONAME/commits?per_page=100"
# Retrieve authorization token
authhead = helpers.get_gitauth()
# Iterate through internal repos
print("Gathering data across multiple paginated queries...")
collective = {u'data': {}}
tab = " "
for repo in repolist:
# History doesn't change, only update new repos or those that had no previous commits
if "data" in allData.keys() and repo in allData["data"].keys():
if allData["data"][repo]["firstCommitAt"]:
print(tab + "Already recorded data for '%s'" % (repo))
continue
pageNum = 1
print("\n'%s'" % (repo))
print(tab + "page %d" % (pageNum))
repoSplit = repo.split("/")
# Query 1
print(tab + "Get creation date and default branch")
print(tab + "Modifying query...")
newquery = re.sub('OWNNAME', repoSplit[0], query_in)
newquery = re.sub('REPONAME', repoSplit[1], newquery)
gitquery = json.dumps({'query': newquery})
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_github(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
collective["data"][repo] = outObj["data"]["repository"]
# Query 2
print(tab + "Get pre-GitHub commit timestamps")
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
gitquery = re.sub('CREATETIME', collective["data"][repo]["createdAt"], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not get pre-GitHub commits for '%s'" % (repo))
outObj["data"] = []
# Update collective data
collective["data"][repo]["commitTimestamps"] = []
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# If no pre-GitHub commits, check the greater commit history
if len(collective["data"][repo]["commitTimestamps"]) > 0 and collective["data"][repo]["commitTimestamps"][0]:
collective["data"][repo]["initBeforeGitHubRepo"] = True
else:
print(tab + "No pre-GitHub commits found, getting full history")
collective["data"][repo]["initBeforeGitHubRepo"] = False
# Query 3
print(tab + "Modifying query...")
gitquery = re.sub('OWNNAME', repoSplit[0], query_commits_in2)
gitquery = re.sub('REPONAME', repoSplit[1], gitquery)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, gitquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
# Paginate if needed
hasNext = ("next" in outObj)
while hasNext:
pageNum += 1
print(tab + "page %d" % (pageNum))
print(tab + "Modifying query...")
newquery = gitquery + "&page=" + str(pageNum)
print(tab + "Query ready!")
# Actual query exchange
outObj = helpers.query_githubrest(authhead, newquery)
if outObj["errors"]:
print(tab + "Could not complete '%s'" % (repo))
collective["data"].pop(repo, None)
continue
# Update collective data
for commit in outObj["data"]:
collective["data"][repo]["commitTimestamps"].append(commit["commit"]["committer"]["date"])
hasNext = ("next" in outObj)
# Sort dates
collective["data"][repo]["commitTimestamps"].sort()
# Save earliest commit date
firstdate = None
if len(collective["data"][repo]["commitTimestamps"]) > 0:
firstdate = collective["data"][repo]["commitTimestamps"][0]
collective["data"][repo]["firstCommitAt"] = firstdate
del collective["data"][repo]["commitTimestamps"]
print("'%s' Done!" % (repo))
print("\nCollective data gathering complete!")
# Combine new data with existing data
if "data" not in allData.keys():
allData["data"] = {}
for repo in collective["data"].keys():
allData["data"][repo] = collective["data"][repo]
allDataString = json.dumps(allData, indent=4, sort_keys=True)
# Write output file
print("\nWriting file '%s'" % (datfilepath))
with open(datfilepath, "w") as fileout:
fileout.write(allDataString)
print("Wrote file!")
print("\nDone!\n")
| [((9, 10, 9, 44), 'helpers.read_existing', 'helpers.read_existing', ({(9, 32, 9, 43): 'datfilepath'}, {}), '(datfilepath)', False, 'import helpers\n'), ((12, 10, 12, 63), 'helpers.read_json', 'helpers.read_json', ({(12, 28, 12, 62): '"""../github-data/labReposInfo.json"""'}, {}), "('../github-data/labReposInfo.json')", False, 'import helpers\n'), ((21, 11, 21, 63), 'helpers.read_gql', 'helpers.read_gql', ({(21, 28, 21, 62): '"""../queries/repo-CreationDate.gql"""'}, {}), "('../queries/repo-CreationDate.gql')", False, 'import helpers\n'), ((28, 11, 28, 32), 'helpers.get_gitauth', 'helpers.get_gitauth', ({}, {}), '()', False, 'import helpers\n'), ((151, 16, 151, 61), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((52, 12, 52, 53), 're.sub', 're.sub', ({(52, 19, 52, 28): '"""OWNNAME"""', (52, 30, 52, 42): 'repoSplit[0]', (52, 44, 52, 52): 'query_in'}, {}), "('OWNNAME', repoSplit[0], query_in)", False, 'import re\n'), ((53, 12, 53, 54), 're.sub', 're.sub', ({(53, 19, 53, 29): '"""REPONAME"""', (53, 31, 53, 43): 'repoSplit[1]', (53, 45, 53, 53): 'newquery'}, {}), "('REPONAME', repoSplit[1], newquery)", False, 'import re\n'), ((54, 12, 54, 43), 'json.dumps', 'json.dumps', ({(54, 23, 54, 42): "{'query': newquery}"}, {}), "({'query': newquery})", False, 'import json\n'), ((58, 10, 58, 50), 'helpers.query_github', 'helpers.query_github', ({(58, 31, 58, 39): 'authhead', (58, 41, 58, 49): 'gitquery'}, {}), '(authhead, gitquery)', False, 'import helpers\n'), ((70, 12, 70, 61), 're.sub', 're.sub', ({(70, 19, 70, 28): '"""OWNNAME"""', (70, 30, 70, 42): 'repoSplit[0]', (70, 44, 70, 60): 'query_commits_in'}, {}), "('OWNNAME', repoSplit[0], query_commits_in)", False, 'import re\n'), ((71, 12, 71, 54), 're.sub', 're.sub', ({(71, 19, 71, 29): '"""REPONAME"""', (71, 31, 71, 43): 'repoSplit[1]', (71, 45, 71, 53): 'gitquery'}, {}), "('REPONAME', repoSplit[1], gitquery)", False, 'import re\n'), ((72, 12, 72, 81), 're.sub', 're.sub', ({(72, 19, 72, 31): '"""CREATETIME"""', (72, 33, 72, 70): "collective['data'][repo]['createdAt']", (72, 72, 72, 80): 'gitquery'}, {}), "('CREATETIME', collective['data'][repo]['createdAt'], gitquery)", False, 'import re\n'), ((76, 10, 76, 54), 'helpers.query_githubrest', 'helpers.query_githubrest', ({(76, 35, 76, 43): 'authhead', (76, 45, 76, 53): 'gitquery'}, {}), '(authhead, gitquery)', False, 'import helpers\n'), ((95, 13, 95, 63), 're.sub', 're.sub', ({(95, 20, 95, 29): '"""OWNNAME"""', (95, 31, 95, 43): 'repoSplit[0]', (95, 45, 95, 62): 'query_commits_in2'}, {}), "('OWNNAME', repoSplit[0], query_commits_in2)", False, 'import re\n'), ((96, 13, 96, 55), 're.sub', 're.sub', ({(96, 20, 96, 30): '"""REPONAME"""', (96, 32, 96, 44): 'repoSplit[1]', (96, 46, 96, 54): 'gitquery'}, {}), "('REPONAME', repoSplit[1], gitquery)", False, 'import re\n'), ((100, 11, 100, 55), 'helpers.query_githubrest', 'helpers.query_githubrest', ({(100, 36, 100, 44): 'authhead', (100, 46, 100, 54): 'gitquery'}, {}), '(authhead, gitquery)', False, 'import helpers\n'), ((121, 11, 121, 55), 'helpers.query_githubrest', 'helpers.query_githubrest', ({(121, 36, 121, 44): 'authhead', (121, 46, 121, 54): 'newquery'}, {}), '(authhead, newquery)', False, 'import helpers\n')] |
tomaszjonak/PBL | examples/test/runMe.py | 738b95da52cd59dcacb0b9dc244ca1713b0264ac | #! /usr/bin/env python2.7
from __future__ import print_function
import sys
sys.path.append("../../include")
import PyBool_public_interface as Bool
if __name__ == "__main__":
expr = Bool.parse_std("input.txt")
expr = expr["main_expr"]
expr = Bool.simplify(expr)
expr = Bool.nne(expr)
print(Bool.print_expr(expr))
| [((7, 0, 7, 32), 'sys.path.append', 'sys.path.append', ({(7, 16, 7, 31): '"""../../include"""'}, {}), "('../../include')", False, 'import sys\n'), ((13, 11, 13, 38), 'PyBool_public_interface.parse_std', 'Bool.parse_std', ({(13, 26, 13, 37): '"""input.txt"""'}, {}), "('input.txt')", True, 'import PyBool_public_interface as Bool\n'), ((16, 11, 16, 30), 'PyBool_public_interface.simplify', 'Bool.simplify', ({(16, 25, 16, 29): 'expr'}, {}), '(expr)', True, 'import PyBool_public_interface as Bool\n'), ((18, 11, 18, 25), 'PyBool_public_interface.nne', 'Bool.nne', ({(18, 20, 18, 24): 'expr'}, {}), '(expr)', True, 'import PyBool_public_interface as Bool\n'), ((20, 10, 20, 31), 'PyBool_public_interface.print_expr', 'Bool.print_expr', ({(20, 26, 20, 30): 'expr'}, {}), '(expr)', True, 'import PyBool_public_interface as Bool\n')] |
rupen4678/botique_management_system | calculator.py | 9b7807cc28bb15e024093d6161a8fef96ce7e291 | from tkinter import *
import random
import time
from PIL import Image
from datetime import datetime
from tinydb import *
import os
import pickle
#from database1 import *
from random import randint
root = Tk()
root.geometry("1600x800+0+0")
root.title("Suman_dai_ko_DHOKAN")
root.configure(bg="goldenrod4")
text_Input = StringVar()
operator =""
yes =""
no=""
Tops = Frame(root, width=1600 ,height=50,bg="goldenrod4", relief=RIDGE)
Tops.pack(side=TOP)
f1 = Frame(root, width = 800 ,height=500,bg="goldenrod4",relief=SUNKEN)
f1.pack(side=LEFT)
f2 = Frame(root, width = 300,height = 700,bg="dark slate blue",relief=SUNKEN)
f2.pack(side=RIGHT)
#f3= Frame(root,width=1600,height=300,fg="blue", bg="powder blue", relief=SUNKEN).pack(side=Bottom)
#==========================================================Time=======================================
localtime=time.asctime(time.localtime(time.time()))
#datetime=Label(Tops,font("arial",20,"bold"),text=nowTime,bd=10 ,bg="black", #fg="white", anchor="w").pack()
#====================================debugged========================
shirt = IntVar()
pant = IntVar()
sale = IntVar()
buy = IntVar()
deposite = IntVar()
withdraw = IntVar()
coat = IntVar()
order = IntVar()
total = IntVar()
out = IntVar()
before = IntVar() #order before the 60
stock = IntVar()
delivery = IntVar()
#########################main_gate######################
def _calculation():
shirt_mm = shirt.get()
pant_mm = pant.get()
sale_mm = sale.get()
buy_mm = buy.get()
deposite_mm = deposite.get()
withdraw_mm = withdraw.get()
coat_mm = coat.get()
order_mm = order.get()
total_mm = total.get()
time = datetime.now()
day = time.day
month = time.month
hour = time.hour
second = time.second
year = time.year
minute = time.minute
#setting the filename using the loop
#file = open("1{}".format())
'''for i in range(5):
if os.path.isfile(i):
pass
else:
file = open("{}.txt".format(i+1), "w+")
created with name {}".format(file))'''
#creating the filenames with append =1 if the name already existed
file_name = "r.txt"
if os.path.isfile(file_name):
expand = 1
while True:
expand += 1
new_file_name = file_name.split(".txt")[0] + str(expand) + ".txt"
if os.path.isfile(new_file_name): #if the newfilename exists
print("using the file {}".format(new_file_name))
#file = open("{}".format(new_file_name), "w+")
continue
else:
file_name = open(new_file_name, "w+")
print("creating the file {}".format(file_name))
#file = open("{}".format(file_name), "w+")
break
file_name = "fil.txt"
file = open("{}".format(file_name),"w+")
totalx = shirt_mm+pant_mm+sale_mm+buy_mm+deposite_mm+withdraw_mm+coat_mm+order_mm
file.write("Total:-{}".format(totalx))
file.write("shirt:-{}".format(shirt_mm))
file.write("pant_mm:-{}".format(pant_mm))
file.write("sale_mm:-{}".format(sale_mm))
file.write("buy_mm:-{}".format(buy_mm))
file.write("deposite_mm:-{}".format(deposite_mm))
file.write("withdraw_mm:-{}".format(withdraw_mm))
file.write("coat:-{}".format(coat_mm))
file.write("order:-{}".format(order_mm))
reading = file.readlines()
file.close()
#after wards set the total from here total.set
#++++++++++++++++++++++++++++++Varibales_inset+++++++++++++++++++++++++++++++++
order_bef = IntVar()
stock_full = IntVar()
shrting = IntVar()
pant = IntVar()
sari = IntVar()
order_info = IntVar()
delivery_report = IntVar()
daily_info = IntVar()
sales = IntVar()
buy = IntVar()
total_bank = IntVar()
bank_deposite = IntVar()
bank_withdraw = IntVar()
due_amount = IntVar()
order_info = IntVar()
daily_cash = IntVar()
cus_name = IntVar()
cus_no = IntVar()
employee = IntVar()
###############################class of algoriths#########################
class __main():
def __init__(self):
self.order = order
def __order_info(self):
self.now = datetime()
self.hour = now.hour
self.minute = now.minute
self.second = now.second
self.year = now.year
self.month = now.month
self.day = now.day
self.record_time = record_time
if self.hour == self.record_timeD:
print("the time for the product is actually %s left" %(self.hour-self.record_timeD))
#++++++++++++++++++++++++++++++++++++++++tinydb example++++++++++++++++++++++
#db = TinyDB("/databse/d4ta.json")
#db.insert({"cus_number":"98938232", "cus_name":"rupen"})
#def no_y():
# lis = db.all()
################Info===============
lblInfo = Label(Tops, font=("arial",60, "italic bold"),text="Botique Management Systewm",fg="white", bg="dark slate blue", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
lblInfo = Label(Tops, font=("arial",30, "bold"),text=localtime,fg="white",bg="black", bd=10, anchor="w", relief=RIDGE)
lblInfo.pack()
#===========================================================Calculator==================================
"""def current_dir():
import os
import sys
DIR = os.getcwd()
print(DIR)
lblInfo = Label(Tops, font=("arial",60, "italic"),text=current_dir,fg="black",bg="powder blue",bd=10, anchor="W")
lblInfo.pack()
#DIR = dir
#return dir
"""
#randomBtn=Button(f1,pady=16,padx=16,bd=8,bg="powder blue", text="C_dir", command=lambda: current_dir(dir)).pack(side=TOP)
def btnClick(numbers):
global operator
operator = operator + str(numbers)
text_Input.set(operator)
def btnClearDisplay():
global operator
operator=""
text_Input.set("")
def btnEqualsInput():
global operator
sumup=str(eval(operator))
text_Input.set(sumup)
operator=""
def bill_entry():
global bill_in
global bill_out
bill_out = ""
bill_in = ""
def rupen():
global rupen
rupen = rupen
ronley = StringVar()
'''def malware_activate():
global cmd_active
if "rupen" in cmd_active:
if "rupen" in cmd_active[1]:
if "ronley" in cmd_active[2]:'''
#==============================another windows about me=====================
def ano_win1():
win1 = Toplevel()
#this is going to be the window in which there is nothing in the function
#of the system on the support in teh main loop
#there is no limit in the system of teh
win1.title("this is the owner window:")
win1.geometry("1600x800+0+0")
#win1.configure(bg="silver")
my_info = Frame(win1, width=600, height=700,bg="RoyalBlue4",relief=GROOVE)
my_info.pack(side=LEFT)
customer_info = Frame(win1, width=600, height=500,bg="RoyalBlue4", relief=GROOVE)
customer_info.pack(side=RIGHT)
others_info = Frame(win1, width=100, height=100,bg="RoyalBlue4",relief=GROOVE)
others_info.pack(side=BOTTOM)
all_info = Frame(win1, width=50, height=50,bg="RoyalBlue4",relief=RAISED)
all_info.pack()
lblname=Label(my_info,font=("arial",20,"italic"),text="Rupen Gurung",bg="powder blue", fg="green", bd=10, relief=SUNKEN).pack(side=TOP)
lblpro=Label(my_info,font=("arial", 20,"bold"),text="Software Engineer",bg="powder blue", fg="green",bd=10, relief=RAISED).pack()
ima = StringVar()
imageloc=Entry(win1,font=("arial",16,"italic"),bg="black",fg="white",bd=5,insertwidth=1,relief=GROOVE,textvariable=ima).pack()
imageButt=Button(win1,font=("arial",20, "bold"),bd=5,bg="white",fg="white",command= lambda: _image(image)).pack()
'''def _image(image):
image = image.set(imageloc)
return image
#image = Image.open("/root/Desktop/Desktop/anonymous/5.png")
imae = Label(win1,font=("arial", 20,"italic"),width=300, height=168,bg="black",fg="white", text=image,relief=FLAT).pack()
win1.mainloop()'''
#=============================getting all the infos ========================
def _price_inputs():
win2 = Toplevel()
win2.title("This is going to the section for the price inputs")
win2.geometry("1600x800")
framex = Frame(win2,width=1600,bg="RoyalBlue4",height=100,relief=GROOVE).pack(side=TOP)
frame1 = Frame(win2,width=775, height=750,bg="white", relief=SUNKEN).pack()
frame2 = Frame(win2, width=775,height=750,bg="black", relief=FLAT).pack()
#==++++===========================title=============================
llb1 = Label(framex,font=("arial", 20,"italic"),bg="powder blue",fg="green",text="INPUT THE PRICES",relief=GROOVE).pack()
win2.mainloop()
###########################sending emails############################
def __send_email():
'''import smtplib
gmail = smtplib.SMTP("smtp.gmail.com", 587)
gmail.starttls()
_file = open("/root/Desktop/Desktop/python/")
gmail.login("username", "password")
msg = "YOUR MESSAGE"
gmail.sendmail("your email adress", "the")
gmail.quit()'''
dialog = Tk()
dialog.title("Send emails")
dialog.geometry("800x800")
dframe = Frame(dialog,width=800,height=800,bg="white",relief=SUNKEN).pack()
email = StringVar()
password = StringVar()
semail = StringVar()
spassword = StringVar()
label = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="your_email").pack(side=LEFT)
entry1 = Entry(dframe, font=("arial",16,"bold"), fg="white",bg="black", textvariable=email,insertwidth=1,bd=5).pack(side=RIGHT)
label1 = Label(dframe, font=("arial",16, "bold"), fg="white", bg="black", text="password", relief=SUNKEN).pack()
entry2 = Entry(dframe,font=("arial", 16 ,"bold"),textvariable=password, insertwidth=1,bd=5).pack(side=RIGHT)
Label2 =Label(dframe,font=("arial",16, "bold"),fg="white",bg="black", text="sender_email",relief=SUNKEN).pack(side=LEFT)
entry2 = Entry(dframe,font=("arial",16, "bold"),bd=5,fg="white",bg="black",textvariable=semail,insertwidth=1).pack(side=LEFT)
label3 = Label(dframe,font=("arial",16,"bold"),fg="white",bg="black",text="sender_password", relief=SUNKEN).pack(side=LEFT)
entry3= Entry(dframe,font=("arial",16,"bold"),fg="white",textvariable=spassword,insertwidth=1,relief=SUNKEN).pack()
dialog.mainloop()
#btnEmail = Button(root,font=("arial", 16, "bold"), bg="black",fg="white",text="email",command=lambda: __send_email(),relief=GROOVE).pack()
#================================next section===========================
fix = Button(root, bd=10,bg="black",fg="white",command=_price_inputs,relief=GROOVE).pack(side=BOTTOM)
btnru = Button(root, font=("arial 20 bold"),bd=20, bg="black",fg="white",text="click",command=ano_win1,relief=GROOVE).pack(side=BOTTOM)
#fucking mazing yr coding
def column(col):
for coll in col:
call=cal+1
return call
#def yes_y():
# rupe = Toplevel(root)
# rupe.title("this is second window")
# return
#def no_y():
#nos = Toplevel(root)
#nos.title("this is nos window")
#return
a = Entry(f2,font=("arial", 20,"bold"), textvariable=text_Input, bd=30, insertwidth=4,
bg="dark slate blue",fg="white", justify="right").grid(columnspan=4)
btn7=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="7",bg="dim gray", command=lambda: btnClick(7)).grid(row=2,column=0)
btn8=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="8",bg="dim gray", command=lambda: btnClick(8)).grid(row=2,column=1)
btn9=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
text="9",bg="dim gray", command=lambda: btnClick(9)).grid(row=2,column=2)
#!!!!!!!!!!!!!!!!!!!!!!additions!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Addition=Button(f2,padx=16,pady=16,bd=8,text="+",fg="black",bg="dim gray", command=lambda: btnClick("+")).grid(row=2,column=3)
btn6=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="4", bg="dim gray", command=lambda: btnClick(4)).grid(row=3,column=0)
btn5=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="5", bg="dim gray", command=lambda: btnClick(5)).grid(row=3,column=1)
btn4=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),text="6",bg="dim gray", command=lambda: btnClick(6)).grid(row=3,column=2)
Subtract=Button(f2,padx=16,pady=16,bd=8,text="-", bg="dim gray", command=lambda: btnClick("-")).grid(row=3,column=3)
btn3=Button(f2,padx=16,pady=16,bd=8,text="3",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(3)).grid(row=4,column=0)
btn2=Button(f2,padx=16,pady=16,bd=8,text="2",font=("arial", 20, "bold"), bg="dim gray", command=lambda: btnClick(2)).grid(row=4,column=1)
btn1=Button(f2,padx=16,pady=16,bd=8,text="1",font=("arial", 20, "bold") ,bg="dim gray", command=lambda: btnClick(1)).grid(row=4,column=2)
Multiply=Button(f2,padx=16,pady=16,bd=8,text="*", bg="dim gray", command=lambda: btnClick("X")).grid(row=4,column=3)
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
btn0=Button(f2,padx=16,pady=16,bd=8,bg="dim gray",text="0",fg="black",font=("arial", 20, "bold"), command=lambda: btnClick(0)).grid(row=5,column=0)
btnClear=Button(f2,pady=16,padx=16,bd=8, fg="black",font=("arial", 20, "bold"),text="C",bg="dim gray", command=btnClearDisplay).grid(row=5,column=1)
btnEquals=Button(f2,padx=16,pady=16,fg="black",bd=8,text="=",bg="dim gray", font=("arial", 20,"bold"), command=btnEqualsInput).grid(row=5,column=2)
#btn2=Button(f2,padx=16,pady=16,bd=8,fg="black",text="2",bg="dim gray", command=lambda: btnClick(2)).grid(row=5,column=3)
division=Button(f2,padx=16,pady=16,bd=8,fg="black", text="/", bg="dim gray", command=lambda: btnClick("/")).grid(row=5,column=3)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
rand = StringVar()
#lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="red",bg="red",anchor="w",relief=RIDGE).grid(row=0,column=0)
#txtReference=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,bg="red",fg="white", justify = "right").grid(row=0,column=1)
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
lblReference = Label(f1,font=("arial", 16,"bold"), text="Reference",bd=16,fg="white",bg="green",anchor="w", relief=RIDGE)
lblReference.grid(row=0,column=0)
b=Entry(f1,font=("arial", 16, "bold"), textvariable=rand, bd=10,insertwidth=4,fg="white",bg="black", justify = "left")
b.grid(row=0,column=1)
#img = "/root/Desktop/Desktop/python/projects/prj1_Botik/1.jpg"
#root.ima = Image.open(img)
#Label (root,bg="white",width=120,height=120, image=ima).pack()
bill_in = StringVar()
bill_out = StringVar()
shrting=Label(f1,font=("arial", 20, "bold"), text="Shirting:",bg="powder blue", fg="black",anchor="w",relief=GROOVE).grid(row=1,column=0)
shirts=Entry(f1,font=("arial", 16, "italic"), bd=10, textvariable=shirt, insertwidth=1,bg="black",fg="white", justify="left").grid(row=2,column=0)
owner=Button(root,padx=16,pady=16, font=("arial",12, "bold"),text="info", bd=8,bg="black",command=ano_win1,fg="white",relief=RAISED).pack(side=LEFT)
yes=Button(root,padx=16,pady=16,font=("arial",12, "bold"),text="Done",bd=8,bg="black", fg="white", command=_calculation(),relief=RAISED).pack(side=RIGHT)
panting=Label(f1,font=("arial",20, "bold"), text="pant_mm:", bg="powder blue",fg="black",anchor="w",relief=GROOVE).grid(row=1,column=1)
pantx=Entry(f1,font=("arial",16, "bold"), textvariable=pant, insertwidth=1, bd=10,bg="black",fg="white", justify="left").grid(row=2,column=1)
sales=Label(f1,font=("arial",16, "bold"), text="sales_total:",bg="powder blue",fg="black",anchor="w",bd=8,relief=GROOVE).grid(row=1,column=2)
salex=Entry(f1,font=("arial",16, "bold"),bg="black",fg="white",textvariable=sale,insertwidth=1,bd=10,justify="left").grid(row=2,column=2)
buying=Label(f1,font=("arial",16, "bold"), text="buying_something: ",bg="powder blue",fg="black", anchor="e", relief=GROOVE).grid(row=3,column=0)
buyx=Entry(f1,font=("arial", 16, "bold"), textvariable=buy, insertwidth=1, bd=10,bg="black", fg="white", justify="left").grid(row=4,column=0)
Bank_Total=Label(f1,font=("arial",16,"bold"),text="Bank_Deposite: ", bg="powder blue", fg="black", anchor="e",relief=GROOVE).grid(row=3, column=1)
depositex=Entry(f1,font=("arial",16,"bold"),bd=10, textvariable=deposite, bg="black", fg="white", justify="left").grid(row=4, column=1)
lblBankwith=Label(f1, font=("arial", 16, "bold"),fg="black",bg="powder blue",text="Bank_Withdraw", anchor="e",relief=GROOVE).grid(row=3,column=2)
withdrawx=Entry(f1,font=("arial",16, "bold"),bd=10, fg="white",bg="black", textvariable=withdraw, insertwidth=1).grid(row=4,column=2)
coating=Label(f1, font=("arial", 16, "bold"),text="coat_mm:", bg="powder blue",fg="black",anchor="e").grid(row=5,column=0)
coatx=Entry(f1, font=("arial", 16, "bold"), bg="black", fg="white",
textvariable=coat, insertwidth=1, justify="left",bd=10).grid(row=6,column=0)
lablsari=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="sari mm:", fg="black",anchor="e",relief=GROOVE).grid(row=5,column=1)
sarix=Entry(f1, font=("arial", 16, "bold"), bg="black",bd=10, fg="white",textvariable=sari, insertwidth=1).grid(row=6,column=1)
buying=Label(f1,font=("arial", 16, "bold"), bg="powder blue",text="buy_info:",fg="black",anchor="e",relief=GROOVE).grid(row=7,column=0)
buyx=Entry(f1,font=("arial",16, "bold"),bd=8, fg="white",bg="black",textvariable=buy,insertwidth=1).grid(row=8,column=0)
outgoing =Label(f1, font=("arial", 16, "bold"), bg="powder blue", text="outgoing:", fg="black",anchor="e",relief=GROOVE).grid(row=7,column=1)
outx=Entry(f1,font=("arial", 16, "bold"),textvariable=out, bd=8,fg="white",bg="black",insertwidth=1).grid(row=8,column=1)
ordering=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="order_info:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=0)
orderx=Entry(f1,font=("arial",16,"bold"),insertwidth=1, textvariable=order,bd=8,fg="white",bg="black").grid(row=10,column=0)
lblcustomer=Label(f1,font=("arial",16,"bold"),bg="powder blue",text="cus_name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=1)
no=Entry(f1,font=("arial",16, "bold"),bd=8,bg="black",fg="white",insertwidth=1, textvariable=cus_name).grid(row=10,column=1)
lblmonthly=Label(f1, font=("arial",16,"bold"),bg="powder blue",text="monthly:",fg="black",anchor="e",relief=GROOVE).grid(row=5,column=2)
monthly=StringVar()
monthx=Entry(f1,font=("arial",16,"bold"),show="blank",bg="black",textvariable=monthly,insertwidth=1,fg="white",bd=10).grid(row=6,column=2)
lbltotal=Label(f1, font=("arial", 16, "bold"),bg="powder blue",text="Total:",fg="black").grid(row=7,column=2)
totalx=Entry(f1, font=("arial", 16, "bold"),bg="black",textvariable=total,fg="white",insertwidth=1,bd=10).grid(row=8,column=2)
lblemployee = Label(f1,font=("arial", 16, "bold"),bg="powder blue",text="employee name:",fg="black",anchor="e",relief=GROOVE).grid(row=9,column=2)
employx= Entry(f1,font=("arial", 16,"bold"),textvariable=employee,insertwidth=1,bg="black",fg="white",bd=10).grid(row=10,column=2)
###############################database for the project######################
'''def __database():
db = TinyDB("/records.json")
#print(monthly)
#print(b)
#fuck = c.get()
a = order_bef.get()
b = stock_full.get()
c = shrting.get()
d = pant.get()
e = sari.get()
f = order_info.get()
g = delivery_report.get()
h = daily_info.get()
i = sales.get()
j = buy.get()
k = total_bank.get()
l = bank_deposite.get()
m = bank_withdraw.get()
n = due_amount.get()
o = order_info.get()
p = daily_cash.get()
q = cus_name.get()
r = cus_no.get()
s = employee.get()
files = {"a": "", "b": "", "c": "", "d": "", "e": "", "f": "", "g": "", "h": "", "i": "", "j": ""
, "k": "", "l": "", "m": "", "n": "", "o": "", "p": "", "q": "", "r": "", "s": ""}
db.insert({"total": a }),
db.insert({"regrds":"reference"}),
db.insert({"day_income":"billion"}),
db.insert({"day_outgoing":"billout"}),
db.insert({"bankdeposit":"bankdepo"}),
db.insert({"full_stock":"stock"}),
db.insert({"shirt_mm":"shirt"}),
db.insert({"bankwithdraw":"bankwith"}),
db.insert({"pantmm":"pant"}),
db.insert({"sarimm":"sari"}),
db.insert({"orderday":"orderinfo"}),
db.insert({"salling":"sales"}),
db.insert({"buying":"buy"}),
db.insert({"customern":"customer"}),
db.insert({"monthly_info":"monthly"}),
db.insert({"totaldy":"total"}),
db.insert({"employeid":"employee"})
for db in range(1):
print(db)
files = list(files)
file = open("/file.txt", "wb")
da = ""
for data in files:
if len(data) != 0:
print("this is are the files written in python\\n check the file.txt for debug ")
da += data
print(data)
da = int(da)
file.write(da)
try:
file = open("/records.txt", "r")
except:
print("creating the file from script {}".format(__file__))
file = open("/records.txt","w")
finally:
pass
check = os.path.isfile("/records.txt")
if check:
for item in db:
data = open("/records.txt","wb")
#with open("/records.txt","wb") as file:
#pickle.dump(item, data)
#file.close()
#file1 = pickle.load(file)
if len(item) == len(file1):
break
if item != file:
#item = str(item)
file.write("%s" %(item))
time.sleep(1)
print("done writing to the file")
#for item in db:
with open("/records.txt", "rb") as file:
reading = file1
if len(reading) != None:
print("its printed")
print(reading)
file.close()
#db.insert({"name":"Rupen Gurung"})
name = Query()
#db(name.type == "changed")
d = datetime.now()
month = str(d.month)
day = str(d.day)
year = str(d.year)
hour = str(d.hour)
minute = str(d.minute)
second = str(d.second)
between = str(":")'''
'''def __time(infos):
time = datetime.now()
day = str(time.day)
month = str(time.month)
hour = str(time.hour)
second = str(time.second)
year = str(time.year)
minute = str(time.minute)
#assuming the infos as the order taken that will be notified before the
#60 hours
#changing all the formats to the seconds that will be easy for the #calculation
#first calculating seconds in one day that will ease all the further operations
daysec = (24*60) * 60 * 60
###
##this is will be easy now
yearSec = daysec * 365
month = daysec * 30
daySec = daysec
hourSec = 60 * 60 * 60
minuteSec = 60 * 60
files = {"a":"", "b":"","c":"","d":"","e":"","f":"","g":"","h":"","i":"","j":""
,"k":"","l":"","m":"","n":"","o":"","p":"","q":"","r":"","s":""}'''
#files = list(files)
'''for data in files:
if len(data) != 0:
print(data)'''
#lenght = len(db)
##this will show the recorded bill numbers
def bill_in():
##assuming the variable as bill number .get var
bill = bill_in.get()
billo = bill_out.get()
bills = tinydb.TinyDb("/bills.json")
while bill or billo != None:
bills.insert({"billInput": bill, "billOutput": billo})
win = Toplevel()
win.title("bills")
winF = Frame(win, bg="black",relief=SUNKEN).pack()
winE = Entry(winF, insertwidth=10,insertheight=10,fg="white",bg="black",textvariable=bills).pack()
win.mainloop()
#l
# command=bill_in).pack(anchor=NE)
root.mainloop()
#__database()
#add1=Button(f2,padx=16,pady=16,bd=8, fg="black", font=("arial",20,"bold"),
#text="+",bg="powder blue", command=lambda: btnClick("+")).grid(row=3,column=6)
#btn10=Button(f2,padx=16,padx=16, fg="blue", font("arial",5,"bold"),
# text="rupen",bg="powder blue", command=rupen).grid(row=3,column=5)
#def function():
# pass():
# pass main():
# root.mainloop()
#for the revies of the follow in the sorry of the same of the tkinter in the main function of the sollow
#main()
| [((79, 11, 79, 25), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((100, 7, 100, 32), 'os.path.isfile', 'os.path.isfile', ({(100, 22, 100, 31): 'file_name'}, {}), '(file_name)', False, 'import os\n'), ((169, 19, 169, 29), 'datetime.datetime', 'datetime', ({}, {}), '()', False, 'from datetime import datetime\n'), ((105, 15, 105, 44), 'os.path.isfile', 'os.path.isfile', ({(105, 30, 105, 43): 'new_file_name'}, {}), '(new_file_name)', False, 'import os\n')] |
Lanselott/mmdetection | mmdet/models/anchor_heads/embedding_nnms_head_v2_limited.py | 03ce0a87f4d52f4adf4f78fd39ad30b2da394376 | import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, Scale, bias_init_with_prob
from IPython import embed
INF = 1e8
@HEADS.register_module
class EmbeddingNNmsHeadV2limited(nn.Module):
"""
Fully Convolutional One-Stage Object Detection head from [1]_.
The FCOS head does not use anchor boxes. Instead bounding boxes are
predicted at each pixel and a centerness measure is used to supress
low-quality predictions.
References:
.. [1] https://arxiv.org/abs/1904.01355
Example:
>>> self = FCOSHead(11, 7)
>>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]
>>> cls_score, bbox_pred, centerness = self.forward(feats)
>>> assert len(cls_score) == len(self.scales)
"""
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
embedding_convs_num=2,
strides=(4, 8, 16, 32, 64),
delta=2.0,
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(EmbeddingNNmsHeadV2limited, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.embedding_convs_num = embedding_convs_num
self.strides = strides
self.delta = delta
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
self.embedding_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.embedding_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.embedding_cls = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
# Pull and Push loss
self.pull_loss = nn.MSELoss()
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.embedding_cls, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
embedding_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fcos_cls(cls_feat)
for embedding_layer in self.embedding_convs:
embedding_feat = embedding_layer(embedding_feat)
embedding_pred = self.embedding_cls(embedding_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
return cls_score, bbox_pred, embedding_pred
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def loss(self,
cls_scores,
bbox_preds,
embedding_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(embedding_preds)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores and bbox_preds
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_embedding_preds = [
embedding_feat.permute(0, 2, 3, 1).reshape(-1, 1)
for embedding_feat in embedding_preds
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_embedding_preds = torch.cat(flatten_embedding_preds)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
if num_pos > 0:
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
pos_iou_scores = bbox_overlaps(pos_decoded_bbox_preds, pos_decoded_target_preds, is_aligned=True).clamp(min=1e-6)
max_scores, max_inds = flatten_cls_scores.sigmoid().max(1)
pos_embedding_preds = flatten_embedding_preds[pos_inds]
# Instance level op
dist_conf_mask_list = []
# generate instance levels index
instance_counter = torch.zeros(num_pos, device=pos_points.device)
remove = torch.zeros(num_pos, device=pos_points.device)
obj_id = 0
# NOTE: get mask for each obj
for i in range(len(pos_decoded_target_preds)):
if remove[i] == 0:
current_bbox = pos_decoded_target_preds[i]
mask = ((pos_decoded_target_preds == current_bbox).sum(1)==4).nonzero()
instance_counter[mask] = obj_id
remove[mask] = 1
obj_id += 1
instance_counter = instance_counter.int()
obj_ids = torch.bincount(instance_counter).nonzero().int()
for obj_id in obj_ids:
dist_conf_mask_list.append((instance_counter==obj_id).float())
# Opt for each obj
objs_embedding_list = []
obj_embedding_means_list = []
obj_embedding_means_expand_list = []
for dist_conf_mask in dist_conf_mask_list:
obj_mask_inds = dist_conf_mask.nonzero().reshape(-1)
obj_embedding_preds = pos_embedding_preds[obj_mask_inds]
objs_embedding_list.append(obj_embedding_preds)
# mean value
embedding_mean = obj_embedding_preds.sum() / obj_embedding_preds.shape[0]
obj_embedding_means_list.append(embedding_mean)
obj_embedding_means_expand_list.append(torch.zeros_like(obj_embedding_preds).fill_(embedding_mean))
embed()
# pull loss
theta = 1
embedding_expand_means = torch.cat(obj_embedding_means_expand_list)
pull_embedding = torch.cat(objs_embedding_list)
pull_loss = theta * self.pull_loss(pull_embedding, embedding_expand_means)
# push loss
N_samples = len(dist_conf_mask_list)
push_loss = 0
for obj_j_embedding_mean in obj_embedding_means_list:
for obj_k_embedding_mean in obj_embedding_means_list:
if torch.equal(obj_j_embedding_mean, obj_k_embedding_mean):
continue
else:
push_dist = self.delta - torch.abs(obj_k_embedding_mean - obj_j_embedding_mean)
push_loss += torch.max(push_dist, torch.zeros(1, device=push_dist.device))
push_loss = push_loss / N_samples**2
# iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds)
else:
loss_bbox = pos_bbox_preds.sum()
push_loss = pos_bbox_preds.sum()
pull_loss = pos_bbox_preds.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
push_loss=push_loss,
pull_loss=pull_loss)
@force_fp32(apply_to=('cls_scores', 'bbox_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, points in zip(
cls_scores, bbox_preds, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
if num_gts == 0:
return gt_labels.new_zeros(num_points), \
gt_bboxes.new_zeros((num_points, 4))
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
| [((151, 5, 151, 54), 'mmdet.core.force_fp32', 'force_fp32', (), '', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((277, 5, 277, 54), 'mmdet.core.force_fp32', 'force_fp32', (), '', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((71, 25, 71, 40), 'torch.nn.ModuleList', 'nn.ModuleList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((72, 25, 72, 40), 'torch.nn.ModuleList', 'nn.ModuleList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((73, 31, 73, 46), 'torch.nn.ModuleList', 'nn.ModuleList', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((108, 24, 109, 68), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((110, 24, 110, 70), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((111, 29, 111, 75), 'torch.nn.Conv2d', 'nn.Conv2d', (), '', True, 'import torch.nn as nn\n'), ((116, 25, 116, 37), 'torch.nn.MSELoss', 'nn.MSELoss', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((124, 8, 124, 59), 'mmcv.cnn.normal_init', 'normal_init', (), '', False, 'from mmcv.cnn import normal_init\n'), ((125, 8, 125, 44), 'mmcv.cnn.normal_init', 'normal_init', (), '', False, 'from mmcv.cnn import normal_init\n'), ((126, 8, 126, 49), 'mmcv.cnn.normal_init', 'normal_init', (), '', False, 'from mmcv.cnn import normal_init\n'), ((129, 15, 129, 67), 'mmdet.core.multi_apply', 'multi_apply', ({(129, 27, 129, 46): 'self.forward_single', (129, 48, 129, 53): 'feats', (129, 55, 129, 66): 'self.scales'}, {}), '(self.forward_single, feats, self.scales)', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((182, 29, 182, 58), 'torch.cat', 'torch.cat', ({(182, 39, 182, 57): 'flatten_cls_scores'}, {}), '(flatten_cls_scores)', False, 'import torch\n'), ((183, 29, 183, 58), 'torch.cat', 'torch.cat', ({(183, 39, 183, 57): 'flatten_bbox_preds'}, {}), '(flatten_bbox_preds)', False, 'import torch\n'), ((184, 34, 184, 68), 'torch.cat', 'torch.cat', ({(184, 44, 184, 67): 'flatten_embedding_preds'}, {}), '(flatten_embedding_preds)', False, 'import torch\n'), ((185, 25, 185, 42), 'torch.cat', 'torch.cat', ({(185, 35, 185, 41): 'labels'}, {}), '(labels)', False, 'import torch\n'), ((186, 31, 186, 54), 'torch.cat', 'torch.cat', ({(186, 41, 186, 53): 'bbox_targets'}, {}), '(bbox_targets)', False, 'import torch\n'), ((335, 22, 335, 44), 'torch.cat', 'torch.cat', ({(335, 32, 335, 43): 'mlvl_bboxes'}, {}), '(mlvl_bboxes)', False, 'import torch\n'), ((338, 22, 338, 44), 'torch.cat', 'torch.cat', ({(338, 32, 338, 43): 'mlvl_scores'}, {}), '(mlvl_scores)', False, 'import torch\n'), ((340, 22, 340, 62), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((341, 33, 346, 28), 'mmdet.core.multiclass_nms', 'multiclass_nms', ({(342, 12, 342, 23): 'mlvl_bboxes', (343, 12, 343, 23): 'mlvl_scores', (344, 12, 344, 25): 'cfg.score_thr', (345, 12, 345, 19): 'cfg.nms', (346, 12, 346, 27): 'cfg.max_per_img'}, {}), '(mlvl_bboxes, mlvl_scores, cfg.score_thr, cfg.nms, cfg.\n max_per_img)', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((370, 18, 371, 62), 'torch.arange', 'torch.arange', (), '', False, 'import torch\n'), ((372, 18, 373, 62), 'torch.arange', 'torch.arange', (), '', False, 'import torch\n'), ((374, 15, 374, 47), 'torch.meshgrid', 'torch.meshgrid', ({(374, 30, 374, 37): 'y_range', (374, 39, 374, 46): 'x_range'}, {}), '(y_range, x_range)', False, 
'import torch\n'), ((388, 32, 388, 73), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((389, 24, 389, 48), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((391, 41, 396, 49), 'mmdet.core.multi_apply', 'multi_apply', (), '', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((440, 23, 440, 66), 'torch.stack', 'torch.stack', ({(440, 35, 440, 61): '(left, top, right, bottom)', (440, 63, 440, 65): '-1'}, {}), '((left, top, right, bottom), -1)', False, 'import torch\n'), ((120, 12, 120, 41), 'mmcv.cnn.normal_init', 'normal_init', (), '', False, 'from mmcv.cnn import normal_init\n'), ((122, 12, 122, 41), 'mmcv.cnn.normal_init', 'normal_init', (), '', False, 'from mmcv.cnn import normal_init\n'), ((200, 37, 200, 78), 'mmdet.core.distance2bbox', 'distance2bbox', ({(200, 51, 200, 61): 'pos_points', (200, 63, 200, 77): 'pos_bbox_preds'}, {}), '(pos_points, pos_bbox_preds)', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((201, 39, 202, 70), 'mmdet.core.distance2bbox', 'distance2bbox', ({(201, 53, 201, 63): 'pos_points', (202, 53, 202, 69): 'pos_bbox_targets'}, {}), '(pos_points, pos_bbox_targets)', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((210, 31, 210, 77), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((211, 21, 211, 67), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n'), ((245, 37, 245, 79), 'torch.cat', 'torch.cat', ({(245, 47, 245, 78): 'obj_embedding_means_expand_list'}, {}), '(obj_embedding_means_expand_list)', False, 'import torch\n'), ((246, 29, 246, 59), 'torch.cat', 'torch.cat', ({(246, 39, 246, 58): 'objs_embedding_list'}, {}), '(objs_embedding_list)', False, 'import torch\n'), ((332, 21, 332, 74), 'mmdet.core.distance2bbox', 'distance2bbox', (), '', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((241, 16, 241, 23), 'IPython.embed', 'embed', ({}, {}), '()', False, 'from IPython import embed\n'), ((411, 16, 411, 64), 'torch.cat', 'torch.cat', ({(411, 26, 411, 63): '[labels[i] for labels in labels_list]'}, {}), '([labels[i] for labels in labels_list])', False, 'import torch\n'), ((413, 16, 414, 76), 'torch.cat', 'torch.cat', ({(414, 20, 414, 75): '[bbox_targets[i] for bbox_targets in bbox_targets_list]'}, {}), '([bbox_targets[i] for bbox_targets in bbox_targets_list])', False, 'import torch\n'), ((203, 29, 203, 109), 'mmdet.core.bbox_overlaps', 'bbox_overlaps', (), '', False, 'from mmdet.core import distance2bbox, force_fp32, multi_apply, multiclass_nms, bbox_overlaps\n'), ((254, 23, 254, 78), 'torch.equal', 'torch.equal', ({(254, 35, 254, 55): 'obj_j_embedding_mean', (254, 57, 254, 77): 'obj_k_embedding_mean'}, {}), '(obj_j_embedding_mean, obj_k_embedding_mean)', False, 'import torch\n'), ((224, 22, 224, 54), 'torch.bincount', 'torch.bincount', ({(224, 37, 224, 53): 'instance_counter'}, {}), '(instance_counter)', False, 'import torch\n'), ((240, 55, 240, 92), 'torch.zeros_like', 'torch.zeros_like', ({(240, 72, 240, 91): 'obj_embedding_preds'}, {}), '(obj_embedding_preds)', False, 'import torch\n'), ((257, 49, 257, 103), 'torch.abs', 'torch.abs', ({(257, 59, 257, 102): '(obj_k_embedding_mean - obj_j_embedding_mean)'}, {}), '(obj_k_embedding_mean - obj_j_embedding_mean)', False, 'import torch\n'), ((258, 58, 258, 97), 'torch.zeros', 'torch.zeros', (), '', False, 'import torch\n')] |
Haehnchen/trivago-firefly | firefly_flask/app/models.py | ee92450fda42059f1865971849dc234a42dc9027 | from . import db
from sqlalchemy.dialects.mysql import LONGTEXT
class Search(db.Model):
__tablename__ = 'spots'
id = db.Column(db.Integer, primary_key=True)
search_string = db.Column(db.Text)
lat = db.Column(db.Float)
lon = db.Column(db.Float)
location_name = db.Column(db.Text)
json_result = db.Column(LONGTEXT)
class Photo(db.Model):
__tablename__ = 'photos'
id = db.Column(db.Integer, primary_key=True)
spotname = db.Column(db.Text)
source_id = db.Column(db.Text)
latitude = db.Column(db.Float)
longitude = db.Column(db.Float)
tags = db.Column(db.Text)
views = db.Column(db.Integer)
favourites = db.Column(db.Integer)
comments = db.Column(db.Integer)
username = db.Column(db.Text)
photo_url = db.Column(db.Text)
search_id = db.Column(db.ForeignKey(Search.id),nullable=False)
| [] |
HarishOsthe/Plotly_Dash_Practice_Codes | plotly_basic_plots/line_chart2.py | ca709509d27803a4d727b3986d4473cdd71a41a6 | import pandas as pd
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
df= pd.read_csv("Data/nst-est2017-alldata.csv")
df2=df[df["DIVISION"] == '1']
df2.set_index("NAME",inplace=True)
list_of_pop_col=[col for col in df2.columns if col.startswith('POP')]
df2=df2[list_of_pop_col]
data=[go.Scatter(x=df2.columns,
y=df2.loc[name],
mode='lines',
name=name) for name in df2.index]
pyo.plot(data) | [((7, 4, 7, 47), 'pandas.read_csv', 'pd.read_csv', ({(7, 16, 7, 46): '"""Data/nst-est2017-alldata.csv"""'}, {}), "('Data/nst-est2017-alldata.csv')", True, 'import pandas as pd\n'), ((19, 0, 19, 14), 'plotly.offline.plot', 'pyo.plot', ({(19, 9, 19, 13): 'data'}, {}), '(data)', True, 'import plotly.offline as pyo\n'), ((14, 6, 17, 26), 'plotly.graph_objs.Scatter', 'go.Scatter', (), '', True, 'import plotly.graph_objs as go\n')] |
samdoran/sphinx | tests/test_markup.py | 4c91c038b220d07bbdfe0c1680af42fe897f342c | """
test_markup
    ~~~~~~~~~~~

    Test various Sphinx-specific markup extensions.

    :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import pytest
from docutils import frontend, nodes, utils
from docutils.parsers.rst import Parser as RstParser

from sphinx import addnodes
from sphinx.builders.html.transforms import KeyboardTransform
from sphinx.builders.latex import LaTeXBuilder
from sphinx.roles import XRefRole
from sphinx.testing.util import Struct, assert_node
from sphinx.transforms import SphinxSmartQuotes
from sphinx.util import docutils, texescape
from sphinx.util.docutils import sphinx_domains
from sphinx.writers.html import HTMLTranslator, HTMLWriter
from sphinx.writers.latex import LaTeXTranslator, LaTeXWriter


@pytest.fixture
def settings(app):
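    """Docutils settings preconfigured for the HTML and LaTeX writers."""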
texescape.init() # otherwise done by the latex builder
optparser = frontend.OptionParser(
components=(RstParser, HTMLWriter, LaTeXWriter))
settings = optparser.get_default_values()
settings.smart_quotes = True
settings.env = app.builder.env
settings.env.temp_data['docname'] = 'dummy'
settings.contentsname = 'dummy'
settings.rfc_base_url = 'http://tools.ietf.org/html/'
domain_context = sphinx_domains(settings.env)
domain_context.enable()
yield settings
domain_context.disable()
@pytest.fixture
def new_document(settings):
def create():
document = utils.new_document('test data', settings)
document['file'] = 'dummy'
return document
return create
@pytest.fixture
def inliner(new_document):
document = new_document()
document.reporter.get_source_and_line = lambda line=1: ('dummy.rst', line)
return Struct(document=document, reporter=document.reporter)
@pytest.fixture
def parse(new_document):
def parse_(rst):
document = new_document()
parser = RstParser()
parser.parse(rst, document)
SphinxSmartQuotes(document, startnode=None).apply()
for msg in document.traverse(nodes.system_message):
if msg['level'] == 1:
msg.replace_self([])
return document
return parse_
# since we're not resolving the markup afterwards, these nodes may remain
class ForgivingTranslator:
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
class ForgivingHTMLTranslator(HTMLTranslator, ForgivingTranslator):
pass
class ForgivingLaTeXTranslator(LaTeXTranslator, ForgivingTranslator):
pass
@pytest.fixture
def verify_re_html(app, parse):
def verify(rst, html_expected):
document = parse(rst)
KeyboardTransform(document).apply()
html_translator = ForgivingHTMLTranslator(document, app.builder)
document.walkabout(html_translator)
html_translated = ''.join(html_translator.fragment).strip()
assert re.match(html_expected, html_translated), 'from ' + rst
return verify
@pytest.fixture
def verify_re_latex(app, parse):
def verify(rst, latex_expected):
document = parse(rst)
app.builder = LaTeXBuilder(app)
app.builder.set_environment(app.env)
app.builder.init()
theme = app.builder.themes.get('manual')
latex_translator = ForgivingLaTeXTranslator(document, app.builder, theme)
latex_translator.first_document = -1 # don't write \begin{document}
document.walkabout(latex_translator)
latex_translated = ''.join(latex_translator.body).strip()
assert re.match(latex_expected, latex_translated), 'from ' + repr(rst)
return verify
@pytest.fixture
def verify_re(verify_re_html, verify_re_latex):
def verify_re_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, html_expected)
if latex_expected:
verify_re_latex(rst, latex_expected)
return verify_re_
@pytest.fixture
def verify(verify_re_html, verify_re_latex):
def verify_(rst, html_expected, latex_expected):
if html_expected:
verify_re_html(rst, re.escape(html_expected) + '$')
if latex_expected:
verify_re_latex(rst, re.escape(latex_expected) + '$')
return verify_
@pytest.fixture
def get_verifier(verify, verify_re):
v = {
'verify': verify,
'verify_re': verify_re,
}
def get(name):
return v[name]
return get
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# pep role
'verify',
':pep:`8`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008"><strong>PEP 8</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8@\\spxentry{PEP 8}}\\sphinxhref{http://www.python.org/dev/peps/pep-0008}'
'{\\sphinxstylestrong{PEP 8}}')
),
(
# pep role with anchor
'verify',
':pep:`8#id1`',
('<p><span class="target" id="index-0"></span><a class="pep reference external" '
'href="http://www.python.org/dev/peps/pep-0008#id1">'
'<strong>PEP 8#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{Python Enhancement Proposals@\\spxentry{Python Enhancement Proposals}'
'!PEP 8\\#id1@\\spxentry{PEP 8\\#id1}}\\sphinxhref'
'{http://www.python.org/dev/peps/pep-0008\\#id1}'
'{\\sphinxstylestrong{PEP 8\\#id1}}')
),
(
# rfc role
'verify',
':rfc:`2324`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html"><strong>RFC 2324</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324@\\spxentry{RFC 2324}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html}'
'{\\sphinxstylestrong{RFC 2324}}')
),
(
# rfc role with anchor
'verify',
':rfc:`2324#id1`',
('<p><span class="target" id="index-0"></span><a class="rfc reference external" '
'href="http://tools.ietf.org/html/rfc2324.html#id1">'
'<strong>RFC 2324#id1</strong></a></p>'),
('\\sphinxAtStartPar\n'
'\\index{RFC@\\spxentry{RFC}!RFC 2324\\#id1@\\spxentry{RFC 2324\\#id1}}'
'\\sphinxhref{http://tools.ietf.org/html/rfc2324.html\\#id1}'
'{\\sphinxstylestrong{RFC 2324\\#id1}}')
),
(
# correct interpretation of code with whitespace
'verify_re',
'``code sample``',
('<p><code class="(samp )?docutils literal notranslate"><span class="pre">'
'code</span>   <span class="pre">sample</span></code></p>'),
r'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{code sample}}',
),
(
# interpolation of arrows in menuselection
'verify',
':menuselection:`a --> b`',
('<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
'\\sphinxAtStartPar\n\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
),
(
# interpolation of ampersands in menuselection
'verify',
':menuselection:`&Foo -&&- &Bar`',
('<p><span class="menuselection"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxmenuselection{\sphinxaccelerator{F}oo \sphinxhyphen{}'
r'\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# interpolation of ampersands in guilabel
'verify',
':guilabel:`&Foo -&&- &Bar`',
('<p><span class="guilabel"><span class="accelerator">F</span>oo '
'-&- <span class="accelerator">B</span>ar</span></p>'),
('\\sphinxAtStartPar\n'
r'\sphinxguilabel{\sphinxaccelerator{F}oo \sphinxhyphen{}\&\sphinxhyphen{} \sphinxaccelerator{B}ar}'),
),
(
# no ampersands in guilabel
'verify',
':guilabel:`Foo`',
'<p><span class="guilabel">Foo</span></p>',
'\\sphinxAtStartPar\n\\sphinxguilabel{Foo}',
),
(
# kbd role
'verify',
':kbd:`space`',
'<p><kbd class="kbd docutils literal notranslate">space</kbd></p>',
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{space}}',
),
(
# kbd role
'verify',
':kbd:`Control+X`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Control</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">X</kbd>'
'</kbd></p>'),
'\\sphinxAtStartPar\n\\sphinxkeyboard{\\sphinxupquote{Control+X}}',
),
(
# kbd role
'verify',
':kbd:`Alt+^`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">Alt</kbd>'
'+'
'<kbd class="kbd docutils literal notranslate">^</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Alt+\\textasciicircum{}}}'),
),
(
# kbd role
'verify',
':kbd:`M-x M-s`',
('<p><kbd class="kbd compound docutils literal notranslate">'
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">x</kbd>'
' '
'<kbd class="kbd docutils literal notranslate">M</kbd>'
'-'
'<kbd class="kbd docutils literal notranslate">s</kbd>'
'</kbd></p>'),
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{M\\sphinxhyphen{}x M\\sphinxhyphen{}s}}'),
),
(
# kbd role
'verify',
':kbd:`-`',
'<p><kbd class="kbd docutils literal notranslate">-</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{\\sphinxhyphen{}}}'),
),
(
# kbd role
'verify',
':kbd:`Caps Lock`',
'<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>',
('\\sphinxAtStartPar\n'
'\\sphinxkeyboard{\\sphinxupquote{Caps Lock}}'),
),
(
# non-interpolation of dashes in option role
'verify_re',
':option:`--with-option`',
('<p><code( class="xref std std-option docutils literal notranslate")?>'
'<span class="pre">--with-option</span></code></p>$'),
(r'\\sphinxAtStartPar\n'
r'\\sphinxcode{\\sphinxupquote{\\sphinxhyphen{}\\sphinxhyphen{}with\\sphinxhyphen{}option}}$'),
),
(
# verify smarty-pants quotes
'verify',
'"John"',
'<p>“John”</p>',
"\\sphinxAtStartPar\n“John”",
),
(
# ... but not in literal text
'verify',
'``"John"``',
('<p><code class="docutils literal notranslate"><span class="pre">'
'"John"</span></code></p>'),
'\\sphinxAtStartPar\n\\sphinxcode{\\sphinxupquote{"John"}}',
),
(
# verify classes for inline roles
'verify',
':manpage:`mp(1)`',
'<p><em class="manpage">mp(1)</em></p>',
'\\sphinxAtStartPar\n\\sphinxstyleliteralemphasis{\\sphinxupquote{mp(1)}}',
),
(
# correct escaping in normal mode
'verify',
'Γ\\\\∞$',
None,
'\\sphinxAtStartPar\nΓ\\textbackslash{}\\(\\infty\\)\\$',
),
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
(
# in URIs
'verify_re',
'`test <https://www.google.com/~me/>`_',
None,
r'\\sphinxAtStartPar\n\\sphinxhref{https://www.google.com/~me/}{test}.*',
),
(
# description list: simple
'verify',
'term\n description',
'<dl class="docutils">\n<dt>term</dt><dd>description</dd>\n</dl>',
None,
),
(
# description list: with classifiers
'verify',
'term : class1 : class2\n description',
('<dl class="docutils">\n<dt>term<span class="classifier">class1</span>'
'<span class="classifier">class2</span></dt><dd>description</dd>\n</dl>'),
None,
),
(
# glossary (description list): multiple terms
'verify',
'.. glossary::\n\n term1\n term2\n description',
('<dl class="glossary docutils">\n'
'<dt id="term-term1">term1<a class="headerlink" href="#term-term1"'
' title="Permalink to this term">¶</a></dt>'
'<dt id="term-term2">term2<a class="headerlink" href="#term-term2"'
' title="Permalink to this term">¶</a></dt>'
'<dd>description</dd>\n</dl>'),
None,
),
])
def test_inline(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
'verify',
r'4 backslashes \\\\',
r'<p>4 backslashes \\</p>',
None,
),
])
@pytest.mark.skipif(docutils.__version_info__ < (0, 16),
reason='docutils-0.16 or above is required')
def test_inline_docutils16(get_verifier, type, rst, html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
@pytest.mark.sphinx(confoverrides={'latex_engine': 'xelatex'})
@pytest.mark.parametrize('type,rst,html_expected,latex_expected', [
(
# in verbatim code fragments
'verify',
'::\n\n @Γ\\∞${}',
None,
('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
'@Γ\\PYGZbs{}∞\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
'\\end{sphinxVerbatim}'),
),
])
def test_inline_for_unicode_latex_engine(get_verifier, type, rst,
html_expected, latex_expected):
verifier = get_verifier(type)
verifier(rst, html_expected, latex_expected)
def test_samp_role(parse):
# no braces
text = ':samp:`a{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "b"],
"c")])
# nested braces
text = ':samp:`a{{b}}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, ("a",
[nodes.emphasis, "{b"],
"}c")])
# half-opened braces
text = ':samp:`a{bc`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{bc"])
# escaped braces
text = ':samp:`a\\\\{b}c`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "a{b}c"])
    # no braces (whitespace is kept as-is)
text = ':samp:`code sample`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, nodes.literal, "code sample"])
def test_download_role(parse):
# implicit
text = ':download:`sphinx.rst`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "sphinx.rst"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=False, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
# explicit
text = ':download:`reftitle <sphinx.rst>`'
doctree = parse(text)
assert_node(doctree[0], [nodes.paragraph, addnodes.download_reference,
nodes.literal, "reftitle"])
assert_node(doctree[0][0], refdoc='dummy', refdomain='', reftype='download',
refexplicit=True, reftarget='sphinx.rst', refwarn=False)
assert_node(doctree[0][0][0], classes=['xref', 'download'])
def test_XRefRole(inliner):
role = XRefRole()
# implicit
doctrees, errors = role('ref', 'rawtext', 'text', 5, inliner, {}, [])
assert len(doctrees) == 1
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
assert errors == []
# explicit
doctrees, errors = role('ref', 'rawtext', 'title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='target',
refexplicit=True, refwarn=False)
# bang
doctrees, errors = role('ref', 'rawtext', '!title <target>', 5, inliner, {}, [])
assert_node(doctrees[0], [nodes.literal, 'title <target>'])
# refdomain
doctrees, errors = role('test:doc', 'rawtext', 'text', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])
assert_node(doctrees[0], refdoc='dummy', refdomain='test', reftype='doc', reftarget='text',
refexplicit=False, refwarn=False)
# fix_parens
role = XRefRole(fix_parens=True)
doctrees, errors = role('ref', 'rawtext', 'text()', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
# lowercase
role = XRefRole(lowercase=True)
doctrees, errors = role('ref', 'rawtext', 'TEXT', 5, inliner, {}, [])
assert_node(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])
assert_node(doctrees[0], refdoc='dummy', refdomain='', reftype='ref', reftarget='text',
refexplicit=False, refwarn=False)
@pytest.mark.sphinx('dummy', testroot='prolog')
def test_rst_prolog(app, status, warning):
app.builder.build_all()
rst = app.env.get_doctree('restructuredtext')
md = app.env.get_doctree('markdown')
# rst_prolog
assert_node(rst[0], nodes.paragraph)
assert_node(rst[0][0], nodes.emphasis)
assert_node(rst[0][0][0], nodes.Text)
assert rst[0][0][0] == 'Hello world'
# rst_epilog
assert_node(rst[-1], nodes.section)
assert_node(rst[-1][-1], nodes.paragraph)
assert_node(rst[-1][-1][0], nodes.emphasis)
assert_node(rst[-1][-1][0][0], nodes.Text)
assert rst[-1][-1][0][0] == 'Good-bye world'
    # rst_prolog & rst_epilog are not applied to sources excluded from the reST parser
assert not md.rawsource.startswith('*Hello world*.')
assert not md.rawsource.endswith('*Good-bye world*.\n')
@pytest.mark.sphinx('dummy', testroot='keep_warnings')
def test_keep_warnings_is_True(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 2
assert_node(doctree[0][1], nodes.system_message)
@pytest.mark.sphinx('dummy', testroot='keep_warnings',
confoverrides={'keep_warnings': False})
def test_keep_warnings_is_False(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 1
@pytest.mark.sphinx('dummy', testroot='refonly_bullet_list')
def test_compact_refonly_bullet_list(app, status, warning):
app.builder.build_all()
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert len(doctree[0]) == 5
assert doctree[0][1].astext() == 'List A:'
assert_node(doctree[0][2], nodes.bullet_list)
assert_node(doctree[0][2][0][0], addnodes.compact_paragraph)
assert doctree[0][2][0][0].astext() == 'genindex'
assert doctree[0][3].astext() == 'List B:'
assert_node(doctree[0][4], nodes.bullet_list)
assert_node(doctree[0][4][0][0], nodes.paragraph)
assert doctree[0][4][0][0].astext() == 'Hello'
@pytest.mark.sphinx('dummy', testroot='default_role')
def test_default_role1(app, status, warning):
app.builder.build_all()
# default-role: pep
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# no default-role
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.title_reference)
assert_node(doctree[0][1][1], nodes.Text)
@pytest.mark.sphinx('dummy', testroot='default_role',
confoverrides={'default_role': 'guilabel'})
def test_default_role2(app, status, warning):
app.builder.build_all()
    # default-role directive is stronger than configuration
doctree = app.env.get_doctree('index')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], addnodes.index)
assert_node(doctree[0][1][1], nodes.target)
assert_node(doctree[0][1][2], nodes.reference, classes=["pep"])
# default_role changes the default behavior
doctree = app.env.get_doctree('foo')
assert_node(doctree[0], nodes.section)
assert_node(doctree[0][1], nodes.paragraph)
assert_node(doctree[0][1][0], nodes.inline, classes=["guilabel"])
assert_node(doctree[0][1][1], nodes.Text)
| [((154, 1, 386, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(154, 25, 154, 64): '"""type,rst,html_expected,latex_expected"""', (154, 66, 386, 1): '[(\'verify\', \':pep:`8`\',\n \'<p><span class="target" id="index-0"></span><a class="pep reference external" href="http://www.python.org/dev/peps/pep-0008"><strong>PEP 8</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{Python Enhancement Proposals@\\\\spxentry{Python Enhancement Proposals}!PEP 8@\\\\spxentry{PEP 8}}\\\\sphinxhref{http://www.python.org/dev/peps/pep-0008}{\\\\sphinxstylestrong{PEP 8}}"""\n ), (\'verify\', \':pep:`8#id1`\',\n \'<p><span class="target" id="index-0"></span><a class="pep reference external" href="http://www.python.org/dev/peps/pep-0008#id1"><strong>PEP 8#id1</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{Python Enhancement Proposals@\\\\spxentry{Python Enhancement Proposals}!PEP 8\\\\#id1@\\\\spxentry{PEP 8\\\\#id1}}\\\\sphinxhref{http://www.python.org/dev/peps/pep-0008\\\\#id1}{\\\\sphinxstylestrong{PEP 8\\\\#id1}}"""\n ), (\'verify\', \':rfc:`2324`\',\n \'<p><span class="target" id="index-0"></span><a class="rfc reference external" href="http://tools.ietf.org/html/rfc2324.html"><strong>RFC 2324</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{RFC@\\\\spxentry{RFC}!RFC 2324@\\\\spxentry{RFC 2324}}\\\\sphinxhref{http://tools.ietf.org/html/rfc2324.html}{\\\\sphinxstylestrong{RFC 2324}}"""\n ), (\'verify\', \':rfc:`2324#id1`\',\n \'<p><span class="target" id="index-0"></span><a class="rfc reference external" href="http://tools.ietf.org/html/rfc2324.html#id1"><strong>RFC 2324#id1</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{RFC@\\\\spxentry{RFC}!RFC 2324\\\\#id1@\\\\spxentry{RFC 2324\\\\#id1}}\\\\sphinxhref{http://tools.ietf.org/html/rfc2324.html\\\\#id1}{\\\\sphinxstylestrong{RFC 2324\\\\#id1}}"""\n ), (\'verify_re\', \'``code sample``\',\n \'<p><code class="(samp )?docutils literal notranslate"><span class="pre">code</span>   <span class="pre">sample</span></code></p>\'\n ,\n \'\\\\\\\\sphinxAtStartPar\\\\n\\\\\\\\sphinxcode{\\\\\\\\sphinxupquote{code sample}}\'\n ), (\'verify\', \':menuselection:`a --> b`\',\n \'<p><span class="menuselection">a ‣ b</span></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxmenuselection{a \\\\(\\\\rightarrow\\\\) b}"""),\n (\'verify\', \':menuselection:`&Foo -&&- &Bar`\',\n \'<p><span class="menuselection"><span class="accelerator">F</span>oo -&- <span class="accelerator">B</span>ar</span></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxmenuselection{\\\\sphinxaccelerator{F}oo \\\\sphinxhyphen{}\\\\&\\\\sphinxhyphen{} \\\\sphinxaccelerator{B}ar}"""\n ), (\'verify\', \':guilabel:`&Foo -&&- &Bar`\',\n \'<p><span class="guilabel"><span class="accelerator">F</span>oo -&- <span class="accelerator">B</span>ar</span></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxguilabel{\\\\sphinxaccelerator{F}oo \\\\sphinxhyphen{}\\\\&\\\\sphinxhyphen{} \\\\sphinxaccelerator{B}ar}"""\n ), (\'verify\', \':guilabel:`Foo`\',\n \'<p><span class="guilabel">Foo</span></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxguilabel{Foo}"""), (\'verify\',\n \':kbd:`space`\',\n \'<p><kbd class="kbd docutils literal notranslate">space</kbd></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{space}}"""), (\n \'verify\', \':kbd:`Control+X`\',\n \'<p><kbd class="kbd compound docutils literal notranslate"><kbd class="kbd docutils literal notranslate">Control</kbd>+<kbd class="kbd docutils literal notranslate">X</kbd></kbd></p>\'\n , 
"""\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{Control+X}}"""\n ), (\'verify\', \':kbd:`Alt+^`\',\n \'<p><kbd class="kbd compound docutils literal notranslate"><kbd class="kbd docutils literal notranslate">Alt</kbd>+<kbd class="kbd docutils literal notranslate">^</kbd></kbd></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{Alt+\\\\textasciicircum{}}}"""\n ), (\'verify\', \':kbd:`M-x M-s`\',\n \'<p><kbd class="kbd compound docutils literal notranslate"><kbd class="kbd docutils literal notranslate">M</kbd>-<kbd class="kbd docutils literal notranslate">x</kbd> <kbd class="kbd docutils literal notranslate">M</kbd>-<kbd class="kbd docutils literal notranslate">s</kbd></kbd></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{M\\\\sphinxhyphen{}x M\\\\sphinxhyphen{}s}}"""\n ), (\'verify\', \':kbd:`-`\',\n \'<p><kbd class="kbd docutils literal notranslate">-</kbd></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{\\\\sphinxhyphen{}}}"""\n ), (\'verify\', \':kbd:`Caps Lock`\',\n \'<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{Caps Lock}}"""),\n (\'verify_re\', \':option:`--with-option`\',\n \'<p><code( class="xref std std-option docutils literal notranslate")?><span class="pre">--with-option</span></code></p>$\'\n ,\n \'\\\\\\\\sphinxAtStartPar\\\\n\\\\\\\\sphinxcode{\\\\\\\\sphinxupquote{\\\\\\\\sphinxhyphen{}\\\\\\\\sphinxhyphen{}with\\\\\\\\sphinxhyphen{}option}}$\'\n ), (\'verify\', \'"John"\', \'<p>“John”</p>\',\n """\\\\sphinxAtStartPar\n“John”"""), (\'verify\', \'``"John"``\',\n \'<p><code class="docutils literal notranslate"><span class="pre">"John"</span></code></p>\'\n , """\\\\sphinxAtStartPar\n\\\\sphinxcode{\\\\sphinxupquote{"John"}}"""), (\n \'verify\', \':manpage:`mp(1)`\', \'<p><em class="manpage">mp(1)</em></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxstyleliteralemphasis{\\\\sphinxupquote{mp(1)}}"""\n ), (\'verify\', \'Γ\\\\\\\\∞$\', None,\n """\\\\sphinxAtStartPar\nΓ\\\\textbackslash{}\\\\(\\\\infty\\\\)\\\\$"""), (\'verify\',\n \'::\\n\\n @Γ\\\\∞${}\', None,\n """\\\\begin{sphinxVerbatim}[commandchars=\\\\\\\\\\\\{\\\\}]\n@Γ\\\\PYGZbs{}\\\\(\\\\infty\\\\)\\\\PYGZdl{}\\\\PYGZob{}\\\\PYGZcb{}\n\\\\end{sphinxVerbatim}"""\n ), (\'verify_re\', \'`test <https://www.google.com/~me/>`_\', None,\n \'\\\\\\\\sphinxAtStartPar\\\\n\\\\\\\\sphinxhref{https://www.google.com/~me/}{test}.*\'\n ), (\'verify\', """term\n description""",\n \'<dl class="docutils">\\n<dt>term</dt><dd>description</dd>\\n</dl>\', None\n ), (\'verify\', """term : class1 : class2\n description""",\n """<dl class="docutils">\n<dt>term<span class="classifier">class1</span><span class="classifier">class2</span></dt><dd>description</dd>\n</dl>"""\n , None), (\'verify\',\n """.. 
glossary::\n\n term1\n term2\n description""",\n """<dl class="glossary docutils">\n<dt id="term-term1">term1<a class="headerlink" href="#term-term1" title="Permalink to this term">¶</a></dt><dt id="term-term2">term2<a class="headerlink" href="#term-term2" title="Permalink to this term">¶</a></dt><dd>description</dd>\n</dl>"""\n , None)]'}, {}), '(\'type,rst,html_expected,latex_expected\', [(\'verify\',\n \':pep:`8`\',\n \'<p><span class="target" id="index-0"></span><a class="pep reference external" href="http://www.python.org/dev/peps/pep-0008"><strong>PEP 8</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{Python Enhancement Proposals@\\\\spxentry{Python Enhancement Proposals}!PEP 8@\\\\spxentry{PEP 8}}\\\\sphinxhref{http://www.python.org/dev/peps/pep-0008}{\\\\sphinxstylestrong{PEP 8}}"""\n ), (\'verify\', \':pep:`8#id1`\',\n \'<p><span class="target" id="index-0"></span><a class="pep reference external" href="http://www.python.org/dev/peps/pep-0008#id1"><strong>PEP 8#id1</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{Python Enhancement Proposals@\\\\spxentry{Python Enhancement Proposals}!PEP 8\\\\#id1@\\\\spxentry{PEP 8\\\\#id1}}\\\\sphinxhref{http://www.python.org/dev/peps/pep-0008\\\\#id1}{\\\\sphinxstylestrong{PEP 8\\\\#id1}}"""\n ), (\'verify\', \':rfc:`2324`\',\n \'<p><span class="target" id="index-0"></span><a class="rfc reference external" href="http://tools.ietf.org/html/rfc2324.html"><strong>RFC 2324</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{RFC@\\\\spxentry{RFC}!RFC 2324@\\\\spxentry{RFC 2324}}\\\\sphinxhref{http://tools.ietf.org/html/rfc2324.html}{\\\\sphinxstylestrong{RFC 2324}}"""\n ), (\'verify\', \':rfc:`2324#id1`\',\n \'<p><span class="target" id="index-0"></span><a class="rfc reference external" href="http://tools.ietf.org/html/rfc2324.html#id1"><strong>RFC 2324#id1</strong></a></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\index{RFC@\\\\spxentry{RFC}!RFC 2324\\\\#id1@\\\\spxentry{RFC 2324\\\\#id1}}\\\\sphinxhref{http://tools.ietf.org/html/rfc2324.html\\\\#id1}{\\\\sphinxstylestrong{RFC 2324\\\\#id1}}"""\n ), (\'verify_re\', \'``code sample``\',\n \'<p><code class="(samp )?docutils literal notranslate"><span class="pre">code</span>   <span class="pre">sample</span></code></p>\'\n ,\n \'\\\\\\\\sphinxAtStartPar\\\\n\\\\\\\\sphinxcode{\\\\\\\\sphinxupquote{code sample}}\'\n ), (\'verify\', \':menuselection:`a --> b`\',\n \'<p><span class="menuselection">a ‣ b</span></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxmenuselection{a \\\\(\\\\rightarrow\\\\) b}"""),\n (\'verify\', \':menuselection:`&Foo -&&- &Bar`\',\n \'<p><span class="menuselection"><span class="accelerator">F</span>oo -&- <span class="accelerator">B</span>ar</span></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxmenuselection{\\\\sphinxaccelerator{F}oo \\\\sphinxhyphen{}\\\\&\\\\sphinxhyphen{} \\\\sphinxaccelerator{B}ar}"""\n ), (\'verify\', \':guilabel:`&Foo -&&- &Bar`\',\n \'<p><span class="guilabel"><span class="accelerator">F</span>oo -&- <span class="accelerator">B</span>ar</span></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxguilabel{\\\\sphinxaccelerator{F}oo \\\\sphinxhyphen{}\\\\&\\\\sphinxhyphen{} \\\\sphinxaccelerator{B}ar}"""\n ), (\'verify\', \':guilabel:`Foo`\',\n \'<p><span class="guilabel">Foo</span></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxguilabel{Foo}"""), (\'verify\',\n \':kbd:`space`\',\n \'<p><kbd class="kbd docutils literal notranslate">space</kbd></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{space}}"""), (\n \'verify\', 
\':kbd:`Control+X`\',\n \'<p><kbd class="kbd compound docutils literal notranslate"><kbd class="kbd docutils literal notranslate">Control</kbd>+<kbd class="kbd docutils literal notranslate">X</kbd></kbd></p>\'\n , """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{Control+X}}"""\n ), (\'verify\', \':kbd:`Alt+^`\',\n \'<p><kbd class="kbd compound docutils literal notranslate"><kbd class="kbd docutils literal notranslate">Alt</kbd>+<kbd class="kbd docutils literal notranslate">^</kbd></kbd></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{Alt+\\\\textasciicircum{}}}"""\n ), (\'verify\', \':kbd:`M-x M-s`\',\n \'<p><kbd class="kbd compound docutils literal notranslate"><kbd class="kbd docutils literal notranslate">M</kbd>-<kbd class="kbd docutils literal notranslate">x</kbd> <kbd class="kbd docutils literal notranslate">M</kbd>-<kbd class="kbd docutils literal notranslate">s</kbd></kbd></p>\'\n ,\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{M\\\\sphinxhyphen{}x M\\\\sphinxhyphen{}s}}"""\n ), (\'verify\', \':kbd:`-`\',\n \'<p><kbd class="kbd docutils literal notranslate">-</kbd></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{\\\\sphinxhyphen{}}}"""\n ), (\'verify\', \':kbd:`Caps Lock`\',\n \'<p><kbd class="kbd docutils literal notranslate">Caps Lock</kbd></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxkeyboard{\\\\sphinxupquote{Caps Lock}}"""),\n (\'verify_re\', \':option:`--with-option`\',\n \'<p><code( class="xref std std-option docutils literal notranslate")?><span class="pre">--with-option</span></code></p>$\'\n ,\n \'\\\\\\\\sphinxAtStartPar\\\\n\\\\\\\\sphinxcode{\\\\\\\\sphinxupquote{\\\\\\\\sphinxhyphen{}\\\\\\\\sphinxhyphen{}with\\\\\\\\sphinxhyphen{}option}}$\'\n ), (\'verify\', \'"John"\', \'<p>“John”</p>\',\n """\\\\sphinxAtStartPar\n“John”"""), (\'verify\', \'``"John"``\',\n \'<p><code class="docutils literal notranslate"><span class="pre">"John"</span></code></p>\'\n , """\\\\sphinxAtStartPar\n\\\\sphinxcode{\\\\sphinxupquote{"John"}}"""), (\n \'verify\', \':manpage:`mp(1)`\', \'<p><em class="manpage">mp(1)</em></p>\',\n """\\\\sphinxAtStartPar\n\\\\sphinxstyleliteralemphasis{\\\\sphinxupquote{mp(1)}}"""\n ), (\'verify\', \'Γ\\\\\\\\∞$\', None,\n """\\\\sphinxAtStartPar\nΓ\\\\textbackslash{}\\\\(\\\\infty\\\\)\\\\$"""), (\'verify\',\n \'::\\n\\n @Γ\\\\∞${}\', None,\n """\\\\begin{sphinxVerbatim}[commandchars=\\\\\\\\\\\\{\\\\}]\n@Γ\\\\PYGZbs{}\\\\(\\\\infty\\\\)\\\\PYGZdl{}\\\\PYGZob{}\\\\PYGZcb{}\n\\\\end{sphinxVerbatim}"""\n ), (\'verify_re\', \'`test <https://www.google.com/~me/>`_\', None,\n \'\\\\\\\\sphinxAtStartPar\\\\n\\\\\\\\sphinxhref{https://www.google.com/~me/}{test}.*\'\n ), (\'verify\', """term\n description""",\n \'<dl class="docutils">\\n<dt>term</dt><dd>description</dd>\\n</dl>\', None\n ), (\'verify\', """term : class1 : class2\n description""",\n """<dl class="docutils">\n<dt>term<span class="classifier">class1</span><span class="classifier">class2</span></dt><dd>description</dd>\n</dl>"""\n , None), (\'verify\',\n """.. 
glossary::\n\n term1\n term2\n description""",\n """<dl class="glossary docutils">\n<dt id="term-term1">term1<a class="headerlink" href="#term-term1" title="Permalink to this term">¶</a></dt><dt id="term-term2">term2<a class="headerlink" href="#term-term2" title="Permalink to this term">¶</a></dt><dd>description</dd>\n</dl>"""\n , None)])', False, 'import pytest\n'), ((392, 1, 399, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(392, 25, 392, 64): '"""type,rst,html_expected,latex_expected"""', (392, 66, 399, 1): "[('verify', '4 backslashes \\\\\\\\\\\\\\\\', '<p>4 backslashes \\\\\\\\</p>', None)]"}, {}), "('type,rst,html_expected,latex_expected', [('verify',\n '4 backslashes \\\\\\\\\\\\\\\\', '<p>4 backslashes \\\\\\\\</p>', None)])", False, 'import pytest\n'), ((400, 1, 401, 64), 'pytest.mark.skipif', 'pytest.mark.skipif', (), '', False, 'import pytest\n'), ((407, 1, 407, 62), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (), '', False, 'import pytest\n'), ((408, 1, 418, 2), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(408, 25, 408, 64): '"""type,rst,html_expected,latex_expected"""', (408, 66, 418, 1): '[(\'verify\', \'::\\n\\n @Γ\\\\∞${}\', None,\n """\\\\begin{sphinxVerbatim}[commandchars=\\\\\\\\\\\\{\\\\}]\n@Γ\\\\PYGZbs{}∞\\\\PYGZdl{}\\\\PYGZob{}\\\\PYGZcb{}\n\\\\end{sphinxVerbatim}"""\n )]'}, {}), '(\'type,rst,html_expected,latex_expected\', [(\'verify\',\n \'::\\n\\n @Γ\\\\∞${}\', None,\n """\\\\begin{sphinxVerbatim}[commandchars=\\\\\\\\\\\\{\\\\}]\n@Γ\\\\PYGZbs{}∞\\\\PYGZdl{}\\\\PYGZob{}\\\\PYGZcb{}\n\\\\end{sphinxVerbatim}"""\n )])', False, 'import pytest\n'), ((517, 1, 517, 47), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (), '', False, 'import pytest\n'), ((541, 1, 541, 54), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (), '', False, 'import pytest\n'), ((550, 1, 551, 59), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (), '', False, 'import pytest\n'), ((559, 1, 559, 60), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (), '', False, 'import pytest\n'), ((577, 1, 577, 53), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (), '', False, 'import pytest\n'), ((597, 1, 598, 63), 'pytest.mark.sphinx', 'pytest.mark.sphinx', (), '', False, 'import pytest\n'), ((31, 4, 31, 20), 'sphinx.util.texescape.init', 'texescape.init', ({}, {}), '()', False, 'from sphinx.util import docutils, texescape\n'), ((32, 16, 33, 56), 'docutils.frontend.OptionParser', 'frontend.OptionParser', (), '', False, 'from docutils import frontend, nodes, utils\n'), ((40, 21, 40, 49), 'sphinx.util.docutils.sphinx_domains', 'sphinx_domains', ({(40, 36, 40, 48): 'settings.env'}, {}), '(settings.env)', False, 'from sphinx.util.docutils import sphinx_domains\n'), ((60, 11, 60, 64), 'sphinx.testing.util.Struct', 'Struct', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((429, 4, 431, 68), 'sphinx.testing.util.assert_node', 'assert_node', ({(429, 16, 429, 26): 'doctree[0]', (429, 28, 431, 67): "[nodes.paragraph, nodes.literal, ('a', [nodes.emphasis, 'b'], 'c')]"}, {}), "(doctree[0], [nodes.paragraph, nodes.literal, ('a', [nodes.\n emphasis, 'b'], 'c')])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((435, 4, 437, 69), 'sphinx.testing.util.assert_node', 'assert_node', ({(435, 16, 435, 26): 'doctree[0]', (435, 28, 437, 68): "[nodes.paragraph, nodes.literal, ('a', [nodes.emphasis, '{b'], '}c')]"}, {}), "(doctree[0], [nodes.paragraph, nodes.literal, ('a', [nodes.\n emphasis, '{b'], '}c')])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((442, 4, 
442, 69), 'sphinx.testing.util.assert_node', 'assert_node', ({(442, 16, 442, 26): 'doctree[0]', (442, 28, 442, 68): "[nodes.paragraph, nodes.literal, 'a{bc']"}, {}), "(doctree[0], [nodes.paragraph, nodes.literal, 'a{bc'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((447, 4, 447, 70), 'sphinx.testing.util.assert_node', 'assert_node', ({(447, 16, 447, 26): 'doctree[0]', (447, 28, 447, 69): "[nodes.paragraph, nodes.literal, 'a{b}c']"}, {}), "(doctree[0], [nodes.paragraph, nodes.literal, 'a{b}c'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((452, 4, 452, 78), 'sphinx.testing.util.assert_node', 'assert_node', ({(452, 16, 452, 26): 'doctree[0]', (452, 28, 452, 77): "[nodes.paragraph, nodes.literal, 'code sample']"}, {}), "(doctree[0], [nodes.paragraph, nodes.literal, 'code sample'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((459, 4, 460, 58), 'sphinx.testing.util.assert_node', 'assert_node', ({(459, 16, 459, 26): 'doctree[0]', (459, 28, 460, 57): "[nodes.paragraph, addnodes.download_reference, nodes.literal, 'sphinx.rst']"}, {}), "(doctree[0], [nodes.paragraph, addnodes.download_reference,\n nodes.literal, 'sphinx.rst'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((461, 4, 462, 73), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((463, 4, 463, 63), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((468, 4, 469, 56), 'sphinx.testing.util.assert_node', 'assert_node', ({(468, 16, 468, 26): 'doctree[0]', (468, 28, 469, 55): "[nodes.paragraph, addnodes.download_reference, nodes.literal, 'reftitle']"}, {}), "(doctree[0], [nodes.paragraph, addnodes.download_reference,\n nodes.literal, 'reftitle'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((470, 4, 471, 72), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((472, 4, 472, 63), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((476, 11, 476, 21), 'sphinx.roles.XRefRole', 'XRefRole', ({}, {}), '()', False, 'from sphinx.roles import XRefRole\n'), ((481, 4, 481, 76), 'sphinx.testing.util.assert_node', 'assert_node', ({(481, 16, 481, 27): 'doctrees[0]', (481, 29, 481, 75): "[addnodes.pending_xref, nodes.literal, 'text']"}, {}), "(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((482, 4, 483, 49), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((488, 4, 488, 77), 'sphinx.testing.util.assert_node', 'assert_node', ({(488, 16, 488, 27): 'doctrees[0]', (488, 29, 488, 76): "[addnodes.pending_xref, nodes.literal, 'title']"}, {}), "(doctrees[0], [addnodes.pending_xref, nodes.literal, 'title'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((489, 4, 490, 48), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((494, 4, 494, 63), 'sphinx.testing.util.assert_node', 'assert_node', ({(494, 16, 494, 27): 'doctrees[0]', (494, 29, 494, 62): "[nodes.literal, 'title <target>']"}, {}), "(doctrees[0], [nodes.literal, 'title <target>'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((498, 4, 498, 76), 
'sphinx.testing.util.assert_node', 'assert_node', ({(498, 16, 498, 27): 'doctrees[0]', (498, 29, 498, 75): "[addnodes.pending_xref, nodes.literal, 'text']"}, {}), "(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((499, 4, 500, 49), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((503, 11, 503, 36), 'sphinx.roles.XRefRole', 'XRefRole', (), '', False, 'from sphinx.roles import XRefRole\n'), ((505, 4, 505, 78), 'sphinx.testing.util.assert_node', 'assert_node', ({(505, 16, 505, 27): 'doctrees[0]', (505, 29, 505, 77): "[addnodes.pending_xref, nodes.literal, 'text()']"}, {}), "(doctrees[0], [addnodes.pending_xref, nodes.literal, 'text()'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((506, 4, 507, 49), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((510, 11, 510, 35), 'sphinx.roles.XRefRole', 'XRefRole', (), '', False, 'from sphinx.roles import XRefRole\n'), ((512, 4, 512, 76), 'sphinx.testing.util.assert_node', 'assert_node', ({(512, 16, 512, 27): 'doctrees[0]', (512, 29, 512, 75): "[addnodes.pending_xref, nodes.literal, 'TEXT']"}, {}), "(doctrees[0], [addnodes.pending_xref, nodes.literal, 'TEXT'])", False, 'from sphinx.testing.util import Struct, assert_node\n'), ((513, 4, 514, 49), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((524, 4, 524, 40), 'sphinx.testing.util.assert_node', 'assert_node', ({(524, 16, 524, 22): 'rst[0]', (524, 24, 524, 39): 'nodes.paragraph'}, {}), '(rst[0], nodes.paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((525, 4, 525, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(525, 16, 525, 25): 'rst[0][0]', (525, 27, 525, 41): 'nodes.emphasis'}, {}), '(rst[0][0], nodes.emphasis)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((526, 4, 526, 41), 'sphinx.testing.util.assert_node', 'assert_node', ({(526, 16, 526, 28): 'rst[0][0][0]', (526, 30, 526, 40): 'nodes.Text'}, {}), '(rst[0][0][0], nodes.Text)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((530, 4, 530, 39), 'sphinx.testing.util.assert_node', 'assert_node', ({(530, 16, 530, 23): 'rst[-1]', (530, 25, 530, 38): 'nodes.section'}, {}), '(rst[-1], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((531, 4, 531, 45), 'sphinx.testing.util.assert_node', 'assert_node', ({(531, 16, 531, 27): 'rst[-1][-1]', (531, 29, 531, 44): 'nodes.paragraph'}, {}), '(rst[-1][-1], nodes.paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((532, 4, 532, 47), 'sphinx.testing.util.assert_node', 'assert_node', ({(532, 16, 532, 30): 'rst[-1][-1][0]', (532, 32, 532, 46): 'nodes.emphasis'}, {}), '(rst[-1][-1][0], nodes.emphasis)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((533, 4, 533, 46), 'sphinx.testing.util.assert_node', 'assert_node', ({(533, 16, 533, 33): 'rst[-1][-1][0][0]', (533, 35, 533, 45): 'nodes.Text'}, {}), '(rst[-1][-1][0][0], nodes.Text)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((545, 4, 545, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(545, 16, 545, 26): 'doctree[0]', (545, 28, 545, 41): 'nodes.section'}, {}), '(doctree[0], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), 
((547, 4, 547, 52), 'sphinx.testing.util.assert_node', 'assert_node', ({(547, 16, 547, 29): 'doctree[0][1]', (547, 31, 547, 51): 'nodes.system_message'}, {}), '(doctree[0][1], nodes.system_message)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((555, 4, 555, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(555, 16, 555, 26): 'doctree[0]', (555, 28, 555, 41): 'nodes.section'}, {}), '(doctree[0], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((563, 4, 563, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(563, 16, 563, 26): 'doctree[0]', (563, 28, 563, 41): 'nodes.section'}, {}), '(doctree[0], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((567, 4, 567, 49), 'sphinx.testing.util.assert_node', 'assert_node', ({(567, 16, 567, 29): 'doctree[0][2]', (567, 31, 567, 48): 'nodes.bullet_list'}, {}), '(doctree[0][2], nodes.bullet_list)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((568, 4, 568, 64), 'sphinx.testing.util.assert_node', 'assert_node', ({(568, 16, 568, 35): 'doctree[0][2][0][0]', (568, 37, 568, 63): 'addnodes.compact_paragraph'}, {}), '(doctree[0][2][0][0], addnodes.compact_paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((572, 4, 572, 49), 'sphinx.testing.util.assert_node', 'assert_node', ({(572, 16, 572, 29): 'doctree[0][4]', (572, 31, 572, 48): 'nodes.bullet_list'}, {}), '(doctree[0][4], nodes.bullet_list)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((573, 4, 573, 53), 'sphinx.testing.util.assert_node', 'assert_node', ({(573, 16, 573, 35): 'doctree[0][4][0][0]', (573, 37, 573, 52): 'nodes.paragraph'}, {}), '(doctree[0][4][0][0], nodes.paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((583, 4, 583, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(583, 16, 583, 26): 'doctree[0]', (583, 28, 583, 41): 'nodes.section'}, {}), '(doctree[0], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((584, 4, 584, 47), 'sphinx.testing.util.assert_node', 'assert_node', ({(584, 16, 584, 29): 'doctree[0][1]', (584, 31, 584, 46): 'nodes.paragraph'}, {}), '(doctree[0][1], nodes.paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((585, 4, 585, 49), 'sphinx.testing.util.assert_node', 'assert_node', ({(585, 16, 585, 32): 'doctree[0][1][0]', (585, 34, 585, 48): 'addnodes.index'}, {}), '(doctree[0][1][0], addnodes.index)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((586, 4, 586, 47), 'sphinx.testing.util.assert_node', 'assert_node', ({(586, 16, 586, 32): 'doctree[0][1][1]', (586, 34, 586, 46): 'nodes.target'}, {}), '(doctree[0][1][1], nodes.target)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((587, 4, 587, 67), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((591, 4, 591, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(591, 16, 591, 26): 'doctree[0]', (591, 28, 591, 41): 'nodes.section'}, {}), '(doctree[0], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((592, 4, 592, 47), 'sphinx.testing.util.assert_node', 'assert_node', ({(592, 16, 592, 29): 'doctree[0][1]', (592, 31, 592, 46): 'nodes.paragraph'}, {}), '(doctree[0][1], nodes.paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((593, 4, 593, 56), 'sphinx.testing.util.assert_node', 
'assert_node', ({(593, 16, 593, 32): 'doctree[0][1][0]', (593, 34, 593, 55): 'nodes.title_reference'}, {}), '(doctree[0][1][0], nodes.title_reference)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((594, 4, 594, 45), 'sphinx.testing.util.assert_node', 'assert_node', ({(594, 16, 594, 32): 'doctree[0][1][1]', (594, 34, 594, 44): 'nodes.Text'}, {}), '(doctree[0][1][1], nodes.Text)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((604, 4, 604, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(604, 16, 604, 26): 'doctree[0]', (604, 28, 604, 41): 'nodes.section'}, {}), '(doctree[0], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((605, 4, 605, 47), 'sphinx.testing.util.assert_node', 'assert_node', ({(605, 16, 605, 29): 'doctree[0][1]', (605, 31, 605, 46): 'nodes.paragraph'}, {}), '(doctree[0][1], nodes.paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((606, 4, 606, 49), 'sphinx.testing.util.assert_node', 'assert_node', ({(606, 16, 606, 32): 'doctree[0][1][0]', (606, 34, 606, 48): 'addnodes.index'}, {}), '(doctree[0][1][0], addnodes.index)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((607, 4, 607, 47), 'sphinx.testing.util.assert_node', 'assert_node', ({(607, 16, 607, 32): 'doctree[0][1][1]', (607, 34, 607, 46): 'nodes.target'}, {}), '(doctree[0][1][1], nodes.target)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((608, 4, 608, 67), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((612, 4, 612, 42), 'sphinx.testing.util.assert_node', 'assert_node', ({(612, 16, 612, 26): 'doctree[0]', (612, 28, 612, 41): 'nodes.section'}, {}), '(doctree[0], nodes.section)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((613, 4, 613, 47), 'sphinx.testing.util.assert_node', 'assert_node', ({(613, 16, 613, 29): 'doctree[0][1]', (613, 31, 613, 46): 'nodes.paragraph'}, {}), '(doctree[0][1], nodes.paragraph)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((614, 4, 614, 69), 'sphinx.testing.util.assert_node', 'assert_node', (), '', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((615, 4, 615, 45), 'sphinx.testing.util.assert_node', 'assert_node', ({(615, 16, 615, 32): 'doctree[0][1][1]', (615, 34, 615, 44): 'nodes.Text'}, {}), '(doctree[0][1][1], nodes.Text)', False, 'from sphinx.testing.util import Struct, assert_node\n'), ((49, 19, 49, 60), 'docutils.utils.new_document', 'utils.new_document', ({(49, 38, 49, 49): '"""test data"""', (49, 51, 49, 59): 'settings'}, {}), "('test data', settings)", False, 'from docutils import frontend, nodes, utils\n'), ((67, 17, 67, 28), 'docutils.parsers.rst.Parser', 'RstParser', ({}, {}), '()', True, 'from docutils.parsers.rst import Parser as RstParser\n'), ((102, 15, 102, 55), 're.match', 're.match', ({(102, 24, 102, 37): 'html_expected', (102, 39, 102, 54): 'html_translated'}, {}), '(html_expected, html_translated)', False, 'import re\n'), ((110, 22, 110, 39), 'sphinx.builders.latex.LaTeXBuilder', 'LaTeXBuilder', ({(110, 35, 110, 38): 'app'}, {}), '(app)', False, 'from sphinx.builders.latex import LaTeXBuilder\n'), ((118, 15, 118, 57), 're.match', 're.match', ({(118, 24, 118, 38): 'latex_expected', (118, 40, 118, 56): 'latex_translated'}, {}), '(latex_expected, latex_translated)', False, 'import re\n'), ((69, 8, 69, 51), 'sphinx.transforms.SphinxSmartQuotes', 'SphinxSmartQuotes', (), '', False, 'from 
sphinx.transforms import SphinxSmartQuotes\n'), ((98, 8, 98, 35), 'sphinx.builders.html.transforms.KeyboardTransform', 'KeyboardTransform', ({(98, 26, 98, 34): 'document'}, {}), '(document)', False, 'from sphinx.builders.html.transforms import KeyboardTransform\n'), ((136, 32, 136, 56), 're.escape', 're.escape', ({(136, 42, 136, 55): 'html_expected'}, {}), '(html_expected)', False, 'import re\n'), ((138, 33, 138, 58), 're.escape', 're.escape', ({(138, 43, 138, 57): 'latex_expected'}, {}), '(latex_expected)', False, 'import re\n')] |
CrankySupertoon01/Toontown-2 | dev/tools/leveleditor/direct/showbase/ContainerLeakDetector.py | 60893d104528a8e7eb4aced5d0015f22e203466d | from pandac.PandaModules import PStatCollector
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.showbase.PythonUtil import Queue, invertDictLossless, makeFlywheelGen
from direct.showbase.PythonUtil import itype, serialNum, safeRepr, fastRepr
from direct.showbase.Job import Job
import types, weakref, random, __builtin__
def _createContainerLeak():
def leakContainer(task=None):
base = getBase()
if not hasattr(base, 'leakContainer'):
base.leakContainer = {}
# use tuples as keys since they can't be weakref'd, and use an instance
# since it can't be repr/eval'd
# that will force the leak detector to hold a normal 'non-weak' reference
class LeakKey:
pass
base.leakContainer[(LeakKey(),)] = {}
# test the non-weakref object reference handling
if random.random() < .01:
key = random.choice(base.leakContainer.keys())
ContainerLeakDetector.notify.debug(
'removing reference to leakContainer key %s so it will be garbage-collected' % safeRepr(key))
del base.leakContainer[key]
taskMgr.doMethodLater(10, leakContainer, 'leakContainer-%s' % serialNum())
if task:
return task.done
leakContainer()
def _createTaskLeak():
leakTaskName = uniqueName('leakedTask')
leakDoLaterName = uniqueName('leakedDoLater')
def nullTask(task=None):
return task.cont
def nullDoLater(task=None):
return task.done
def leakTask(task=None, leakTaskName=leakTaskName):
base = getBase()
taskMgr.add(nullTask, uniqueName(leakTaskName))
taskMgr.doMethodLater(1 << 31, nullDoLater, uniqueName(leakDoLaterName))
taskMgr.doMethodLater(10, leakTask, 'doLeakTask-%s' % serialNum())
if task:
return task.done
leakTask()
class NoDictKey:
pass
class Indirection:
"""
Represents the indirection that brings you from a container to an element of the container.
Stored as a string to be used as part of an eval, or as a key to be looked up in a dict.
Each dictionary dereference is individually eval'd since the dict key might have been
garbage-collected
TODO: store string components that are duplicates of strings in the actual system so that
Python will keep one copy and reduce memory usage
"""
def __init__(self, evalStr=None, dictKey=NoDictKey):
# if this is a dictionary lookup, pass dictKey instead of evalStr
self.evalStr = evalStr
self.dictKey = NoDictKey
# is the dictKey a weak reference?
self._isWeakRef = False
self._refCount = 0
if dictKey is not NoDictKey:
# if we can repr/eval the key, store it as an evalStr
keyRepr = safeRepr(dictKey)
useEval = False
try:
keyEval = eval(keyRepr)
useEval = True
except:
pass
if useEval:
# check to make sure the eval succeeded
if hash(keyEval) != hash(dictKey):
useEval = False
if useEval:
# eval/repr succeeded, store as an evalStr
self.evalStr = '[%s]' % keyRepr
else:
try:
# store a weakref to the key
self.dictKey = weakref.ref(dictKey)
self._isWeakRef = True
except TypeError, e:
ContainerLeakDetector.notify.debug('could not weakref dict key %s' % keyRepr)
self.dictKey = dictKey
self._isWeakRef = False
def destroy(self):
# re-entrant
self.dictKey = NoDictKey
def acquire(self):
self._refCount += 1
def release(self):
self._refCount -= 1
if self._refCount == 0:
self.destroy()
def isDictKey(self):
# is this an indirection through a dictionary?
return self.dictKey is not NoDictKey
def _getNonWeakDictKey(self):
if not self._isWeakRef:
return self.dictKey
else:
key = self.dictKey()
if key is None:
return '<garbage-collected dict key>'
return key
def dereferenceDictKey(self, parentDict):
# look ourselves up in parentDict
key = self._getNonWeakDictKey()
# objects in __builtin__ will have parentDict==None
if parentDict is None:
return key
return parentDict[key]
def getString(self, prevIndirection=None, nextIndirection=None):
# return our contribution to the full name of an object
instanceDictStr = '.__dict__'
if self.evalStr is not None:
# if we're an instance dict, skip over this one (obj.__dict__[keyName] == obj.keyName)
if nextIndirection is not None and self.evalStr[-len(instanceDictStr):] == instanceDictStr:
return self.evalStr[:-len(instanceDictStr)]
# if the previous indirection was an instance dict, change our syntax from ['key'] to .key
if prevIndirection is not None and prevIndirection.evalStr is not None:
if prevIndirection.evalStr[-len(instanceDictStr):] == instanceDictStr:
return '.%s' % self.evalStr[2:-2]
return self.evalStr
# we're stored as a dict key
keyRepr = safeRepr(self._getNonWeakDictKey())
# if the previous indirection was an instance dict, change our syntax from ['key'] to .key
if prevIndirection is not None and prevIndirection.evalStr is not None:
if prevIndirection.evalStr[-len(instanceDictStr):] == instanceDictStr:
return '.%s' % keyRepr
return '[%s]' % keyRepr
def __repr__(self):
return self.getString()
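# --- Illustrative sketch (editor's addition, not part of the original file) ----
# Rough demonstration of the two kinds of Indirection defined above; the function
# below is hypothetical and is never called at import time.
def _demoIndirection():
    # an attribute-style step is stored directly as an eval fragment
    attrInd = Indirection(evalStr='.loader')
    # a repr/eval-able dict key is also converted to an eval fragment, so the
    # resulting string can simply be appended to the object's "path"
    keyInd = Indirection(dictKey='someKey')
    # keys that cannot be repr/eval'd (e.g. instances) are kept as weak references
    return attrInd.getString(), keyInd.getString()   # ('.loader', "['someKey']")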
class ObjectRef:
"""
stores a reference to a container in a way that does not prevent garbage
collection of the container if possible
stored as a series of 'indirections' (obj.foo -> '.foo', dict[key] -> '[key]', etc.)
"""
notify = directNotify.newCategory("ObjectRef")
class FailedEval(Exception):
pass
def __init__(self, indirection, objId, other=None):
self._indirections = []
# are we building off of an existing ref?
if other is not None:
for ind in other._indirections:
self._indirections.append(ind)
# make sure we're not storing a reference to the actual object,
# that could cause a memory leak
assert type(objId) in (types.IntType, types.LongType)
        # prevent cycles (e.g. base.loader.base.loader)
assert not self.goesThrough(objId=objId)
self._indirections.append(indirection)
# make sure our indirections don't get destroyed while we're using them
for ind in self._indirections:
ind.acquire()
self.notify.debug(repr(self))
def destroy(self):
for indirection in self._indirections:
indirection.release()
del self._indirections
def getNumIndirections(self):
return len(self._indirections)
def goesThroughGen(self, obj=None, objId=None):
if obj is None:
assert type(objId) in (types.IntType, types.LongType)
else:
objId = id(obj)
o = None
evalStr = ''
curObj = None
# make sure the indirections don't go away on us
indirections = self._indirections
for indirection in indirections:
yield None
indirection.acquire()
for indirection in indirections:
yield None
if not indirection.isDictKey():
# build up a string to be eval'd
evalStr += indirection.getString()
else:
curObj = self._getContainerByEval(evalStr, curObj=curObj)
if curObj is None:
raise FailedEval(evalStr)
# try to look up this key in the curObj dictionary
curObj = indirection.dereferenceDictKey(curObj)
evalStr = ''
yield None
o = self._getContainerByEval(evalStr, curObj=curObj)
if id(o) == objId:
break
for indirection in indirections:
yield None
indirection.release()
yield id(o) == objId
def goesThrough(self, obj=None, objId=None):
# since we cache the ids involved in this reference,
# this isn't perfect, for example if base.myObject is reassigned
# to a different object after this Ref was created this would return
# false, allowing a ref to base.myObject.otherObject.myObject
for goesThrough in self.goesThroughGen(obj=obj, objId=objId):
pass
return goesThrough
def _getContainerByEval(self, evalStr, curObj=None):
if curObj is not None:
# eval('curObj.foo.bar.someDict')
evalStr = 'curObj%s' % evalStr
else:
            # this eval is not based off of curObj, use the global __builtin__ namespace
# put __builtin__ at the start if it's not already there
bis = '__builtin__'
if evalStr[:len(bis)] != bis:
evalStr = '%s.%s' % (bis, evalStr)
try:
container = eval(evalStr)
except NameError, ne:
return None
except AttributeError, ae:
return None
except KeyError, ke:
return None
return container
def getContainerGen(self, getInstance=False):
# try to get a handle on the container by eval'ing and looking things
# up in dictionaries, depending on the type of each indirection
# if getInstance is True, will return instance instead of instance dict
#import pdb;pdb.set_trace()
evalStr = ''
curObj = None
# make sure the indirections don't go away on us
indirections = self._indirections
for indirection in indirections:
indirection.acquire()
for indirection in indirections:
yield None
if not indirection.isDictKey():
# build up a string to be eval'd
evalStr += indirection.getString()
else:
curObj = self._getContainerByEval(evalStr, curObj=curObj)
if curObj is None:
raise FailedEval(evalStr)
# try to look up this key in the curObj dictionary
curObj = indirection.dereferenceDictKey(curObj)
evalStr = ''
for indirection in indirections:
yield None
indirection.release()
if getInstance:
lenDict = len('.__dict__')
if evalStr[-lenDict:] == '.__dict__':
evalStr = evalStr[:-lenDict]
# TODO: check that this is still the object we originally pointed to
yield self._getContainerByEval(evalStr, curObj=curObj)
def getEvalStrGen(self, getInstance=False):
str = ''
prevIndirection = None
curIndirection = None
nextIndirection = None
# make sure the indirections don't go away on us
indirections = self._indirections
for indirection in indirections:
indirection.acquire()
for i in xrange(len(indirections)):
yield None
if i > 0:
prevIndirection = indirections[i-1]
else:
prevIndirection = None
curIndirection = indirections[i]
if i < len(indirections)-1:
nextIndirection = indirections[i+1]
else:
nextIndirection = None
str += curIndirection.getString(prevIndirection=prevIndirection,
nextIndirection=nextIndirection)
if getInstance:
lenDict = len('.__dict__')
if str[-lenDict:] == '.__dict__':
str = str[:-lenDict]
for indirection in indirections:
yield None
indirection.release()
yield str
def getFinalIndirectionStr(self):
prevIndirection = None
if len(self._indirections) > 1:
prevIndirection = self._indirections[-2]
return self._indirections[-1].getString(prevIndirection=prevIndirection)
def __repr__(self):
for result in self.getEvalStrGen():
pass
return result
class FindContainers(Job):
"""
Explore the Python graph, looking for objects that support __len__()
"""
def __init__(self, name, leakDetector):
Job.__init__(self, name)
self._leakDetector = leakDetector
self._id2ref = self._leakDetector._id2ref
# these hold objects that we should start traversals from often and not-as-often,
# respectively
self._id2baseStartRef = {}
self._id2discoveredStartRef = {}
# these are working copies so that our iterations aren't disturbed by changes to the
# definitive ref sets
self._baseStartRefWorkingList = ScratchPad(refGen=nullGen(),
source=self._id2baseStartRef)
self._discoveredStartRefWorkingList = ScratchPad(refGen=nullGen(),
source=self._id2discoveredStartRef)
self.notify = self._leakDetector.notify
ContainerLeakDetector.addPrivateObj(self.__dict__)
# set up the base containers, the ones that hold most objects
ref = ObjectRef(Indirection(evalStr='__builtin__.__dict__'), id(__builtin__.__dict__))
self._id2baseStartRef[id(__builtin__.__dict__)] = ref
# container for objects that want to make sure they are found by
# the object exploration algorithm, including objects that exist
# just to measure things such as C++ memory usage, scene graph size,
# framerate, etc. See LeakDetectors.py
if not hasattr(__builtin__, "leakDetectors"):
__builtin__.leakDetectors = {}
ref = ObjectRef(Indirection(evalStr='leakDetectors'), id(leakDetectors))
self._id2baseStartRef[id(leakDetectors)] = ref
for i in self._addContainerGen(__builtin__.__dict__, ref):
pass
try:
base
except:
pass
else:
ref = ObjectRef(Indirection(evalStr='base.__dict__'), id(base.__dict__))
self._id2baseStartRef[id(base.__dict__)] = ref
for i in self._addContainerGen(base.__dict__, ref):
pass
try:
simbase
except:
pass
else:
ref = ObjectRef(Indirection(evalStr='simbase.__dict__'), id(simbase.__dict__))
self._id2baseStartRef[id(simbase.__dict__)] = ref
for i in self._addContainerGen(simbase.__dict__, ref):
pass
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Low
@staticmethod
def getStartObjAffinity(startObj):
# how good of a starting object is this object for traversing the object graph?
try:
return len(startObj)
except:
return 1
def _isDeadEnd(self, obj, objName=None):
if type(obj) in (types.BooleanType, types.BuiltinFunctionType,
types.BuiltinMethodType, types.ComplexType,
types.FloatType, types.IntType, types.LongType,
types.NoneType, types.NotImplementedType,
types.TypeType, types.CodeType, types.FunctionType,
types.StringType, types.UnicodeType,
types.TupleType):
return True
# if it's an internal object, ignore it
if id(obj) in ContainerLeakDetector.PrivateIds:
return True
# prevent crashes in objects that define __cmp__ and don't handle strings
if type(objName) == types.StringType and objName in ('im_self', 'im_class'):
return True
try:
className = obj.__class__.__name__
except:
pass
else:
# prevent infinite recursion in built-in containers related to methods
if className == 'method-wrapper':
return True
return False
def _hasLength(self, obj):
return hasattr(obj, '__len__')
def _addContainerGen(self, cont, objRef):
contId = id(cont)
# if this container is new, or the objRef repr is shorter than what we already have,
# put it in the table
if contId in self._id2ref:
for existingRepr in self._id2ref[contId].getEvalStrGen():
yield None
for newRepr in objRef.getEvalStrGen():
yield None
if contId not in self._id2ref or len(newRepr) < len(existingRepr):
if contId in self._id2ref:
self._leakDetector.removeContainerById(contId)
self._id2ref[contId] = objRef
def _addDiscoveredStartRef(self, obj, ref):
# we've discovered an object that can be used to start an object graph traversal
objId = id(obj)
if objId in self._id2discoveredStartRef:
existingRef = self._id2discoveredStartRef[objId]
if type(existingRef) not in (types.IntType, types.LongType):
if (existingRef.getNumIndirections() >=
ref.getNumIndirections()):
# the ref that we already have is more concise than the new ref
return
if objId in self._id2ref:
if (self._id2ref[objId].getNumIndirections() >=
ref.getNumIndirections()):
# the ref that we already have is more concise than the new ref
return
storedItem = ref
# if we already are storing a reference to this object, don't store a second reference
if objId in self._id2ref:
storedItem = objId
self._id2discoveredStartRef[objId] = storedItem
def run(self):
try:
# this yields a different set of start refs every time we start a new traversal
# force creation of a new workingListSelector inside the while loop right off the bat
workingListSelector = nullGen()
# this holds the current step of the current traversal
curObjRef = None
while True:
# yield up here instead of at the end, since we skip back to the
# top of the while loop from various points
yield None
#import pdb;pdb.set_trace()
if curObjRef is None:
# choose an object to start a traversal from
try:
startRefWorkingList = workingListSelector.next()
except StopIteration:
# do relative # of traversals on each set based on how many refs it contains
baseLen = len(self._baseStartRefWorkingList.source)
discLen = len(self._discoveredStartRefWorkingList.source)
minLen = float(max(1, min(baseLen, discLen)))
# this will cut down the traversals of the larger set by 2/3
minLen *= 3.
workingListSelector = flywheel([self._baseStartRefWorkingList, self._discoveredStartRefWorkingList],
[baseLen/minLen, discLen/minLen])
yield None
continue
# grab the next start ref from this sequence and see if it's still valid
while True:
yield None
try:
curObjRef = startRefWorkingList.refGen.next()
break
except StopIteration:
# we've run out of refs, grab a new set
if len(startRefWorkingList.source) == 0:
# ref set is empty, choose another
break
# make a generator that yields containers a # of times that is
# proportional to their length
for fw in makeFlywheelGen(
startRefWorkingList.source.values(),
countFunc=lambda x: self.getStartObjAffinity(x),
scale=.05):
yield None
startRefWorkingList.refGen = fw
if curObjRef is None:
# this ref set is empty, choose another
# the base set should never be empty (__builtin__ etc.)
continue
# do we need to go look up the object in _id2ref? sometimes we do that
# to avoid storing multiple redundant refs to a single item
if type(curObjRef) in (types.IntType, types.LongType):
startId = curObjRef
curObjRef = None
try:
for containerRef in self._leakDetector.getContainerByIdGen(startId):
yield None
except:
# ref is invalid
self.notify.debug('invalid startRef, stored as id %s' % startId)
self._leakDetector.removeContainerById(startId)
continue
curObjRef = containerRef
try:
for curObj in curObjRef.getContainerGen():
yield None
except:
self.notify.debug('lost current container, ref.getContainerGen() failed')
# that container is gone, try again
curObjRef = None
continue
self.notify.debug('--> %s' % curObjRef)
#import pdb;pdb.set_trace()
# store a copy of the current objRef
parentObjRef = curObjRef
# if we hit a dead end, start over from another container
curObjRef = None
if hasattr(curObj, '__dict__'):
child = curObj.__dict__
hasLength = self._hasLength(child)
notDeadEnd = not self._isDeadEnd(child)
if hasLength or notDeadEnd:
# prevent cycles in the references (i.e. base.loader.base)
for goesThrough in parentObjRef.goesThroughGen(child):
# don't yield, container might lose this element
pass
if not goesThrough:
objRef = ObjectRef(Indirection(evalStr='.__dict__'),
id(child), parentObjRef)
yield None
if hasLength:
for i in self._addContainerGen(child, objRef):
yield None
if notDeadEnd:
self._addDiscoveredStartRef(child, objRef)
curObjRef = objRef
continue
if type(curObj) is types.DictType:
key = None
attr = None
keys = curObj.keys()
# we will continue traversing the object graph via one key of the dict,
# choose it at random without taking a big chunk of CPU time
numKeysLeft = len(keys) + 1
for key in keys:
yield None
numKeysLeft -= 1
try:
attr = curObj[key]
except KeyError, e:
# this is OK because we are yielding during the iteration
self.notify.debug('could not index into %s with key %s' % (
parentObjRef, safeRepr(key)))
continue
hasLength = self._hasLength(attr)
notDeadEnd = False
# if we haven't picked the next ref, check if this one is a candidate
if curObjRef is None:
notDeadEnd = not self._isDeadEnd(attr, key)
if hasLength or notDeadEnd:
# prevent cycles in the references (i.e. base.loader.base)
for goesThrough in parentObjRef.goesThroughGen(curObj[key]):
# don't yield, container might lose this element
pass
if not goesThrough:
if curObj is __builtin__.__dict__:
objRef = ObjectRef(Indirection(evalStr='%s' % key),
id(curObj[key]))
else:
objRef = ObjectRef(Indirection(dictKey=key),
id(curObj[key]), parentObjRef)
yield None
if hasLength:
for i in self._addContainerGen(attr, objRef):
yield None
if notDeadEnd:
self._addDiscoveredStartRef(attr, objRef)
if curObjRef is None and random.randrange(numKeysLeft) == 0:
curObjRef = objRef
del key
del attr
continue
try:
                    itr = iter(curObj)
except:
pass
else:
try:
index = -1
attrs = []
while 1:
yield None
try:
attr = itr.next()
except:
# some custom classes don't do well when iterated
attr = None
break
attrs.append(attr)
# we will continue traversing the object graph via one attr,
# choose it at random without taking a big chunk of CPU time
numAttrsLeft = len(attrs) + 1
for attr in attrs:
yield None
index += 1
numAttrsLeft -= 1
hasLength = self._hasLength(attr)
notDeadEnd = False
if curObjRef is None:
notDeadEnd = not self._isDeadEnd(attr)
if hasLength or notDeadEnd:
# prevent cycles in the references (i.e. base.loader.base)
                                for goesThrough in parentObjRef.goesThroughGen(curObj[index]):
# don't yield, container might lose this element
pass
if not goesThrough:
objRef = ObjectRef(Indirection(evalStr='[%s]' % index),
id(curObj[index]), parentObjRef)
yield None
if hasLength:
for i in self._addContainerGen(attr, objRef):
yield None
if notDeadEnd:
self._addDiscoveredStartRef(attr, objRef)
if curObjRef is None and random.randrange(numAttrsLeft) == 0:
curObjRef = objRef
del attr
except StopIteration, e:
pass
del itr
continue
except Exception, e:
print 'FindContainers job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class CheckContainers(Job):
"""
Job to check container sizes and find potential leaks; sub-job of ContainerLeakDetector
"""
ReprItems = 5
def __init__(self, name, leakDetector, index):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._index = index
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Normal
def run(self):
try:
self._leakDetector._index2containerId2len[self._index] = {}
ids = self._leakDetector.getContainerIds()
# record the current len of each container
for objId in ids:
yield None
try:
for result in self._leakDetector.getContainerByIdGen(objId):
yield None
container = result
except Exception, e:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug(
'%s no longer exists; caught exception in getContainerById (%s)' % (
contName, e))
self._leakDetector.removeContainerById(objId)
continue
if container is None:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug('%s no longer exists; getContainerById returned None' %
contName)
self._leakDetector.removeContainerById(objId)
continue
try:
cLen = len(container)
except Exception, e:
# this container no longer exists
if self.notify.getDebug():
for contName in self._leakDetector.getContainerNameByIdGen(objId):
yield None
self.notify.debug(
'%s is no longer a container, it is now %s (%s)' %
(contName, safeRepr(container), e))
self._leakDetector.removeContainerById(objId)
continue
self._leakDetector._index2containerId2len[self._index][objId] = cLen
# compare the current len of each container to past lens
if self._index > 0:
idx2id2len = self._leakDetector._index2containerId2len
for objId in idx2id2len[self._index]:
yield None
if objId in idx2id2len[self._index-1]:
diff = idx2id2len[self._index][objId] - idx2id2len[self._index-1][objId]
"""
# this check is too spammy
if diff > 20:
if diff > idx2id2len[self._index-1][objId]:
minutes = (self._leakDetector._index2delay[self._index] -
self._leakDetector._index2delay[self._index-1]) / 60.
name = self._leakDetector.getContainerNameById(objId)
if idx2id2len[self._index-1][objId] != 0:
percent = 100. * (float(diff) / float(idx2id2len[self._index-1][objId]))
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (1)')
else:
self.notify.warning(
'%s (%s) grew %.2f%% in %.2f minutes (%s items at last measurement, current contents: %s)' % (
name, itype(container), percent, minutes, idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
yield None
"""
if (self._index > 2 and
objId in idx2id2len[self._index-2] and
objId in idx2id2len[self._index-3]):
diff2 = idx2id2len[self._index-1][objId] - idx2id2len[self._index-2][objId]
diff3 = idx2id2len[self._index-2][objId] - idx2id2len[self._index-3][objId]
if self._index <= 4:
if diff > 0 and diff2 > 0 and diff3 > 0:
name = self._leakDetector.getContainerNameById(objId)
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (2)')
else:
msg = ('%s (%s) consistently increased in size over the last '
'3 periods (%s items at last measurement, current contents: %s)' %
(name, itype(container), idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
self.notify.warning(msg)
yield None
elif (objId in idx2id2len[self._index-4] and
objId in idx2id2len[self._index-5]):
# if size has consistently increased over the last 5 checks,
# send out a warning
diff4 = idx2id2len[self._index-3][objId] - idx2id2len[self._index-4][objId]
diff5 = idx2id2len[self._index-4][objId] - idx2id2len[self._index-5][objId]
if diff > 0 and diff2 > 0 and diff3 > 0 and diff4 > 0 and diff5 > 0:
name = self._leakDetector.getContainerNameById(objId)
try:
for container in self._leakDetector.getContainerByIdGen(objId):
yield None
except:
# TODO
self.notify.debug('caught exception in getContainerByIdGen (3)')
else:
msg = ('leak detected: %s (%s) consistently increased in size over the last '
'5 periods (%s items at last measurement, current contents: %s)' %
(name, itype(container), idx2id2len[self._index][objId],
fastRepr(container, maxLen=CheckContainers.ReprItems)))
self.notify.warning(msg)
yield None
messenger.send(self._leakDetector.getLeakEvent(), [container, name])
if config.GetBool('pdb-on-leak-detect', 0):
import pdb;pdb.set_trace()
pass
except Exception, e:
print 'CheckContainers job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class FPTObjsOfType(Job):
def __init__(self, name, leakDetector, otn, doneCallback=None):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._otn = otn
self._doneCallback = doneCallback
self._ldde = self._leakDetector._getDestroyEvent()
self.accept(self._ldde, self._handleLDDestroy)
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
self.ignore(self._ldde)
self._leakDetector = None
self._doneCallback = None
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def _handleLDDestroy(self):
self.destroy()
def getPriority(self):
return Job.Priorities.High
def run(self):
ids = self._leakDetector.getContainerIds()
try:
for id in ids:
getInstance = (self._otn.lower() not in 'dict')
yield None
try:
for container in self._leakDetector.getContainerByIdGen(
id, getInstance=getInstance):
yield None
except:
pass
else:
if hasattr(container, '__class__'):
cName = container.__class__.__name__
else:
cName = container.__name__
if (self._otn.lower() in cName.lower()):
try:
for ptc in self._leakDetector.getContainerNameByIdGen(
id, getInstance=getInstance):
yield None
except:
pass
else:
print 'GPTC(' + self._otn + '):' + self.getJobName() + ': ' + ptc
except Exception, e:
print 'FPTObjsOfType job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
def finished(self):
if self._doneCallback:
self._doneCallback(self)
class FPTObjsNamed(Job):
def __init__(self, name, leakDetector, on, doneCallback=None):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
self._on = on
self._doneCallback = doneCallback
self._ldde = self._leakDetector._getDestroyEvent()
self.accept(self._ldde, self._handleLDDestroy)
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
self.ignore(self._ldde)
self._leakDetector = None
self._doneCallback = None
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def _handleLDDestroy(self):
self.destroy()
def getPriority(self):
return Job.Priorities.High
def run(self):
ids = self._leakDetector.getContainerIds()
try:
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
pass
else:
name = self._leakDetector._id2ref[id].getFinalIndirectionStr()
if self._on.lower() in name.lower():
try:
for ptc in self._leakDetector.getContainerNameByIdGen(id):
yield None
except:
pass
else:
print 'GPTCN(' + self._on + '):' + self.getJobName() + ': ' + ptc
except Exception, e:
print 'FPTObjsNamed job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
def finished(self):
if self._doneCallback:
self._doneCallback(self)
class PruneObjectRefs(Job):
"""
Job to destroy any container refs that are no longer valid.
    Checks validity by attempting to look up each container.
"""
def __init__(self, name, leakDetector):
Job.__init__(self, name)
self._leakDetector = leakDetector
self.notify = self._leakDetector.notify
ContainerLeakDetector.addPrivateObj(self.__dict__)
def destroy(self):
ContainerLeakDetector.removePrivateObj(self.__dict__)
Job.destroy(self)
def getPriority(self):
return Job.Priorities.Normal
def run(self):
try:
ids = self._leakDetector.getContainerIds()
for id in ids:
yield None
try:
for container in self._leakDetector.getContainerByIdGen(id):
yield None
except:
# reference is invalid, remove it
self._leakDetector.removeContainerById(id)
_id2baseStartRef = self._leakDetector._findContainersJob._id2baseStartRef
ids = _id2baseStartRef.keys()
for id in ids:
yield None
try:
for container in _id2baseStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2baseStartRef[id]
_id2discoveredStartRef = self._leakDetector._findContainersJob._id2discoveredStartRef
ids = _id2discoveredStartRef.keys()
for id in ids:
yield None
try:
for container in _id2discoveredStartRef[id].getContainerGen():
yield None
except:
# reference is invalid, remove it
del _id2discoveredStartRef[id]
except Exception, e:
print 'PruneObjectRefs job caught exception: %s' % e
if __dev__:
raise
yield Job.Done
class ContainerLeakDetector(Job):
"""
Low-priority Python object-graph walker that looks for leaking containers.
To reduce memory usage, this does a random walk of the Python objects to
discover containers rather than keep a set of all visited objects; it may
visit the same object many times but eventually it will discover every object.
Checks container sizes at ever-increasing intervals.
"""
notify = directNotify.newCategory("ContainerLeakDetector")
# set of containers that should not be examined
PrivateIds = set()
def __init__(self, name, firstCheckDelay = None):
Job.__init__(self, name)
self._serialNum = serialNum()
self._findContainersJob = None
self._checkContainersJob = None
self._pruneContainersJob = None
if firstCheckDelay is None:
firstCheckDelay = 60. * 15.
# divide by two, since the first check just takes length measurements and
# doesn't check for leaks
self._nextCheckDelay = firstCheckDelay/2.
self._checkDelayScale = config.GetFloat('leak-detector-check-delay-scale', 1.5)
self._pruneTaskPeriod = config.GetFloat('leak-detector-prune-period', 60. * 30.)
# main dict of id(container)->containerRef
self._id2ref = {}
# storage for results of check-container job
self._index2containerId2len = {}
self._index2delay = {}
if config.GetBool('leak-container', 0):
_createContainerLeak()
if config.GetBool('leak-tasks', 0):
_createTaskLeak()
# don't check our own tables for leaks
ContainerLeakDetector.addPrivateObj(ContainerLeakDetector.PrivateIds)
ContainerLeakDetector.addPrivateObj(self.__dict__)
self.setPriority(Job.Priorities.Min)
jobMgr.add(self)
def destroy(self):
messenger.send(self._getDestroyEvent())
self.ignoreAll()
if self._pruneContainersJob is not None:
jobMgr.remove(self._pruneContainersJob)
self._pruneContainersJob = None
if self._checkContainersJob is not None:
jobMgr.remove(self._checkContainersJob)
self._checkContainersJob = None
jobMgr.remove(self._findContainersJob)
self._findContainersJob = None
del self._id2ref
del self._index2containerId2len
del self._index2delay
def _getDestroyEvent(self):
# sent when leak detector is about to be destroyed
return 'cldDestroy-%s' % self._serialNum
def getLeakEvent(self):
# sent when a leak is detected
# passes description string as argument
return 'containerLeakDetected-%s' % self._serialNum
@classmethod
def addPrivateObj(cls, obj):
cls.PrivateIds.add(id(obj))
@classmethod
def removePrivateObj(cls, obj):
cls.PrivateIds.remove(id(obj))
def _getCheckTaskName(self):
return 'checkForLeakingContainers-%s' % self._serialNum
def _getPruneTaskName(self):
return 'pruneLeakingContainerRefs-%s' % self._serialNum
def getContainerIds(self):
return self._id2ref.keys()
def getContainerByIdGen(self, id, **kwArgs):
# return a generator to look up a container
return self._id2ref[id].getContainerGen(**kwArgs)
def getContainerById(self, id):
for result in self._id2ref[id].getContainerGen():
pass
return result
def getContainerNameByIdGen(self, id, **kwArgs):
return self._id2ref[id].getEvalStrGen(**kwArgs)
def getContainerNameById(self, id):
if id in self._id2ref:
return repr(self._id2ref[id])
return '<unknown container>'
def removeContainerById(self, id):
if id in self._id2ref:
self._id2ref[id].destroy()
del self._id2ref[id]
def run(self):
# start looking for containers
self._findContainersJob = FindContainers(
'%s-findContainers' % self.getJobName(), self)
jobMgr.add(self._findContainersJob)
self._scheduleNextLeakCheck()
self._scheduleNextPruning()
while True:
yield Job.Sleep
def getPathsToContainers(self, name, ot, doneCallback=None):
j = FPTObjsOfType(name, self, ot, doneCallback)
jobMgr.add(j)
return j
def getPathsToContainersNamed(self, name, on, doneCallback=None):
j = FPTObjsNamed(name, self, on, doneCallback)
jobMgr.add(j)
return j
def _scheduleNextLeakCheck(self):
taskMgr.doMethodLater(self._nextCheckDelay, self._checkForLeaks,
self._getCheckTaskName())
# delay between checks
# fib: 1 1 2 3 5 8 13 21 34 55 89
# * 2.: 1 2 4 8 16 32 64 128 256 512 1024
# * 1.5: 1 1.5 2.3 3.4 5.1 7.6 11.4 17.1 25.6 38.4 57.7
#
# delay from job start
# fib: 1 2 4 7 12 20 33 54 88 143 232
# * 2.: 1 3 7 15 31 63 127 255 511 1023 2047
# * 1.5: 1 2.5 4.75 8.1 13.2 20.8 32.2 49.3 74.9 113.3 171
self._nextCheckDelay = self._nextCheckDelay * self._checkDelayScale
def _checkForLeaks(self, task=None):
self._index2delay[len(self._index2containerId2len)] = self._nextCheckDelay
self._checkContainersJob = CheckContainers(
'%s-checkForLeaks' % self.getJobName(), self, len(self._index2containerId2len))
self.acceptOnce(self._checkContainersJob.getFinishedEvent(),
self._scheduleNextLeakCheck)
jobMgr.add(self._checkContainersJob)
return task.done
def _scheduleNextPruning(self):
taskMgr.doMethodLater(self._pruneTaskPeriod, self._pruneObjectRefs,
self._getPruneTaskName())
def _pruneObjectRefs(self, task=None):
self._pruneContainersJob = PruneObjectRefs(
'%s-pruneObjectRefs' % self.getJobName(), self)
self.acceptOnce(self._pruneContainersJob.getFinishedEvent(),
self._scheduleNextPruning)
jobMgr.add(self._pruneContainersJob)
return task.done
| [] |
mzazakeith/flask-blog | virtual/lib/python3.6/site-packages/sqlalchemy/sql/default_comparator.py | 2833404cc5e96ffdbfb767f35b9caf2bdcce7997 | # sql/default_comparator.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Default implementation of SQL comparison operations.
"""
from .. import exc, util
from . import type_api
from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
Slice, Visitable, _literal_as_binds, CollectionAggregate
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
def _boolean_compare(expr, op, obj, negate=None, reverse=False,
_python_is_types=(util.NoneType, bool),
result_type = None,
**kwargs):
if result_type is None:
result_type = type_api.BOOLEANTYPE
if isinstance(obj, _python_is_types + (Null, True_, False_)):
# allow x ==/!= True/False to be treated as a literal.
# this comes out to "== / != true/false" or "1/0" if those
# constants aren't supported and works on all platforms
if op in (operators.eq, operators.ne) and \
isinstance(obj, (bool, True_, False_)):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
elif op in (operators.is_distinct_from, operators.isnot_distinct_from):
return BinaryExpression(expr,
_literal_as_text(obj),
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
# all other None/True/False uses IS, IS NOT
if op in (operators.eq, operators.is_):
return BinaryExpression(expr, _const_expr(obj),
operators.is_,
negate=operators.isnot,
type_=result_type
)
elif op in (operators.ne, operators.isnot):
return BinaryExpression(expr, _const_expr(obj),
operators.isnot,
negate=operators.is_,
type_=result_type
)
else:
raise exc.ArgumentError(
"Only '=', '!=', 'is_()', 'isnot()', "
"'is_distinct_from()', 'isnot_distinct_from()' "
"operators can be used with None/True/False")
else:
obj = _check_literal(expr, op, obj)
if reverse:
return BinaryExpression(obj,
expr,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
else:
return BinaryExpression(expr,
obj,
op,
type_=result_type,
negate=negate, modifiers=kwargs)
def _custom_op_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
if result_type is None:
if op.return_type:
result_type = op.return_type
elif op.is_comparison:
result_type = type_api.BOOLEANTYPE
return _binary_operate(
expr, op, obj, reverse=reverse, result_type=result_type, **kw)
def _binary_operate(expr, op, obj, reverse=False, result_type=None,
**kw):
obj = _check_literal(expr, op, obj)
if reverse:
left, right = obj, expr
else:
left, right = expr, obj
if result_type is None:
op, result_type = left.comparator._adapt_expression(
op, right.comparator)
return BinaryExpression(
left, right, op, type_=result_type, modifiers=kw)
def _conjunction_operate(expr, op, other, **kw):
if op is operators.and_:
return and_(expr, other)
elif op is operators.or_:
return or_(expr, other)
else:
raise NotImplementedError()
def _scalar(expr, op, fn, **kw):
return fn(expr)
def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
seq_or_selectable = _clause_element_as_expr(seq_or_selectable)
if isinstance(seq_or_selectable, ScalarSelect):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op)
elif isinstance(seq_or_selectable, SelectBase):
# TODO: if we ever want to support (x, y, z) IN (select x,
# y, z from table), we would need a multi-column version of
# as_scalar() to produce a multi- column selectable that
# does not export itself as a FROM clause
return _boolean_compare(
expr, op, seq_or_selectable.as_scalar(),
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, (Selectable, TextClause)):
return _boolean_compare(expr, op, seq_or_selectable,
negate=negate_op, **kw)
elif isinstance(seq_or_selectable, ClauseElement):
if isinstance(seq_or_selectable, BindParameter) and \
seq_or_selectable.expanding:
return _boolean_compare(
expr, op,
seq_or_selectable,
negate=negate_op)
else:
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r'
% seq_or_selectable)
# Handle non selectable arguments as sequences
args = []
for o in seq_or_selectable:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
raise exc.InvalidRequestError(
'in_() accepts'
' either a list of expressions, '
'a selectable, or an "expanding" bound parameter: %r' % o)
elif o is None:
o = Null()
else:
o = expr._bind_param(op, o)
args.append(o)
if len(args) == 0:
op, negate_op = (
operators.empty_in_op,
operators.empty_notin_op) if op is operators.in_op \
else (
operators.empty_notin_op,
operators.empty_in_op)
return _boolean_compare(expr, op,
ClauseList(*args).self_group(against=op),
negate=negate_op)
def _getitem_impl(expr, op, other, **kw):
if isinstance(expr.type, type_api.INDEXABLE):
other = _check_literal(expr, op, other)
return _binary_operate(expr, op, other, **kw)
else:
_unsupported_impl(expr, op, other, **kw)
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError("Operator '%s' is not supported on "
"this expression" % op.__name__)
def _inv_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__inv__`."""
if hasattr(expr, 'negation_clause'):
return expr.negation_clause
else:
return expr._negate()
def _neg_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.__neg__`."""
return UnaryExpression(expr, operator=operators.neg, type_=expr.type)
def _match_impl(expr, op, other, **kw):
"""See :meth:`.ColumnOperators.match`."""
return _boolean_compare(
expr, operators.match_op,
_check_literal(
expr, operators.match_op, other),
result_type=type_api.MATCHTYPE,
negate=operators.notmatch_op
if op is operators.match_op else operators.match_op,
**kw
)
def _distinct_impl(expr, op, **kw):
"""See :meth:`.ColumnOperators.distinct`."""
return UnaryExpression(expr, operator=operators.distinct_op,
type_=expr.type)
def _between_impl(expr, op, cleft, cright, **kw):
"""See :meth:`.ColumnOperators.between`."""
return BinaryExpression(
expr,
ClauseList(
_check_literal(expr, operators.and_, cleft),
_check_literal(expr, operators.and_, cright),
operator=operators.and_,
group=False, group_contents=False),
op,
negate=operators.notbetween_op
if op is operators.between_op
else operators.between_op,
modifiers=kw)
def _collate_impl(expr, op, other, **kw):
return collate(expr, other)
# a mapping of operators with the method they use, along with
# their negated operator for comparison operators
operator_lookup = {
"and_": (_conjunction_operate,),
"or_": (_conjunction_operate,),
"inv": (_inv_impl,),
"add": (_binary_operate,),
"mul": (_binary_operate,),
"sub": (_binary_operate,),
"div": (_binary_operate,),
"mod": (_binary_operate,),
"truediv": (_binary_operate,),
"custom_op": (_custom_op_operate,),
"json_path_getitem_op": (_binary_operate, ),
"json_getitem_op": (_binary_operate, ),
"concat_op": (_binary_operate,),
"any_op": (_scalar, CollectionAggregate._create_any),
"all_op": (_scalar, CollectionAggregate._create_all),
"lt": (_boolean_compare, operators.ge),
"le": (_boolean_compare, operators.gt),
"ne": (_boolean_compare, operators.eq),
"gt": (_boolean_compare, operators.le),
"ge": (_boolean_compare, operators.lt),
"eq": (_boolean_compare, operators.ne),
"is_distinct_from": (_boolean_compare, operators.isnot_distinct_from),
"isnot_distinct_from": (_boolean_compare, operators.is_distinct_from),
"like_op": (_boolean_compare, operators.notlike_op),
"ilike_op": (_boolean_compare, operators.notilike_op),
"notlike_op": (_boolean_compare, operators.like_op),
"notilike_op": (_boolean_compare, operators.ilike_op),
"contains_op": (_boolean_compare, operators.notcontains_op),
"startswith_op": (_boolean_compare, operators.notstartswith_op),
"endswith_op": (_boolean_compare, operators.notendswith_op),
"desc_op": (_scalar, UnaryExpression._create_desc),
"asc_op": (_scalar, UnaryExpression._create_asc),
"nullsfirst_op": (_scalar, UnaryExpression._create_nullsfirst),
"nullslast_op": (_scalar, UnaryExpression._create_nullslast),
"in_op": (_in_impl, operators.notin_op),
"notin_op": (_in_impl, operators.in_op),
"is_": (_boolean_compare, operators.is_),
"isnot": (_boolean_compare, operators.isnot),
"collate": (_collate_impl,),
"match_op": (_match_impl,),
"notmatch_op": (_match_impl,),
"distinct_op": (_distinct_impl,),
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
"getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
"contains": (_unsupported_impl,),
}
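# Illustrative reading of the table above (sketch, not upstream documentation):
# operator_lookup["eq"] is (_boolean_compare, operators.ne), i.e. the handler
# that builds the expression plus the operator used to negate it, while
# single-element entries such as "collate": (_collate_impl,) name only the
# handler.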
def _check_literal(expr, operator, other, bindparam_type=None):
if isinstance(other, (ColumnElement, TextClause)):
if isinstance(other, BindParameter) and \
other.type._isnull:
other = other._clone()
other.type = expr.type
return other
elif hasattr(other, '__clause_element__'):
other = other.__clause_element__()
elif isinstance(other, type_api.TypeEngine.Comparator):
other = other.expr
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
elif not isinstance(other, Visitable):
return expr._bind_param(operator, other, type_=bindparam_type)
else:
return other
| [] |
klharshini/recipe-django-api | recipes/serializers.py | 7ceb00ab26f6e0d19196519ece297d2f4d616a5d | from django.contrib.auth.validators import UnicodeUsernameValidator
from rest_framework import serializers
from django.contrib.auth.models import User
from recipes.models import Recipe, Ingredient, Step
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ("username", "last_name", "first_name", "email")
extra_kwargs = {
'username': {
'validators': [UnicodeUsernameValidator()],
}
}
class IngredientSerializer(serializers.ModelSerializer):
class Meta:
model = Ingredient
fields = ["text"]
class StepSerializer(serializers.ModelSerializer):
class Meta:
model = Step
fields = ["step_text"]
class RecipeSerializer(serializers.ModelSerializer):
ingredients = IngredientSerializer(many=True, required=False)
steps = StepSerializer(many=True, required=False)
user = UserSerializer(required=True)
def create(self, validated_data):
steps_data = validated_data.pop('steps')
ingredients_data = validated_data.pop('ingredients')
user_data = validated_data.pop('user')
username = user_data.pop('username')
user = User.objects.get_by_natural_key(username)
recipe = Recipe.objects.create(user=user, **validated_data)
for steps in steps_data:
Step.objects.create(recipe=recipe, **steps)
for ingredients in ingredients_data:
Ingredient.objects.create(recipe=recipe, **ingredients)
return recipe
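    # Illustrative create() payload (hypothetical values; the username must
    # already exist, since it is resolved via get_by_natural_key):
    # {
    #     "name": "Pancakes",
    #     "user": {"username": "alice", "first_name": "A", "last_name": "B",
    #              "email": "a@example.com"},
    #     "steps": [{"step_text": "Mix the batter"}, {"step_text": "Fry"}],
    #     "ingredients": [{"text": "Flour"}, {"text": "Milk"}]
    # }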
class Meta:
model = Recipe
fields = ("name", "user", "steps", "ingredients")
def update(self, instance, validated_data):
steps_data = validated_data.pop('steps')
ingredients_data = validated_data.pop('ingredients')
Step.objects.filter(recipe=instance).delete()
Ingredient.objects.filter(recipe=instance).delete()
for steps in steps_data:
Step.objects.create(recipe=instance, **steps)
for ingredients in ingredients_data:
Ingredient.objects.create(recipe=instance, **ingredients)
return instance
| [((41, 15, 41, 56), 'django.contrib.auth.models.User.objects.get_by_natural_key', 'User.objects.get_by_natural_key', ({(41, 47, 41, 55): 'username'}, {}), '(username)', False, 'from django.contrib.auth.models import User\n'), ((42, 17, 42, 67), 'recipes.models.Recipe.objects.create', 'Recipe.objects.create', (), '', False, 'from recipes.models import Recipe, Ingredient, Step\n'), ((44, 12, 44, 55), 'recipes.models.Step.objects.create', 'Step.objects.create', (), '', False, 'from recipes.models import Recipe, Ingredient, Step\n'), ((47, 12, 47, 67), 'recipes.models.Ingredient.objects.create', 'Ingredient.objects.create', (), '', False, 'from recipes.models import Recipe, Ingredient, Step\n'), ((60, 12, 60, 57), 'recipes.models.Step.objects.create', 'Step.objects.create', (), '', False, 'from recipes.models import Recipe, Ingredient, Step\n'), ((63, 12, 63, 69), 'recipes.models.Ingredient.objects.create', 'Ingredient.objects.create', (), '', False, 'from recipes.models import Recipe, Ingredient, Step\n'), ((57, 8, 57, 44), 'recipes.models.Step.objects.filter', 'Step.objects.filter', (), '', False, 'from recipes.models import Recipe, Ingredient, Step\n'), ((58, 8, 58, 50), 'recipes.models.Ingredient.objects.filter', 'Ingredient.objects.filter', (), '', False, 'from recipes.models import Recipe, Ingredient, Step\n'), ((14, 31, 14, 57), 'django.contrib.auth.validators.UnicodeUsernameValidator', 'UnicodeUsernameValidator', ({}, {}), '()', False, 'from django.contrib.auth.validators import UnicodeUsernameValidator\n')] |
jcwon0/BlurHPE | tests/test_model/test_temporal_regression_head.py | c97a57e92a8a7f171b0403aee640222a32513562 | import numpy as np
import pytest
import torch
from mmpose.models import TemporalRegressionHead
def test_temporal_regression_head():
"""Test temporal head."""
head = TemporalRegressionHead(
in_channels=1024,
num_joints=17,
loss_keypoint=dict(type='MPJPELoss', use_target_weight=True))
head.init_weights()
with pytest.raises(AssertionError):
# ndim of the input tensor should be 3
input_shape = (1, 1024, 1, 1)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
with pytest.raises(AssertionError):
# size of the last dim should be 1
input_shape = (1, 1024, 3)
inputs = _demo_inputs(input_shape)
_ = head(inputs)
input_shape = (1, 1024, 1)
inputs = _demo_inputs(input_shape)
out = head(inputs)
assert out.shape == torch.Size([1, 17, 3])
loss = head.get_loss(out, out, torch.ones_like(out))
assert torch.allclose(loss['reg_loss'], torch.tensor(0.))
_ = head.inference_model(inputs)
_ = head.inference_model(inputs, [(0, 1), (2, 3)])
acc = head.get_accuracy(out, out, torch.ones_like(out))
assert acc['mpjpe'] == 0.
np.testing.assert_almost_equal(acc['p_mpjpe'], 0.)
def _demo_inputs(input_shape=(1, 1024, 1)):
"""Create a superset of inputs needed to run head.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 1024, 1).
Returns:
Random input tensor with the size of input_shape.
"""
inps = np.random.random(input_shape)
inps = torch.FloatTensor(inps)
return inps
| [((42, 4, 42, 54), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', ({(42, 35, 42, 49): "acc['p_mpjpe']", (42, 51, 42, 53): '(0.0)'}, {}), "(acc['p_mpjpe'], 0.0)", True, 'import numpy as np\n'), ((54, 11, 54, 40), 'numpy.random.random', 'np.random.random', ({(54, 28, 54, 39): 'input_shape'}, {}), '(input_shape)', True, 'import numpy as np\n'), ((55, 11, 55, 34), 'torch.FloatTensor', 'torch.FloatTensor', ({(55, 29, 55, 33): 'inps'}, {}), '(inps)', False, 'import torch\n'), ((17, 9, 17, 38), 'pytest.raises', 'pytest.raises', ({(17, 23, 17, 37): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((23, 9, 23, 38), 'pytest.raises', 'pytest.raises', ({(23, 23, 23, 37): 'AssertionError'}, {}), '(AssertionError)', False, 'import pytest\n'), ((32, 24, 32, 46), 'torch.Size', 'torch.Size', ({(32, 35, 32, 45): '[1, 17, 3]'}, {}), '([1, 17, 3])', False, 'import torch\n'), ((34, 35, 34, 55), 'torch.ones_like', 'torch.ones_like', ({(34, 51, 34, 54): 'out'}, {}), '(out)', False, 'import torch\n'), ((35, 44, 35, 60), 'torch.tensor', 'torch.tensor', ({(35, 57, 35, 59): '(0.0)'}, {}), '(0.0)', False, 'import torch\n'), ((40, 38, 40, 58), 'torch.ones_like', 'torch.ones_like', ({(40, 54, 40, 57): 'out'}, {}), '(out)', False, 'import torch\n')] |
gfhuertac/coding_dojo_python | django_orm/sports_orm/leagues/migrations/0002_auto_20161031_1620.py | 4d17bb63fb2b9669216a0f60326d4a4b9055af7e | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-31 23:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('leagues', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='team',
old_name='city',
new_name='location',
),
]
| [((15, 8, 19, 9), 'django.db.migrations.RenameField', 'migrations.RenameField', (), '', False, 'from django.db import migrations\n')] |
Tanych/CodeTracking | 353-Design-Snake-Game/solution.py | 86f1cb98de801f58c39d9a48ce9de12df7303d20 | import collections
class SnakeGame(object):
def __init__(self, width,height,food):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
:type width: int
:type height: int
:type food: List[List[int]]
"""
self.width=width
self.height=height
self.food=collections.deque(food)
self.position=collections.deque([(0,0)])
self.moveops={'U':(-1,0),'L':(0,-1),'R':(0,1),'D':(1,0)}
self.score=0
def move(self, direction):
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
:type direction: str
:rtype: int
"""
if direction not in self.moveops:
return -1
peak,tail=self.position[0],self.position[-1]
self.position.pop()
idxi,idxj=self.moveops[direction]
newi,newj=peak[0]+idxi,peak[1]+idxj
if (newi,newj) in self.position or \
newi<0 or newi>=self.height or \
newj<0 or newj>=self.width:
return -1
self.position.appendleft((newi,newj))
if self.food and [newi,newj]==self.food[0]:
self.food.popleft()
self.position.append(tail)
self.score+=1
return self.score
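# Illustrative trace (hypothetical 3x2 board, results computed from the code above):
#   game = SnakeGame(3, 2, [[1, 2], [0, 1]])
#   game.move('R')  # -> 0
#   game.move('D')  # -> 0
#   game.move('R')  # -> 1, eats the food at [1, 2] and grows
#   game.move('U')  # -> 1
#   game.move('L')  # -> 2, eats the food at [0, 1]
#   game.move('U')  # -> -1, hits the top wall: game over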
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction) | [] |
jessebrennan/azul | scripts/register_sam.py | 65970a0947f38fae439a3bf8fd960d351787b7a3 | from itertools import (
chain,
)
import logging
from azul import (
config,
require,
)
from azul.logging import (
configure_script_logging,
)
from azul.terra import (
TDRClient,
TDRSourceName,
)
log = logging.getLogger(__name__)
def main():
configure_script_logging(log)
tdr = TDRClient()
tdr.register_with_sam()
tdr_catalogs = (
catalog.name
for catalog in config.catalogs.values()
if catalog.plugins['repository'] == 'tdr'
)
for source in set(chain(*map(config.tdr_sources, tdr_catalogs))):
source = TDRSourceName.parse(source)
api_project = tdr.lookup_source_project(source)
require(api_project == source.project,
'Actual Google project of TDR source differs from configured '
'one',
api_project, source)
tdr.check_api_access(source)
tdr.check_bigquery_access(source)
if __name__ == '__main__':
main()
| [((18, 6, 18, 33), 'logging.getLogger', 'logging.getLogger', ({(18, 24, 18, 32): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((22, 4, 22, 33), 'azul.logging.configure_script_logging', 'configure_script_logging', ({(22, 29, 22, 32): 'log'}, {}), '(log)', False, 'from azul.logging import configure_script_logging\n'), ((23, 10, 23, 21), 'azul.terra.TDRClient', 'TDRClient', ({}, {}), '()', False, 'from azul.terra import TDRClient, TDRSourceName\n'), ((32, 17, 32, 44), 'azul.terra.TDRSourceName.parse', 'TDRSourceName.parse', ({(32, 37, 32, 43): 'source'}, {}), '(source)', False, 'from azul.terra import TDRClient, TDRSourceName\n'), ((34, 8, 37, 36), 'azul.require', 'require', ({(34, 16, 34, 45): '(api_project == source.project)', (35, 16, 36, 21): '"""Actual Google project of TDR source differs from configured one"""', (37, 16, 37, 27): 'api_project', (37, 29, 37, 35): 'source'}, {}), "(api_project == source.project,\n 'Actual Google project of TDR source differs from configured one',\n api_project, source)", False, 'from azul import config, require\n'), ((28, 23, 28, 47), 'azul.config.catalogs.values', 'config.catalogs.values', ({}, {}), '()', False, 'from azul import config, require\n')] |
StamKaly/altitude-mod-foundation | altitude/players.py | 403befeba7d0e2e6afe3897081cd3e01f438e3d5 | class Player:
def __init__(self, nickname, vapor_id, player_id, ip):
self.nickname = nickname
self.vapor_id = vapor_id
self.player_id = player_id
self.ip = ip
self.not_joined = True
self.loads_map = True
self.joined_after_change_map = True
class Players:
def __init__(self, main_object, modded, lobby):
self.main = main_object
self.players = []
self.modded = modded
self.map_changed = False
self.lobby = lobby
self.commands = None
def get_commands_object(self, commands_object):
self.commands = commands_object
def _on_map_change(self, map_name):
self.map_changed = map_name
if self.modded and self.players:
for player in self.players:
player.loads_map = True
def check_if_everyone_joined_after_change_map(self):
for player in self.players:
if player.loads_map and not player.joined_after_change_map:
return False
return True
def _on_player_info_ev(self, player_id):
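        # Rough sketch of the flag bookkeeping below (inferred from the code, not
        # from the game protocol): each Player starts with loads_map and
        # joined_after_change_map both True; the first info event after a map
        # change clears joined_after_change_map, the second marks the map as
        # loaded and fires main.on_player_map_change, and the one-off not_joined
        # attribute triggers main.on_client_join exactly once per player.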
player = [player for player in self.players if player.player_id == player_id][0]
if self.map_changed or hasattr(player, "not_joined"):
if player.loads_map and player.joined_after_change_map:
player.joined_after_change_map = False
elif player.loads_map and not player.joined_after_change_map:
player.loads_map = False
player.joined_after_change_map = True
self.main.on_player_map_change(player, self.map_changed)
if hasattr(player, "not_joined"):
del player.not_joined
self.main.on_client_join(player)
if self.check_if_everyone_joined_after_change_map():
self.map_changed = False
def check_nickname_existence(self, nickname):
for player in self.players:
if nickname == player.nickname:
return True
return False
def get_all_players(self, nicknames, vapor_ids, player_ids, ips):
players_list = [nicknames, vapor_ids, player_ids, ips]
for count in range(len(nicknames)):
self.players.append(Player(*[player[count] for player in players_list]))
def add(self, nickname, vapor_id, player_id, ip):
self.players.append(Player(nickname, vapor_id, player_id, ip))
def remove(self, nickname):
for player in self.players:
if nickname == player.nickname:
self.players.remove(player)
break
if self.lobby and len(self.players) == 0:
self.commands.change_map(self.lobby)
def nickname_change(self, old_nickname, new_nickname):
for player in self.players:
if old_nickname == player.nickname:
player.nickname = new_nickname
break
def all_nicknames(self):
return [player.nickname for player in self.players]
def player_from_nickname(self, nickname):
for player in self.players:
if nickname == player.nickname:
return player
def player_from_vapor_id(self, vapor_id):
for player in self.players:
if vapor_id == player.vapor_id:
return player
def player_from_player_id(self, player_id):
for player in self.players:
if player_id == player.player_id:
return player
def get_all_vapor_ids(self):
return [player.vapor_id for player in self.players]
| [] |
expressionsofchange/nerf0 | dsn/editor/construct.py | 788203619fc89c92e8c7301d62bbc4f1f4ee66e1 | """
Tools to "play notes for the editor clef", which may be thought of as "executing editor commands".
NOTE: in the below, we often connect notes together "manually", i.e. using NoteSlur(..., previous_hash). As an
alternative, we could consider `nouts_for_notes`.
"""
from s_address import node_for_s_address, s_dfs
from dsn.s_expr.legato import NoteSlur, NoteCapo
from dsn.s_expr.utils import (
bubble_history_up,
calc_possibility,
insert_text_at,
insert_node_at,
replace_text_at,
weave_disjoint_replaces,
)
from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode
from dsn.s_expr.structure import TreeNode
from dsn.editor.clef import (
CursorChild,
CursorDFS,
CursorParent,
CursorSet,
EDelete,
EncloseWithParent,
InsertNodeChild,
InsertNodeSibbling,
MoveSelectionChild,
MoveSelectionSibbling,
LeaveChildrenBehind,
SwapSibbling,
TextInsert,
TextReplace,
)
def edit_note_play(structure, edit_note):
# :: EditStructure, EditNote => (new) s_cursor, posacts, error
def an_error():
return structure.s_cursor, [], True
if isinstance(edit_note, TextInsert):
posacts = insert_text_at(structure.tree, edit_note.parent_s_address, edit_note.index, edit_note.text)
new_s_cursor = edit_note.parent_s_address + [edit_note.index]
return new_s_cursor, posacts, False
if isinstance(edit_note, TextReplace):
posacts = replace_text_at(structure.tree, edit_note.s_address, edit_note.text)
return edit_note.s_address, posacts, False
if isinstance(edit_note, InsertNodeSibbling):
if structure.s_cursor == []:
return an_error() # adding sibblings to the root is not possible (it would lead to a forest)
# There is no need to check that the new index is a valid one. (Assuming: the cursor is valid, and direction is
# in the range [0, 1]; such assumptions fit with the general idea of "we only check that the user's command can
# be executed at this point, we do not check for arbitrary programming errors here). The proof flows directly
# from the idea that, for lists of length n, insertions at [0, n] are valid (insertion at n being an append).
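        # Worked example (illustrative): a parent with 3 children has cursor
        # indices in [0, 2]; with direction in [0, 1] the new index lies in
        # [0, 3], and 3 is exactly the append position, so every case is a
        # valid insertion point.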
index = structure.s_cursor[-1] + edit_note.direction
posacts = insert_node_at(structure.tree, structure.s_cursor[:-1], index)
new_s_cursor = structure.s_cursor[:-1] + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, InsertNodeChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not isinstance(cursor_node, TreeNode):
# for now... we just silently ignore the user's request when they ask to add a child node to a non-node
return an_error()
index = len(cursor_node.children)
posacts = insert_node_at(structure.tree, structure.s_cursor, index)
new_s_cursor = structure.s_cursor + [index]
return new_s_cursor, posacts, False
if isinstance(edit_note, EDelete):
if structure.s_cursor == []:
# silently ignored ('delete root' is not defined, because the root is assumed to exist.)
return an_error()
delete_from = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, delete_from).metadata.nout_hash
p, h = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
if delete_at_index == len(node_for_s_address(structure.tree, delete_from).children) - 1:
# deletion makes cursor pos invalid: up to parent (alternative: sibbling-up first, until no more sibblings)
new_s_cursor = delete_from
else:
            new_s_cursor = structure.s_cursor  # "stay in place" (although new contents slide into the cursor position)
posacts = [p] + bubble_history_up(h, structure.tree, delete_from)
return new_s_cursor, posacts, False
if isinstance(edit_note, SwapSibbling):
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, structure.s_cursor[:-1])
index = structure.s_cursor[-1] + edit_note.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
        # For now, SwapSibbling is simply implemented as a "delete and insert"; if (or when) we introduce "Move" into
        # the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p0, hash_after_deletion = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
p1, hash_after_insertion = calc_possibility(NoteSlur(Insert(index, reinsert_later_hash), hash_after_deletion))
new_cursor = structure.s_cursor[:-1] + [index]
posacts = [p0, p1] + bubble_history_up(hash_after_insertion, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, MoveSelectionChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
return an_error() # The target must be a node to be able to add as a child
return do_move(structure, edit_note, structure.s_cursor, len(cursor_node.children))
if isinstance(edit_note, MoveSelectionSibbling):
if len(structure.s_cursor) == 0:
return an_error() # there is no sibbling of the root node
# edit_note.direction points to a valid insertion point for the same reasons detailed in the comment on
# InsertNodeSibbling
return do_move(structure, edit_note, structure.s_cursor[:-1], structure.s_cursor[-1] + edit_note.direction)
if isinstance(edit_note, LeaveChildrenBehind):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children'):
            return an_error()  # Leave _children_ behind presupposes the existence of children
if structure.s_cursor == []:
return an_error() # Root cannot die
        # For now, LeaveChildrenBehind is simply implemented as a "delete and insert"; if (or when) we introduce
        # "Move" into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
delete_at_index = structure.s_cursor[-1]
delete_from_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Delete(delete_at_index), delete_from_hash))
posacts = [p]
removed_node = node_for_s_address(structure.tree, structure.s_cursor)
for i, child in enumerate(removed_node.children):
p, hash_ = calc_possibility(NoteSlur(Insert(structure.s_cursor[-1] + i, child.metadata.nout_hash), hash_))
posacts.append(p)
# In general, leaving the cursor at the same s_address will be great: post-deletion you'll be in the right spot
new_cursor = structure.s_cursor
if len(removed_node.children) == 0:
# ... however, if there are no children to leave behind... this "right spot" may be illegal
parent_node = node_for_s_address(structure.tree, parent_s_address)
if len(parent_node.children) == 1:
# if the deleted node was the only node: fall back to the parent
new_cursor = parent_s_address
else:
# otherwise, make sure to stay in bounds.
new_cursor[len(new_cursor) - 1] = min(
len(parent_node.children) - 1 - 1, # len - 1 idiom; -1 for deletion.
new_cursor[len(new_cursor) - 1])
posacts += bubble_history_up(hash_, structure.tree, parent_s_address)
return new_cursor, posacts, False
if isinstance(edit_note, EncloseWithParent):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if structure.s_cursor == []:
# I am not sure about this one yet: should we have the option to create a new root? I don't see any direct
# objections (by which I mean: it's possible in terms of the math), but I still have a sense that it may
# create some asymmetries. For now I'm disallowing it; we'll see whether a use case arises.
return an_error()
        # For now, EncloseWithParent is simply implemented as a "replace with the parent"; if (or when) we introduce
        # "Move" (in particular: the MoveReplace) into the Clef, we should note the move here.
parent_s_address = structure.s_cursor[:-1]
replace_at_index = structure.s_cursor[-1]
replace_on_hash = node_for_s_address(structure.tree, parent_s_address).metadata.nout_hash
reinsert_later_hash = node_for_s_address(structure.tree, structure.s_cursor).metadata.nout_hash
p_capo, hash_capo = calc_possibility(NoteCapo())
p_create, hash_create = calc_possibility(NoteSlur(BecomeNode(), hash_capo))
p_enclosure, hash_enclosure = calc_possibility(NoteSlur(Insert(0, reinsert_later_hash), hash_create))
p_replace, hash_replace = calc_possibility(
NoteSlur(Replace(replace_at_index, hash_enclosure), replace_on_hash))
posacts = [p_capo, p_create, p_enclosure, p_replace] + bubble_history_up(
hash_replace, structure.tree, parent_s_address)
# We jump the cursor to the newly enclosed location:
new_cursor = structure.s_cursor + [0]
return new_cursor, posacts, False
def move_cursor(new_cursor):
return new_cursor, [], False
if isinstance(edit_note, CursorDFS):
dfs = s_dfs(structure.tree, [])
dfs_index = dfs.index(structure.s_cursor) + edit_note.direction
if not (0 <= dfs_index <= len(dfs) - 1):
return an_error()
return move_cursor(dfs[dfs_index])
"""At some point I had "regular sibbling" (as opposed to DFS sibbling) in the edit_clef. It looks like this:
if structure.s_cursor == []:
return an_error() # root has no sibblings
parent = node_for_s_address(structure.tree, s_cursor[:-1])
index = s_cursor[-1] + edit_node.direction
if not (0 <= index <= len(parent.children) - 1):
return an_error()
return move_cursor(s_cursor[:-1] + [index])
"""
if isinstance(edit_note, CursorSet):
return move_cursor(edit_note.s_address)
if isinstance(edit_note, CursorParent):
if structure.s_cursor == []:
return an_error()
return move_cursor(structure.s_cursor[:-1])
if isinstance(edit_note, CursorChild):
cursor_node = node_for_s_address(structure.tree, structure.s_cursor)
if not hasattr(cursor_node, 'children') or len(cursor_node.children) == 0:
return an_error()
return move_cursor(structure.s_cursor + [0])
raise Exception("Unknown Note")
def do_move(structure, edit_note, target_parent_path, target_index):
selection_edge_0 = edit_note.selection_edge_0
selection_edge_1 = edit_note.selection_edge_1
def an_error():
return structure.s_cursor, [], True
if selection_edge_0[:-1] != selection_edge_1[:-1]:
# i.e. if not same-parent: this is an error. This may very well be too restrictive, but I'd rather move in the
# direction of "relax constraints later" than in the other directions. One particular reason I'm so restrictive
        # for now: if I ever want to express a note "move" using a target_node, a source node and two indices in the
# source node, such a single-parent restriction is indeed a necessity.
# Note that "single parent" implies "same depth", but not vice versa. One possible relaxation is: make the
# restriction on "same depth" instead.
# Generally, the paths towards relaxation are to either [a] "be smart about the meaning of the selection's
# edges", i.e. find the first common ancestor and the relevant children of that ancestor or [b] to not care so
# much about single-parent.
return an_error()
if selection_edge_0 <= (target_parent_path + [target_index])[:len(selection_edge_0)] <= selection_edge_1:
# If the full target location, truncated to the length of the sources, is (inclusively) in the source's range,
# you're trying to move to [a descendant of] yourself. This is illegal. Moving something to a child of itself:
# I simply don't know what it would mean. Moving something to the same location (single source item, target path
# identical to the source path) could at least be understood to mean the no-op, so it's slightly less
# meaningless, but here I don't find that enough, so I'm just calling both scenarios error-scenarios.
# This implies protection against moving the root node around (because everything descends from the root node)
return an_error()
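    # Example of the check above: with a selection spanning [1, 2] .. [1, 4], any target under
    # parent [1, 3] truncates to [1, 3], which lies inside that range, so the move is rejected.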
source_parent_path = selection_edge_0[:-1]
source_parent = node_for_s_address(structure.tree, source_parent_path)
target_parent = node_for_s_address(structure.tree, target_parent_path)
# For now, the "edit move" operations are simply implemented as a "insert and delete"; if (or when) we'll introduce
# "Move" into the Clef, we should note the move here.
posacts = []
source_index_lo, source_index_hi = sorted([selection_edge_0[-1], selection_edge_1[-1]])
hash_ = target_parent.metadata.nout_hash
for target_offset, source_index in enumerate(range(source_index_lo, source_index_hi + 1)): # edge-inclusive range
insert_hash = node_for_s_address(structure.tree, source_parent_path + [source_index]).metadata.nout_hash
p, hash_ = calc_possibility(NoteSlur(Insert(target_index + target_offset, insert_hash), hash_))
posacts.append(p)
weave_correction = 0
cursor_correction = 0
# TODO this part is still broken:
# Not only if the parents are exactly the same, but also if one parent is a prefix of the other (said differently:
# the longest_common_prefix of both parents matches one of them).
# In that case, we need to somehow connect the parents....
    # (For the case of "parents match exactly", I did this using the idea "just don't reset hash_"... which works,
    # because it allows you to continue operating on the same "future". But in the case of a shared prefix, this won't
    # work.)
if source_parent_path != target_parent_path:
wdr_hash = hash_
hash_ = source_parent.metadata.nout_hash
else:
if target_index < source_index_lo:
# We insert before we delete. If we do this on the same parent, and the insertions happen at lower indices
# than the deletions, they will affect the locations where the deletions must take place, by precisely the
# number of insertions that happened. (If we reverse the order of operations, we have the opposite problem)
# The reason we have this problem at all, is because we implement something that is atomic from the user's
# point of view in a non-atomic way in the clef. The problem may auto-disappear if we add "Move" to the
# clef.
                # Another way we could handle the problem is once we have some tools to "re-linearize while preserving
# meaning". I.e. we have deletions, we have insertions: at one point (e.g. once we build the cooperative
# editor) we should be able to express "weave those together, rewriting indices as required".
# In the if-statement above, we could pick either lo/hi for the comparison; source_index_lo and
# source_index_hi will never straddle target_index, because of the child-of-yourself checks at the top.
weave_correction = source_index_hi - source_index_lo + 1
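            # e.g. moving source items 2..3 before target_index 0 within the same parent: the two
            # inserts shift the originals to indices 4..5, so the deletes below start at 2 + 2.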
else:
cursor_correction = source_index_hi - source_index_lo + 1
# we do _not_ fetch hash_ here, the idea being: it's the hash we just created.
# nor do we bubble up (yet); we can do a single bubble-up
for source_index in range(source_index_lo, source_index_hi + 1): # edge-inclusive range
# Note: we just Delete n times at the "lo" index (everything shifting to the left after each deletion)
p, hash_ = calc_possibility(NoteSlur(Delete(source_index_lo + weave_correction), hash_))
posacts.append(p)
if source_parent_path != target_parent_path:
posacts = posacts + weave_disjoint_replaces(
structure.tree,
target_parent_path, wdr_hash,
source_parent_path, hash_)
else:
posacts = posacts + bubble_history_up(hash_, structure.tree, source_parent_path)
# The current solution for "where to put the cursor after the move" is "at the end". This "seems intuitive" (but
    # that may just be habituation). In any case, it's what e.g. LibreOffice does when cut/pasting. (However, for a
# mouse-drag initiated move in LibreOffice, the selection is preserved).
# As it stands: the selection disappears automatically, because it points at a no-longer existing location. If we
# want to make the selection appear at the target-location, we need to change the interface of edit_note_play to
# include the resulting selection.
new_cursor = target_parent_path + [target_index + target_offset - cursor_correction]
return new_cursor, posacts, False
| [((293, 20, 293, 74), 's_address.node_for_s_address', 'node_for_s_address', ({(293, 39, 293, 53): 'structure.tree', (293, 55, 293, 73): 'source_parent_path'}, {}), '(structure.tree, source_parent_path)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((295, 20, 295, 74), 's_address.node_for_s_address', 'node_for_s_address', ({(295, 39, 295, 53): 'structure.tree', (295, 55, 295, 73): 'target_parent_path'}, {}), '(structure.tree, target_parent_path)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((49, 18, 49, 109), 'dsn.s_expr.utils.insert_text_at', 'insert_text_at', ({(49, 33, 49, 47): 'structure.tree', (49, 49, 49, 75): 'edit_note.parent_s_address', (49, 77, 49, 92): 'edit_note.index', (49, 94, 49, 108): 'edit_note.text'}, {}), '(structure.tree, edit_note.parent_s_address, edit_note.index,\n edit_note.text)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((54, 18, 54, 86), 'dsn.s_expr.utils.replace_text_at', 'replace_text_at', ({(54, 34, 54, 48): 'structure.tree', (54, 50, 54, 69): 'edit_note.s_address', (54, 71, 54, 85): 'edit_note.text'}, {}), '(structure.tree, edit_note.s_address, edit_note.text)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((67, 18, 67, 80), 'dsn.s_expr.utils.insert_node_at', 'insert_node_at', ({(67, 33, 67, 47): 'structure.tree', (67, 49, 67, 72): 'structure.s_cursor[:-1]', (67, 74, 67, 79): 'index'}, {}), '(structure.tree, structure.s_cursor[:-1], index)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((73, 22, 73, 76), 's_address.node_for_s_address', 'node_for_s_address', ({(73, 41, 73, 55): 'structure.tree', (73, 57, 73, 75): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((79, 18, 79, 75), 'dsn.s_expr.utils.insert_node_at', 'insert_node_at', ({(79, 33, 79, 47): 'structure.tree', (79, 49, 79, 67): 'structure.s_cursor', (79, 69, 79, 74): 'index'}, {}), '(structure.tree, structure.s_cursor, index)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((109, 17, 109, 76), 's_address.node_for_s_address', 'node_for_s_address', ({(109, 36, 109, 50): 'structure.tree', (109, 52, 109, 75): 'structure.s_cursor[:-1]'}, {}), '(structure.tree, structure.s_cursor[:-1])', False, 'from s_address import node_for_s_address, s_dfs\n'), ((131, 22, 131, 76), 's_address.node_for_s_address', 'node_for_s_address', ({(131, 41, 131, 55): 'structure.tree', (131, 57, 131, 75): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((147, 22, 147, 76), 's_address.node_for_s_address', 'node_for_s_address', ({(147, 41, 147, 55): 'structure.tree', (147, 57, 147, 75): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((164, 23, 164, 77), 's_address.node_for_s_address', 'node_for_s_address', ({(164, 42, 164, 56): 'structure.tree', (164, 58, 164, 76): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((183, 19, 183, 77), 
'dsn.s_expr.utils.bubble_history_up', 'bubble_history_up', ({(183, 37, 183, 42): 'hash_', (183, 44, 183, 58): 'structure.tree', (183, 60, 183, 76): 'parent_s_address'}, {}), '(hash_, structure.tree, parent_s_address)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((188, 22, 188, 76), 's_address.node_for_s_address', 'node_for_s_address', ({(188, 41, 188, 55): 'structure.tree', (188, 57, 188, 75): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((225, 14, 225, 39), 's_address.s_dfs', 's_dfs', ({(225, 20, 225, 34): 'structure.tree', (225, 36, 225, 38): '[]'}, {}), '(structure.tree, [])', False, 'from s_address import node_for_s_address, s_dfs\n'), ((252, 22, 252, 76), 's_address.node_for_s_address', 'node_for_s_address', ({(252, 41, 252, 55): 'structure.tree', (252, 57, 252, 75): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((101, 24, 101, 73), 'dsn.s_expr.utils.bubble_history_up', 'bubble_history_up', ({(101, 42, 101, 43): 'h', (101, 45, 101, 59): 'structure.tree', (101, 61, 101, 72): 'delete_from'}, {}), '(h, structure.tree, delete_from)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((127, 29, 127, 102), 'dsn.s_expr.utils.bubble_history_up', 'bubble_history_up', ({(127, 47, 127, 67): 'hash_after_insertion', (127, 69, 127, 83): 'structure.tree', (127, 85, 127, 101): 'parent_s_address'}, {}), '(hash_after_insertion, structure.tree, parent_s_address)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((173, 26, 173, 78), 's_address.node_for_s_address', 'node_for_s_address', ({(173, 45, 173, 59): 'structure.tree', (173, 61, 173, 77): 'parent_s_address'}, {}), '(structure.tree, parent_s_address)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((205, 45, 205, 55), 'dsn.s_expr.legato.NoteCapo', 'NoteCapo', ({}, {}), '()', False, 'from dsn.s_expr.legato import NoteSlur, NoteCapo\n'), ((213, 63, 214, 59), 'dsn.s_expr.utils.bubble_history_up', 'bubble_history_up', ({(214, 12, 214, 24): 'hash_replace', (214, 26, 214, 40): 'structure.tree', (214, 42, 214, 58): 'parent_s_address'}, {}), '(hash_replace, structure.tree, parent_s_address)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((359, 28, 362, 38), 'dsn.s_expr.utils.weave_disjoint_replaces', 'weave_disjoint_replaces', ({(360, 12, 360, 26): 'structure.tree', (361, 12, 361, 30): 'target_parent_path', (361, 32, 361, 40): 'wdr_hash', (362, 12, 362, 30): 'source_parent_path', (362, 32, 362, 37): 'hash_'}, {}), '(structure.tree, target_parent_path, wdr_hash,\n source_parent_path, hash_)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((365, 28, 365, 88), 'dsn.s_expr.utils.bubble_history_up', 'bubble_history_up', ({(365, 46, 365, 51): 'hash_', (365, 53, 365, 67): 'structure.tree', (365, 69, 365, 87): 'source_parent_path'}, {}), '(hash_, structure.tree, source_parent_path)', False, 'from dsn.s_expr.utils import bubble_history_up, calc_possibility, 
insert_text_at, insert_node_at, replace_text_at, weave_disjoint_replaces\n'), ((91, 27, 91, 74), 's_address.node_for_s_address', 'node_for_s_address', ({(91, 46, 91, 60): 'structure.tree', (91, 62, 91, 73): 'delete_from'}, {}), '(structure.tree, delete_from)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((93, 41, 93, 64), 'dsn.s_expr.clef.Delete', 'Delete', ({(93, 48, 93, 63): 'delete_at_index'}, {}), '(delete_at_index)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((120, 27, 120, 79), 's_address.node_for_s_address', 'node_for_s_address', ({(120, 46, 120, 60): 'structure.tree', (120, 62, 120, 78): 'parent_s_address'}, {}), '(structure.tree, parent_s_address)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((121, 30, 121, 84), 's_address.node_for_s_address', 'node_for_s_address', ({(121, 49, 121, 63): 'structure.tree', (121, 65, 121, 83): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((123, 60, 123, 83), 'dsn.s_expr.clef.Delete', 'Delete', ({(123, 67, 123, 82): 'delete_at_index'}, {}), '(delete_at_index)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((124, 61, 124, 95), 'dsn.s_expr.clef.Insert', 'Insert', ({(124, 68, 124, 73): 'index', (124, 75, 124, 94): 'reinsert_later_hash'}, {}), '(index, reinsert_later_hash)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((159, 27, 159, 79), 's_address.node_for_s_address', 'node_for_s_address', ({(159, 46, 159, 60): 'structure.tree', (159, 62, 159, 78): 'parent_s_address'}, {}), '(structure.tree, parent_s_address)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((161, 45, 161, 68), 'dsn.s_expr.clef.Delete', 'Delete', ({(161, 52, 161, 67): 'delete_at_index'}, {}), '(delete_at_index)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((201, 26, 201, 78), 's_address.node_for_s_address', 'node_for_s_address', ({(201, 45, 201, 59): 'structure.tree', (201, 61, 201, 77): 'parent_s_address'}, {}), '(structure.tree, parent_s_address)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((203, 30, 203, 84), 's_address.node_for_s_address', 'node_for_s_address', ({(203, 49, 203, 63): 'structure.tree', (203, 65, 203, 83): 'structure.s_cursor'}, {}), '(structure.tree, structure.s_cursor)', False, 'from s_address import node_for_s_address, s_dfs\n'), ((206, 58, 206, 70), 'dsn.s_expr.clef.BecomeNode', 'BecomeNode', ({}, {}), '()', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((208, 64, 208, 94), 'dsn.s_expr.clef.Insert', 'Insert', ({(208, 71, 208, 72): '0', (208, 74, 208, 93): 'reinsert_later_hash'}, {}), '(0, reinsert_later_hash)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((211, 21, 211, 62), 'dsn.s_expr.clef.Replace', 'Replace', ({(211, 29, 211, 45): 'replace_at_index', (211, 47, 211, 61): 'hash_enclosure'}, {}), '(replace_at_index, hash_enclosure)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((307, 22, 307, 93), 's_address.node_for_s_address', 'node_for_s_address', ({(307, 41, 307, 55): 'structure.tree', (307, 57, 307, 92): '(source_parent_path + [source_index])'}, {}), '(structure.tree, source_parent_path + [source_index])', False, 'from s_address import node_for_s_address, s_dfs\n'), ((308, 45, 308, 94), 'dsn.s_expr.clef.Insert', 'Insert', ({(308, 52, 308, 80): 'target_index + target_offset', (308, 82, 
308, 93): 'insert_hash'}, {}), '(target_index + target_offset, insert_hash)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((355, 45, 355, 87), 'dsn.s_expr.clef.Delete', 'Delete', ({(355, 52, 355, 86): 'source_index_lo + weave_correction'}, {}), '(source_index_lo + weave_correction)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((166, 49, 166, 109), 'dsn.s_expr.clef.Insert', 'Insert', ({(166, 56, 166, 82): 'structure.s_cursor[-1] + i', (166, 84, 166, 108): 'child.metadata.nout_hash'}, {}), '(structure.s_cursor[-1] + i, child.metadata.nout_hash)', False, 'from dsn.s_expr.clef import Delete, Insert, Replace, BecomeNode\n'), ((95, 34, 95, 81), 's_address.node_for_s_address', 'node_for_s_address', ({(95, 53, 95, 67): 'structure.tree', (95, 69, 95, 80): 'delete_from'}, {}), '(structure.tree, delete_from)', False, 'from s_address import node_for_s_address, s_dfs\n')] |
richtong/pytong | src/pytong/base.py | 6ff07a1bdf1d5e2232bfc102cce2dd74783bb111 | """Base for all Classes.
It mainly provides the shared logging setup (the BaseLog class below).
"""
import logging
from typing import Optional
from .log import Log # type: ignore
class BaseLog:
"""
    Set up base logging.
Use this as the base class for all your work. This adds a logging root.
"""
def __init__(self, log_root: Optional[Log] = None):
"""Set the Root Log."""
# since we have no log otherwise
self.log_root = log_root
self.log = (
log_root.log_class(self)
if log_root is not None
else logging.getLogger(__name__)
)
self.log.debug(f"{self=}")
| [((25, 17, 25, 44), 'logging.getLogger', 'logging.getLogger', ({(25, 35, 25, 43): '__name__'}, {}), '(__name__)', False, 'import logging\n')] |
GuillaumeFalourd/poc-subprocess | subprocess-10.py | 8f014a709ac2e471092d4ea1f61f1a9ff65ff571 | import subprocess
import re
programs = input('Separate the programs with a space: ').split()

# Only names made of word characters are accepted (note: re.match anchors at the start of the string).
secure_pattern = r'[\w\d]'
for program in programs:
if not re.match(secure_pattern, program):
print("Sorry we can't check that program")
continue
    process = subprocess.run(
['which', program], capture_output=True, text=True)
if process.returncode == 0:
print(f'The program "{program}" is installed')
print(f'The location of the binary is: {process.stdout}')
else:
print(f'Sorry the {program} is not installed')
print(process.stderr)
print('\n') | [((15, 14, 16, 59), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((10, 11, 10, 44), 're.match', 're.match', ({(10, 20, 10, 34): 'secure_pattern', (10, 36, 10, 43): 'program'}, {}), '(secure_pattern, program)', False, 'import re\n')] |
vo0doO/pydj-persweb | authentication/socialaccount/forms.py | efcd6b7090230f7c0b9ec056008f6d1d9e876ed9 | from __future__ import absolute_import
from django import forms
from authentication.account.forms import BaseSignupForm
from . import app_settings, signals
from .adapter import get_adapter
from .models import SocialAccount
class SignupForm(BaseSignupForm):
def __init__(self, *args, **kwargs):
self.sociallogin = kwargs.pop('sociallogin')
initial = get_adapter().get_signup_form_initial_data(
self.sociallogin)
kwargs.update({
'initial': initial,
'email_required': kwargs.get('email_required',
app_settings.EMAIL_REQUIRED)})
super(SignupForm, self).__init__(*args, **kwargs)
def save(self, request):
adapter = get_adapter(request)
user = adapter.save_user(request, self.sociallogin, form=self)
self.custom_signup(request, user)
return user
def validate_unique_email(self, value):
try:
return super(SignupForm, self).validate_unique_email(value)
except forms.ValidationError:
raise forms.ValidationError(
get_adapter().error_messages['email_taken']
% self.sociallogin.account.get_provider().name)
class DisconnectForm(forms.Form):
account = forms.ModelChoiceField(queryset=SocialAccount.objects.none(),
widget=forms.RadioSelect,
required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request')
self.accounts = SocialAccount.objects.filter(user=self.request.user)
super(DisconnectForm, self).__init__(*args, **kwargs)
self.fields['account'].queryset = self.accounts
def clean(self):
cleaned_data = super(DisconnectForm, self).clean()
account = cleaned_data.get('account')
if account:
get_adapter(self.request).validate_disconnect(
account,
self.accounts)
return cleaned_data
def save(self):
account = self.cleaned_data['account']
account.delete()
signals.social_account_removed.send(sender=SocialAccount,
request=self.request,
socialaccount=account)
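
# Usage sketch (not part of the original module): a view would typically bind the form
# with the current request, then validate and save on POST. `request` is a Django
# HttpRequest assumed to be available in that view.
#
#   form = DisconnectForm(request.POST or None, request=request)
#   if request.method == 'POST' and form.is_valid():
#       form.save()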
| [] |
nilsbeck/pytheos | pytheos/pytheos.py | de4f3a03330ddb28e68ddcaa7b4888ea9a25e238 | #!/usr/bin/env python
""" Provides the primary interface into the library """
from __future__ import annotations
import asyncio
import logging
from typing import Callable, Optional, Union
from . import utils
from . import controllers
from .networking.connection import Connection
from .networking.types import SSDPResponse
from .networking.errors import ChannelUnavailableError
from .models.heos import HEOSEvent
from .models.system import AccountStatus
logger = logging.getLogger('pytheos')
class Pytheos:
""" Pytheos interface """
DEFAULT_PORT = 1255
@staticmethod
def check_channel_availability(channel: Connection):
""" Checks to make sure that the provided channel is available.
:param channel: Channel connection
:raises: ChannelUnavailableError
:return: None
"""
if not channel or not channel.connected:
raise ChannelUnavailableError()
@property
def log_level(self):
return logger.level
@log_level.setter
def log_level(self, value):
logger.setLevel(value)
@property
def connected(self):
return self._connected
@property
def signed_in(self):
return self._account_status == AccountStatus.SignedIn
@property
def username(self):
return self._account_username
def __init__(self, server: Union[str, SSDPResponse]=None, port: Optional[int]=DEFAULT_PORT):
""" Constructor
:param server: Server hostname or IP
:param port: Port number
"""
if isinstance(server, SSDPResponse):
server = utils.extract_host(server.location)
self.server: str = server
self.port: int = port
self._command_channel = Connection()
self._event_channel = Connection()
self._event_queue = asyncio.Queue()
self._event_task: Optional[asyncio.Task] = None
self._event_processor: Optional[asyncio.Task] = None
self._connected: bool = False
self._event_subscriptions: dict = {}
self._receive_events: bool = True
self._account_status: Optional[AccountStatus] = None
self._account_username: Optional[str] = None
self._players: list = []
self._groups: dict = {} # FIXME?: Not sure I like having this as a dict.
self._sources: dict = {} # FIXME?: Not sure I like having this as a dict.
self.api: Connection = self._command_channel
self._init_internal_event_handlers()
def __repr__(self):
return f'<Pytheos(server={self.server}, port={self.port})>'
def __enter__(self):
if not self._connected:
self.connect()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._connected:
self.close()
async def connect(self, enable_event_connection: bool=True, refresh: bool=True) -> Pytheos:
""" Connect to our HEOS device.
:param enable_event_connection: Enables establishing an additional connection for system events
:param refresh: Determines if the system state should be automatically refreshed
:return: self
"""
logger.info(f'Connecting to {self.server}:{self.port}')
await self._command_channel.connect(self.server, self.port)
self._connected = True
self._receive_events = enable_event_connection
if self._receive_events:
await self._event_channel.connect(self.server, self.port, deduplicate=True)
await self.enable_event_reception(True)
loop = asyncio.get_running_loop()
self._event_task = loop.create_task(self._listen_for_events())
self._event_processor = loop.create_task(self._process_events())
if refresh:
await self.refresh()
return self
async def _set_register_for_change_events(self, value: bool):
""" Notifies HEOS that we want event messages on the event channel.
:param value: True or False
:return: None
"""
await self._event_channel.system.register_for_change_events(value)
def close(self):
""" Close the connection to our HEOS device
:return: None
"""
logger.info(f'Closing connection to {self.server}:{self.port}')
if self._event_task:
self._event_task.cancel()
if self._event_processor:
self._event_processor.cancel()
self._connected = False
def subscribe(self, event_name: str, callback: Callable):
""" Subscribe a callback function to a specific event
:param event_name: Event name
:param callback: Callback function
:return: None
"""
# FIXME: Change event_name to an enum
if self._event_subscriptions.get(event_name) is None:
self._event_subscriptions[event_name] = []
self._event_subscriptions[event_name].append(callback)
async def refresh(self):
""" Refreshes internal information from the HEOS system.
:return: None
"""
await self.check_account()
await self.get_players()
await self.get_groups()
await self.get_sources()
async def reboot(self):
""" Instructs the system to reboot.
:return: None
"""
await self.api.system.reboot()
async def check_account(self) -> tuple:
""" Checks if the system is logged into HEOS and returns the status and account name, if available.
:return: tuple
"""
self._account_status, self._account_username = await self.api.system.check_account()
return self._account_status, self._account_username
async def sign_in(self, username: str, password: str):
""" Signs the system into the HEOS service.
:param username: Username
:param password: Password
:return: None
"""
await self.api.system.sign_in(username, password)
async def sign_out(self):
""" Signs out from the HEOS service.
:return: None
"""
await self.api.system.sign_out()
async def get_players(self):
""" Retrieves a mapping of IDs to Players present in the HEOS system.
:return: list
"""
self._players = [controllers.Player(self, player) for player in await self.api.player.get_players()]
return self._players
async def get_group(self, group_id):
""" Retrieve a specific group by ID.
:param group_id: Group ID
:return: PytheosGroup
"""
groups = await self.get_groups()
return groups.get(group_id)
async def get_groups(self):
""" Retrieves a mapping of IDs to Groups present in the HEOS system.
:return: dict
"""
self._groups = {}
for group in await self.api.group.get_groups():
self._groups[group.group_id] = controllers.Group(self, group)
return self._groups
async def get_sources(self):
""" Retrieves a mapping of IDs to Sources present in the HEOS system.
:return:
"""
self._sources = {}
for source in await self.api.browse.get_music_sources():
self._sources[source.source_id] = controllers.Source(self, source)
return self._sources
def is_receiving_events(self):
""" Retrieves whether or not we're receiving events.
:return: bool
"""
return self._receive_events
async def enable_event_reception(self, value):
""" Enables or disables event reception.
:param value: True or False
:return: None
"""
self._receive_events = value
await self._set_register_for_change_events(value)
async def _listen_for_events(self):
""" Async task that reads messages from the event channel and adds them to our event queue for
later processing.
:return: None
"""
while True:
results = await self._event_channel.read_message()
if results:
event = HEOSEvent(results)
logger.debug(f"Received event: {event!r}")
await self._event_queue.put(event)
await asyncio.sleep(0.5)
async def _process_events(self):
""" Async task that processes events that originate from the event channel.
:return: None
"""
while True:
event = await self._event_queue.get()
if event:
logger.debug(f'Processing event: {event!r}')
await self._event_handler(event)
await asyncio.sleep(0.5)
async def _event_handler(self, event: HEOSEvent):
""" Internal event handler
:param event: HEOS Event
:return: None
"""
loop = asyncio.get_running_loop()
for callback in self._event_subscriptions.get(event.command, []):
logger.debug(f'Calling registered callback {callback} for event {event!r}')
loop.create_task(callback(event))
def _init_internal_event_handlers(self):
""" Initialize the internal event handlers
:return: None
"""
# FIXME: Meh, do something better with this.
internal_handler_map = {
# 'event/sources_changed': self._handle_sources_changed,
# 'event/players_changed': self._handle_players_changed,
# 'event/groups_changed': self._handle_groups_changed,
# 'event/player_state_changed': self._handle_player_state_changed,
# 'event/player_now_playing_changed': self._handle_now_playing_changed,
# 'event/player_now_playing_progress': self._handle_now_playing_progress,
# 'event/player_playback_error': self._handle_playback_error,
# 'event/player_queue_changed': self._handle_queue_changed,
# 'event/player_volume_changed': self._handle_volume_changed,
# 'event/repeat_mode_changed': self._handle_repeat_mode_changed,
# 'event/shuffle_mode_changed': self._handle_shuffle_mode_changed,
# 'event/group_volume_changed': self._handle_group_volume_changed,
# 'event/user_changed': self._handle_user_changed,
}
for event, callback in internal_handler_map.items():
self.subscribe(event, callback)
def _handle_sources_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_players_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_groups_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_player_state_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_now_playing_progress(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_playback_error(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_queue_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_repeat_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_shuffle_mode_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_group_volume_changed(self, event: HEOSEvent):
raise NotImplementedError()
def _handle_user_changed(self, event: HEOSEvent):
raise NotImplementedError()
async def connect(host: Union[SSDPResponse, str], port: int=Pytheos.DEFAULT_PORT) -> Pytheos:
""" Connect to the provided host and return a context manager for use with the connection.
:param host: Host to connect to
:param port: Port to connect to
:raises: ValueError
:return: The Pytheos instance
"""
if isinstance(host, SSDPResponse):
host = utils.extract_host(host.location)
conn = Pytheos(host, port)
return await conn.connect()
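
# Usage sketch (not part of the original module): connect to a device and list its
# players. The host address is a placeholder.
#
#   import asyncio
#   from pytheos.pytheos import connect
#
#   async def main():
#       heos = await connect('192.168.1.10')
#       print(await heos.get_players())
#       heos.close()
#
#   asyncio.run(main())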
| [((18, 9, 18, 37), 'logging.getLogger', 'logging.getLogger', ({(18, 27, 18, 36): '"""pytheos"""'}, {}), "('pytheos')", False, 'import logging\n'), ((70, 28, 70, 43), 'asyncio.Queue', 'asyncio.Queue', ({}, {}), '()', False, 'import asyncio\n'), ((296, 15, 296, 41), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ({}, {}), '()', False, 'import asyncio\n'), ((116, 19, 116, 45), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ({}, {}), '()', False, 'import asyncio\n'), ((275, 18, 275, 36), 'asyncio.sleep', 'asyncio.sleep', ({(275, 32, 275, 35): '(0.5)'}, {}), '(0.5)', False, 'import asyncio\n'), ((288, 18, 288, 36), 'asyncio.sleep', 'asyncio.sleep', ({(288, 32, 288, 35): '(0.5)'}, {}), '(0.5)', False, 'import asyncio\n')] |
FelixLuciano/DesSoft-2020.2 | source/188-Lista_celulares.py | a44063d63778329f1e1266881f20f7954ecb528b | # Lista celulares (cell phone list)
# The marketing department of your company wants to obtain only the cell phone numbers, separating them from the landlines. To simplify this task, only numbers that have the extra digit 9 right after the area code are considered cell phones.
# You were given the task of producing a list with the cell phone numbers, without the area code. However, the marketing department's phone records are not standardized and the numbers follow 3 distinct formats:
# 1. Full numbers (13 or 14 characters), including the country code (+55) and the area code (e.g. 11). Examples: '+5511912345678' or '+551133334444' (note that both start with the '+' character);
# 2. Numbers containing only the area code (10 or 11 characters). Examples: '11987654321' or '1155556666';
# 3. Numbers without the area code (8 or 9 characters). Examples: '918273645' or '77778888'.
# Note that in every case the first example is a cell phone number and the second one is not.
# Write a function that receives a list of phone numbers and returns a list containing only the cell phones. Each phone in the input list (received as the argument of your function) may be in any of the 3 formats above. The phones in the output list (returned by your function) must contain only the digits of the phone itself, removing the country code and the area code when necessary.
# Example: the call lista_celulares(['+5511912345678', '1155556666', '77778888', '+551133334444', '918273645', '11987654321']) must return the list ['912345678', '918273645', '987654321']
# Your function must be named lista_celulares.
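
# A possible solution sketch (not part of the original handout): normalize each number
# to its local part, then keep the ones whose local part starts with the extra digit 9.
def lista_celulares(telefones):
    celulares = []
    for numero in telefones:
        if numero.startswith('+55'):
            # full format: '+55' + 2-digit area code + local number
            local = numero[5:]
        elif len(numero) in (10, 11):
            # area code + local number
            local = numero[2:]
        else:
            # already just the local number
            local = numero
        if local.startswith('9'):
            celulares.append(local)
    return celulares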
| [] |
1goodday/Google-Dictionary-Pronunciation.ankiaddon | test_modules/language_dictionary_test.py | 35837802e41d81733aec656fbf4ad1c8e4aeec5e | import csv
# Build a mapping from ISO-639-1 code to language name from the bundled CSV.
_iso_639_1_codes_file = open("files/ISO-639-1_Codes.csv", mode='r')
_iso_639_1_codes_dictreader = csv.DictReader(_iso_639_1_codes_file)

_iso_639_1_codes_dict: dict = {}
for _row in _iso_639_1_codes_dictreader:
    _iso_639_1_codes_dict[_row['ISO-639-1 Code']] = _row['Language']
_iso_639_1_codes_file.close()
print(str(_iso_639_1_codes_dict)) | [((4, 30, 4, 67), 'csv.DictReader', 'csv.DictReader', ({(4, 45, 4, 66): '_iso_639_1_codes_file'}, {}), '(_iso_639_1_codes_file)', False, 'import csv\n')] |
DZ9/tianshou | tianshou/data/collector.py | 04208e6cce722b7a2353d9a5f4d6f0fc05797d67 | import time
import torch
import warnings
import numpy as np
from tianshou.env import BaseVectorEnv
from tianshou.data import Batch, ReplayBuffer,\
ListReplayBuffer
from tianshou.utils import MovAvg
class Collector(object):
"""docstring for Collector"""
def __init__(self, policy, env, buffer=None, stat_size=100):
super().__init__()
self.env = env
self.env_num = 1
self.collect_step = 0
self.collect_episode = 0
self.collect_time = 0
if buffer is None:
self.buffer = ReplayBuffer(100)
else:
self.buffer = buffer
self.policy = policy
self.process_fn = policy.process_fn
self._multi_env = isinstance(env, BaseVectorEnv)
self._multi_buf = False # True if buf is a list
# need multiple cache buffers only if storing in one buffer
self._cached_buf = []
if self._multi_env:
self.env_num = len(env)
if isinstance(self.buffer, list):
assert len(self.buffer) == self.env_num, \
'The number of data buffer does not match the number of ' \
'input env.'
self._multi_buf = True
elif isinstance(self.buffer, ReplayBuffer):
self._cached_buf = [
ListReplayBuffer() for _ in range(self.env_num)]
else:
raise TypeError('The buffer in data collector is invalid!')
self.reset_env()
self.reset_buffer()
# state over batch is either a list, an np.ndarray, or a torch.Tensor
self.state = None
self.step_speed = MovAvg(stat_size)
self.episode_speed = MovAvg(stat_size)
def reset_buffer(self):
if self._multi_buf:
for b in self.buffer:
b.reset()
else:
self.buffer.reset()
def get_env_num(self):
return self.env_num
def reset_env(self):
self._obs = self.env.reset()
self._act = self._rew = self._done = self._info = None
if self._multi_env:
self.reward = np.zeros(self.env_num)
self.length = np.zeros(self.env_num)
else:
self.reward, self.length = 0, 0
for b in self._cached_buf:
b.reset()
def seed(self, seed=None):
if hasattr(self.env, 'seed'):
return self.env.seed(seed)
def render(self, **kwargs):
if hasattr(self.env, 'render'):
return self.env.render(**kwargs)
def close(self):
if hasattr(self.env, 'close'):
self.env.close()
def _make_batch(self, data):
if isinstance(data, np.ndarray):
return data[None]
else:
return np.array([data])
def collect(self, n_step=0, n_episode=0, render=0):
warning_count = 0
if not self._multi_env:
n_episode = np.sum(n_episode)
start_time = time.time()
assert sum([(n_step != 0), (n_episode != 0)]) == 1, \
"One and only one collection number specification permitted!"
cur_step = 0
cur_episode = np.zeros(self.env_num) if self._multi_env else 0
reward_sum = 0
length_sum = 0
while True:
if warning_count >= 100000:
warnings.warn(
'There are already many steps in an episode. '
'You should add a time limitation to your environment!',
Warning)
if self._multi_env:
batch_data = Batch(
obs=self._obs, act=self._act, rew=self._rew,
done=self._done, obs_next=None, info=self._info)
else:
batch_data = Batch(
obs=self._make_batch(self._obs),
act=self._make_batch(self._act),
rew=self._make_batch(self._rew),
done=self._make_batch(self._done),
obs_next=None,
info=self._make_batch(self._info))
result = self.policy(batch_data, self.state)
self.state = result.state if hasattr(result, 'state') else None
if isinstance(result.act, torch.Tensor):
self._act = result.act.detach().cpu().numpy()
elif not isinstance(self._act, np.ndarray):
self._act = np.array(result.act)
else:
self._act = result.act
obs_next, self._rew, self._done, self._info = self.env.step(
self._act if self._multi_env else self._act[0])
if render > 0:
self.env.render()
time.sleep(render)
self.length += 1
self.reward += self._rew
if self._multi_env:
for i in range(self.env_num):
data = {
'obs': self._obs[i], 'act': self._act[i],
'rew': self._rew[i], 'done': self._done[i],
'obs_next': obs_next[i], 'info': self._info[i]}
if self._cached_buf:
warning_count += 1
self._cached_buf[i].add(**data)
elif self._multi_buf:
warning_count += 1
self.buffer[i].add(**data)
cur_step += 1
else:
warning_count += 1
self.buffer.add(**data)
cur_step += 1
if self._done[i]:
if n_step != 0 or np.isscalar(n_episode) or \
cur_episode[i] < n_episode[i]:
cur_episode[i] += 1
reward_sum += self.reward[i]
length_sum += self.length[i]
if self._cached_buf:
cur_step += len(self._cached_buf[i])
self.buffer.update(self._cached_buf[i])
self.reward[i], self.length[i] = 0, 0
if self._cached_buf:
self._cached_buf[i].reset()
if isinstance(self.state, list):
self.state[i] = None
elif self.state is not None:
if isinstance(self.state[i], dict):
self.state[i] = {}
else:
self.state[i] = self.state[i] * 0
if isinstance(self.state, torch.Tensor):
# remove ref count in pytorch (?)
self.state = self.state.detach()
if sum(self._done):
obs_next = self.env.reset(np.where(self._done)[0])
if n_episode != 0:
if isinstance(n_episode, list) and \
(cur_episode >= np.array(n_episode)).all() or \
np.isscalar(n_episode) and \
cur_episode.sum() >= n_episode:
break
else:
self.buffer.add(
self._obs, self._act[0], self._rew,
self._done, obs_next, self._info)
cur_step += 1
if self._done:
cur_episode += 1
reward_sum += self.reward
length_sum += self.length
self.reward, self.length = 0, 0
self.state = None
obs_next = self.env.reset()
if n_episode != 0 and cur_episode >= n_episode:
break
if n_step != 0 and cur_step >= n_step:
break
self._obs = obs_next
self._obs = obs_next
if self._multi_env:
cur_episode = sum(cur_episode)
duration = time.time() - start_time
self.step_speed.add(cur_step / duration)
self.episode_speed.add(cur_episode / duration)
self.collect_step += cur_step
self.collect_episode += cur_episode
self.collect_time += duration
if isinstance(n_episode, list):
n_episode = np.sum(n_episode)
else:
n_episode = max(cur_episode, 1)
return {
'n/ep': cur_episode,
'n/st': cur_step,
'v/st': self.step_speed.get(),
'v/ep': self.episode_speed.get(),
'rew': reward_sum / n_episode,
'len': length_sum / n_episode,
}
def sample(self, batch_size):
if self._multi_buf:
if batch_size > 0:
lens = [len(b) for b in self.buffer]
total = sum(lens)
batch_index = np.random.choice(
total, batch_size, p=np.array(lens) / total)
else:
batch_index = np.array([])
batch_data = Batch()
for i, b in enumerate(self.buffer):
cur_batch = (batch_index == i).sum()
if batch_size and cur_batch or batch_size <= 0:
batch, indice = b.sample(cur_batch)
batch = self.process_fn(batch, b, indice)
batch_data.append(batch)
else:
batch_data, indice = self.buffer.sample(batch_size)
batch_data = self.process_fn(batch_data, self.buffer, indice)
return batch_data
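
# Usage sketch (not part of the original file): `policy` and `env` are assumed to be a
# tianshou policy and a (possibly vectorized) environment created elsewhere.
#
#   buf = ReplayBuffer(20000)
#   collector = Collector(policy, env, buffer=buf)
#   stats = collector.collect(n_step=1000)    # {'n/ep', 'n/st', 'v/st', 'v/ep', 'rew', 'len'}
#   batch = collector.sample(batch_size=64)   # processed batch ready for training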
| [((47, 26, 47, 43), 'tianshou.utils.MovAvg', 'MovAvg', ({(47, 33, 47, 42): 'stat_size'}, {}), '(stat_size)', False, 'from tianshou.utils import MovAvg\n'), ((48, 29, 48, 46), 'tianshou.utils.MovAvg', 'MovAvg', ({(48, 36, 48, 45): 'stat_size'}, {}), '(stat_size)', False, 'from tianshou.utils import MovAvg\n'), ((93, 21, 93, 32), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((22, 26, 22, 43), 'tianshou.data.ReplayBuffer', 'ReplayBuffer', ({(22, 39, 22, 42): '100'}, {}), '(100)', False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((64, 26, 64, 48), 'numpy.zeros', 'np.zeros', ({(64, 35, 64, 47): 'self.env_num'}, {}), '(self.env_num)', True, 'import numpy as np\n'), ((65, 26, 65, 48), 'numpy.zeros', 'np.zeros', ({(65, 35, 65, 47): 'self.env_num'}, {}), '(self.env_num)', True, 'import numpy as np\n'), ((87, 19, 87, 35), 'numpy.array', 'np.array', ({(87, 28, 87, 34): '[data]'}, {}), '([data])', True, 'import numpy as np\n'), ((92, 24, 92, 41), 'numpy.sum', 'np.sum', ({(92, 31, 92, 40): 'n_episode'}, {}), '(n_episode)', True, 'import numpy as np\n'), ((97, 22, 97, 44), 'numpy.zeros', 'np.zeros', ({(97, 31, 97, 43): 'self.env_num'}, {}), '(self.env_num)', True, 'import numpy as np\n'), ((200, 19, 200, 30), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((207, 24, 207, 41), 'numpy.sum', 'np.sum', ({(207, 31, 207, 40): 'n_episode'}, {}), '(n_episode)', True, 'import numpy as np\n'), ((228, 25, 228, 32), 'tianshou.data.Batch', 'Batch', ({}, {}), '()', False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((102, 16, 105, 28), 'warnings.warn', 'warnings.warn', ({(103, 20, 104, 75): '"""There are already many steps in an episode. You should add a time limitation to your environment!"""', (105, 20, 105, 27): 'Warning'}, {}), "(\n 'There are already many steps in an episode. You should add a time limitation to your environment!'\n , Warning)", False, 'import warnings\n'), ((107, 29, 109, 68), 'tianshou.data.Batch', 'Batch', (), '', False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((130, 16, 130, 34), 'time.sleep', 'time.sleep', ({(130, 27, 130, 33): 'render'}, {}), '(render)', False, 'import time\n'), ((227, 30, 227, 42), 'numpy.array', 'np.array', ({(227, 39, 227, 41): '[]'}, {}), '([])', True, 'import numpy as np\n'), ((123, 28, 123, 48), 'numpy.array', 'np.array', ({(123, 37, 123, 47): 'result.act'}, {}), '(result.act)', True, 'import numpy as np\n'), ((40, 20, 40, 38), 'tianshou.data.ListReplayBuffer', 'ListReplayBuffer', ({}, {}), '()', False, 'from tianshou.data import Batch, ReplayBuffer, ListReplayBuffer\n'), ((151, 42, 151, 64), 'numpy.isscalar', 'np.isscalar', ({(151, 54, 151, 63): 'n_episode'}, {}), '(n_episode)', True, 'import numpy as np\n'), ((173, 46, 173, 66), 'numpy.where', 'np.where', ({(173, 55, 173, 65): 'self._done'}, {}), '(self._done)', True, 'import numpy as np\n'), ((177, 28, 177, 50), 'numpy.isscalar', 'np.isscalar', ({(177, 40, 177, 49): 'n_episode'}, {}), '(n_episode)', True, 'import numpy as np\n'), ((225, 41, 225, 55), 'numpy.array', 'np.array', ({(225, 50, 225, 54): 'lens'}, {}), '(lens)', True, 'import numpy as np\n'), ((176, 44, 176, 63), 'numpy.array', 'np.array', ({(176, 53, 176, 62): 'n_episode'}, {}), '(n_episode)', True, 'import numpy as np\n')] |
henriquebraga/drink-partners | drink_partners/partners/tests/views/test_search_partner_view.py | 4702263ae3e43ea9403cff5a72b68245d61880c7 | from drink_partners.contrib.samples import partner_bar_legal
class TestSearchPartner:
async def test_should_return_bad_request_for_str_coordinates(
self,
client,
partner_search_with_str_coordinates_url
):
async with client.get(partner_search_with_str_coordinates_url) as response: # noqa
assert response.status == 400
response_json = await response.json()
assert response_json['error_code'] == 'bad_request'
assert response_json['error_message'] == (
'Invalid coordinate longitude:a latitude:a'
)
async def test_should_return_nearest_partner_for_coordinate(
self,
client,
partner_search_coordinates_url,
save_partners
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 200
response_json = await response.json()
assert response_json == partner_bar_legal()
async def test_should_return_not_found_when_no_partner_covers_coordinate(
self,
client,
partner_search_coordinates_url
):
async with client.get(partner_search_coordinates_url) as response: # noqa
assert response.status == 404
response_json = await response.json()
assert response_json['error_code'] == 'not_found'
assert response_json['error_message'] == (
'Partners not found covering area for '
'latitude:-43.36556 longitude:-22.99669'
)
| [((32, 36, 32, 55), 'drink_partners.contrib.samples.partner_bar_legal', 'partner_bar_legal', ({}, {}), '()', False, 'from drink_partners.contrib.samples import partner_bar_legal\n')] |
ysh329/Titanic-Machine-Learning-from-Disaster | Titanic/class_create_model_of_logistic_regression.py | d2ba330625e40b648b2946a8ca221198af148368 | # -*- coding: utf-8 -*-
# !/usr/bin/python
################################### PART0 DESCRIPTION #################################
# Filename: class_create_model_of_logistic_regression.py
# Description:
#
# Author: Shuai Yuan
# E-mail: [email protected]
# Create: 2016-01-23 23:32:49
# Last:
__author__ = 'yuens'
################################### PART1 IMPORT ######################################
import MySQLdb
import logging
import time
import pylab
from numpy import *   # numpy's exp/log are used so the sigmoid below works element-wise on matrices
import csv
import decorator_of_function
################################### PART2 CLASS && FUNCTION ###########################
class CreateLogisticRegressionModel(object):
Decorator = decorator_of_function.CreateDecorator()
@Decorator.log_of_function
def __init__(self):
self.start = time.clock()
logging.basicConfig(level = logging.INFO,
format = '%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s',
datefmt = '%y-%m-%d %H:%M:%S',
filename = 'main.log',
filemode = 'a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)5s %(filename)19s[line:%(lineno)3d] %(funcName)s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.info("START CLASS {class_name}.".format(class_name = CreateLogisticRegressionModel.__name__))
try:
self.con = MySQLdb.connect(host='localhost', user='root', passwd='931209', charset='utf8')
logging.info("Success in connecting MySQL.")
except MySQLdb.Error, e:
logging.error("Fail in connecting MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
@Decorator.log_of_function
def __del__(self):
try:
self.con.close()
logging.info("Success in quiting MySQL.")
except MySQLdb.Error, e:
self.con.rollback()
logging.error("Fail in quiting MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
logging.info("END CLASS {class_name}.".format(class_name = CreateLogisticRegressionModel.__name__))
self.end = time.clock()
logging.info("The class {class_name} run time is : {delta_time} seconds".format(class_name = CreateLogisticRegressionModel.__name__, delta_time = self.end - self.start))
@Decorator.log_of_function
def get_data_from_database(self, database_name, passenger_table_name):
cursor = self.con.cursor()
sql_list = []
# training set
sql_list.append("""SELECT PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch FROM {database_name}.{table_name} WHERE Is_train=1"""\
.format(database_name = database_name,\
table_name = passenger_table_name)\
)
# test set
sql_list.append("""SELECT PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch FROM {database_name}.{table_name} WHERE Is_train=0"""\
.format(database_name = database_name,\
table_name = passenger_table_name)\
)
for sql_idx in xrange(len(sql_list)):
sql = sql_list[sql_idx]
try:
cursor.execute(sql)
if sql_idx == 0:
train_data = cursor.fetchall()
logging.info("len(train_data):{0}".format(len(train_data)))
logging.info("train_data[0]:{0}".format(train_data[0]))
logging.info("type(train_data[0]):{0}".format(type(train_data[0])))
elif sql_idx == 1:
test_data = cursor.fetchall()
logging.info("len(test_data):{0}".format(len(test_data)))
logging.info("test_data[0]:{0}".format(test_data[0]))
logging.info("type(test_data[0]):{0}".format(type(test_data[0])))
except MySQLdb.Error, e:
self.con.rollback()
logging.error("Fail in fetch data from MySQL.")
logging.error("MySQL Error {error_num}: {error_info}.".format(error_num = e.args[0], error_info = e.args[1]))
train_data = map(lambda (PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch):\
(int(PassengerId),\
int(Survived),\
int(Pclass),\
Sex,\
int(Age),\
int(SibSp),\
int(Parch)\
),\
train_data)
logging.info("len(train_data):{0}".format(len(train_data)))
logging.info("train_data[0]:{0}".format(train_data[0]))
logging.info("type(train_data[0]):{0}".format(type(train_data[0])))
test_data = map(lambda (PassengerId, Survived, Pclass, Sex, Age, SibSp, Parch):\
(int(PassengerId),\
int(Survived),\
int(Pclass),\
Sex,\
int(Age),\
int(SibSp),\
int(Parch)\
),\
test_data)
logging.info("len(test_data):{0}".format(len(test_data)))
logging.info("test_data[0]:{0}".format(test_data[0]))
logging.info("type(test_data[0]):{0}".format(type(test_data[0])))
return train_data, test_data
@Decorator.log_of_function
def add_intercept_term(self, train_feature_tuple_list, test_feature_tuple_list):
logging.info("len(train_feature_tuple_list[0]):{0}".format(len(train_feature_tuple_list[0])))
logging.info("len(train_feature_tuple_list):{0}".format(len(train_feature_tuple_list)))
logging.info("train_feature_tuple_list[0]:{0}".format(train_feature_tuple_list[0]))
logging.info("test_feature_tuple_list[0]:{0}".format(len(test_feature_tuple_list[0])))
logging.info("len(test_feature_tuple_list):{0}".format(len(test_feature_tuple_list)))
logging.info("test_feature_tuple_list[0]:{0}".format(test_feature_tuple_list[0]))
# len(train_feature_tuple_list[0]): 7
# PassengerId, Pclass, Sex, Age, SibSp, Parch, Fare
train_feature_intercept_term_added_tuple_list = map(lambda (PassengerId, Pclass, Sex, Age, SibSp, Parch, Fare): \
(PassengerId, 1.0, Pclass, Sex, Age, SibSp, Parch, Fare),\
train_feature_tuple_list)
test_feature_intercept_term_added_tuple_list = map(lambda (PassengerId, Pclass, Sex, Age, SibSp, Parch, Fare): \
(PassengerId, 1.0, Pclass, Sex, Age, SibSp, Parch, Fare),\
test_feature_tuple_list)
logging.info("len(train_feature_intercept_term_added_tuple_list):{0}".format(len(train_feature_intercept_term_added_tuple_list)))
logging.info("train_feature_intercept_term_added_tuple_list[0]:{0}".format(train_feature_intercept_term_added_tuple_list[0]))
logging.info("len(test_feature_intercept_term_added_tuple_list):{0}".format(len(test_feature_intercept_term_added_tuple_list)))
logging.info("test_feature_intercept_term_added_tuple_list[0]:{0}".format(test_feature_intercept_term_added_tuple_list[0]))
return train_feature_intercept_term_added_tuple_list,\
test_feature_intercept_term_added_tuple_list
@Decorator.log_of_function
def sigmoid_function(self, inX):
return 1.0 / (1.0 + exp(-inX))
@Decorator.log_of_function
def gradient_descent(self, train_feature_tuple_list, train_label_list, learning_rate = 0.01, max_iteration_time = 500, lambda_regularization = 0.1):
############################
# Initial parameters
# learning_rate = 0.01
# max_iteration_time = 500
############################
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare),\
train_feature_tuple_list)
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Sex, Fare),\
train_feature_tuple_list)
        # [891, 3]: one (InterceptTerm, Sex, Fare) row per training sample
train_input_matrix = mat(train_feature_tuple_list_without_PassengerId)
# [891, 1]
train_label_matrix = mat(train_label_list).transpose()
train_sample_num, feature_num = shape(train_input_matrix)
weight_matrix = ones((feature_num, 1))
cost_list = []
error_list = []
optimal_solution = {}
for cur_iter in xrange(max_iteration_time):
            # [891, 1] <- [891, 3]*[3, 1]
hypothesis = self.sigmoid_function(train_input_matrix * weight_matrix)
# real <- sum([891, 1]T*[891, 1] + [891, 1]T*[891, 1])
cost = -float(1) / (train_sample_num) * \
sum( train_label_matrix.transpose()*log(hypothesis) + (1-train_label_matrix.transpose())*log(1-hypothesis) ) + \
lambda_regularization / (2*train_sample_num) * (array(weight_matrix[1:]) * array(weight_matrix[1:])).sum()
cost_list.append(cost)
# [891, 1]
error = train_label_matrix - hypothesis
error_list.append(error)
logging.info("cur_iter:{0}, cost:{1}, sum(error):{2}".format(cur_iter+1, cost, sum(error)))
# 1 = 1 + 1 * [891, 1].T *[891, 1]
weight_matrix[0] = weight_matrix[0] + learning_rate * (float(1)/train_sample_num) * train_input_matrix[:, 0].transpose() * error
            # [2, 1] = [2, 1] + learning_rate * \
            #          ( 1/m * [891, 2].T * [891, 1] - lambda/m * [2, 1]
            #          )
weight_matrix[1:] = weight_matrix[1:] + learning_rate * \
( (float(1)/train_sample_num) * train_input_matrix[:, 1::].transpose() * error - \
float(lambda_regularization) / train_sample_num * weight_matrix[1:] \
)
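            # Update rule sketch: theta_0 += alpha * (1/m) * X[:, 0].T * (y - h); for j >= 1,
            # theta_j += alpha * ((1/m) * X[:, j].T * (y - h) - (lambda/m) * theta_j),
            # i.e. a gradient step that decreases the regularized cost computed above.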
#weight_matrix = weight_matrix + learning_rate * train_input_matrix.transpose() * error
#"""
# find optimal solution
if cur_iter == 0:
optimal_solution['cur_iter'] = cur_iter
optimal_solution['cost'] = cost
optimal_solution['abs(error.sum())'] = abs(error.sum())
optimal_solution['weight_matrix'] = weight_matrix
elif cur_iter != 0 and optimal_solution['abs(error.sum())'] > abs(error.sum()):
optimal_solution['cur_iter'] = cur_iter
optimal_solution['cost'] = cost
optimal_solution['abs(error.sum())'] = abs(error.sum())
optimal_solution['weight_matrix'] = weight_matrix
logging.info("optimal_solution['cur_iter']:{0}".format(optimal_solution['cur_iter']))
logging.info("optimal_solution['cost':{0}".format(optimal_solution['cost']))
logging.info("optimal_solution['abs(error.sum())']:{0}".format(optimal_solution['abs(error.sum())']))
logging.info("optimal_solution['weight_matrix'].tolist():{0}".format(optimal_solution['weight_matrix'].tolist()))
#"""
pylab.plot(cost_list)
pylab.show()
return weight_matrix
#return optimal_solution['weight_matrix']
@Decorator.log_of_function
def predict(self, train_feature_tuple_list, weight_matrix):
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare),\
train_feature_tuple_list)
'''
train_feature_tuple_list_without_PassengerId = map(lambda (PassengerId, InterceptTerm, Pclass, Sex, Age, SibSp, Parch, Fare):\
(InterceptTerm, Sex, Fare),\
train_feature_tuple_list)
train_input_matrix = mat(train_feature_tuple_list_without_PassengerId)
predict_prob_matrix = self.sigmoid_function(train_input_matrix * weight_matrix)
'''
row, col = shape(predict_label_matrix)
for i in xrange(row):
print i+1, predict_label_matrix[i][0]
'''
predict_prob_list = predict_prob_matrix.transpose().tolist()[0]
predict_label_list = []
for prob_idx in xrange(len(predict_prob_list)):
predict_prob = predict_prob_list[prob_idx]
if predict_prob > 0.5:
predict_label_list.append(1)
else:
predict_label_list.append(0)
return predict_label_list
@Decorator.log_of_function
def accuracy(self, train_label_list, predict_label_list):
logging.info("len(train_label_list):{0}".format(len(train_label_list)))
logging.info("len(predict_label_list):{0}".format(len(predict_label_list)))
# compute accuracy
def compute_accuracy(train_label_list, predict_label_list):
right_predict_num = 0
if len(train_label_list) == len(predict_label_list):
for idx in xrange(len(train_label_list)):
if train_label_list[idx] == predict_label_list[idx]:
right_predict_num = right_predict_num + 1
accuracy = float(right_predict_num)/len(train_label_list)
return right_predict_num, accuracy
def compute_precision_and_recall_and_F1(train_label_list, predict_label_list):
if len(train_label_list) == len(predict_label_list):
# compute precision and recall
true_positive_num = 10E-10
true_negative_num = 10E-10
predicted_positive_num = predict_label_list.count(1)
predicted_negative_num = predict_label_list.count(0)
for idx in xrange(len(train_label_list)):
if predict_label_list[idx] == train_label_list[idx] == 1:
true_positive_num = true_positive_num + 1
elif predict_label_list[idx] == train_label_list[idx] == 0:
true_negative_num = true_negative_num + 1
precision = float(true_positive_num) / (predicted_positive_num + 10E-10)
                # recall = TP / actual positives (true positives + false negatives)
                recall = float(true_positive_num) / (train_label_list.count(1) + 10E-10)
F1 = 2 * precision * recall / (precision + recall)
return precision, recall, F1
right_predict_num, accuracy = compute_accuracy(train_label_list = train_label_list,\
predict_label_list = predict_label_list)
logging.info("right_predict_num:{0}".format(right_predict_num))
logging.info("accuracy:{0}".format(accuracy))
precision, recall, F1 = compute_precision_and_recall_and_F1(train_label_list = train_label_list,\
predict_label_list = predict_label_list)
logging.info("precision:{0}".format(precision))
logging.info("recall:{0}".format(recall))
logging.info("F1:{0}".format(F1))
return accuracy, precision, recall, F1
@Decorator.log_of_function
def write_csv_file(self, start_id, predict_label_list, result_csv_dir):
# open csv file
try:
result_csv_handle = file(result_csv_dir, 'wb')
logging.info("Success in attaining file handle of {0}.".format(result_csv_dir))
except Exception as e:
logging.error("Fail in attaining file handle of {0}.".format(result_csv_dir))
logging.error(e)
return -1
# create csv writer
result_csv_writer = csv.writer(result_csv_handle)
# write csv file
result_csv_writer.writerow(["PassengerId", "Survived"])
for list_idx in xrange(len(predict_label_list)):
PassengerId = start_id + list_idx
predict_label = predict_label_list[list_idx]
result_csv_writer.writerow([PassengerId, predict_label])
# close csv file
try:
result_csv_handle.close()
logging.info("Success in closing file handle of {0}.".format(result_csv_dir))
except Exception as e:
logging.error("Fail in closing file handle of {0}.".format(result_csv_dir))
logging.error(e)
@Decorator.log_of_function
    def plot_decision_boundary(self, weight_matrix):
pass
################################### PART3 CLASS TEST ##################################
"""
# Initial parameters
database_name = "TitanicDB"
passenger_table_name = "passenger_table"
LRModel = CreateLogisticRegressionModel()
""" | [] |
mje-nz/mjecv | mjecv/io/base.py | 9a02c005a0abc7d21594f65c348cfe5185c90184 | import multiprocessing
from typing import List, Optional
import numpy as np
from ..util import dill_for_apply
class ImageSequenceWriter:
def __init__(self, pattern, writer, *, max_index=None):
if type(pattern) is not str:
raise ValueError("Pattern must be string")
if pattern.format(1, index="1") == pattern.format(2, index="2"):
raise ValueError("Pattern must use {} or {index}")
self._pattern = pattern
self._writer = writer
self._max_index = max_index
self._index = 1
@property
def next_filename(self):
index = str(self._index)
if self._max_index:
index = "{:0{}d}".format(self._index, len(str(self._max_index)))
return self._pattern.format(self._index, index=index)
def _save(self, filename: str, image: np.ndarray):
self._writer(filename, image)
def save(self, image: np.ndarray):
self._save(self.next_filename, image)
self._index += 1
def finish(self):
pass
class MultiprocessingImageSequenceWriter(ImageSequenceWriter):
"""Image sequence writer that uses multiprocessing to save several images in
parallel.
This falls apart for large objects, as multiprocessing pickles them and pipes them
into the subprocesses.
"""
def __init__(self, *args, max_workers=None, max_waiting=None, **kwargs):
super().__init__(*args, **kwargs)
if max_workers is None:
max_workers = multiprocessing.cpu_count() - 1
ctx = multiprocessing.get_context("spawn")
self._pool = ctx.Pool(max_workers)
if max_waiting is not None:
# Semaphore's value is number of slots available for tasks to wait in
self._sem = ctx.Semaphore(
max_waiting
) # type: Optional[multiprocessing.synchronize.Semaphore]
else:
self._sem = None
self._results = [] # type: List[multiprocessing.pool.AsyncResult]
def __del__(self):
self.terminate()
def _save(self, filename: str, image: np.ndarray):
# Limit number of waiting tasks
if self._sem:
self._sem.acquire()
def callback(v):
assert self._sem is not None
self._sem.release()
else:
callback = None # type: ignore
args = (self._writer, (filename, image))
if dill_for_apply:
# Use dill instead of pickle, and make sure writer returns the filename
_writer = self._writer # Exclude self from capture to avoid dilling _pool
args = dill_for_apply(lambda f, i: _writer(f, i) or f, filename, image)
result = self._pool.apply_async(
*args, callback=callback, error_callback=callback,
)
self._results.append(result)
def terminate(self):
self._pool.terminate()
self._pool.join()
def finish(self, result_handler=None):
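        # Block until every queued save has completed, handing each result to
        # result_handler (if given) as it arrives.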
try:
# self._pool.close()
for result in self._results:
filename = result.get()
if result_handler is not None:
result_handler(filename)
self._pool.close()
except KeyboardInterrupt:
self._pool.terminate()
finally:
self._pool.join()
| [((50, 14, 50, 50), 'multiprocessing.get_context', 'multiprocessing.get_context', ({(50, 42, 50, 49): '"""spawn"""'}, {}), "('spawn')", False, 'import multiprocessing\n'), ((49, 26, 49, 53), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ({}, {}), '()', False, 'import multiprocessing\n')] |
gengwg/leetcode | 377_combination_sum_iv.py | 0af5256ec98149ef5863f3bba78ed1e749650f6e | # 377 Combination Sum IV
# Given an integer array with all positive numbers and no duplicates,
# find the number of possible combinations that add up to a positive integer target.
#
# Example:
#
# nums = [1, 2, 3]
# target = 4
#
# The possible combination ways are:
# (1, 1, 1, 1)
# (1, 1, 2)
# (1, 2, 1)
# (1, 3)
# (2, 1, 1)
# (2, 2)
# (3, 1)
#
# Note that different sequences are counted as different combinations.
#
# Therefore the output is 7.
#
# Follow up:
# What if negative numbers are allowed in the given array?
# How does it change the problem?
# What limitation we need to add to the question to allow negative numbers?
class Solution:
def combinationSum4(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
nums.sort()
res = [0] * (target + 1)
for i in range(1, len(res)):
for num in nums:
if num > i:
break
elif num == i:
res[i] += 1
else:
res[i] += res[i-num]
return res[target]
# https://www.hrwhisper.me/leetcode-combination-sum-iv/
# dp[i] += dp[i-num]
def combinationSum4(self, nums, target):
dp = [1] + [0] * target
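        # dp[i] counts the ordered sequences of nums that sum to i; dp[0] = 1 (empty sum)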
for i in range(1, target+1):
for num in nums:
if i >= num:
dp[i] += dp[i-num]
return dp[target]
print(Solution().combinationSum4([1, 2, 3], 4))
| [] |
koeleck/conan-packages | nvidia-texture-tools/conanfile.py | da43e82c2444e934e69a38e524998d028f8edcc3 | from conans import ConanFile, CMake, tools
import os
STATIC_LIBS = ["nvtt", "squish", "rg_etc1", "nvimage", "bc6h", "posh",
"bc7", "nvmath", "nvthread", "nvcore"]
SHARED_LIBS = ["nvtt", "nvimage", "nvthread", "nvmath", "nvcore"]
class NvidiatexturetoolsConan(ConanFile):
name = "nvidia-texture-tools"
version = "662d223626185f7c6c7e0d822a4796a691acc05a"
license = "MIT"
author = "koeleck"
url = "<Package recipe repository url here, for issues about the package>"
description = "The NVIDIA Texture Tools is a collection of image processing and texture manipulation tools, designed to be integrated in game tools and asset processing pipelines."
settings = "os", "compiler", "build_type", "arch"
source_subfolder = "nvtt"
no_copy_source = True
options = {"shared": [True, False],
"fPIC": [True, False],
"use_OpenMP": [True, False]
}
default_options = "shared=False", "fPIC=True", "use_OpenMP=True"
generators = "cmake"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
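        # Download the pinned revision from GitHub and patch its CMakeLists.txt
        # to inject the Conan build info right after the PROJECT(NV) line.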
url = "https://github.com/castano/nvidia-texture-tools/archive/{}.zip".format(self.version)
tools.get(url)
os.rename('nvidia-texture-tools-{}'.format(self.version), self.source_subfolder)
tools.replace_in_file(os.path.join(self.source_subfolder, "CMakeLists.txt"), "PROJECT(NV)",
'''PROJECT(NV)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()''')
def build(self):
cmake = CMake(self)
cmake.definitions["HAVE_CUDA"] = False
cmake.definitions["HAVE_OPENMP"] = self.options.use_OpenMP
cmake.configure(source_folder=self.source_subfolder)
cmake.build()
def package(self):
self.copy("license*", src=self.source_subfolder, ignore_case=True, keep_path=False)
self.copy("nvtt.h", dst="include/nvtt", src=os.path.join(self.source_subfolder, "src", "nvtt"), keep_path=False)
self.copy("nvtt_wrapper.h", dst="include/nvtt", src=os.path.join(self.source_subfolder, "src", "nvtt"), keep_path=False)
if self.options.shared:
for libname in SHARED_LIBS:
self.copy("*{}*.dll".format(libname), dst="bin", src=os.path.join(self.build_folder, "bin"), keep_path=False)
self.copy("*{}*.lib".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
self.copy("*{}*.so*".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
else:
for libname in STATIC_LIBS:
self.copy("*{}*.a".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
self.copy("*{}*.lib".format(libname), dst="lib", src=os.path.join(self.build_folder, "lib"), keep_path=False)
def package_info(self):
all_libs = tools.collect_libs(self)
if self.options.shared:
libs = all_libs
else:
libs = []
for libname in STATIC_LIBS:
libs += [lib for lib in all_libs if libname in lib]
self.cpp_info.libs = libs
if self.settings.os == "Linux":
self.cpp_info.libs.extend(["dl", "pthread"])
if self.options.shared:
self.cpp_info.defines = ["NVTT_SHARED=1"]
| [((31, 8, 31, 22), 'conans.tools.get', 'tools.get', ({(31, 18, 31, 21): 'url'}, {}), '(url)', False, 'from conans import ConanFile, CMake, tools\n'), ((39, 16, 39, 27), 'conans.CMake', 'CMake', ({(39, 22, 39, 26): 'self'}, {}), '(self)', False, 'from conans import ConanFile, CMake, tools\n'), ((60, 19, 60, 43), 'conans.tools.collect_libs', 'tools.collect_libs', ({(60, 38, 60, 42): 'self'}, {}), '(self)', False, 'from conans import ConanFile, CMake, tools\n'), ((33, 30, 33, 83), 'os.path.join', 'os.path.join', ({(33, 43, 33, 64): 'self.source_subfolder', (33, 66, 33, 82): '"""CMakeLists.txt"""'}, {}), "(self.source_subfolder, 'CMakeLists.txt')", False, 'import os\n'), ((47, 52, 47, 102), 'os.path.join', 'os.path.join', ({(47, 65, 47, 86): 'self.source_subfolder', (47, 88, 47, 93): '"""src"""', (47, 95, 47, 101): '"""nvtt"""'}, {}), "(self.source_subfolder, 'src', 'nvtt')", False, 'import os\n'), ((48, 60, 48, 110), 'os.path.join', 'os.path.join', ({(48, 73, 48, 94): 'self.source_subfolder', (48, 96, 48, 101): '"""src"""', (48, 103, 48, 109): '"""nvtt"""'}, {}), "(self.source_subfolder, 'src', 'nvtt')", False, 'import os\n'), ((51, 69, 51, 107), 'os.path.join', 'os.path.join', ({(51, 82, 51, 99): 'self.build_folder', (51, 101, 51, 106): '"""bin"""'}, {}), "(self.build_folder, 'bin')", False, 'import os\n'), ((52, 69, 52, 107), 'os.path.join', 'os.path.join', ({(52, 82, 52, 99): 'self.build_folder', (52, 101, 52, 106): '"""lib"""'}, {}), "(self.build_folder, 'lib')", False, 'import os\n'), ((53, 69, 53, 107), 'os.path.join', 'os.path.join', ({(53, 82, 53, 99): 'self.build_folder', (53, 101, 53, 106): '"""lib"""'}, {}), "(self.build_folder, 'lib')", False, 'import os\n'), ((56, 67, 56, 105), 'os.path.join', 'os.path.join', ({(56, 80, 56, 97): 'self.build_folder', (56, 99, 56, 104): '"""lib"""'}, {}), "(self.build_folder, 'lib')", False, 'import os\n'), ((57, 69, 57, 107), 'os.path.join', 'os.path.join', ({(57, 82, 57, 99): 'self.build_folder', (57, 101, 57, 106): '"""lib"""'}, {}), "(self.build_folder, 'lib')", False, 'import os\n')] |
MyWay/Create-Your-Own-Image-Classifier | train_args.py | 70e5744084435af8a74b2cfe2098c25b0745c9af | #!/usr/bin/env python3
""" train_args.py
train_args.py command-line args.
"""
import argparse
def get_args():
"""
"""
parser = argparse.ArgumentParser(
description="This script lets you train and save your model.",
usage="python3 train.py flowers/train --gpu --learning_rate 0.001 --epochs 11 --gpu --hidden_units 500",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('data_directory', action="store")
parser.add_argument('--arch',
action="store",
default="alexnet",
dest='arch',
type=str,
                        help='Model architecture to use.',
)
parser.add_argument('--save_dir',
action="store",
default=".",
dest='save_dir',
type=str,
help='Directory to save the model file.',
)
parser.add_argument('--save_name',
action="store",
default="checkpoint",
dest='save_name',
type=str,
help='Checkpoint filename.',
)
parser.add_argument('--categories_json',
action="store",
default="cat_to_name.json",
dest='categories_json',
type=str,
help='Path to file containing the categories.',
)
parser.add_argument('--gpu',
action="store_true",
dest="use_gpu",
default=False,
help='Use the GPU to train instead of the CPU')
hp = parser.add_argument_group('hyperparameters')
hp.add_argument('--learning_rate',
action="store",
default=0.001,
type=float,
help='Learning rate')
hp.add_argument('--hidden_units', '-hu',
action="store",
dest="hidden_units",
default=[4096],
type=int,
nargs='+',
help='Hidden layer units')
hp.add_argument('--epochs',
action="store",
dest="epochs",
default=1,
type=int,
help='Epochs to train the model for')
parser.parse_args()
return parser
def main():
"""
Main Function
"""
print(f'Command line argument utility for train.py.\nTry "python train.py -h".')
if __name__ == '__main__':
main()
"""
main() is called if the script is executed on its own.
""" | [((12, 13, 16, 5), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n')] |
canadiyaman/thetask | apps/payment/views.py | 0f1cea1d8eea4966138ef0bdc303a53e3511e57d | from django.http import HttpResponseRedirect
from django.conf import settings
from django.views.generic import TemplateView
from apps.payment.models import PaymentLog
from apps.payment.stripe import get_token, get_payment_charge
from apps.subscription.views import start_subscription
class ChargeView(TemplateView):
template_name = 'payment/charge.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['stripe_public_key'] = settings.STRIPE_PUBLISHABLE_KEY
context['amount'] = 100
context['currency'] = 'tl'
return context
def post(self, request):
name = request.POST.get('name')
card_number = request.POST.get('cardnumber')
exp_month = int(request.POST.get('exp-date').split('/')[0])
exp_year = int(request.POST.get('exp-date').split('/')[1])
cvc = request.POST.get('cvc')
card = {
"name": name,
"number": card_number,
"exp_month": exp_month,
"exp_year": exp_year,
"cvc": cvc
}
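        # Tokenize the card, create the charge, and only log the payment and
        # start the subscription if Stripe reports the charge as paid.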
token = get_token(card)
charge = get_payment_charge(amount=100, currency="usd", description="test", token=token.stripe_id)
if charge.paid:
log_payment(user=request.user, data=charge)
start_subscription(request.user)
return HttpResponseRedirect('/')
def log_payment(user, data):
PaymentLog.objects.create(user=user, data=data)
| [((42, 4, 42, 51), 'apps.payment.models.PaymentLog.objects.create', 'PaymentLog.objects.create', (), '', False, 'from apps.payment.models import PaymentLog\n'), ((33, 16, 33, 31), 'apps.payment.stripe.get_token', 'get_token', ({(33, 26, 33, 30): 'card'}, {}), '(card)', False, 'from apps.payment.stripe import get_token, get_payment_charge\n'), ((34, 17, 34, 106), 'apps.payment.stripe.get_payment_charge', 'get_payment_charge', (), '', False, 'from apps.payment.stripe import get_token, get_payment_charge\n'), ((38, 15, 38, 40), 'django.http.HttpResponseRedirect', 'HttpResponseRedirect', ({(38, 36, 38, 39): '"""/"""'}, {}), "('/')", False, 'from django.http import HttpResponseRedirect\n'), ((37, 12, 37, 44), 'apps.subscription.views.start_subscription', 'start_subscription', ({(37, 31, 37, 43): 'request.user'}, {}), '(request.user)', False, 'from apps.subscription.views import start_subscription\n')] |
srinidhibhat/booknotes | users/apps.py | 666f92fac309b97c13b79e91f5493220f934cab3 | from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
# below piece of code is needed for automatic profile creation for user
def ready(self):
import users.signals
| [] |
HumanBrainProject/secure-data-store | secure_data_store/cli.py | 69b615cf979fc08f4ae8474ca9cd3e6d2f04b7f0 | # -*- coding: utf-8 -*-
"""Console script for secure_data_store."""
import click
from . import secure_data_store as sds
CONFIG='~/.sdsrc'
@click.group()
def main():
"""Wrapper for GoCryptFS"""
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def create(name, config=None):
"""Create a new secure data container NAME."""
try:
config = sds.read_config(config)
sds.create(config, name)
except (sds.ContainerError, sds.GCFSError, FileExistsError, sds.ConfigError) as err:
print(err)
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def open(name, config=None):
"""Open an existing secure data container NAME.
Will print path to the opened, clear-text container."""
try:
config = sds.read_config(config)
sds.mount(config, name)
except (sds.ContainerError, sds.GCFSError, sds.ConfigError, sds.MountError) as err:
print(err)
@main.command()
@click.argument('name')
@click.option('--config', help='Path to config file', default='~/.sdsrc')
def close(name, config=None):
"""Close an opend data container NAME."""
try:
config = sds.read_config(config)
sds.unmount(config, name)
except (sds.ContainerError, sds.GCFSError, sds.ConfigError) as err:
print(err)
main()
| [((8, 1, 8, 14), 'click.group', 'click.group', ({}, {}), '()', False, 'import click\n'), ((13, 1, 13, 23), 'click.argument', 'click.argument', ({(13, 16, 13, 22): '"""name"""'}, {}), "('name')", False, 'import click\n'), ((14, 1, 14, 73), 'click.option', 'click.option', (), '', False, 'import click\n'), ((24, 1, 24, 23), 'click.argument', 'click.argument', ({(24, 16, 24, 22): '"""name"""'}, {}), "('name')", False, 'import click\n'), ((25, 1, 25, 73), 'click.option', 'click.option', (), '', False, 'import click\n'), ((37, 1, 37, 23), 'click.argument', 'click.argument', ({(37, 16, 37, 22): '"""name"""'}, {}), "('name')", False, 'import click\n'), ((38, 1, 38, 73), 'click.option', 'click.option', (), '', False, 'import click\n')] |
marient/PelePhysics | Support/Fuego/Pythia/pythia-0.4/packages/pyre/pyre/graph/Node.py | e6ad1839d77b194e09ab44ff850c9489652e5d81 | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from Drawable import Drawable
def nodeAttributes():
"""return a list of valid attributes for Node"""
return Node._validAttributes.keys()
class Node(Drawable):
def id(self): return self._id
def __init__(self, id):
Drawable.__init__(self)
self._id = id
return
_validAttributes = {
"color" : None,
"fontcolor" : None,
"fontname" : None,
"fontsize" : None,
"height" : None,
"label" : None,
"layer" : None,
"shape" : None,
"shapefile" : None,
"style" : None,
"width" : None
}
# version
__id__ = "$Id$"
#
# End of file
| [((29, 8, 29, 31), 'Drawable.Drawable.__init__', 'Drawable.__init__', ({(29, 26, 29, 30): 'self'}, {}), '(self)', False, 'from Drawable import Drawable\n')] |
RachelLar/cairis_update | cairis/gui/RiskScatterPanel.py | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pprint
import random
import wx
from cairis.core.armid import *
from cairis.core.Borg import Borg
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
def riskColourCode(riskScore):
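  # Map a risk score (1 = lowest, 9+ = highest) onto a hex colour ramp from pale pink to dark red.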
if (riskScore <= 1):
return '#fef2ec'
elif (riskScore == 2):
return '#fcd9c8'
elif (riskScore == 3):
return '#f7ac91'
elif (riskScore == 4):
return '#f67e61'
elif (riskScore == 5):
return '#f2543d'
elif (riskScore == 6):
return '#e42626'
elif (riskScore == 7):
return '#b9051a'
elif (riskScore == 8):
return '#900014'
else:
return '#52000D'
class RiskScatterPanel(wx.Panel):
def __init__(self,parent):
wx.Panel.__init__(self,parent,RISKSCATTER_ID)
b = Borg()
self.dbProxy = b.dbProxy
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigCanvas(self, -1, self.fig)
self.axes = self.fig.add_subplot(111,xlabel='Severity',ylabel='Likelihood',autoscale_on=False)
self.axes.set_xticklabels(['Marginal','Critical','Catastrophic'])
self.axes.set_yticks([0,1,2,3,4,5])
self.toolbar = NavigationToolbar(self.canvas)
envs = self.dbProxy.getDimensionNames('environment')
self.envCombo = wx.ComboBox(self,RISKSCATTER_COMBOENVIRONMENT_ID,envs[0],choices=envs,size=(300,-1),style=wx.CB_DROPDOWN)
self.envCombo.Bind(wx.EVT_COMBOBOX,self.onEnvironmentChange)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.Add(self.envCombo,0, wx.EXPAND)
self.vbox.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.vbox)
self.vbox.Fit(self)
self.drawScatter(envs[0])
def drawScatter(self,envName):
self.axes.clear()
self.axes.grid(True)
self.axes.set_xlabel('Severity')
self.axes.set_ylabel('Likelihood')
self.axes.set_xbound(0,4)
self.axes.set_ybound(0,5)
xs,ys,cs = self.dbProxy.riskScatter(envName)
ccs = []
for c in cs:
ccs.append(riskColourCode(c))
if ((len(xs) > 0) and (len(ys) > 0)):
self.axes.scatter(xs,ys,c=ccs,marker='d')
self.canvas.draw()
def onEnvironmentChange(self,evt):
envName = self.envCombo.GetStringSelection()
self.drawScatter(envName)
def on_save_plot(self, event):
fileChoices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(self,message="Save risk scatter",defaultDir=os.getcwd(),defaultFile="scatter.png",wildcard=fileChoices,style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
| [((27, 0, 27, 23), 'matplotlib.use', 'matplotlib.use', ({(27, 15, 27, 22): '"""WXAgg"""'}, {}), "('WXAgg')", False, 'import matplotlib\n'), ((55, 4, 55, 49), 'wx.Panel.__init__', 'wx.Panel.__init__', ({(55, 22, 55, 26): 'self', (55, 27, 55, 33): 'parent', (55, 34, 55, 48): 'RISKSCATTER_ID'}, {}), '(self, parent, RISKSCATTER_ID)', False, 'import wx\n'), ((56, 8, 56, 14), 'cairis.core.Borg.Borg', 'Borg', ({}, {}), '()', False, 'from cairis.core.Borg import Borg\n'), ((59, 15, 59, 47), 'matplotlib.figure.Figure', 'Figure', (), '', False, 'from matplotlib.figure import Figure\n'), ((60, 18, 60, 47), 'matplotlib.backends.backend_wxagg.FigureCanvasWxAgg', 'FigCanvas', ({(60, 28, 60, 32): 'self', (60, 34, 60, 36): '-1', (60, 38, 60, 46): 'self.fig'}, {}), '(self, -1, self.fig)', True, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas, NavigationToolbar2WxAgg as NavigationToolbar\n'), ((64, 19, 64, 49), 'matplotlib.backends.backend_wxagg.NavigationToolbar2WxAgg', 'NavigationToolbar', ({(64, 37, 64, 48): 'self.canvas'}, {}), '(self.canvas)', True, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas, NavigationToolbar2WxAgg as NavigationToolbar\n'), ((67, 20, 67, 125), 'wx.ComboBox', 'wx.ComboBox', (), '', False, 'import wx\n'), ((70, 16, 70, 40), 'wx.BoxSizer', 'wx.BoxSizer', ({(70, 28, 70, 39): 'wx.VERTICAL'}, {}), '(wx.VERTICAL)', False, 'import wx\n'), ((100, 68, 100, 79), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n')] |
aangelisc/pulumi-azure | sdk/python/pulumi_azure/containerservice/get_registry.py | 71dd9c75403146e16f7480e5a60b08bc0329660e | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetRegistryResult',
'AwaitableGetRegistryResult',
'get_registry',
]
@pulumi.output_type
class GetRegistryResult:
"""
A collection of values returned by getRegistry.
"""
def __init__(__self__, admin_enabled=None, admin_password=None, admin_username=None, id=None, location=None, login_server=None, name=None, resource_group_name=None, sku=None, storage_account_id=None, tags=None):
if admin_enabled and not isinstance(admin_enabled, bool):
raise TypeError("Expected argument 'admin_enabled' to be a bool")
pulumi.set(__self__, "admin_enabled", admin_enabled)
if admin_password and not isinstance(admin_password, str):
raise TypeError("Expected argument 'admin_password' to be a str")
pulumi.set(__self__, "admin_password", admin_password)
if admin_username and not isinstance(admin_username, str):
raise TypeError("Expected argument 'admin_username' to be a str")
pulumi.set(__self__, "admin_username", admin_username)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if login_server and not isinstance(login_server, str):
raise TypeError("Expected argument 'login_server' to be a str")
pulumi.set(__self__, "login_server", login_server)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if sku and not isinstance(sku, str):
raise TypeError("Expected argument 'sku' to be a str")
pulumi.set(__self__, "sku", sku)
if storage_account_id and not isinstance(storage_account_id, str):
raise TypeError("Expected argument 'storage_account_id' to be a str")
pulumi.set(__self__, "storage_account_id", storage_account_id)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="adminEnabled")
def admin_enabled(self) -> bool:
"""
Is the Administrator account enabled for this Container Registry.
"""
return pulumi.get(self, "admin_enabled")
@property
@pulumi.getter(name="adminPassword")
def admin_password(self) -> str:
"""
The Password associated with the Container Registry Admin account - if the admin account is enabled.
"""
return pulumi.get(self, "admin_password")
@property
@pulumi.getter(name="adminUsername")
def admin_username(self) -> str:
"""
The Username associated with the Container Registry Admin account - if the admin account is enabled.
"""
return pulumi.get(self, "admin_username")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region in which this Container Registry exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="loginServer")
def login_server(self) -> str:
"""
The URL that can be used to log into the container registry.
"""
return pulumi.get(self, "login_server")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def sku(self) -> str:
"""
The SKU of this Container Registry, such as `Basic`.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> str:
"""
The ID of the Storage Account used for this Container Registry. This is only returned for `Classic` SKU's.
"""
return pulumi.get(self, "storage_account_id")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A map of tags assigned to the Container Registry.
"""
return pulumi.get(self, "tags")
class AwaitableGetRegistryResult(GetRegistryResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetRegistryResult(
admin_enabled=self.admin_enabled,
admin_password=self.admin_password,
admin_username=self.admin_username,
id=self.id,
location=self.location,
login_server=self.login_server,
name=self.name,
resource_group_name=self.resource_group_name,
sku=self.sku,
storage_account_id=self.storage_account_id,
tags=self.tags)
def get_registry(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistryResult:
"""
Use this data source to access information about an existing Container Registry.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.containerservice.get_registry(name="testacr",
resource_group_name="test")
pulumi.export("loginServer", example.login_server)
```
:param str name: The name of the Container Registry.
:param str resource_group_name: The Name of the Resource Group where this Container Registry exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:containerservice/getRegistry:getRegistry', __args__, opts=opts, typ=GetRegistryResult).value
return AwaitableGetRegistryResult(
admin_enabled=__ret__.admin_enabled,
admin_password=__ret__.admin_password,
admin_username=__ret__.admin_username,
id=__ret__.id,
location=__ret__.location,
login_server=__ret__.login_server,
name=__ret__.name,
resource_group_name=__ret__.resource_group_name,
sku=__ret__.sku,
storage_account_id=__ret__.storage_account_id,
tags=__ret__.tags)
| [((58, 5, 58, 39), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((66, 5, 66, 40), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((74, 5, 74, 40), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((98, 5, 98, 38), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((111, 5, 111, 44), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((124, 5, 124, 43), 'pulumi.getter', 'pulumi.getter', (), '', False, 'import pulumi\n'), ((25, 8, 25, 60), 'pulumi.set', 'pulumi.set', ({(25, 19, 25, 27): '__self__', (25, 29, 25, 44): '"""admin_enabled"""', (25, 46, 25, 59): 'admin_enabled'}, {}), "(__self__, 'admin_enabled', admin_enabled)", False, 'import pulumi\n'), ((28, 8, 28, 62), 'pulumi.set', 'pulumi.set', ({(28, 19, 28, 27): '__self__', (28, 29, 28, 45): '"""admin_password"""', (28, 47, 28, 61): 'admin_password'}, {}), "(__self__, 'admin_password', admin_password)", False, 'import pulumi\n'), ((31, 8, 31, 62), 'pulumi.set', 'pulumi.set', ({(31, 19, 31, 27): '__self__', (31, 29, 31, 45): '"""admin_username"""', (31, 47, 31, 61): 'admin_username'}, {}), "(__self__, 'admin_username', admin_username)", False, 'import pulumi\n'), ((34, 8, 34, 38), 'pulumi.set', 'pulumi.set', ({(34, 19, 34, 27): '__self__', (34, 29, 34, 33): '"""id"""', (34, 35, 34, 37): 'id'}, {}), "(__self__, 'id', id)", False, 'import pulumi\n'), ((37, 8, 37, 50), 'pulumi.set', 'pulumi.set', ({(37, 19, 37, 27): '__self__', (37, 29, 37, 39): '"""location"""', (37, 41, 37, 49): 'location'}, {}), "(__self__, 'location', location)", False, 'import pulumi\n'), ((40, 8, 40, 58), 'pulumi.set', 'pulumi.set', ({(40, 19, 40, 27): '__self__', (40, 29, 40, 43): '"""login_server"""', (40, 45, 40, 57): 'login_server'}, {}), "(__self__, 'login_server', login_server)", False, 'import pulumi\n'), ((43, 8, 43, 42), 'pulumi.set', 'pulumi.set', ({(43, 19, 43, 27): '__self__', (43, 29, 43, 35): '"""name"""', (43, 37, 43, 41): 'name'}, {}), "(__self__, 'name', name)", False, 'import pulumi\n'), ((46, 8, 46, 72), 'pulumi.set', 'pulumi.set', ({(46, 19, 46, 27): '__self__', (46, 29, 46, 50): '"""resource_group_name"""', (46, 52, 46, 71): 'resource_group_name'}, {}), "(__self__, 'resource_group_name', resource_group_name)", False, 'import pulumi\n'), ((49, 8, 49, 40), 'pulumi.set', 'pulumi.set', ({(49, 19, 49, 27): '__self__', (49, 29, 49, 34): '"""sku"""', (49, 36, 49, 39): 'sku'}, {}), "(__self__, 'sku', sku)", False, 'import pulumi\n'), ((52, 8, 52, 70), 'pulumi.set', 'pulumi.set', ({(52, 19, 52, 27): '__self__', (52, 29, 52, 49): '"""storage_account_id"""', (52, 51, 52, 69): 'storage_account_id'}, {}), "(__self__, 'storage_account_id', storage_account_id)", False, 'import pulumi\n'), ((55, 8, 55, 42), 'pulumi.set', 'pulumi.set', ({(55, 19, 55, 27): '__self__', (55, 29, 55, 35): '"""tags"""', (55, 37, 55, 41): 'tags'}, {}), "(__self__, 'tags', tags)", False, 'import pulumi\n'), ((63, 15, 63, 48), 'pulumi.get', 'pulumi.get', ({(63, 26, 63, 30): 'self', (63, 32, 63, 47): '"""admin_enabled"""'}, {}), "(self, 'admin_enabled')", False, 'import pulumi\n'), ((71, 15, 71, 49), 'pulumi.get', 'pulumi.get', ({(71, 26, 71, 30): 'self', (71, 32, 71, 48): '"""admin_password"""'}, {}), "(self, 'admin_password')", False, 'import pulumi\n'), ((79, 15, 79, 49), 'pulumi.get', 'pulumi.get', ({(79, 26, 79, 30): 'self', (79, 32, 79, 48): '"""admin_username"""'}, {}), "(self, 'admin_username')", False, 'import pulumi\n'), ((87, 15, 87, 37), 'pulumi.get', 'pulumi.get', ({(87, 26, 
87, 30): 'self', (87, 32, 87, 36): '"""id"""'}, {}), "(self, 'id')", False, 'import pulumi\n'), ((95, 15, 95, 43), 'pulumi.get', 'pulumi.get', ({(95, 26, 95, 30): 'self', (95, 32, 95, 42): '"""location"""'}, {}), "(self, 'location')", False, 'import pulumi\n'), ((103, 15, 103, 47), 'pulumi.get', 'pulumi.get', ({(103, 26, 103, 30): 'self', (103, 32, 103, 46): '"""login_server"""'}, {}), "(self, 'login_server')", False, 'import pulumi\n'), ((108, 15, 108, 39), 'pulumi.get', 'pulumi.get', ({(108, 26, 108, 30): 'self', (108, 32, 108, 38): '"""name"""'}, {}), "(self, 'name')", False, 'import pulumi\n'), ((113, 15, 113, 54), 'pulumi.get', 'pulumi.get', ({(113, 26, 113, 30): 'self', (113, 32, 113, 53): '"""resource_group_name"""'}, {}), "(self, 'resource_group_name')", False, 'import pulumi\n'), ((121, 15, 121, 38), 'pulumi.get', 'pulumi.get', ({(121, 26, 121, 30): 'self', (121, 32, 121, 37): '"""sku"""'}, {}), "(self, 'sku')", False, 'import pulumi\n'), ((129, 15, 129, 53), 'pulumi.get', 'pulumi.get', ({(129, 26, 129, 30): 'self', (129, 32, 129, 52): '"""storage_account_id"""'}, {}), "(self, 'storage_account_id')", False, 'import pulumi\n'), ((137, 15, 137, 39), 'pulumi.get', 'pulumi.get', ({(137, 26, 137, 30): 'self', (137, 32, 137, 38): '"""tags"""'}, {}), "(self, 'tags')", False, 'import pulumi\n'), ((184, 15, 184, 37), 'pulumi.InvokeOptions', 'pulumi.InvokeOptions', ({}, {}), '()', False, 'import pulumi\n'), ((187, 14, 187, 129), 'pulumi.runtime.invoke', 'pulumi.runtime.invoke', (), '', False, 'import pulumi\n')] |
TimWhalen/graphite-web | contrib/memcache_whisper.py | e150af45e01d01141a8767ec0597e218105b9914 | #!/usr/bin/env python
# Copyright 2008 Orbitz WorldWide
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is an implementation of the Whisper database API
# Here is the basic layout of a whisper data file
#
# File = Header,Data
# Header = Metadata,ArchiveInfo+
# Metadata = lastUpdate,maxRetention,xFilesFactor,archiveCount
# ArchiveInfo = Offset,SecondsPerPoint,Points
# Data = Archive+
# Archive = Point+
# Point = timestamp,value
"""
NOTE: This is a modified version of whisper.py
For details on the modification, read https://bugs.launchpad.net/graphite/+bug/245835
"""
import os, struct, time
try:
import fcntl
CAN_LOCK = True
except ImportError:
CAN_LOCK = False
LOCK = False
CACHE_HEADERS = False
__headerCache = {}
longFormat = "!L"
longSize = struct.calcsize(longFormat)
floatFormat = "!f"
floatSize = struct.calcsize(floatFormat)
timestampFormat = "!L"
timestampSize = struct.calcsize(timestampFormat)
valueFormat = "!d"
valueSize = struct.calcsize(valueFormat)
pointFormat = "!Ld"
pointSize = struct.calcsize(pointFormat)
metadataFormat = "!2LfL"
metadataSize = struct.calcsize(metadataFormat)
archiveInfoFormat = "!3L"
archiveInfoSize = struct.calcsize(archiveInfoFormat)
debug = startBlock = endBlock = lambda *a,**k: None
def exists(path):
return os.path.exists(path)
def drop(path):
os.remove(path)
def enableMemcache(servers = ['127.0.0.1:11211'], min_compress_len = 0):
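  # Swap the module-level open/exists/drop implementations for memcache-backed
  # versions so whisper databases are stored in memcached rather than on disk.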
from StringIO import StringIO
import memcache
global open, exists, drop
MC = memcache.Client(servers)
class open(StringIO):
def __init__(self,*args,**kwargs):
self.name = args[0]
self.mode = args[1]
if self.mode == "r+b" or self.mode == "rb":
StringIO.__init__(self, MC.get(self.name))
else:
StringIO.__init__(self)
def close(self):
if self.mode == "r+b" or self.mode == "wb":
MC.set(self.name, self.getvalue(), min_compress_len = min_compress_len)
StringIO.close(self)
def exists(path):
return MC.get(path) != None
def drop(path):
MC.delete(path)
def enableDebug():
global open, debug, startBlock, endBlock
class open(file):
def __init__(self,*args,**kwargs):
file.__init__(self,*args,**kwargs)
self.writeCount = 0
self.readCount = 0
def write(self,data):
self.writeCount += 1
debug('WRITE %d bytes #%d' % (len(data),self.writeCount))
return file.write(self,data)
def read(self,bytes):
self.readCount += 1
debug('READ %d bytes #%d' % (bytes,self.readCount))
return file.read(self,bytes)
def debug(message):
print('DEBUG :: %s' % message)
__timingBlocks = {}
def startBlock(name):
__timingBlocks[name] = time.time()
def endBlock(name):
debug("%s took %.5f seconds" % (name,time.time() - __timingBlocks.pop(name)))
def __readHeader(fh):
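  # Parse the metadata block and each archive's info from the start of the file,
  # caching the result per filename when CACHE_HEADERS is enabled.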
info = __headerCache.get(fh.name)
if info: return info
#startBlock('__readHeader')
originalOffset = fh.tell()
fh.seek(0)
packedMetadata = fh.read(metadataSize)
(lastUpdate,maxRetention,xff,archiveCount) = struct.unpack(metadataFormat,packedMetadata)
archives = []
for i in xrange(archiveCount):
packedArchiveInfo = fh.read(archiveInfoSize)
(offset,secondsPerPoint,points) = struct.unpack(archiveInfoFormat,packedArchiveInfo)
archiveInfo = {
'offset' : offset,
'secondsPerPoint' : secondsPerPoint,
'points' : points,
'retention' : secondsPerPoint * points,
'size' : points * pointSize,
}
archives.append(archiveInfo)
fh.seek(originalOffset)
info = {
'lastUpdate' : lastUpdate,
'maxRetention' : maxRetention,
'xFilesFactor' : xff,
'archives' : archives,
}
if CACHE_HEADERS:
__headerCache[fh.name] = info
#endBlock('__readHeader')
return info
def __changeLastUpdate(fh):
return #XXX Make this a NOP, use os.stat(filename).st_mtime instead
startBlock('__changeLastUpdate()')
originalOffset = fh.tell()
fh.seek(0) #Based on assumption that first field is lastUpdate
now = int( time.time() )
packedTime = struct.pack(timestampFormat,now)
fh.write(packedTime)
fh.seek(originalOffset)
endBlock('__changeLastUpdate()')
def create(path,archiveList,xFilesFactor=0.5):
"""create(path,archiveList,xFilesFactor=0.5)
path is a string
archiveList is a list of archives, each of which is of the form (secondsPerPoint,numberOfPoints)
xFilesFactor specifies the fraction of data points in a propagation interval that must have known values for a propagation to occur
"""
#Validate archive configurations...
assert archiveList, "You must specify at least one archive configuration!"
archiveList.sort(key=lambda a: a[0]) #sort by precision (secondsPerPoint)
for i,archive in enumerate(archiveList):
if i == len(archiveList) - 1: break
next = archiveList[i+1]
assert archive[0] < next[0],\
"You cannot configure two archives with the same precision %s,%s" % (archive,next)
assert (next[0] % archive[0]) == 0,\
"Higher precision archives' precision must evenly divide all lower precision archives' precision %s,%s" % (archive[0],next[0])
retention = archive[0] * archive[1]
nextRetention = next[0] * next[1]
assert nextRetention > retention,\
"Lower precision archives must cover larger time intervals than higher precision archives %s,%s" % (archive,next)
#Looks good, now we create the file and write the header
assert not exists(path), "File %s already exists!" % path
fh = open(path,'wb')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
lastUpdate = struct.pack( timestampFormat, int(time.time()) )
oldest = sorted([secondsPerPoint * points for secondsPerPoint,points in archiveList])[-1]
maxRetention = struct.pack( longFormat, oldest )
xFilesFactor = struct.pack( floatFormat, float(xFilesFactor) )
archiveCount = struct.pack(longFormat, len(archiveList))
packedMetadata = lastUpdate + maxRetention + xFilesFactor + archiveCount
fh.write(packedMetadata)
headerSize = metadataSize + (archiveInfoSize * len(archiveList))
archiveOffsetPointer = headerSize
for secondsPerPoint,points in archiveList:
archiveInfo = struct.pack(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)
fh.write(archiveInfo)
archiveOffsetPointer += (points * pointSize)
zeroes = '\x00' * (archiveOffsetPointer - headerSize)
fh.write(zeroes)
fh.close()
def __propagate(fh,timestamp,xff,higher,lower):
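  # Aggregate (average) the higher-precision points that fall inside this
  # lower-precision interval and write the result into the lower archive, but
  # only when at least xFilesFactor of those points are known.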
lowerIntervalStart = timestamp - (timestamp % lower['secondsPerPoint'])
lowerIntervalEnd = lowerIntervalStart + lower['secondsPerPoint']
fh.seek(higher['offset'])
packedPoint = fh.read(pointSize)
(higherBaseInterval,higherBaseValue) = struct.unpack(pointFormat,packedPoint)
if higherBaseInterval == 0:
higherFirstOffset = higher['offset']
else:
timeDistance = lowerIntervalStart - higherBaseInterval
pointDistance = timeDistance / higher['secondsPerPoint']
byteDistance = pointDistance * pointSize
higherFirstOffset = higher['offset'] + (byteDistance % higher['size'])
higherPoints = lower['secondsPerPoint'] / higher['secondsPerPoint']
higherSize = higherPoints * pointSize
higherLastOffset = higherFirstOffset + (higherSize % higher['size'])
fh.seek(higherFirstOffset)
if higherFirstOffset < higherLastOffset: #we don't wrap the archive
seriesString = fh.read(higherLastOffset - higherFirstOffset)
else: #We do wrap the archive
higherEnd = higher['offset'] + higher['size']
seriesString = fh.read(higherEnd - higherFirstOffset)
fh.seek(higher['offset'])
seriesString += fh.read(higherLastOffset - higher['offset'])
#Now we unpack the series data we just read
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
points = len(seriesString) / pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values
neighborValues = [None] * points
currentInterval = lowerIntervalStart
step = higher['secondsPerPoint']
for i in xrange(0,len(unpackedSeries),2):
pointTime = unpackedSeries[i]
if pointTime == currentInterval:
neighborValues[i/2] = unpackedSeries[i+1]
currentInterval += step
  #Compute the aggregate value from neighborValues if we have enough known points
knownValues = [v for v in neighborValues if v is not None]
knownPercent = float(len(knownValues)) / float(len(neighborValues))
if knownPercent >= xff: #we have enough data to propagate a value!
aggregateValue = float(sum(knownValues)) / float(len(knownValues)) #TODO another CF besides average?
myPackedPoint = struct.pack(pointFormat,lowerIntervalStart,aggregateValue)
fh.seek(lower['offset'])
packedPoint = fh.read(pointSize)
(lowerBaseInterval,lowerBaseValue) = struct.unpack(pointFormat,packedPoint)
if lowerBaseInterval == 0: #First propagated update to this lower archive
fh.seek(lower['offset'])
fh.write(myPackedPoint)
else: #Not our first propagated update to this lower archive
timeDistance = lowerIntervalStart - lowerBaseInterval
pointDistance = timeDistance / lower['secondsPerPoint']
byteDistance = pointDistance * pointSize
lowerOffset = lower['offset'] + (byteDistance % lower['size'])
fh.seek(lowerOffset)
fh.write(myPackedPoint)
return True
else:
return False
def update(path,value,timestamp=None):
"""update(path,value,timestamp=None)
path is a string
value is a float
timestamp is either an int or float
"""
#startBlock('complete update')
value = float(value)
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
if timestamp is None: timestamp = now
timestamp = int(timestamp)
diff = now - timestamp
assert diff < header['maxRetention'] and diff >= 0, "Timestamp not covered by any archives in this database"
for i,archive in enumerate(header['archives']): #Find the highest-precision archive that covers timestamp
if archive['retention'] < diff: continue
lowerArchives = header['archives'][i+1:] #We'll pass on the update to these lower precision archives later
break
#First we update the highest-precision archive
myInterval = timestamp - (timestamp % archive['secondsPerPoint'])
myPackedPoint = struct.pack(pointFormat,myInterval,value)
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0: #This file's first update
fh.seek(archive['offset'])
fh.write(myPackedPoint)
baseInterval,baseValue = myInterval,value
else: #Not our first update
timeDistance = myInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
fh.write(myPackedPoint)
#Now we propagate the update to lower-precision archives
#startBlock('update propagation')
higher = archive
for lower in lowerArchives:
if not __propagate(fh,myInterval,header['xFilesFactor'],higher,lower): break
higher = lower
#endBlock('update propagation')
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update')
def update_many(path,points):
"""update_many(path,points)
path is a string
points is a list of (timestamp,value) points
"""
#startBlock('complete update_many path=%s points=%d' % (path,len(points)))
if not points: return
points = [ (int(t),float(v)) for (t,v) in points]
points.sort(key=lambda p: p[0],reverse=True) #order points by timestamp, newest first
fh = open(path,'r+b')
if LOCK: fcntl.flock( fh.fileno(), fcntl.LOCK_EX )
header = __readHeader(fh)
now = int( time.time() )
archives = iter( header['archives'] )
currentArchive = next(archives)
#debug(' update_many currentArchive=%s' % str(currentArchive))
currentPoints = []
for point in points:
age = now - point[0]
#debug(' update_many iterating points, point=%s age=%d' % (str(point),age))
while currentArchive['retention'] < age: #we can't fit any more points in this archive
#debug(' update_many this point is too old to fit here, currentPoints=%d' % len(currentPoints))
if currentPoints: #commit all the points we've found that it can fit
currentPoints.reverse() #put points in chronological order
__archive_update_many(fh,header,currentArchive,currentPoints)
currentPoints = []
try:
currentArchive = next(archives)
#debug(' update_many using next archive %s' % str(currentArchive))
except StopIteration:
#debug(' update_many no more archives!')
currentArchive = None
break
if not currentArchive: break #drop remaining points that don't fit in the database
#debug(' update_many adding point=%s' % str(point))
currentPoints.append(point)
#debug(' update_many done iterating points')
if currentArchive and currentPoints: #don't forget to commit after we've checked all the archives
currentPoints.reverse()
__archive_update_many(fh,header,currentArchive,currentPoints)
__changeLastUpdate(fh)
fh.close()
#endBlock('complete update_many path=%s points=%d' % (path,len(points)))
def __archive_update_many(fh,header,archive,points):
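  # Pack contiguous runs of aligned points into single strings so each run is
  # committed with one write(), wrapping around the circular archive if needed,
  # then propagate the affected intervals down to the lower-precision archives.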
step = archive['secondsPerPoint']
#startBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
alignedPoints = [ (timestamp - (timestamp % step), value)
for (timestamp,value) in points ]
#Create a packed string for each contiguous sequence of points
#startBlock('__archive_update_many string packing')
packedStrings = []
previousInterval = None
currentString = ""
for (interval,value) in alignedPoints:
#debug('__archive_update_many iterating alignedPoint at %s' % interval)
if (not previousInterval) or (interval == previousInterval + step):
#debug('__archive_update_many was expected, packing onto currentString')
currentString += struct.pack(pointFormat,interval,value)
previousInterval = interval
else:
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many was NOT expected, appending to packedStrings startInterval=%s currentString=%d bytes' % (startInterval,len(currentString)))
packedStrings.append( (startInterval,currentString) )
currentString = struct.pack(pointFormat,interval,value)
previousInterval = interval
if currentString:
#startInterval = previousInterval - (step * len(currentString) / pointSize) + step
numberOfPoints = len(currentString) / pointSize
startInterval = previousInterval - (step * (numberOfPoints-1))
#debug('__archive_update_many done iterating alignedPoints, remainder currentString of %d bytes, startInterval=%s' % (len(currentString),startInterval))
packedStrings.append( (startInterval,currentString) )
#endBlock('__archive_update_many string packing')
#Read base point and determine where our writes will start
fh.seek(archive['offset'])
packedBasePoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedBasePoint)
if baseInterval == 0: #This file's first update
#debug('__archive_update_many first update')
baseInterval = packedStrings[0][0] #use our first string as the base, so we start at the start
#debug('__archive_update_many baseInterval is %s' % baseInterval)
#Write all of our packed strings in locations determined by the baseInterval
#startBlock('__archive_update_many write() operations')
for (interval,packedString) in packedStrings:
timeDistance = interval - baseInterval
pointDistance = timeDistance / step
byteDistance = pointDistance * pointSize
myOffset = archive['offset'] + (byteDistance % archive['size'])
fh.seek(myOffset)
archiveEnd = archive['offset'] + archive['size']
bytesBeyond = (myOffset + len(packedString)) - archiveEnd
#debug(' __archive_update_many myOffset=%d packedString=%d archiveEnd=%d bytesBeyond=%d' % (myOffset,len(packedString),archiveEnd,bytesBeyond))
if bytesBeyond > 0:
fh.write( packedString[:-bytesBeyond] )
#debug('We wrapped an archive!')
assert fh.tell() == archiveEnd, "archiveEnd=%d fh.tell=%d bytesBeyond=%d len(packedString)=%d" % (archiveEnd,fh.tell(),bytesBeyond,len(packedString))
fh.seek( archive['offset'] )
fh.write( packedString[-bytesBeyond:] ) #safe because it can't exceed the archive (retention checking logic above)
else:
fh.write(packedString)
#endBlock('__archive_update_many write() operations')
#Now we propagate the updates to lower-precision archives
#startBlock('__archive_update_many propagation')
higher = archive
lowerArchives = [arc for arc in header['archives'] if arc['secondsPerPoint'] > archive['secondsPerPoint']]
#debug('__archive_update_many I have %d lower archives' % len(lowerArchives))
for lower in lowerArchives:
fit = lambda i: i - (i % lower['secondsPerPoint'])
lowerIntervals = [fit(p[0]) for p in alignedPoints]
uniqueLowerIntervals = set(lowerIntervals)
#debug(' __archive_update_many points=%d unique=%d' % (len(alignedPoints),len(uniqueLowerIntervals)))
propagateFurther = False
for interval in uniqueLowerIntervals:
#debug(' __archive_update_many propagating from %d to %d, interval=%d' % (higher['secondsPerPoint'],lower['secondsPerPoint'],interval))
if __propagate(fh,interval,header['xFilesFactor'],higher,lower):
propagateFurther = True
#debug(' __archive_update_many Successful propagation!')
#debug(' __archive_update_many propagateFurther=%s' % propagateFurther)
if not propagateFurther: break
higher = lower
#endBlock('__archive_update_many propagation')
#endBlock('__archive_update_many file=%s archive=%s points=%d' % (fh.name,step,len(points)))
def info(path):
"""info(path)
path is a string
"""
fh = open(path,'rb')
info = __readHeader(fh)
fh.close()
return info
def fetch(path,fromTime,untilTime=None):
"""fetch(path,fromTime,untilTime=None)
path is a string
fromTime is an epoch time
untilTime is also an epoch time, but defaults to now
"""
fh = open(path,'rb')
header = __readHeader(fh)
now = int( time.time() )
if untilTime is None or untilTime > now:
untilTime = now
if fromTime < (now - header['maxRetention']):
fromTime = now - header['maxRetention']
assert fromTime < untilTime, "Invalid time interval"
diff = now - fromTime
for archive in header['archives']:
if archive['retention'] >= diff: break
fromInterval = int( fromTime - (fromTime % archive['secondsPerPoint']) )
untilInterval = int( untilTime - (untilTime % archive['secondsPerPoint']) )
fh.seek(archive['offset'])
packedPoint = fh.read(pointSize)
(baseInterval,baseValue) = struct.unpack(pointFormat,packedPoint)
if baseInterval == 0:
step = archive['secondsPerPoint']
points = (untilInterval - fromInterval) / step
timeInfo = (fromInterval,untilInterval,step)
valueList = [None] * points
return (timeInfo,valueList)
#Determine fromOffset
timeDistance = fromInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
fromOffset = archive['offset'] + (byteDistance % archive['size'])
#Determine untilOffset
timeDistance = untilInterval - baseInterval
pointDistance = timeDistance / archive['secondsPerPoint']
byteDistance = pointDistance * pointSize
untilOffset = archive['offset'] + (byteDistance % archive['size'])
#Read all the points in the interval
fh.seek(fromOffset)
if fromOffset < untilOffset: #If we don't wrap around the archive
seriesString = fh.read(untilOffset - fromOffset)
else: #We do wrap around the archive, so we need two reads
archiveEnd = archive['offset'] + archive['size']
seriesString = fh.read(archiveEnd - fromOffset)
fh.seek(archive['offset'])
seriesString += fh.read(untilOffset - archive['offset'])
#Now we unpack the series data we just read (anything faster than unpack?)
byteOrder,pointTypes = pointFormat[0],pointFormat[1:]
  points = len(seriesString) // pointSize
seriesFormat = byteOrder + (pointTypes * points)
unpackedSeries = struct.unpack(seriesFormat, seriesString)
#And finally we construct a list of values (optimize this!)
valueList = [None] * points #pre-allocate entire list for speed
currentInterval = fromInterval
step = archive['secondsPerPoint']
  for i in range(0,len(unpackedSeries),2):
    pointTime = unpackedSeries[i]
    if pointTime == currentInterval:
      pointValue = unpackedSeries[i+1]
      valueList[i//2] = pointValue #in-place reassignment is faster than append()
currentInterval += step
fh.close()
timeInfo = (fromInterval,untilInterval,step)
return (timeInfo,valueList)
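#Illustrative usage sketch (not part of the original module): read back the last
#hour of data from an existing whisper file. The path below is hypothetical and
#the file is assumed to have been created beforehand with this module's
#create()/update() functions.
def _example_fetch_last_hour(path='load.wsp'):
  header = info(path)
  (timeInfo,values) = fetch(path, int(time.time()) - 3600)
  (fromInterval,untilInterval,step) = timeInfo
  #pair each retained value with the timestamp of its interval
  datapoints = [ (fromInterval + i*step, v) for i,v in enumerate(values) ]
  return header, datapoints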
| [((45, 11, 45, 38), 'struct.calcsize', 'struct.calcsize', ({(45, 27, 45, 37): 'longFormat'}, {}), '(longFormat)', False, 'import os, struct, time\n'), ((47, 12, 47, 40), 'struct.calcsize', 'struct.calcsize', ({(47, 28, 47, 39): 'floatFormat'}, {}), '(floatFormat)', False, 'import os, struct, time\n'), ((49, 16, 49, 48), 'struct.calcsize', 'struct.calcsize', ({(49, 32, 49, 47): 'timestampFormat'}, {}), '(timestampFormat)', False, 'import os, struct, time\n'), ((51, 12, 51, 40), 'struct.calcsize', 'struct.calcsize', ({(51, 28, 51, 39): 'valueFormat'}, {}), '(valueFormat)', False, 'import os, struct, time\n'), ((53, 12, 53, 40), 'struct.calcsize', 'struct.calcsize', ({(53, 28, 53, 39): 'pointFormat'}, {}), '(pointFormat)', False, 'import os, struct, time\n'), ((55, 15, 55, 46), 'struct.calcsize', 'struct.calcsize', ({(55, 31, 55, 45): 'metadataFormat'}, {}), '(metadataFormat)', False, 'import os, struct, time\n'), ((57, 18, 57, 52), 'struct.calcsize', 'struct.calcsize', ({(57, 34, 57, 51): 'archiveInfoFormat'}, {}), '(archiveInfoFormat)', False, 'import os, struct, time\n'), ((62, 9, 62, 29), 'os.path.exists', 'os.path.exists', ({(62, 24, 62, 28): 'path'}, {}), '(path)', False, 'import os, struct, time\n'), ((65, 2, 65, 17), 'os.remove', 'os.remove', ({(65, 12, 65, 16): 'path'}, {}), '(path)', False, 'import os, struct, time\n'), ((72, 7, 72, 31), 'memcache.Client', 'memcache.Client', ({(72, 23, 72, 30): 'servers'}, {}), '(servers)', False, 'import memcache\n'), ((131, 47, 131, 91), 'struct.unpack', 'struct.unpack', ({(131, 61, 131, 75): 'metadataFormat', (131, 76, 131, 90): 'packedMetadata'}, {}), '(metadataFormat, packedMetadata)', False, 'import os, struct, time\n'), ((163, 15, 163, 47), 'struct.pack', 'struct.pack', ({(163, 27, 163, 42): 'timestampFormat', (163, 43, 163, 46): 'now'}, {}), '(timestampFormat, now)', False, 'import os, struct, time\n'), ((196, 17, 196, 50), 'struct.pack', 'struct.pack', ({(196, 30, 196, 40): 'longFormat', (196, 42, 196, 48): 'oldest'}, {}), '(longFormat, oldest)', False, 'import os, struct, time\n'), ((217, 41, 217, 79), 'struct.unpack', 'struct.unpack', ({(217, 55, 217, 66): 'pointFormat', (217, 67, 217, 78): 'packedPoint'}, {}), '(pointFormat, packedPoint)', False, 'import os, struct, time\n'), ((240, 19, 240, 60), 'struct.unpack', 'struct.unpack', ({(240, 33, 240, 45): 'seriesFormat', (240, 47, 240, 59): 'seriesString'}, {}), '(seriesFormat, seriesString)', False, 'import os, struct, time\n'), ((297, 18, 297, 59), 'struct.pack', 'struct.pack', ({(297, 30, 297, 41): 'pointFormat', (297, 42, 297, 52): 'myInterval', (297, 53, 297, 58): 'value'}, {}), '(pointFormat, myInterval, value)', False, 'import os, struct, time\n'), ((300, 29, 300, 67), 'struct.unpack', 'struct.unpack', ({(300, 43, 300, 54): 'pointFormat', (300, 55, 300, 66): 'packedPoint'}, {}), '(pointFormat, packedPoint)', False, 'import os, struct, time\n'), ((404, 29, 404, 71), 'struct.unpack', 'struct.unpack', ({(404, 43, 404, 54): 'pointFormat', (404, 55, 404, 70): 'packedBasePoint'}, {}), '(pointFormat, packedBasePoint)', False, 'import os, struct, time\n'), ((487, 29, 487, 67), 'struct.unpack', 'struct.unpack', ({(487, 43, 487, 54): 'pointFormat', (487, 55, 487, 66): 'packedPoint'}, {}), '(pointFormat, packedPoint)', False, 'import os, struct, time\n'), ((517, 19, 517, 60), 'struct.unpack', 'struct.unpack', ({(517, 33, 517, 45): 'seriesFormat', (517, 47, 517, 59): 'seriesString'}, {}), '(seriesFormat, seriesString)', False, 'import os, struct, time\n'), ((118, 27, 118, 38), 'time.time', 
'time.time', ({}, {}), '()', False, 'import os, struct, time\n'), ((135, 38, 135, 88), 'struct.unpack', 'struct.unpack', ({(135, 52, 135, 69): 'archiveInfoFormat', (135, 70, 135, 87): 'packedArchiveInfo'}, {}), '(archiveInfoFormat, packedArchiveInfo)', False, 'import os, struct, time\n'), ((162, 13, 162, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import os, struct, time\n'), ((204, 18, 204, 95), 'struct.pack', 'struct.pack', ({(204, 30, 204, 47): 'archiveInfoFormat', (204, 49, 204, 69): 'archiveOffsetPointer', (204, 71, 204, 86): 'secondsPerPoint', (204, 88, 204, 94): 'points'}, {}), '(archiveInfoFormat, archiveOffsetPointer, secondsPerPoint, points)', False, 'import os, struct, time\n'), ((255, 20, 255, 78), 'struct.pack', 'struct.pack', ({(255, 32, 255, 43): 'pointFormat', (255, 44, 255, 62): 'lowerIntervalStart', (255, 63, 255, 77): 'aggregateValue'}, {}), '(pointFormat, lowerIntervalStart, aggregateValue)', False, 'import os, struct, time\n'), ((258, 41, 258, 79), 'struct.unpack', 'struct.unpack', ({(258, 55, 258, 66): 'pointFormat', (258, 67, 258, 78): 'packedPoint'}, {}), '(pointFormat, packedPoint)', False, 'import os, struct, time\n'), ((286, 13, 286, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import os, struct, time\n'), ((337, 13, 337, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import os, struct, time\n'), ((474, 13, 474, 24), 'time.time', 'time.time', ({}, {}), '()', False, 'import os, struct, time\n'), ((86, 6, 86, 26), 'StringIO.StringIO.close', 'StringIO.close', ({(86, 21, 86, 25): 'self'}, {}), '(self)', False, 'from StringIO import StringIO\n'), ((194, 49, 194, 60), 'time.time', 'time.time', ({}, {}), '()', False, 'import os, struct, time\n'), ((384, 23, 384, 62), 'struct.pack', 'struct.pack', ({(384, 35, 384, 46): 'pointFormat', (384, 47, 384, 55): 'interval', (384, 56, 384, 61): 'value'}, {}), '(pointFormat, interval, value)', False, 'import os, struct, time\n'), ((391, 22, 391, 61), 'struct.pack', 'struct.pack', ({(391, 34, 391, 45): 'pointFormat', (391, 46, 391, 54): 'interval', (391, 55, 391, 60): 'value'}, {}), '(pointFormat, interval, value)', False, 'import os, struct, time\n'), ((81, 8, 81, 31), 'StringIO.StringIO.__init__', 'StringIO.__init__', ({(81, 26, 81, 30): 'self'}, {}), '(self)', False, 'from StringIO import StringIO\n'), ((121, 41, 121, 52), 'time.time', 'time.time', ({}, {}), '()', False, 'import os, struct, time\n')] |
showtimesynergy/mojify | main.py | 8c012730b9f56d6e7e2003e8db99669516f4e027 | from PIL import Image
import csv
from ast import literal_eval as make_tuple
from math import sqrt
import argparse
import os.path
def load_img(image):
# load an image as a PIL object
im = Image.open(image).convert('RGBA')
return im
def color_distance(c_tuple1, c_tuple2):
# calculate the color distance between two rgb tuples
red_mean = (c_tuple1[0] + c_tuple2[0]) / 2
red = c_tuple1[0] - c_tuple2[0]
green = c_tuple1[1] - c_tuple2[1]
blue = c_tuple1[2] - c_tuple2[2]
delta = (2 + (red_mean / 256)) * (red ** 2)
delta += (4 * (green ** 2))
delta += (2 + ((255 - red_mean) / 256)) * (blue ** 2)
delta = sqrt(delta)
return delta
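# Illustrative magnitudes (approximate, not from the original repo): with fully
# opaque colours, color_distance((255, 0, 0), (0, 0, 255)) comes out around 570,
# color_distance((0, 0, 0), (255, 255, 255)) around 765 (close to the maximum),
# and identical colours give 0.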
def write_out(text_matrix):
# write out emoji grid to txt file
with open('out.txt', '+w', encoding='utf-8') as out:
for line in text_matrix:
line_out = ''
for char in line:
# TODO: ZWJ support
if char is None:
line_out += '\u2001\u2006'
else:
char_code = '0x' + char
char_code = int(char_code, base=16)
line_out += chr(char_code)
out.writelines(line_out + '\n')
def gen_matrix(pix_data):
# generate unicode data from colors
pix = pix_data.load()
emoji_grid = []
for y in range(0, size[1]):
emoji_grid.append([])
        for x in range(0, size[0]):
            pixel = pix[x, y]
            # transparent pixels map to None; otherwise pick the emoji whose
            # colour is closest to this pixel
            best = None
            best_delta = float('Inf')
            if pixel[3] != 0:
                for entry in emoji_list:
                    emoji_color = entry[1]
                    delta = color_distance(emoji_color, pixel)
                    if delta < best_delta:
                        best = entry[0]
                        best_delta = delta
            emoji_grid[-1].append(best)
return emoji_grid
def handle_arguments():
parser = argparse.ArgumentParser(
description='Represent an image using emoji'
)
parser.add_argument('image', help='image to be processed')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = handle_arguments()
path = args.image
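    # Illustrative proc.csv layout (inferred from the parsing below; the file
    # shipped with the repo may differ): each row holds a hex codepoint without
    # the '0x' prefix plus an RGB(A) tuple string, e.g.
    #   1f600,"(255, 221, 103)"
    #   1f34e,"(196, 31, 54)"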
    emoji_list = []
    with open('proc.csv') as raw_list:
        reader = csv.reader(raw_list)
        rows = list(reader)
        for entry in rows:
            emoji_list.append([entry[0], make_tuple(entry[1])])
image = load_img(path)
size = image.size
emoji_grid = gen_matrix(image)
write_out(emoji_grid)
print('Output in out.txt')
| [((24, 12, 24, 23), 'math.sqrt', 'sqrt', ({(24, 17, 24, 22): 'delta'}, {}), '(delta)', False, 'from math import sqrt\n'), ((67, 13, 69, 5), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((80, 17, 80, 37), 'csv.reader', 'csv.reader', ({(80, 28, 80, 36): 'raw_list'}, {}), '(raw_list)', False, 'import csv\n'), ((11, 9, 11, 26), 'PIL.Image.open', 'Image.open', ({(11, 20, 11, 25): 'image'}, {}), '(image)', False, 'from PIL import Image\n'), ((83, 41, 83, 61), 'ast.literal_eval', 'make_tuple', ({(83, 52, 83, 60): 'entry[1]'}, {}), '(entry[1])', True, 'from ast import literal_eval as make_tuple\n')] |
umr-bot/sliding-puzzle-solver-bot | venv/lib/python3.7/site-packages/Xlib/ext/xinput.py | 826532a426f343bcc66034b241a42b3bd864e07c | # Xlib.ext.xinput -- XInput extension module
#
# Copyright (C) 2012 Outpost Embedded, LLC
# Forest Bond <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
'''
A very incomplete implementation of the XInput extension.
'''
import sys
import array
import struct
# Python 2/3 compatibility.
from six import integer_types
from Xlib.protocol import rq
from Xlib import X
extname = 'XInputExtension'
PropertyDeleted = 0
PropertyCreated = 1
PropertyModified = 2
NotifyNormal = 0
NotifyGrab = 1
NotifyUngrab = 2
NotifyWhileGrabbed = 3
NotifyPassiveGrab = 4
NotifyPassiveUngrab = 5
NotifyAncestor = 0
NotifyVirtual = 1
NotifyInferior = 2
NotifyNonlinear = 3
NotifyNonlinearVirtual = 4
NotifyPointer = 5
NotifyPointerRoot = 6
NotifyDetailNone = 7
GrabtypeButton = 0
GrabtypeKeycode = 1
GrabtypeEnter = 2
GrabtypeFocusIn = 3
GrabtypeTouchBegin = 4
AnyModifier = (1 << 31)
AnyButton = 0
AnyKeycode = 0
AsyncDevice = 0
SyncDevice = 1
ReplayDevice = 2
AsyncPairedDevice = 3
AsyncPair = 4
SyncPair = 5
SlaveSwitch = 1
DeviceChange = 2
MasterAdded = (1 << 0)
MasterRemoved = (1 << 1)
SlaveAdded = (1 << 2)
SlaveRemoved = (1 << 3)
SlaveAttached = (1 << 4)
SlaveDetached = (1 << 5)
DeviceEnabled = (1 << 6)
DeviceDisabled = (1 << 7)
AddMaster = 1
RemoveMaster = 2
AttachSlave = 3
DetachSlave = 4
AttachToMaster = 1
Floating = 2
ModeRelative = 0
ModeAbsolute = 1
MasterPointer = 1
MasterKeyboard = 2
SlavePointer = 3
SlaveKeyboard = 4
FloatingSlave = 5
KeyClass = 0
ButtonClass = 1
ValuatorClass = 2
ScrollClass = 3
TouchClass = 8
KeyRepeat = (1 << 16)
AllDevices = 0
AllMasterDevices = 1
DeviceChanged = 1
KeyPress = 2
KeyRelease = 3
ButtonPress = 4
ButtonRelease = 5
Motion = 6
Enter = 7
Leave = 8
FocusIn = 9
FocusOut = 10
HierarchyChanged = 11
PropertyEvent = 12
RawKeyPress = 13
RawKeyRelease = 14
RawButtonPress = 15
RawButtonRelease = 16
RawMotion = 17
DeviceChangedMask = (1 << DeviceChanged)
KeyPressMask = (1 << KeyPress)
KeyReleaseMask = (1 << KeyRelease)
ButtonPressMask = (1 << ButtonPress)
ButtonReleaseMask = (1 << ButtonRelease)
MotionMask = (1 << Motion)
EnterMask = (1 << Enter)
LeaveMask = (1 << Leave)
FocusInMask = (1 << FocusIn)
FocusOutMask = (1 << FocusOut)
HierarchyChangedMask = (1 << HierarchyChanged)
PropertyEventMask = (1 << PropertyEvent)
RawKeyPressMask = (1 << RawKeyPress)
RawKeyReleaseMask = (1 << RawKeyRelease)
RawButtonPressMask = (1 << RawButtonPress)
RawButtonReleaseMask = (1 << RawButtonRelease)
RawMotionMask = (1 << RawMotion)
GrabModeSync = 0
GrabModeAsync = 1
GrabModeTouch = 2
DEVICEID = rq.Card16
DEVICE = rq.Card16
DEVICEUSE = rq.Card8
class FP1616(rq.Int32):
def check_value(self, value):
return int(value * 65536.0)
def parse_value(self, value, display):
return float(value) / float(1 << 16)
class FP3232(rq.ValueField):
structcode = 'lL'
structvalues = 2
def check_value(self, value):
return value
def parse_value(self, value, display):
integral, frac = value
ret = float(integral)
# optimised math.ldexp(float(frac), -32)
ret += float(frac) * (1.0 / (1 << 32))
return ret
class XIQueryVersion(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(47),
rq.RequestLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card16('major_version'),
rq.Card16('minor_version'),
rq.Pad(20),
)
def query_version(self):
return XIQueryVersion(
display=self.display,
opcode=self.display.get_extension_major(extname),
major_version=2,
minor_version=0,
)
class Mask(rq.List):
def __init__(self, name):
rq.List.__init__(self, name, rq.Card32, pad=0)
def pack_value(self, val):
mask_seq = array.array(rq.struct_to_array_codes['L'])
if isinstance(val, integer_types):
# We need to build a "binary mask" that (as far as I can tell) is
# encoded in native byte order from end to end. The simple case is
# with a single unsigned 32-bit value, for which we construct an
# array with just one item. For values too big to fit inside 4
# bytes we build a longer array, being careful to maintain native
# byte order across the entire set of values.
if sys.byteorder == 'little':
def fun(val):
mask_seq.insert(0, val)
elif sys.byteorder == 'big':
fun = mask_seq.append
else:
raise AssertionError(sys.byteorder)
while val:
fun(val & 0xFFFFFFFF)
val = val >> 32
else:
mask_seq.extend(val)
return mask_seq.tostring(), len(mask_seq), None
EventMask = rq.Struct(
DEVICE('deviceid'),
rq.LengthOf('mask', 2),
Mask('mask'),
)
class XISelectEvents(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(46),
rq.RequestLength(),
rq.Window('window'),
rq.LengthOf('masks', 2),
rq.Pad(2),
rq.List('masks', EventMask),
)
def select_events(self, event_masks):
'''
select_events(event_masks)
event_masks:
            Sequence of (deviceid, mask) pairs, where deviceid is a numerical device
            ID, or AllDevices or AllMasterDevices, and mask is either an unsigned
            integer or a sequence of 32-bit unsigned values
'''
return XISelectEvents(
display=self.display,
opcode=self.display.get_extension_major(extname),
window=self,
masks=event_masks,
)
AnyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Pad(2),
)
class ButtonMask(object):
def __init__(self, value, length):
self._value = value
self._length = length
def __len__(self):
return self._length
def __getitem__(self, key):
return self._value & (1 << key)
def __str__(self):
return repr(self)
def __repr__(self):
return '0b{value:0{width}b}'.format(value=self._value,
width=self._length)
class ButtonState(rq.ValueField):
structcode = None
def __init__(self, name):
rq.ValueField.__init__(self, name)
def parse_binary_value(self, data, display, length, fmt):
# Mask: bitfield of <length> button states.
mask_len = 4 * ((((length + 7) >> 3) + 3) >> 2)
mask_data = data[:mask_len]
mask_value = 0
for byte in reversed(struct.unpack('={0:d}B'.format(mask_len), mask_data)):
mask_value <<= 8
mask_value |= byte
data = data[mask_len:]
assert (mask_value & 1) == 0
return ButtonMask(mask_value >> 1, length), data
ButtonInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf(('state', 'labels'), 2),
ButtonState('state'),
rq.List('labels', rq.Card32),
)
KeyInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.LengthOf('keycodes', 2),
rq.List('keycodes', rq.Card32),
)
ValuatorInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card32('label'),
FP3232('min'),
FP3232('max'),
FP3232('value'),
rq.Card32('resolution'),
rq.Card8('mode'),
rq.Pad(3),
)
ScrollInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card16('number'),
rq.Card16('scroll_type'),
rq.Pad(2),
rq.Card32('flags'),
FP3232('increment'),
)
TouchInfo = rq.Struct(
rq.Card16('type'),
rq.Card16('length'),
rq.Card16('sourceid'),
rq.Card8('mode'),
rq.Card8('num_touches'),
)
INFO_CLASSES = {
KeyClass: KeyInfo,
ButtonClass: ButtonInfo,
ValuatorClass: ValuatorInfo,
ScrollClass: ScrollInfo,
TouchClass: TouchInfo,
}
class ClassInfoClass(object):
structcode = None
def parse_binary(self, data, display):
class_type, length = struct.unpack('=HH', data[:4])
class_struct = INFO_CLASSES.get(class_type, AnyInfo)
class_data, _ = class_struct.parse_binary(data, display)
data = data[length * 4:]
return class_data, data
ClassInfo = ClassInfoClass()
DeviceInfo = rq.Struct(
DEVICEID('deviceid'),
rq.Card16('use'),
rq.Card16('attachment'),
rq.LengthOf('classes', 2),
rq.LengthOf('name', 2),
rq.Bool('enabled'),
rq.Pad(1),
rq.String8('name', 4),
rq.List('classes', ClassInfo),
)
class XIQueryDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(48),
rq.RequestLength(),
DEVICEID('deviceid'),
rq.Pad(2),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('devices', 2),
rq.Pad(22),
rq.List('devices', DeviceInfo),
)
def query_device(self, deviceid):
return XIQueryDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
)
class XIGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(51),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('time'),
rq.Cursor('cursor', (X.NONE, )),
DEVICEID('deviceid'),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(1),
rq.LengthOf('mask', 2),
Mask('mask'),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.Card8('status'),
rq.Pad(23),
)
def grab_device(self, deviceid, time, grab_mode, paired_device_mode, owner_events, event_mask):
return XIGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
)
class XIUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(52),
rq.RequestLength(),
rq.Card32('time'),
DEVICEID('deviceid'),
rq.Pad(2),
)
def ungrab_device(self, deviceid, time):
return XIUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
time=time,
deviceid=deviceid,
)
class XIPassiveGrabDevice(rq.ReplyRequest):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(54),
rq.RequestLength(),
rq.Card32('time'),
rq.Window('grab_window'),
rq.Cursor('cursor', (X.NONE, )),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.LengthOf('mask', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,
GrabtypeFocusIn, GrabtypeTouchBegin)),
rq.Set('grab_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Set('paired_device_mode', 1, (GrabModeSync, GrabModeAsync)),
rq.Bool('owner_events'),
rq.Pad(2),
Mask('mask'),
rq.List('modifiers', rq.Card32),
)
_reply = rq.Struct(
rq.ReplyCode(),
rq.Pad(1),
rq.Card16('sequence_number'),
rq.ReplyLength(),
rq.LengthOf('modifiers', 2),
rq.Pad(22),
rq.List('modifiers', rq.Card32),
)
def passive_grab_device(self, deviceid, time, detail,
grab_type, grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return XIPassiveGrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
time=time,
cursor=X.NONE,
detail=detail,
grab_type=grab_type,
grab_mode=grab_mode,
paired_device_mode=paired_device_mode,
owner_events=owner_events,
mask=event_mask,
modifiers=modifiers,
)
def grab_keycode(self, deviceid, time, keycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers):
return passive_grab_device(self, deviceid, time, keycode,
GrabtypeKeycode,
grab_mode, paired_device_mode,
owner_events, event_mask, modifiers)
class XIPassiveUngrabDevice(rq.Request):
_request = rq.Struct(
rq.Card8('opcode'),
rq.Opcode(55),
rq.RequestLength(),
rq.Window('grab_window'),
rq.Card32('detail'),
DEVICEID('deviceid'),
rq.LengthOf('modifiers', 2),
rq.Set('grab_type', 1, (GrabtypeButton, GrabtypeKeycode,
GrabtypeEnter, GrabtypeFocusIn,
GrabtypeTouchBegin)),
rq.Pad(3),
rq.List('modifiers', rq.Card32),
)
def passive_ungrab_device(self, deviceid, detail, grab_type, modifiers):
return XIPassiveUngrabDevice(
display=self.display,
opcode=self.display.get_extension_major(extname),
deviceid=deviceid,
grab_window=self,
detail=detail,
grab_type=grab_type,
modifiers=modifiers,
)
def ungrab_keycode(self, deviceid, keycode, modifiers):
return passive_ungrab_device(self, deviceid, keycode,
GrabtypeKeycode, modifiers)
HierarchyInfo = rq.Struct(
DEVICEID('deviceid'),
DEVICEID('attachment'),
DEVICEUSE('type'),
rq.Bool('enabled'),
rq.Pad(2),
rq.Card32('flags'),
)
HierarchyEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('flags'),
rq.LengthOf('info', 2),
rq.Pad(10),
rq.List('info', HierarchyInfo),
)
ModifierInfo = rq.Struct(
rq.Card32('base_mods'),
rq.Card32('latched_mods'),
rq.Card32('locked_mods'),
rq.Card32('effective_mods'),
)
GroupInfo = rq.Struct(
rq.Card8('base_group'),
rq.Card8('latched_group'),
rq.Card8('locked_group'),
rq.Card8('effective_group'),
)
DeviceEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.Card32('detail'),
rq.Window('root'),
rq.Window('event'),
rq.Window('child'),
FP1616('root_x'),
FP1616('root_y'),
FP1616('event_x'),
FP1616('event_y'),
rq.LengthOf('buttons', 2),
rq.Card16('valulators_len'),
DEVICEID('sourceid'),
rq.Pad(2),
rq.Card32('flags'),
rq.Object('mods', ModifierInfo),
rq.Object('groups', GroupInfo),
ButtonState('buttons'),
)
DeviceChangedEventData = rq.Struct(
DEVICEID('deviceid'),
rq.Card32('time'),
rq.LengthOf('classes', 2),
DEVICEID('sourceid'),
rq.Card8('reason'),
rq.Pad(11),
rq.List('classes', ClassInfo),
)
def init(disp, info):
disp.extension_add_method('display', 'xinput_query_version', query_version)
disp.extension_add_method('window', 'xinput_select_events', select_events)
disp.extension_add_method('display', 'xinput_query_device', query_device)
disp.extension_add_method('window', 'xinput_grab_device', grab_device)
disp.extension_add_method('display', 'xinput_ungrab_device', ungrab_device)
disp.extension_add_method('window', 'xinput_grab_keycode', grab_keycode)
disp.extension_add_method('window', 'xinput_ungrab_keycode', ungrab_keycode)
if hasattr(disp,"ge_add_event_data"):
for device_event in (ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion):
disp.ge_add_event_data(info.major_opcode, device_event, DeviceEventData)
disp.ge_add_event_data(info.major_opcode, DeviceChanged, DeviceEventData)
disp.ge_add_event_data(info.major_opcode, HierarchyChanged, HierarchyEventData)
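# Illustrative sketch only (not part of the original module): one possible way to
# select XInput pointer events once Xlib has loaded this extension. Assumes a
# running X server; the helper name and overall flow are hypothetical.
def _example_select_pointer_events():
    from Xlib import display as _display
    disp = _display.Display()
    # negotiate the XInput version before selecting events
    disp.xinput_query_version()
    root = disp.screen().root
    root.xinput_select_events([(AllDevices, ButtonPressMask | MotionMask)])
    while True:
        event = disp.next_event()
        # device events arrive as GenericEvents whose data is parsed with
        # DeviceEventData (registered in init() above)
        print(event)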
| [((243, 4, 243, 26), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(243, 16, 243, 22): '"""mask"""', (243, 24, 243, 25): '2'}, {}), "('mask', 2)", False, 'from Xlib.protocol import rq\n'), ((276, 4, 276, 21), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(276, 14, 276, 20): '"""type"""'}, {}), "('type')", False, 'from Xlib.protocol import rq\n'), ((277, 4, 277, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(277, 14, 277, 22): '"""length"""'}, {}), "('length')", False, 'from Xlib.protocol import rq\n'), ((278, 4, 278, 25), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(278, 14, 278, 24): '"""sourceid"""'}, {}), "('sourceid')", False, 'from Xlib.protocol import rq\n'), ((279, 4, 279, 13), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(279, 11, 279, 12): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((321, 4, 321, 21), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(321, 14, 321, 20): '"""type"""'}, {}), "('type')", False, 'from Xlib.protocol import rq\n'), ((322, 4, 322, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(322, 14, 322, 22): '"""length"""'}, {}), "('length')", False, 'from Xlib.protocol import rq\n'), ((323, 4, 323, 25), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(323, 14, 323, 24): '"""sourceid"""'}, {}), "('sourceid')", False, 'from Xlib.protocol import rq\n'), ((324, 4, 324, 39), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(324, 16, 324, 35): "('state', 'labels')", (324, 37, 324, 38): '2'}, {}), "(('state', 'labels'), 2)", False, 'from Xlib.protocol import rq\n'), ((326, 4, 326, 32), 'Xlib.protocol.rq.List', 'rq.List', ({(326, 12, 326, 20): '"""labels"""', (326, 22, 326, 31): 'rq.Card32'}, {}), "('labels', rq.Card32)", False, 'from Xlib.protocol import rq\n'), ((330, 4, 330, 21), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(330, 14, 330, 20): '"""type"""'}, {}), "('type')", False, 'from Xlib.protocol import rq\n'), ((331, 4, 331, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(331, 14, 331, 22): '"""length"""'}, {}), "('length')", False, 'from Xlib.protocol import rq\n'), ((332, 4, 332, 25), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(332, 14, 332, 24): '"""sourceid"""'}, {}), "('sourceid')", False, 'from Xlib.protocol import rq\n'), ((333, 4, 333, 30), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(333, 16, 333, 26): '"""keycodes"""', (333, 28, 333, 29): '2'}, {}), "('keycodes', 2)", False, 'from Xlib.protocol import rq\n'), ((334, 4, 334, 34), 'Xlib.protocol.rq.List', 'rq.List', ({(334, 12, 334, 22): '"""keycodes"""', (334, 24, 334, 33): 'rq.Card32'}, {}), "('keycodes', rq.Card32)", False, 'from Xlib.protocol import rq\n'), ((338, 4, 338, 21), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(338, 14, 338, 20): '"""type"""'}, {}), "('type')", False, 'from Xlib.protocol import rq\n'), ((339, 4, 339, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(339, 14, 339, 22): '"""length"""'}, {}), "('length')", False, 'from Xlib.protocol import rq\n'), ((340, 4, 340, 25), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(340, 14, 340, 24): '"""sourceid"""'}, {}), "('sourceid')", False, 'from Xlib.protocol import rq\n'), ((341, 4, 341, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(341, 14, 341, 22): '"""number"""'}, {}), "('number')", False, 'from Xlib.protocol import rq\n'), ((342, 4, 342, 22), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(342, 14, 342, 21): '"""label"""'}, {}), "('label')", False, 'from Xlib.protocol import rq\n'), ((346, 4, 346, 27), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(346, 14, 346, 26): '"""resolution"""'}, {}), "('resolution')", False, 'from Xlib.protocol import rq\n'), ((347, 4, 
347, 20), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(347, 13, 347, 19): '"""mode"""'}, {}), "('mode')", False, 'from Xlib.protocol import rq\n'), ((348, 4, 348, 13), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(348, 11, 348, 12): '3'}, {}), '(3)', False, 'from Xlib.protocol import rq\n'), ((352, 4, 352, 21), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(352, 14, 352, 20): '"""type"""'}, {}), "('type')", False, 'from Xlib.protocol import rq\n'), ((353, 4, 353, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(353, 14, 353, 22): '"""length"""'}, {}), "('length')", False, 'from Xlib.protocol import rq\n'), ((354, 4, 354, 25), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(354, 14, 354, 24): '"""sourceid"""'}, {}), "('sourceid')", False, 'from Xlib.protocol import rq\n'), ((355, 4, 355, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(355, 14, 355, 22): '"""number"""'}, {}), "('number')", False, 'from Xlib.protocol import rq\n'), ((356, 4, 356, 28), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(356, 14, 356, 27): '"""scroll_type"""'}, {}), "('scroll_type')", False, 'from Xlib.protocol import rq\n'), ((357, 4, 357, 13), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(357, 11, 357, 12): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((358, 4, 358, 22), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(358, 14, 358, 21): '"""flags"""'}, {}), "('flags')", False, 'from Xlib.protocol import rq\n'), ((363, 4, 363, 21), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(363, 14, 363, 20): '"""type"""'}, {}), "('type')", False, 'from Xlib.protocol import rq\n'), ((364, 4, 364, 23), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(364, 14, 364, 22): '"""length"""'}, {}), "('length')", False, 'from Xlib.protocol import rq\n'), ((365, 4, 365, 25), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(365, 14, 365, 24): '"""sourceid"""'}, {}), "('sourceid')", False, 'from Xlib.protocol import rq\n'), ((366, 4, 366, 20), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(366, 13, 366, 19): '"""mode"""'}, {}), "('mode')", False, 'from Xlib.protocol import rq\n'), ((367, 4, 367, 27), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(367, 13, 367, 26): '"""num_touches"""'}, {}), "('num_touches')", False, 'from Xlib.protocol import rq\n'), ((393, 4, 393, 20), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(393, 14, 393, 19): '"""use"""'}, {}), "('use')", False, 'from Xlib.protocol import rq\n'), ((394, 4, 394, 27), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(394, 14, 394, 26): '"""attachment"""'}, {}), "('attachment')", False, 'from Xlib.protocol import rq\n'), ((395, 4, 395, 29), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(395, 16, 395, 25): '"""classes"""', (395, 27, 395, 28): '2'}, {}), "('classes', 2)", False, 'from Xlib.protocol import rq\n'), ((396, 4, 396, 26), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(396, 16, 396, 22): '"""name"""', (396, 24, 396, 25): '2'}, {}), "('name', 2)", False, 'from Xlib.protocol import rq\n'), ((397, 4, 397, 22), 'Xlib.protocol.rq.Bool', 'rq.Bool', ({(397, 12, 397, 21): '"""enabled"""'}, {}), "('enabled')", False, 'from Xlib.protocol import rq\n'), ((398, 4, 398, 13), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(398, 11, 398, 12): '1'}, {}), '(1)', False, 'from Xlib.protocol import rq\n'), ((399, 4, 399, 25), 'Xlib.protocol.rq.String8', 'rq.String8', ({(399, 15, 399, 21): '"""name"""', (399, 23, 399, 24): '4'}, {}), "('name', 4)", False, 'from Xlib.protocol import rq\n'), ((400, 4, 400, 33), 'Xlib.protocol.rq.List', 'rq.List', ({(400, 12, 400, 21): '"""classes"""', (400, 23, 400, 32): 'ClassInfo'}, {}), "('classes', ClassInfo)", False, 'from 
Xlib.protocol import rq\n'), ((582, 4, 582, 22), 'Xlib.protocol.rq.Bool', 'rq.Bool', ({(582, 12, 582, 21): '"""enabled"""'}, {}), "('enabled')", False, 'from Xlib.protocol import rq\n'), ((583, 4, 583, 13), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(583, 11, 583, 12): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((584, 4, 584, 22), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(584, 14, 584, 21): '"""flags"""'}, {}), "('flags')", False, 'from Xlib.protocol import rq\n'), ((590, 4, 590, 21), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(590, 14, 590, 20): '"""time"""'}, {}), "('time')", False, 'from Xlib.protocol import rq\n'), ((591, 4, 591, 22), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(591, 14, 591, 21): '"""flags"""'}, {}), "('flags')", False, 'from Xlib.protocol import rq\n'), ((592, 4, 592, 26), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(592, 16, 592, 22): '"""info"""', (592, 24, 592, 25): '2'}, {}), "('info', 2)", False, 'from Xlib.protocol import rq\n'), ((593, 4, 593, 14), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(593, 11, 593, 13): '10'}, {}), '(10)', False, 'from Xlib.protocol import rq\n'), ((594, 4, 594, 34), 'Xlib.protocol.rq.List', 'rq.List', ({(594, 12, 594, 18): '"""info"""', (594, 20, 594, 33): 'HierarchyInfo'}, {}), "('info', HierarchyInfo)", False, 'from Xlib.protocol import rq\n'), ((598, 4, 598, 26), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(598, 14, 598, 25): '"""base_mods"""'}, {}), "('base_mods')", False, 'from Xlib.protocol import rq\n'), ((599, 4, 599, 29), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(599, 14, 599, 28): '"""latched_mods"""'}, {}), "('latched_mods')", False, 'from Xlib.protocol import rq\n'), ((600, 4, 600, 28), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(600, 14, 600, 27): '"""locked_mods"""'}, {}), "('locked_mods')", False, 'from Xlib.protocol import rq\n'), ((601, 4, 601, 31), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(601, 14, 601, 30): '"""effective_mods"""'}, {}), "('effective_mods')", False, 'from Xlib.protocol import rq\n'), ((605, 4, 605, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(605, 13, 605, 25): '"""base_group"""'}, {}), "('base_group')", False, 'from Xlib.protocol import rq\n'), ((606, 4, 606, 29), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(606, 13, 606, 28): '"""latched_group"""'}, {}), "('latched_group')", False, 'from Xlib.protocol import rq\n'), ((607, 4, 607, 28), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(607, 13, 607, 27): '"""locked_group"""'}, {}), "('locked_group')", False, 'from Xlib.protocol import rq\n'), ((608, 4, 608, 31), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(608, 13, 608, 30): '"""effective_group"""'}, {}), "('effective_group')", False, 'from Xlib.protocol import rq\n'), ((613, 4, 613, 21), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(613, 14, 613, 20): '"""time"""'}, {}), "('time')", False, 'from Xlib.protocol import rq\n'), ((614, 4, 614, 23), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(614, 14, 614, 22): '"""detail"""'}, {}), "('detail')", False, 'from Xlib.protocol import rq\n'), ((615, 4, 615, 21), 'Xlib.protocol.rq.Window', 'rq.Window', ({(615, 14, 615, 20): '"""root"""'}, {}), "('root')", False, 'from Xlib.protocol import rq\n'), ((616, 4, 616, 22), 'Xlib.protocol.rq.Window', 'rq.Window', ({(616, 14, 616, 21): '"""event"""'}, {}), "('event')", False, 'from Xlib.protocol import rq\n'), ((617, 4, 617, 22), 'Xlib.protocol.rq.Window', 'rq.Window', ({(617, 14, 617, 21): '"""child"""'}, {}), "('child')", False, 'from Xlib.protocol import rq\n'), ((622, 4, 622, 29), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(622, 
16, 622, 25): '"""buttons"""', (622, 27, 622, 28): '2'}, {}), "('buttons', 2)", False, 'from Xlib.protocol import rq\n'), ((623, 4, 623, 31), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(623, 14, 623, 30): '"""valulators_len"""'}, {}), "('valulators_len')", False, 'from Xlib.protocol import rq\n'), ((625, 4, 625, 13), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(625, 11, 625, 12): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((626, 4, 626, 22), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(626, 14, 626, 21): '"""flags"""'}, {}), "('flags')", False, 'from Xlib.protocol import rq\n'), ((627, 4, 627, 35), 'Xlib.protocol.rq.Object', 'rq.Object', ({(627, 14, 627, 20): '"""mods"""', (627, 22, 627, 34): 'ModifierInfo'}, {}), "('mods', ModifierInfo)", False, 'from Xlib.protocol import rq\n'), ((628, 4, 628, 34), 'Xlib.protocol.rq.Object', 'rq.Object', ({(628, 14, 628, 22): '"""groups"""', (628, 24, 628, 33): 'GroupInfo'}, {}), "('groups', GroupInfo)", False, 'from Xlib.protocol import rq\n'), ((634, 4, 634, 21), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(634, 14, 634, 20): '"""time"""'}, {}), "('time')", False, 'from Xlib.protocol import rq\n'), ((635, 4, 635, 29), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(635, 16, 635, 25): '"""classes"""', (635, 27, 635, 28): '2'}, {}), "('classes', 2)", False, 'from Xlib.protocol import rq\n'), ((637, 4, 637, 22), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(637, 13, 637, 21): '"""reason"""'}, {}), "('reason')", False, 'from Xlib.protocol import rq\n'), ((638, 4, 638, 14), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(638, 11, 638, 13): '11'}, {}), '(11)', False, 'from Xlib.protocol import rq\n'), ((639, 4, 639, 33), 'Xlib.protocol.rq.List', 'rq.List', ({(639, 12, 639, 21): '"""classes"""', (639, 23, 639, 32): 'ClassInfo'}, {}), "('classes', ClassInfo)", False, 'from Xlib.protocol import rq\n'), ((185, 8, 185, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(185, 17, 185, 25): '"""opcode"""'}, {}), "('opcode')", False, 'from Xlib.protocol import rq\n'), ((186, 8, 186, 21), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', ({(186, 18, 186, 20): '47'}, {}), '(47)', False, 'from Xlib.protocol import rq\n'), ((187, 8, 187, 26), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((188, 8, 188, 34), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(188, 18, 188, 33): '"""major_version"""'}, {}), "('major_version')", False, 'from Xlib.protocol import rq\n'), ((189, 8, 189, 34), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(189, 18, 189, 33): '"""minor_version"""'}, {}), "('minor_version')", False, 'from Xlib.protocol import rq\n'), ((192, 8, 192, 22), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((193, 8, 193, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(193, 15, 193, 16): '1'}, {}), '(1)', False, 'from Xlib.protocol import rq\n'), ((194, 8, 194, 36), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(194, 18, 194, 35): '"""sequence_number"""'}, {}), "('sequence_number')", False, 'from Xlib.protocol import rq\n'), ((195, 8, 195, 24), 'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((196, 8, 196, 34), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(196, 18, 196, 33): '"""major_version"""'}, {}), "('major_version')", False, 'from Xlib.protocol import rq\n'), ((197, 8, 197, 34), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(197, 18, 197, 33): '"""minor_version"""'}, {}), "('minor_version')", False, 'from Xlib.protocol import rq\n'), ((198, 
8, 198, 18), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(198, 15, 198, 17): '20'}, {}), '(20)', False, 'from Xlib.protocol import rq\n'), ((213, 8, 213, 54), 'Xlib.protocol.rq.List.__init__', 'rq.List.__init__', (), '', False, 'from Xlib.protocol import rq\n'), ((217, 19, 217, 61), 'array.array', 'array.array', ({(217, 31, 217, 60): "rq.struct_to_array_codes['L']"}, {}), "(rq.struct_to_array_codes['L'])", False, 'import array\n'), ((250, 8, 250, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(250, 17, 250, 25): '"""opcode"""'}, {}), "('opcode')", False, 'from Xlib.protocol import rq\n'), ((251, 8, 251, 21), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', ({(251, 18, 251, 20): '46'}, {}), '(46)', False, 'from Xlib.protocol import rq\n'), ((252, 8, 252, 26), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((253, 8, 253, 27), 'Xlib.protocol.rq.Window', 'rq.Window', ({(253, 18, 253, 26): '"""window"""'}, {}), "('window')", False, 'from Xlib.protocol import rq\n'), ((254, 8, 254, 31), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(254, 20, 254, 27): '"""masks"""', (254, 29, 254, 30): '2'}, {}), "('masks', 2)", False, 'from Xlib.protocol import rq\n'), ((255, 8, 255, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(255, 15, 255, 16): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((256, 8, 256, 35), 'Xlib.protocol.rq.List', 'rq.List', ({(256, 16, 256, 23): '"""masks"""', (256, 25, 256, 34): 'EventMask'}, {}), "('masks', EventMask)", False, 'from Xlib.protocol import rq\n'), ((306, 8, 306, 42), 'Xlib.protocol.rq.ValueField.__init__', 'rq.ValueField.__init__', ({(306, 31, 306, 35): 'self', (306, 37, 306, 41): 'name'}, {}), '(self, name)', False, 'from Xlib.protocol import rq\n'), ((383, 29, 383, 59), 'struct.unpack', 'struct.unpack', ({(383, 43, 383, 48): '"""=HH"""', (383, 50, 383, 58): 'data[:4]'}, {}), "('=HH', data[:4])", False, 'import struct\n'), ((405, 8, 405, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(405, 17, 405, 25): '"""opcode"""'}, {}), "('opcode')", False, 'from Xlib.protocol import rq\n'), ((406, 8, 406, 21), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', ({(406, 18, 406, 20): '48'}, {}), '(48)', False, 'from Xlib.protocol import rq\n'), ((407, 8, 407, 26), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((409, 8, 409, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(409, 15, 409, 16): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((413, 8, 413, 22), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((414, 8, 414, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(414, 15, 414, 16): '1'}, {}), '(1)', False, 'from Xlib.protocol import rq\n'), ((415, 8, 415, 36), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(415, 18, 415, 35): '"""sequence_number"""'}, {}), "('sequence_number')", False, 'from Xlib.protocol import rq\n'), ((416, 8, 416, 24), 'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((417, 8, 417, 33), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(417, 20, 417, 29): '"""devices"""', (417, 31, 417, 32): '2'}, {}), "('devices', 2)", False, 'from Xlib.protocol import rq\n'), ((418, 8, 418, 18), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(418, 15, 418, 17): '22'}, {}), '(22)', False, 'from Xlib.protocol import rq\n'), ((419, 8, 419, 38), 'Xlib.protocol.rq.List', 'rq.List', ({(419, 16, 419, 25): '"""devices"""', (419, 27, 419, 37): 'DeviceInfo'}, {}), "('devices', 
DeviceInfo)", False, 'from Xlib.protocol import rq\n'), ((431, 8, 431, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(431, 17, 431, 25): '"""opcode"""'}, {}), "('opcode')", False, 'from Xlib.protocol import rq\n'), ((432, 8, 432, 21), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', ({(432, 18, 432, 20): '51'}, {}), '(51)', False, 'from Xlib.protocol import rq\n'), ((433, 8, 433, 26), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((434, 8, 434, 32), 'Xlib.protocol.rq.Window', 'rq.Window', ({(434, 18, 434, 31): '"""grab_window"""'}, {}), "('grab_window')", False, 'from Xlib.protocol import rq\n'), ((435, 8, 435, 25), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(435, 18, 435, 24): '"""time"""'}, {}), "('time')", False, 'from Xlib.protocol import rq\n'), ((436, 8, 436, 39), 'Xlib.protocol.rq.Cursor', 'rq.Cursor', ({(436, 18, 436, 26): '"""cursor"""', (436, 28, 436, 38): '(X.NONE,)'}, {}), "('cursor', (X.NONE,))", False, 'from Xlib.protocol import rq\n'), ((438, 8, 438, 61), 'Xlib.protocol.rq.Set', 'rq.Set', ({(438, 15, 438, 26): '"""grab_mode"""', (438, 28, 438, 29): '1', (438, 31, 438, 60): '(GrabModeSync, GrabModeAsync)'}, {}), "('grab_mode', 1, (GrabModeSync, GrabModeAsync))", False, 'from Xlib.protocol import rq\n'), ((439, 8, 439, 70), 'Xlib.protocol.rq.Set', 'rq.Set', ({(439, 15, 439, 35): '"""paired_device_mode"""', (439, 37, 439, 38): '1', (439, 40, 439, 69): '(GrabModeSync, GrabModeAsync)'}, {}), "('paired_device_mode', 1, (GrabModeSync, GrabModeAsync))", False, 'from Xlib.protocol import rq\n'), ((440, 8, 440, 31), 'Xlib.protocol.rq.Bool', 'rq.Bool', ({(440, 16, 440, 30): '"""owner_events"""'}, {}), "('owner_events')", False, 'from Xlib.protocol import rq\n'), ((441, 8, 441, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(441, 15, 441, 16): '1'}, {}), '(1)', False, 'from Xlib.protocol import rq\n'), ((442, 8, 442, 30), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(442, 20, 442, 26): '"""mask"""', (442, 28, 442, 29): '2'}, {}), "('mask', 2)", False, 'from Xlib.protocol import rq\n'), ((447, 8, 447, 22), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((448, 8, 448, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(448, 15, 448, 16): '1'}, {}), '(1)', False, 'from Xlib.protocol import rq\n'), ((449, 8, 449, 36), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(449, 18, 449, 35): '"""sequence_number"""'}, {}), "('sequence_number')", False, 'from Xlib.protocol import rq\n'), ((450, 8, 450, 24), 'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((451, 8, 451, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(451, 17, 451, 25): '"""status"""'}, {}), "('status')", False, 'from Xlib.protocol import rq\n'), ((452, 8, 452, 18), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(452, 15, 452, 17): '23'}, {}), '(23)', False, 'from Xlib.protocol import rq\n'), ((471, 8, 471, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(471, 17, 471, 25): '"""opcode"""'}, {}), "('opcode')", False, 'from Xlib.protocol import rq\n'), ((472, 8, 472, 21), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', ({(472, 18, 472, 20): '52'}, {}), '(52)', False, 'from Xlib.protocol import rq\n'), ((473, 8, 473, 26), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((474, 8, 474, 25), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(474, 18, 474, 24): '"""time"""'}, {}), "('time')", False, 'from Xlib.protocol import rq\n'), ((476, 8, 476, 17), 
'Xlib.protocol.rq.Pad', 'rq.Pad', ({(476, 15, 476, 16): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((489, 8, 489, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(489, 17, 489, 25): '"""opcode"""'}, {}), "('opcode')", False, 'from Xlib.protocol import rq\n'), ((490, 8, 490, 21), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', ({(490, 18, 490, 20): '54'}, {}), '(54)', False, 'from Xlib.protocol import rq\n'), ((491, 8, 491, 26), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((492, 8, 492, 25), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(492, 18, 492, 24): '"""time"""'}, {}), "('time')", False, 'from Xlib.protocol import rq\n'), ((493, 8, 493, 32), 'Xlib.protocol.rq.Window', 'rq.Window', ({(493, 18, 493, 31): '"""grab_window"""'}, {}), "('grab_window')", False, 'from Xlib.protocol import rq\n'), ((494, 8, 494, 39), 'Xlib.protocol.rq.Cursor', 'rq.Cursor', ({(494, 18, 494, 26): '"""cursor"""', (494, 28, 494, 38): '(X.NONE,)'}, {}), "('cursor', (X.NONE,))", False, 'from Xlib.protocol import rq\n'), ((495, 8, 495, 27), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(495, 18, 495, 26): '"""detail"""'}, {}), "('detail')", False, 'from Xlib.protocol import rq\n'), ((497, 8, 497, 35), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(497, 20, 497, 31): '"""modifiers"""', (497, 33, 497, 34): '2'}, {}), "('modifiers', 2)", False, 'from Xlib.protocol import rq\n'), ((498, 8, 498, 30), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(498, 20, 498, 26): '"""mask"""', (498, 28, 498, 29): '2'}, {}), "('mask', 2)", False, 'from Xlib.protocol import rq\n'), ((499, 8, 500, 69), 'Xlib.protocol.rq.Set', 'rq.Set', ({(499, 15, 499, 26): '"""grab_type"""', (499, 28, 499, 29): '1', (499, 31, 500, 68): '(GrabtypeButton, GrabtypeKeycode, GrabtypeEnter, GrabtypeFocusIn,\n GrabtypeTouchBegin)'}, {}), "('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,\n GrabtypeFocusIn, GrabtypeTouchBegin))", False, 'from Xlib.protocol import rq\n'), ((501, 8, 501, 61), 'Xlib.protocol.rq.Set', 'rq.Set', ({(501, 15, 501, 26): '"""grab_mode"""', (501, 28, 501, 29): '1', (501, 31, 501, 60): '(GrabModeSync, GrabModeAsync)'}, {}), "('grab_mode', 1, (GrabModeSync, GrabModeAsync))", False, 'from Xlib.protocol import rq\n'), ((502, 8, 502, 70), 'Xlib.protocol.rq.Set', 'rq.Set', ({(502, 15, 502, 35): '"""paired_device_mode"""', (502, 37, 502, 38): '1', (502, 40, 502, 69): '(GrabModeSync, GrabModeAsync)'}, {}), "('paired_device_mode', 1, (GrabModeSync, GrabModeAsync))", False, 'from Xlib.protocol import rq\n'), ((503, 8, 503, 31), 'Xlib.protocol.rq.Bool', 'rq.Bool', ({(503, 16, 503, 30): '"""owner_events"""'}, {}), "('owner_events')", False, 'from Xlib.protocol import rq\n'), ((504, 8, 504, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(504, 15, 504, 16): '2'}, {}), '(2)', False, 'from Xlib.protocol import rq\n'), ((506, 8, 506, 39), 'Xlib.protocol.rq.List', 'rq.List', ({(506, 16, 506, 27): '"""modifiers"""', (506, 29, 506, 38): 'rq.Card32'}, {}), "('modifiers', rq.Card32)", False, 'from Xlib.protocol import rq\n'), ((510, 8, 510, 22), 'Xlib.protocol.rq.ReplyCode', 'rq.ReplyCode', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((511, 8, 511, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(511, 15, 511, 16): '1'}, {}), '(1)', False, 'from Xlib.protocol import rq\n'), ((512, 8, 512, 36), 'Xlib.protocol.rq.Card16', 'rq.Card16', ({(512, 18, 512, 35): '"""sequence_number"""'}, {}), "('sequence_number')", False, 'from Xlib.protocol import rq\n'), ((513, 8, 513, 24), 
'Xlib.protocol.rq.ReplyLength', 'rq.ReplyLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((514, 8, 514, 35), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(514, 20, 514, 31): '"""modifiers"""', (514, 33, 514, 34): '2'}, {}), "('modifiers', 2)", False, 'from Xlib.protocol import rq\n'), ((515, 8, 515, 18), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(515, 15, 515, 17): '22'}, {}), '(22)', False, 'from Xlib.protocol import rq\n'), ((516, 8, 516, 39), 'Xlib.protocol.rq.List', 'rq.List', ({(516, 16, 516, 27): '"""modifiers"""', (516, 29, 516, 38): 'rq.Card32'}, {}), "('modifiers', rq.Card32)", False, 'from Xlib.protocol import rq\n'), ((549, 8, 549, 26), 'Xlib.protocol.rq.Card8', 'rq.Card8', ({(549, 17, 549, 25): '"""opcode"""'}, {}), "('opcode')", False, 'from Xlib.protocol import rq\n'), ((550, 8, 550, 21), 'Xlib.protocol.rq.Opcode', 'rq.Opcode', ({(550, 18, 550, 20): '55'}, {}), '(55)', False, 'from Xlib.protocol import rq\n'), ((551, 8, 551, 26), 'Xlib.protocol.rq.RequestLength', 'rq.RequestLength', ({}, {}), '()', False, 'from Xlib.protocol import rq\n'), ((552, 8, 552, 32), 'Xlib.protocol.rq.Window', 'rq.Window', ({(552, 18, 552, 31): '"""grab_window"""'}, {}), "('grab_window')", False, 'from Xlib.protocol import rq\n'), ((553, 8, 553, 27), 'Xlib.protocol.rq.Card32', 'rq.Card32', ({(553, 18, 553, 26): '"""detail"""'}, {}), "('detail')", False, 'from Xlib.protocol import rq\n'), ((555, 8, 555, 35), 'Xlib.protocol.rq.LengthOf', 'rq.LengthOf', ({(555, 20, 555, 31): '"""modifiers"""', (555, 33, 555, 34): '2'}, {}), "('modifiers', 2)", False, 'from Xlib.protocol import rq\n'), ((556, 8, 558, 52), 'Xlib.protocol.rq.Set', 'rq.Set', ({(556, 15, 556, 26): '"""grab_type"""', (556, 28, 556, 29): '1', (556, 31, 558, 51): '(GrabtypeButton, GrabtypeKeycode, GrabtypeEnter, GrabtypeFocusIn,\n GrabtypeTouchBegin)'}, {}), "('grab_type', 1, (GrabtypeButton, GrabtypeKeycode, GrabtypeEnter,\n GrabtypeFocusIn, GrabtypeTouchBegin))", False, 'from Xlib.protocol import rq\n'), ((559, 8, 559, 17), 'Xlib.protocol.rq.Pad', 'rq.Pad', ({(559, 15, 559, 16): '3'}, {}), '(3)', False, 'from Xlib.protocol import rq\n'), ((560, 8, 560, 39), 'Xlib.protocol.rq.List', 'rq.List', ({(560, 16, 560, 27): '"""modifiers"""', (560, 29, 560, 38): 'rq.Card32'}, {}), "('modifiers', rq.Card32)", False, 'from Xlib.protocol import rq\n')] |
tigerwlin/vel | vel/notebook/__init__.py | 00e4fbb7b612e888e2cbb5d8455146664638cd0b | from .loader import load | [] |
rayhanrock/django-yourjobaid-api | YourJobAidApi/migrations/0019_remove_category_count_post.py | 17751dac5a298998aeecf7a70b79792f8311b9b2 | # Generated by Django 3.0.4 on 2020-04-16 23:10
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('YourJobAidApi', '0018_category_count_post'),
]
operations = [
migrations.RemoveField(
model_name='category',
name='count_post',
),
]
| [((12, 8, 15, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations\n')] |
CharlieZhao95/easy-quant | easyquant/login/__init__.py | 9df126433e27d92eced9b087e581b5fd66c5a400 | # @Time : 2022/1/26 23:07
# @Author : zhaoyu
# @Site :
# @File : __init__.py.py
# @Software: PyCharm
# @Note : xx | [] |
DowneyTung/saleor | tests/api/test_attributes.py | 50f299d8e276b594753ee439d9e1a212f85a91b1 | from typing import Union
from unittest import mock
import graphene
import pytest
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.template.defaultfilters import slugify
from graphene.utils.str_converters import to_camel_case
from saleor.core.taxes import zero_money
from saleor.graphql.core.utils import snake_to_camel_case
from saleor.graphql.product.enums import AttributeTypeEnum, AttributeValueType
from saleor.graphql.product.filters import filter_attributes_by_product_types
from saleor.graphql.product.mutations.attributes import validate_value_is_unique
from saleor.graphql.product.types.attributes import resolve_attribute_value_type
from saleor.product import AttributeInputType
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import (
Attribute,
AttributeProduct,
AttributeValue,
AttributeVariant,
Category,
Collection,
Product,
ProductType,
ProductVariant,
)
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content
def test_validate_value_is_unique(color_attribute):
value = color_attribute.values.first()
# a new value but with existing slug should raise an error
with pytest.raises(ValidationError):
validate_value_is_unique(color_attribute, AttributeValue(slug=value.slug))
# a new value with a new slug should pass
validate_value_is_unique(
color_attribute, AttributeValue(slug="spanish-inquisition")
)
# value that already belongs to the attribute shouldn't be taken into account
validate_value_is_unique(color_attribute, value)
def test_get_single_attribute_by_pk(user_api_client, color_attribute_without_values):
attribute_gql_id = graphene.Node.to_global_id(
"Attribute", color_attribute_without_values.id
)
query = """
query($id: ID!) {
attribute(id: $id) {
id
slug
}
}
"""
content = get_graphql_content(
user_api_client.post_graphql(query, {"id": attribute_gql_id})
)
assert content["data"]["attribute"], "Should have found an attribute"
assert content["data"]["attribute"]["id"] == attribute_gql_id
assert content["data"]["attribute"]["slug"] == color_attribute_without_values.slug
QUERY_ATTRIBUTES = """
query {
attributes(first: 20) {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
"""
def test_attributes_query(user_api_client, product):
attributes = Attribute.objects
query = QUERY_ATTRIBUTES
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert attributes_data
assert len(attributes_data) == attributes.count()
def test_attributes_query_hidden_attribute(user_api_client, product, color_attribute):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.get_visible_to_user(
user_api_client.user
).count()
assert attribute_count == 1
response = user_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
def test_attributes_query_hidden_attribute_as_staff_user(
staff_api_client, product, color_attribute, permission_manage_products
):
query = QUERY_ATTRIBUTES
# hide the attribute
color_attribute.visible_in_storefront = False
color_attribute.save(update_fields=["visible_in_storefront"])
attribute_count = Attribute.objects.all().count()
# The user doesn't have the permission yet to manage products,
# the user shouldn't be able to see the hidden attributes
assert Attribute.objects.get_visible_to_user(staff_api_client.user).count() == 1
# The user should now be able to see the attributes
staff_api_client.user.user_permissions.add(permission_manage_products)
response = staff_api_client.post_graphql(query)
content = get_graphql_content(response)
attributes_data = content["data"]["attributes"]["edges"]
assert len(attributes_data) == attribute_count
QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES = """
{
products(first: 1) {
edges {
node {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
variants {
attributes {
attribute {
slug
}
values {
slug
}
value {
slug
}
}
}
}
}
}
}
"""
@pytest.mark.parametrize("is_staff", (False, True))
def test_resolve_attributes_with_hidden(
user_api_client,
product,
color_attribute,
size_attribute,
staff_user,
is_staff,
permission_manage_products,
):
"""Ensure non-staff users don't see hidden attributes, and staff users having
the 'manage product' permission can.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_attribute = color_attribute
variant_attribute = size_attribute
expected_product_attribute_count = product.attributes.count() - 1
expected_variant_attribute_count = variant.attributes.count() - 1
if is_staff:
api_client.user = staff_user
expected_product_attribute_count += 1
expected_variant_attribute_count += 1
staff_user.user_permissions.add(permission_manage_products)
# Hide one product and variant attribute from the storefront
for attribute in (product_attribute, variant_attribute):
attribute.visible_in_storefront = False
attribute.save(update_fields=["visible_in_storefront"])
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
assert len(product["attributes"]) == expected_product_attribute_count
assert len(product["variants"][0]["attributes"]) == expected_variant_attribute_count
def test_resolve_attribute_values(user_api_client, product, staff_user):
"""Ensure the attribute values are properly resolved."""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product_attribute_values = list(
product.attributes.first().values.values_list("slug", flat=True)
)
variant_attribute_values = list(
variant.attributes.first().values.values_list("slug", flat=True)
)
assert len(product_attribute_values) == 1
assert len(variant_attribute_values) == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
assert len(product_attributes) == len(product_attribute_values)
assert len(variant_attributes) == len(variant_attribute_values)
assert product_attributes[0]["attribute"]["slug"] == "color"
assert product_attributes[0]["values"][0]["slug"] == product_attribute_values[0]
assert product_attributes[0]["value"]["slug"] == product_attribute_values[0]
assert variant_attributes[0]["attribute"]["slug"] == "size"
assert variant_attributes[0]["values"][0]["slug"] == variant_attribute_values[0]
assert variant_attributes[0]["value"]["slug"] == variant_attribute_values[0]
def test_resolve_attribute_values_non_assigned_to_node(
user_api_client, product, staff_user
):
"""Ensure the attribute values are properly resolved when an attribute is part
of the product type but not of the node (product/variant), thus no values should be
resolved.
"""
query = QUERY_PRODUCT_AND_VARIANTS_ATTRIBUTES
api_client = user_api_client
variant = product.variants.first()
product_type = product.product_type
# Create dummy attributes
unassigned_product_attribute = Attribute.objects.create(name="P", slug="product")
unassigned_variant_attribute = Attribute.objects.create(name="V", slug="variant")
# Create a value for each dummy attribute to ensure they are not returned
# by the product or variant as they are not associated to them
AttributeValue.objects.bulk_create(
[
AttributeValue(slug="a", name="A", attribute=unassigned_product_attribute),
AttributeValue(slug="b", name="B", attribute=unassigned_product_attribute),
]
)
# Assign the dummy attributes to the product type and push them at the top
# through a sort_order=0 as the other attributes have sort_order=null
AttributeProduct.objects.create(
attribute=unassigned_product_attribute, product_type=product_type, sort_order=0
)
AttributeVariant.objects.create(
attribute=unassigned_variant_attribute, product_type=product_type, sort_order=0
)
assert product.attributes.count() == 1
assert variant.attributes.count() == 1
product = get_graphql_content(api_client.post_graphql(query))["data"]["products"][
"edges"
][0]["node"]
product_attributes = product["attributes"]
variant_attributes = product["variants"][0]["attributes"]
    assert len(product_attributes) == 2, "Non-assigned attr from the PT is missing"
    assert len(variant_attributes) == 2, "Non-assigned attr from the PT is missing"
assert product_attributes[0]["attribute"]["slug"] == "product"
assert product_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
assert variant_attributes[0]["attribute"]["slug"] == "variant"
assert variant_attributes[0]["values"] == []
assert variant_attributes[0]["value"] is None
def test_attributes_filter_by_product_type_with_empty_value():
"""Ensure passing an empty or null value is ignored and the queryset is simply
returned without any modification.
"""
qs = Attribute.objects.all()
assert filter_attributes_by_product_types(qs, "...", "") is qs
assert filter_attributes_by_product_types(qs, "...", None) is qs
def test_attributes_filter_by_product_type_with_unsupported_field():
"""Ensure using an unknown field to filter attributes by raises a NotImplemented
exception.
"""
qs = Attribute.objects.all()
with pytest.raises(NotImplementedError) as exc:
filter_attributes_by_product_types(qs, "in_space", "a-value")
assert exc.value.args == ("Filtering by in_space is unsupported",)
def test_attributes_filter_by_non_existing_category_id():
"""Ensure using a non-existing category ID returns an empty query set."""
category_id = graphene.Node.to_global_id("Category", -1)
mocked_qs = mock.MagicMock()
qs = filter_attributes_by_product_types(mocked_qs, "in_category", category_id)
assert qs == mocked_qs.none.return_value
@pytest.mark.parametrize("test_deprecated_filter", [True, False])
@pytest.mark.parametrize("tested_field", ["inCategory", "inCollection"])
def test_attributes_in_collection_query(
user_api_client,
product_type,
category,
collection,
collection_with_products,
test_deprecated_filter,
tested_field,
):
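    """Filtering attributes by a category or collection (via the deprecated argument
    or the filter input) should only return attributes of product types used there.
    """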
if "Collection" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Collection", collection.pk)
elif "Category" in tested_field:
filtered_by_node_id = graphene.Node.to_global_id("Category", category.pk)
else:
raise AssertionError(tested_field)
expected_qs = Attribute.objects.filter(
Q(attributeproduct__product_type_id=product_type.pk)
| Q(attributevariant__product_type_id=product_type.pk)
)
# Create another product type and attribute that shouldn't get matched
other_category = Category.objects.create(name="Other Category", slug="other-cat")
other_attribute = Attribute.objects.create(name="Other", slug="other")
other_product_type = ProductType.objects.create(
name="Other type", has_variants=True, is_shipping_required=True
)
other_product_type.product_attributes.add(other_attribute)
other_product = Product.objects.create(
name=f"Another Product",
product_type=other_product_type,
category=other_category,
price=zero_money(),
is_published=True,
)
    # Create another collection with products; it shouldn't get matched
    # because we don't filter by this other collection
other_collection = Collection.objects.create(
name="Other Collection",
slug="other-collection",
is_published=True,
description="Description",
)
other_collection.products.add(other_product)
query = """
query($nodeID: ID!) {
attributes(first: 20, %(filter_input)s) {
edges {
node {
id
name
slug
}
}
}
}
"""
if test_deprecated_filter:
query = query % {"filter_input": f"{tested_field}: $nodeID"}
else:
query = query % {"filter_input": "filter: { %s: $nodeID }" % tested_field}
variables = {"nodeID": filtered_by_node_id}
content = get_graphql_content(user_api_client.post_graphql(query, variables))
attributes_data = content["data"]["attributes"]["edges"]
flat_attributes_data = [attr["node"]["slug"] for attr in attributes_data]
expected_flat_attributes_data = list(expected_qs.values_list("slug", flat=True))
assert flat_attributes_data == expected_flat_attributes_data
CREATE_ATTRIBUTES_QUERY = """
mutation createAttribute($name: String!, $values: [AttributeValueCreateInput]) {
attributeCreate(input: {name: $name, values: $values}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_create_attribute_and_attribute_values(
staff_api_client, permission_manage_products
):
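    """Creating an attribute with values should slugify the names and leave the
    attribute unassigned from any product type.
    """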
query = CREATE_ATTRIBUTES_QUERY
attribute_name = "Example name"
name = "Value name"
variables = {"name": attribute_name, "values": [{"name": name}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
assert not content["data"]["attributeCreate"]["errors"]
data = content["data"]["attributeCreate"]
# Check if the attribute was correctly created
assert data["attribute"]["name"] == attribute_name
assert data["attribute"]["slug"] == slugify(
attribute_name
), "The default slug should be the slugified name"
assert (
data["attribute"]["productTypes"]["edges"] == []
), "The attribute should not have been assigned to a product type"
# Check if the attribute values were correctly created
assert len(data["attribute"]["values"]) == 1
assert data["attribute"]["values"][0]["name"] == name
assert data["attribute"]["values"][0]["slug"] == slugify(name)
@pytest.mark.parametrize(
"input_slug, expected_slug, expected_error",
(
("my-slug", "my-slug", []),
(None, "my-name", []),
(
"",
None,
[{"field": "slug", "message": "The attribute's slug cannot be blank."}],
),
),
)
def test_create_attribute_with_given_slug(
staff_api_client,
permission_manage_products,
input_slug,
expected_slug,
expected_error,
):
staff_api_client.user.user_permissions.add(permission_manage_products)
query = """
mutation createAttribute(
$name: String!, $slug: String) {
attributeCreate(input: {name: $name, slug: $slug}) {
errors {
field
message
}
attribute {
slug
}
}
}
"""
attribute_name = "My Name"
variables = {"name": attribute_name, "slug": input_slug}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))
    # Check that the returned errors match the expected ones ([] means no error)
    assert content["data"]["attributeCreate"]["errors"] == expected_error
    # Check that the slug was correctly set when no error was expected
    if not expected_error:
        assert content["data"]["attributeCreate"]["attribute"]["slug"] == expected_slug
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_create_attribute_and_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
permission_manage_products,
product_type,
):
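    """Creating values with duplicate (case-insensitive) names should fail."""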
query = CREATE_ATTRIBUTES_QUERY
variables = {"name": "Example name", "values": [{"name": name_1}, {"name": name_2}]}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeCreate"]["errors"]
assert errors
assert errors[0]["field"] == "values"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeCreate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
UPDATE_ATTRIBUTE_QUERY = """
mutation updateAttribute(
$id: ID!, $name: String!, $addValues: [AttributeValueCreateInput]!,
$removeValues: [ID]!) {
attributeUpdate(
id: $id,
input: {
name: $name, addValues: $addValues,
removeValues: $removeValues}) {
errors {
field
message
}
productErrors {
field
message
code
}
attribute {
name
slug
values {
name
slug
}
productTypes(first: 10) {
edges {
node {
id
}
}
}
}
}
}
"""
def test_update_attribute_name(
staff_api_client, color_attribute, permission_manage_products
):
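    """Renaming an attribute should update both the API response and the database."""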
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "Wings name"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"name": name, "id": node_id, "addValues": [], "removeValues": []}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert data["attribute"]["name"] == name == attribute.name
assert data["attribute"]["productTypes"]["edges"] == []
def test_update_attribute_remove_and_add_values(
staff_api_client, color_attribute, permission_manage_products
):
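    """A single update can add new values to an attribute and remove existing ones."""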
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
name = "Wings name"
attribute_value_name = "Red Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
attribute_value_id = attribute.values.first().id
value_id = graphene.Node.to_global_id("AttributeValue", attribute_value_id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [value_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
attribute.refresh_from_db()
data = content["data"]["attributeUpdate"]
assert not data["errors"]
assert data["attribute"]["name"] == name == attribute.name
assert not attribute.values.filter(pk=attribute_value_id).exists()
assert attribute.values.filter(name=attribute_value_name).exists()
def test_update_empty_attribute_and_add_values(
staff_api_client, color_attribute_without_values, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute_without_values
name = "Wings name"
attribute_value_name = "Yellow Color"
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": name,
"id": node_id,
"addValues": [{"name": attribute_value_name}],
"removeValues": [],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
get_graphql_content(response)
attribute.refresh_from_db()
assert attribute.values.count() == 1
assert attribute.values.filter(name=attribute_value_name).exists()
@pytest.mark.parametrize(
"name_1, name_2, error_msg, error_code",
(
(
"Red color",
"Red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
(
"Red color",
"red color",
"Provided values are not unique.",
ProductErrorCode.UNIQUE,
),
),
)
def test_update_attribute_and_add_attribute_values_errors(
staff_api_client,
name_1,
name_2,
error_msg,
error_code,
color_attribute,
permission_manage_products,
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {
"name": "Example name",
"id": node_id,
"removeValues": [],
"addValues": [{"name": name_1}, {"name": name_2}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "addValues"
assert errors[0]["message"] == error_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == error_code.name
def test_update_attribute_and_remove_others_attribute_value(
staff_api_client, color_attribute, size_attribute, permission_manage_products
):
query = UPDATE_ATTRIBUTE_QUERY
attribute = color_attribute
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
    size_value = size_attribute.values.first()
    attr_id = graphene.Node.to_global_id("AttributeValue", size_value.pk)
variables = {
"name": "Example name",
"id": node_id,
"slug": "example-slug",
"addValues": [],
"removeValues": [attr_id],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
errors = content["data"]["attributeUpdate"]["errors"]
assert errors
assert errors[0]["field"] == "removeValues"
err_msg = "Value %s does not belong to this attribute." % str(size_attribute)
assert errors[0]["message"] == err_msg
product_errors = content["data"]["attributeUpdate"]["productErrors"]
assert product_errors[0]["code"] == ProductErrorCode.INVALID.name
def test_delete_attribute(
staff_api_client, color_attribute, permission_manage_products, product_type
):
attribute = color_attribute
query = """
mutation deleteAttribute($id: ID!) {
attributeDelete(id: $id) {
errors {
field
message
}
attribute {
id
}
}
}
"""
node_id = graphene.Node.to_global_id("Attribute", attribute.id)
variables = {"id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeDelete"]
assert data["attribute"]["id"] == variables["id"]
with pytest.raises(attribute._meta.model.DoesNotExist):
attribute.refresh_from_db()
CREATE_ATTRIBUTE_VALUE_QUERY = """
mutation createAttributeValue(
$attributeId: ID!, $name: String!) {
attributeValueCreate(
attribute: $attributeId, input: {name: $name}) {
productErrors {
field
message
code
}
attribute {
values {
name
}
}
attributeValue {
name
type
slug
}
}
}
"""
def test_create_attribute_value(
staff_api_client, color_attribute, permission_manage_products
):
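    """A created value should get a slugified slug and be added to the attribute."""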
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
name = "test name"
variables = {"name": name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert not data["productErrors"]
attr_data = data["attributeValue"]
assert attr_data["name"] == name
assert attr_data["slug"] == slugify(name)
assert attr_data["type"] == "STRING"
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_create_attribute_value_not_unique_name(
staff_api_client, color_attribute, permission_manage_products
):
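    """Creating a value whose name already exists on the attribute should fail."""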
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name, "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
def test_create_attribute_value_capitalized_name(
staff_api_client, color_attribute, permission_manage_products
):
attribute = color_attribute
query = CREATE_ATTRIBUTE_VALUE_QUERY
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
value_name = attribute.values.first().name
variables = {"name": value_name.upper(), "attributeId": attribute_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueCreate"]
assert data["productErrors"]
assert data["productErrors"][0]["code"] == ProductErrorCode.ALREADY_EXISTS.name
assert data["productErrors"][0]["field"] == "name"
UPDATE_ATTRIBUTE_VALUE_QUERY = """
mutation updateChoice(
$id: ID!, $name: String!) {
attributeValueUpdate(
id: $id, input: {name: $name}) {
errors {
field
message
}
attributeValue {
name
slug
}
attribute {
values {
name
}
}
}
}
"""
def test_update_attribute_value(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
name = "Crimson name"
variables = {"name": name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
value.refresh_from_db()
assert data["attributeValue"]["name"] == name == value.name
assert data["attributeValue"]["slug"] == slugify(name)
assert name in [value["name"] for value in data["attribute"]["values"]]
def test_update_attribute_value_name_not_unique(
staff_api_client, pink_attribute_value, permission_manage_products
):
query = UPDATE_ATTRIBUTE_VALUE_QUERY
value = pink_attribute_value.attribute.values.create(
name="Example Name", slug="example-name", value="#RED"
)
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"name": pink_attribute_value.name, "id": node_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["attributeValueUpdate"]
assert data["errors"]
assert data["errors"][0]["message"]
assert data["errors"][0]["field"] == "name"
def test_delete_attribute_value(
staff_api_client, color_attribute, pink_attribute_value, permission_manage_products
):
value = color_attribute.values.get(name="Red")
query = """
mutation updateChoice($id: ID!) {
attributeValueDelete(id: $id) {
attributeValue {
name
slug
}
}
}
"""
node_id = graphene.Node.to_global_id("AttributeValue", value.id)
variables = {"id": node_id}
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
@pytest.mark.parametrize(
"raw_value, expected_type",
[
("#0000", AttributeValueType.COLOR),
("#FF69B4", AttributeValueType.COLOR),
("rgb(255, 0, 0)", AttributeValueType.COLOR),
("hsl(0, 100%, 50%)", AttributeValueType.COLOR),
("hsla(120, 60%, 70%, 0.3)", AttributeValueType.COLOR),
("rgba(100%, 255, 0, 0)", AttributeValueType.COLOR),
("http://example.com", AttributeValueType.URL),
("https://example.com", AttributeValueType.URL),
("ftp://example.com", AttributeValueType.URL),
("example.com", AttributeValueType.STRING),
("Foo", AttributeValueType.STRING),
("linear-gradient(red, yellow)", AttributeValueType.GRADIENT),
("radial-gradient(#0000, yellow)", AttributeValueType.GRADIENT),
],
)
def test_resolve_attribute_value_type(raw_value, expected_type):
assert resolve_attribute_value_type(raw_value) == expected_type
def test_resolve_assigned_attribute_without_values(api_client, product_type, product):
"""Ensure the attributes assigned to a product type are resolved even if
the product doesn't provide any value for it or is not directly associated to it.
"""
# Retrieve the product's variant
variant = product.variants.get()
# Remove all attributes and values from the product and its variant
product.attributesrelated.clear()
variant.attributesrelated.clear()
# Retrieve the product and variant's attributes
products = get_graphql_content(
api_client.post_graphql(
"""
{
products(first: 10) {
edges {
node {
attributes {
attribute {
slug
}
values {
name
}
}
variants {
attributes {
attribute {
slug
}
values {
name
}
}
}
}
}
}
}
"""
)
)["data"]["products"]["edges"]
# Ensure we are only working on one product and variant, the ones we are testing
assert len(products) == 1
assert len(products[0]["node"]["variants"]) == 1
# Retrieve the nodes data
product = products[0]["node"]
variant = product["variants"][0]
    # Ensure the product attribute values are empty
assert len(product["attributes"]) == 1
assert product["attributes"][0]["attribute"]["slug"] == "color"
assert product["attributes"][0]["values"] == []
    # Ensure the variant attribute values are empty
assert variant["attributes"][0]["attribute"]["slug"] == "size"
assert variant["attributes"][0]["values"] == []
ASSIGN_ATTR_QUERY = """
mutation assign($productTypeId: ID!, $operations: [AttributeAssignInput]!) {
attributeAssign(productTypeId: $productTypeId, operations: $operations) {
errors {
field
message
}
productType {
id
productAttributes {
id
}
variantAttributes {
id
}
}
}
}
"""
def test_assign_attributes_to_product_type(
staff_api_client, permission_manage_products, attribute_list
):
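    """Product and variant attributes can be assigned to a product type at once."""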
product_type = ProductType.objects.create(name="Default Type", has_variants=True)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = []
variables = {"productTypeId": product_type_global_id, "operations": operations}
product_attributes_ids = {attr.pk for attr in attribute_list[:2]}
variant_attributes_ids = {attr.pk for attr in attribute_list[2:]}
for attr_id in product_attributes_ids:
operations.append(
{"type": "PRODUCT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
for attr_id in variant_attributes_ids:
operations.append(
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attr_id)}
)
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeAssign"]
assert not content["errors"], "Should have succeeded"
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == len(
product_attributes_ids
)
assert len(content["productType"]["variantAttributes"]) == len(
variant_attributes_ids
)
found_product_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["productAttributes"]
}
found_variant_attrs_ids = {
int(graphene.Node.from_global_id(attr["id"])[1])
for attr in content["productType"]["variantAttributes"]
}
assert found_product_attrs_ids == product_attributes_ids
assert found_variant_attrs_ids == variant_attributes_ids
def test_assign_variant_attribute_to_product_type_with_disabled_variants(
staff_api_client,
permission_manage_products,
product_type_without_variant,
color_attribute_without_values,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute as a variant attribute when
the product type doesn't support variants"""
product_type = product_type_without_variant
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Variants are disabled in this product type.",
}
]
def test_assign_variant_attribute_having_unsupported_input_type(
staff_api_client, permission_manage_products, product_type, size_attribute
):
"""The assignAttribute mutation should raise an error when trying
to use an attribute as a variant attribute when
the attribute's input type doesn't support variants"""
attribute = size_attribute
attribute.input_type = AttributeInputType.MULTISELECT
attribute.save(update_fields=["input_type"])
product_type.variant_attributes.clear()
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = ASSIGN_ATTR_QUERY
operations = [
{"type": "VARIANT", "id": graphene.Node.to_global_id("Attribute", attribute.pk)}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": (
"Attributes having for input types ['multiselect'] cannot be assigned "
"as variant attributes"
),
}
]
@pytest.mark.parametrize(
"product_type_attribute_type, gql_attribute_type",
(
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.VARIANT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.PRODUCT, AttributeTypeEnum.PRODUCT),
(AttributeTypeEnum.VARIANT, AttributeTypeEnum.VARIANT),
),
)
def test_assign_attribute_to_product_type_having_already_that_attribute(
staff_api_client,
permission_manage_products,
color_attribute_without_values,
product_type_attribute_type,
gql_attribute_type,
):
"""The assignAttribute mutation should raise an error when trying
to add an attribute already contained in the product type."""
product_type = ProductType.objects.create(name="Type")
attribute = color_attribute_without_values
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
if product_type_attribute_type == AttributeTypeEnum.PRODUCT:
product_type.product_attributes.add(attribute)
elif product_type_attribute_type == AttributeTypeEnum.VARIANT:
product_type.variant_attributes.add(attribute)
else:
raise ValueError(f"Unknown: {product_type}")
query = ASSIGN_ATTR_QUERY
operations = [
{
"type": gql_attribute_type.value,
"id": graphene.Node.to_global_id("Attribute", attribute.pk),
}
]
variables = {"productTypeId": product_type_global_id, "operations": operations}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeAssign"]
assert content["errors"] == [
{
"field": "operations",
"message": "Color (color) have already been assigned to this product type.",
}
]
UNASSIGN_ATTR_QUERY = """
mutation unAssignAttribute(
$productTypeId: ID!, $attributeIds: [ID]!
) {
attributeUnassign(productTypeId: $productTypeId, attributeIds: $attributeIds) {
errors {
field
message
}
productType {
id
variantAttributes {
id
}
productAttributes {
id
}
}
}
}
"""
def test_unassign_attributes_from_product_type(
staff_api_client, permission_manage_products, attribute_list
):
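    """Unassigning one product attribute should leave the other assignments intact."""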
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
variant_attribute, *product_attributes = attribute_list
product_type.product_attributes.add(*product_attributes)
product_type.variant_attributes.add(variant_attribute)
remaining_attribute_global_id = graphene.Node.to_global_id(
"Attribute", product_attributes[1].pk
)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", product_attributes[0].pk)
],
}
content = get_graphql_content(
staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
)["data"]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 1
assert len(content["productType"]["variantAttributes"]) == 1
assert (
content["productType"]["productAttributes"][0]["id"]
== remaining_attribute_global_id
)
def test_unassign_attributes_not_in_product_type(
staff_api_client, permission_manage_products, color_attribute_without_values
):
"""The unAssignAttribute mutation should not raise any error when trying
to remove an attribute that is not/no longer in the product type."""
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Type")
product_type_global_id = graphene.Node.to_global_id("ProductType", product_type.pk)
query = UNASSIGN_ATTR_QUERY
variables = {
"productTypeId": product_type_global_id,
"attributeIds": [
graphene.Node.to_global_id("Attribute", color_attribute_without_values.pk)
],
}
content = get_graphql_content(staff_api_client.post_graphql(query, variables))[
"data"
]["attributeUnassign"]
assert not content["errors"]
assert content["productType"]["id"] == product_type_global_id
assert len(content["productType"]["productAttributes"]) == 0
assert len(content["productType"]["variantAttributes"]) == 0
def test_retrieve_product_attributes_input_type(
staff_api_client, product, permission_manage_products
):
query = """
{
products(first: 10) {
edges {
node {
attributes {
values {
type
inputType
}
}
}
}
}
}
"""
found_products = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["products"]["edges"]
assert len(found_products) == 1
for gql_attr in found_products[0]["node"]["attributes"]:
assert len(gql_attr["values"]) == 1
assert gql_attr["values"][0]["type"] == "STRING"
assert gql_attr["values"][0]["inputType"] == "DROPDOWN"
@pytest.mark.parametrize(
"attribute, expected_value",
(
("filterable_in_storefront", True),
("filterable_in_dashboard", True),
("visible_in_storefront", True),
("available_in_grid", True),
("value_required", False),
("storefront_search_position", 0),
),
)
def test_retrieving_the_restricted_attributes_restricted(
staff_api_client,
color_attribute,
permission_manage_products,
attribute,
expected_value,
):
"""Checks if the attributes are restricted and if their default value
is the expected one."""
attribute = to_camel_case(attribute)
query = (
"""
{
attributes(first: 10) {
edges {
node {
%s
}
}
}
}
"""
% attribute
)
found_attributes = get_graphql_content(
staff_api_client.post_graphql(query, permissions=[permission_manage_products])
)["data"]["attributes"]["edges"]
assert len(found_attributes) == 1
assert found_attributes[0]["node"][attribute] == expected_value
ATTRIBUTES_RESORT_QUERY = """
mutation ProductTypeReorderAttributes(
$productTypeId: ID!
$moves: [ReorderInput]!
$type: AttributeTypeEnum!
) {
productTypeReorderAttributes(
productTypeId: $productTypeId
moves: $moves
type: $type
) {
productType {
id
variantAttributes {
id
slug
}
productAttributes {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_attributes_within_product_type_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid product type (invalid ID)."""
product_type_id = graphene.Node.to_global_id("ProductType", -1)
attribute_id = graphene.Node.to_global_id("Attribute", -1)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "productTypeId",
"message": f"Couldn't resolve to a product type: {product_type_id}",
}
]
def test_sort_attributes_within_product_type_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder an attribute not associated to the given product type."""
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
variables = {
"type": "VARIANT",
"productTypeId": product_type_id,
"moves": [{"id": attribute_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTES_RESORT_QUERY, variables, permissions=[permission_manage_products]
)
)["data"]["productTypeReorderAttributes"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
@pytest.mark.parametrize(
"attribute_type, relation_field, backref_field",
(
("VARIANT", "variant_attributes", "attributevariant"),
("PRODUCT", "product_attributes", "attributeproduct"),
),
)
def test_sort_attributes_within_product_type(
staff_api_client,
attribute_list,
permission_manage_products,
attribute_type,
relation_field,
backref_field,
):
attributes = attribute_list
assert len(attributes) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
product_type = ProductType.objects.create(name="Dummy Type")
product_type_id = graphene.Node.to_global_id("ProductType", product_type.id)
m2m_attributes = getattr(product_type, relation_field)
m2m_attributes.set(attributes)
sort_method = getattr(m2m_attributes, f"{relation_field}_sorted")
attributes = list(sort_method())
assert len(attributes) == 3
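    # Request relative moves for the first and last attributes; the backend is
    # expected to return them in the order captured by expected_order below.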
variables = {
"type": attribute_type,
"productTypeId": product_type_id,
"moves": [
{
"id": graphene.Node.to_global_id("Attribute", attributes[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("Attribute", attributes[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [attributes[1].pk, attributes[2].pk, attributes[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTES_RESORT_QUERY, variables)
)["data"]["productTypeReorderAttributes"]
assert not content["errors"]
assert (
content["productType"]["id"] == product_type_id
), "Did not return the correct product type"
gql_attributes = content["productType"][snake_to_camel_case(relation_field)]
assert len(gql_attributes) == len(expected_order)
for attr, expected_pk in zip(gql_attributes, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "Attribute"
assert int(gql_attr_id) == expected_pk
ATTRIBUTE_VALUES_RESORT_QUERY = """
mutation attributeReorderValues($attributeId: ID!, $moves: [ReorderInput]!) {
attributeReorderValues(attributeId: $attributeId, moves: $moves) {
attribute {
id
values {
id
}
}
errors {
field
message
}
}
}
"""
def test_sort_values_within_attribute_invalid_product_type(
staff_api_client, permission_manage_products
):
"""Try to reorder an invalid attribute (invalid ID)."""
attribute_id = graphene.Node.to_global_id("Attribute", -1)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "attributeId",
"message": f"Couldn't resolve to an attribute: {attribute_id}",
}
]
def test_sort_values_within_attribute_invalid_id(
staff_api_client, permission_manage_products, color_attribute
):
"""Try to reorder a value not associated to the given attribute."""
attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
value_id = graphene.Node.to_global_id("AttributeValue", -1)
variables = {
"type": "VARIANT",
"attributeId": attribute_id,
"moves": [{"id": value_id, "sortOrder": 1}],
}
content = get_graphql_content(
staff_api_client.post_graphql(
ATTRIBUTE_VALUES_RESORT_QUERY,
variables,
permissions=[permission_manage_products],
)
)["data"]["attributeReorderValues"]
assert content["errors"] == [
{
"field": "moves",
"message": f"Couldn't resolve to an attribute value: {value_id}",
}
]
def test_sort_values_within_attribute(
staff_api_client, color_attribute, permission_manage_products
):
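    """Reordering attribute values should be reflected in the API response."""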
attribute = color_attribute
AttributeValue.objects.create(attribute=attribute, name="Green", slug="green")
values = list(attribute.values.all())
assert len(values) == 3
staff_api_client.user.user_permissions.add(permission_manage_products)
attribute_id = graphene.Node.to_global_id("Attribute", attribute.id)
m2m_values = attribute.values
m2m_values.set(values)
assert values == sorted(
values, key=lambda o: o.sort_order if o.sort_order is not None else o.pk
), "The values are not properly ordered"
variables = {
"attributeId": attribute_id,
"moves": [
{
"id": graphene.Node.to_global_id("AttributeValue", values[0].pk),
"sortOrder": +1,
},
{
"id": graphene.Node.to_global_id("AttributeValue", values[2].pk),
"sortOrder": -1,
},
],
}
expected_order = [values[1].pk, values[2].pk, values[0].pk]
content = get_graphql_content(
staff_api_client.post_graphql(ATTRIBUTE_VALUES_RESORT_QUERY, variables)
)["data"]["attributeReorderValues"]
assert not content["errors"]
assert content["attribute"]["id"] == attribute_id
gql_values = content["attribute"]["values"]
assert len(gql_values) == len(expected_order)
actual_order = []
for attr, expected_pk in zip(gql_values, expected_order):
gql_type, gql_attr_id = graphene.Node.from_global_id(attr["id"])
assert gql_type == "AttributeValue"
actual_order.append(int(gql_attr_id))
assert actual_order == expected_order
ATTRIBUTES_FILTER_QUERY = """
query($filters: AttributeFilterInput!) {
attributes(first: 10, filter: $filters) {
edges {
node {
name
slug
}
}
}
}
"""
def test_search_attributes(api_client, color_attribute, size_attribute):
variables = {"filters": {"search": "color"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "color"
def test_filter_attributes_if_filterable_in_dashboard(
api_client, color_attribute, size_attribute
):
color_attribute.filterable_in_dashboard = False
color_attribute.save(update_fields=["filterable_in_dashboard"])
variables = {"filters": {"filterableInDashboard": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_if_available_in_grid(
api_client, color_attribute, size_attribute
):
color_attribute.available_in_grid = False
color_attribute.save(update_fields=["available_in_grid"])
variables = {"filters": {"availableInGrid": True}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 1
assert attributes[0]["node"]["slug"] == "size"
def test_filter_attributes_by_global_id_list(api_client, attribute_list):
global_ids = [
graphene.Node.to_global_id("Attribute", attribute.pk)
for attribute in attribute_list[:2]
]
variables = {"filters": {"ids": global_ids}}
expected_slugs = sorted([attribute_list[0].slug, attribute_list[1].slug])
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_FILTER_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
received_slugs = sorted(
[attributes[0]["node"]["slug"], attributes[1]["node"]["slug"]]
)
assert received_slugs == expected_slugs
ATTRIBUTES_SORT_QUERY = """
query($sortBy: AttributeSortingInput) {
attributes(first: 10, sortBy: $sortBy) {
edges {
node {
slug
}
}
}
}
"""
def test_sort_attributes_by_slug(api_client):
Attribute.objects.bulk_create(
[
Attribute(name="MyAttribute", slug="b"),
Attribute(name="MyAttribute", slug="a"),
]
)
variables = {"sortBy": {"field": "SLUG", "direction": "ASC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "a"
assert attributes[1]["node"]["slug"] == "b"
@pytest.mark.parametrize(
"sort_field, m2m_model",
(
("DASHBOARD_VARIANT_POSITION", AttributeVariant),
("DASHBOARD_PRODUCT_POSITION", AttributeProduct),
),
)
def test_sort_attributes_by_position_in_product_type(
api_client,
color_attribute,
size_attribute,
sort_field: str,
m2m_model: Union[AttributeVariant, AttributeProduct],
):
"""Sorts attributes for dashboard custom ordering inside a given product type."""
product_type = ProductType.objects.create(name="My Product Type")
m2m_model.objects.create(
product_type=product_type, attribute=color_attribute, sort_order=0
)
m2m_model.objects.create(
product_type=product_type, attribute=size_attribute, sort_order=1
)
variables = {"sortBy": {"field": sort_field, "direction": "DESC"}}
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, variables)
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "size"
assert attributes[1]["node"]["slug"] == "color"
def test_sort_attributes_by_default_sorting(api_client):
"""Don't provide any sorting, this should sort by name by default."""
Attribute.objects.bulk_create(
[Attribute(name="A", slug="b"), Attribute(name="B", slug="a")]
)
attributes = get_graphql_content(
api_client.post_graphql(ATTRIBUTES_SORT_QUERY, {})
)["data"]["attributes"]["edges"]
assert len(attributes) == 2
assert attributes[0]["node"]["slug"] == "b"
assert attributes[1]["node"]["slug"] == "a"
@pytest.mark.parametrize("is_variant", (True, False))
def test_attributes_of_products_are_sorted(
staff_api_client, product, color_attribute, is_variant
):
"""Ensures the attributes of products and variants are sorted."""
variant = product.variants.first()
if is_variant:
query = """
query($id: ID!) {
productVariant(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
else:
query = """
query($id: ID!) {
product(id: $id) {
attributes {
attribute {
id
}
}
}
}
"""
# Create a dummy attribute with a higher ID
# This will allow us to make sure it is always the last attribute
# when sorted by ID. Thus, we are sure the query is actually passing the test.
other_attribute = Attribute.objects.create(name="Other", slug="other")
# Add the attribute to the product type
if is_variant:
product.product_type.variant_attributes.set([color_attribute, other_attribute])
else:
product.product_type.product_attributes.set([color_attribute, other_attribute])
# Retrieve the M2M object for the attribute vs the product type
if is_variant:
m2m_rel_other_attr = other_attribute.attributevariant.last()
else:
m2m_rel_other_attr = other_attribute.attributeproduct.last()
    # Push the last attribute to the top and leave the others at None
m2m_rel_other_attr.sort_order = 0
m2m_rel_other_attr.save(update_fields=["sort_order"])
# Assign attributes to the product
node = variant if is_variant else product # type: Union[Product, ProductVariant]
node.attributesrelated.clear()
associate_attribute_values_to_instance(
node, color_attribute, color_attribute.values.first()
)
    # Expected order: other_attribute first (sort_order=0), then color (sort_order=None)
expected_order = [other_attribute.pk, color_attribute.pk]
# Make the node ID
if is_variant:
node_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
else:
node_id = graphene.Node.to_global_id("Product", product.pk)
# Retrieve the attributes
data = get_graphql_content(staff_api_client.post_graphql(query, {"id": node_id}))[
"data"
]
attributes = data["productVariant" if is_variant else "product"]["attributes"]
actual_order = [
int(graphene.Node.from_global_id(attr["attribute"]["id"])[1])
for attr in attributes
]
# Compare the received data against our expectations
assert actual_order == expected_order
False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1512, 22, 1512, 80), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1512, 49, 1512, 62): '"""ProductType"""', (1512, 64, 1512, 79): 'product_type.id'}, {}), "('ProductType', product_type.id)", False, 'import graphene\n'), ((1580, 19, 1580, 62), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1580, 46, 1580, 57): '"""Attribute"""', (1580, 59, 1580, 61): '-1'}, {}), "('Attribute', -1)", False, 'import graphene\n'), ((1581, 15, 1581, 63), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1581, 42, 1581, 58): '"""AttributeValue"""', (1581, 60, 1581, 62): '-1'}, {}), "('AttributeValue', -1)", False, 'import graphene\n'), ((1609, 19, 1609, 78), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1609, 46, 1609, 57): '"""Attribute"""', (1609, 59, 1609, 77): 'color_attribute.id'}, {}), "('Attribute', color_attribute.id)", False, 'import graphene\n'), ((1610, 15, 1610, 63), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1610, 42, 1610, 58): '"""AttributeValue"""', (1610, 60, 1610, 62): '-1'}, {}), "('AttributeValue', -1)", False, 'import graphene\n'), ((1638, 4, 1638, 82), 'saleor.product.models.AttributeValue.objects.create', 'AttributeValue.objects.create', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1644, 19, 1644, 72), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1644, 46, 1644, 57): '"""Attribute"""', (1644, 59, 1644, 71): 'attribute.id'}, {}), "('Attribute', attribute.id)", False, 'import graphene\n'), ((1814, 19, 1814, 69), 'saleor.product.models.ProductType.objects.create', 'ProductType.objects.create', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1884, 22, 1884, 74), 'saleor.product.models.Attribute.objects.create', 'Attribute.objects.create', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((38, 9, 38, 39), 'pytest.raises', 'pytest.raises', ({(38, 23, 38, 38): 'ValidationError'}, {}), '(ValidationError)', False, 'import pytest\n'), ((43, 25, 43, 67), 'saleor.product.models.AttributeValue', 'AttributeValue', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((325, 11, 325, 60), 'saleor.graphql.product.filters.filter_attributes_by_product_types', 'filter_attributes_by_product_types', ({(325, 46, 325, 48): 'qs', (325, 50, 325, 55): '"""..."""', (325, 57, 325, 59): '""""""'}, {}), "(qs, '...', '')", False, 'from saleor.graphql.product.filters import filter_attributes_by_product_types\n'), ((326, 11, 326, 62), 'saleor.graphql.product.filters.filter_attributes_by_product_types', 'filter_attributes_by_product_types', ({(326, 46, 326, 48): 'qs', (326, 50, 326, 55): '"""..."""', (326, 57, 326, 61): 'None'}, {}), "(qs, '...', None)", False, 'from saleor.graphql.product.filters import filter_attributes_by_product_types\n'), ((336, 9, 336, 43), 'pytest.raises', 'pytest.raises', ({(336, 23, 336, 42): 'NotImplementedError'}, {}), 
'(NotImplementedError)', False, 'import pytest\n'), ((337, 8, 337, 69), 'saleor.graphql.product.filters.filter_attributes_by_product_types', 'filter_attributes_by_product_types', ({(337, 43, 337, 45): 'qs', (337, 47, 337, 57): '"""in_space"""', (337, 59, 337, 68): '"""a-value"""'}, {}), "(qs, 'in_space', 'a-value')", False, 'from saleor.graphql.product.filters import filter_attributes_by_product_types\n'), ((363, 30, 363, 85), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(363, 57, 363, 69): '"""Collection"""', (363, 71, 363, 84): 'collection.pk'}, {}), "('Collection', collection.pk)", False, 'import graphene\n'), ((476, 40, 478, 5), 'django.template.defaultfilters.slugify', 'slugify', ({(477, 8, 477, 22): 'attribute_name'}, {}), '(attribute_name)', False, 'from django.template.defaultfilters import slugify\n'), ((486, 53, 486, 66), 'django.template.defaultfilters.slugify', 'slugify', ({(486, 61, 486, 65): 'name'}, {}), '(name)', False, 'from django.template.defaultfilters import slugify\n'), ((786, 9, 786, 58), 'pytest.raises', 'pytest.raises', ({(786, 23, 786, 57): 'attribute._meta.model.DoesNotExist'}, {}), '(attribute._meta.model.DoesNotExist)', False, 'import pytest\n'), ((832, 32, 832, 45), 'django.template.defaultfilters.slugify', 'slugify', ({(832, 40, 832, 44): 'name'}, {}), '(name)', False, 'from django.template.defaultfilters import slugify\n'), ((911, 45, 911, 58), 'django.template.defaultfilters.slugify', 'slugify', ({(911, 53, 911, 57): 'name'}, {}), '(name)', False, 'from django.template.defaultfilters import slugify\n'), ((953, 9, 953, 54), 'pytest.raises', 'pytest.raises', ({(953, 23, 953, 53): 'value._meta.model.DoesNotExist'}, {}), '(value._meta.model.DoesNotExist)', False, 'import pytest\n'), ((976, 11, 976, 50), 'saleor.graphql.product.types.attributes.resolve_attribute_value_type', 'resolve_attribute_value_type', ({(976, 40, 976, 49): 'raw_value'}, {}), '(raw_value)', False, 'from saleor.graphql.product.types.attributes import resolve_attribute_value_type\n'), ((1551, 32, 1551, 72), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', ({(1551, 61, 1551, 71): "attr['id']"}, {}), "(attr['id'])", False, 'import graphene\n'), ((1681, 32, 1681, 72), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', ({(1681, 61, 1681, 71): "attr['id']"}, {}), "(attr['id'])", False, 'import graphene\n'), ((1747, 8, 1747, 61), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1747, 35, 1747, 46): '"""Attribute"""', (1747, 48, 1747, 60): 'attribute.pk'}, {}), "('Attribute', attribute.pk)", False, 'import graphene\n'), ((1914, 18, 1914, 74), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1914, 45, 1914, 61): '"""ProductVariant"""', (1914, 63, 1914, 73): 'variant.pk'}, {}), "('ProductVariant', variant.pk)", False, 'import graphene\n'), ((1916, 18, 1916, 67), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1916, 45, 1916, 54): '"""Product"""', (1916, 56, 1916, 66): 'product.pk'}, {}), "('Product', product.pk)", False, 'import graphene\n'), ((39, 50, 39, 81), 'saleor.product.models.AttributeValue', 'AttributeValue', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((108, 22, 110, 5), 'saleor.product.models.Attribute.objects.get_visible_to_user', 'Attribute.objects.get_visible_to_user', ({(109, 8, 109, 28): 'user_api_client.user'}, {}), '(user_api_client.user)', False, 'from 
saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((128, 22, 128, 45), 'saleor.product.models.Attribute.objects.all', 'Attribute.objects.all', ({}, {}), '()', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((282, 12, 282, 86), 'saleor.product.models.AttributeValue', 'AttributeValue', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((283, 12, 283, 86), 'saleor.product.models.AttributeValue', 'AttributeValue', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((365, 30, 365, 81), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(365, 57, 365, 67): '"""Category"""', (365, 69, 365, 80): 'category.pk'}, {}), "('Category', category.pk)", False, 'import graphene\n'), ((369, 8, 369, 60), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((370, 10, 370, 62), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((384, 14, 384, 26), 'saleor.core.taxes.zero_money', 'zero_money', ({}, {}), '()', False, 'from saleor.core.taxes import zero_money\n'), ((1132, 34, 1132, 87), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1132, 61, 1132, 72): '"""Attribute"""', (1132, 74, 1132, 86): 'attribute.pk'}, {}), "('Attribute', attribute.pk)", False, 'import graphene\n'), ((1165, 34, 1165, 87), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1165, 61, 1165, 72): '"""Attribute"""', (1165, 74, 1165, 86): 'attribute.pk'}, {}), "('Attribute', attribute.pk)", False, 'import graphene\n'), ((1219, 18, 1219, 71), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1219, 45, 1219, 56): '"""Attribute"""', (1219, 58, 1219, 70): 'attribute.pk'}, {}), "('Attribute', attribute.pk)", False, 'import graphene\n'), ((1276, 12, 1276, 77), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1276, 39, 1276, 50): '"""Attribute"""', (1276, 52, 1276, 76): 'product_attributes[0].pk'}, {}), "('Attribute', product_attributes[0].pk)", False, 'import graphene\n'), ((1312, 12, 1312, 86), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1312, 39, 1312, 50): '"""Attribute"""', (1312, 52, 1312, 85): 'color_attribute_without_values.pk'}, {}), "('Attribute', color_attribute_without_values.pk)", False, 'import graphene\n'), ((1547, 44, 1547, 79), 'saleor.graphql.core.utils.snake_to_camel_case', 'snake_to_camel_case', ({(1547, 64, 1547, 78): 'relation_field'}, {}), '(relation_field)', False, 'from saleor.graphql.core.utils import snake_to_camel_case\n'), ((1782, 12, 1782, 51), 'saleor.product.models.Attribute', 'Attribute', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1783, 12, 1783, 51), 'saleor.product.models.Attribute', 'Attribute', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1836, 9, 1836, 38), 'saleor.product.models.Attribute', 'Attribute', (), '', False, 'from 
saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1836, 40, 1836, 69), 'saleor.product.models.Attribute', 'Attribute', (), '', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((132, 11, 132, 71), 'saleor.product.models.Attribute.objects.get_visible_to_user', 'Attribute.objects.get_visible_to_user', ({(132, 49, 132, 70): 'staff_api_client.user'}, {}), '(staff_api_client.user)', False, 'from saleor.product.models import Attribute, AttributeProduct, AttributeValue, AttributeVariant, Category, Collection, Product, ProductType, ProductVariant\n'), ((1078, 38, 1078, 86), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1078, 65, 1078, 76): '"""Attribute"""', (1078, 78, 1078, 85): 'attr_id'}, {}), "('Attribute', attr_id)", False, 'import graphene\n'), ((1083, 38, 1083, 86), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1083, 65, 1083, 76): '"""Attribute"""', (1083, 78, 1083, 85): 'attr_id'}, {}), "('Attribute', attr_id)", False, 'import graphene\n'), ((1102, 12, 1102, 52), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', ({(1102, 41, 1102, 51): "attr['id']"}, {}), "(attr['id'])", False, 'import graphene\n'), ((1106, 12, 1106, 52), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', ({(1106, 41, 1106, 51): "attr['id']"}, {}), "(attr['id'])", False, 'import graphene\n'), ((1526, 22, 1526, 79), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1526, 49, 1526, 60): '"""Attribute"""', (1526, 62, 1526, 78): 'attributes[0].pk'}, {}), "('Attribute', attributes[0].pk)", False, 'import graphene\n'), ((1530, 22, 1530, 79), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1530, 49, 1530, 60): '"""Attribute"""', (1530, 62, 1530, 78): 'attributes[2].pk'}, {}), "('Attribute', attributes[2].pk)", False, 'import graphene\n'), ((1656, 22, 1656, 80), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1656, 49, 1656, 65): '"""AttributeValue"""', (1656, 67, 1656, 79): 'values[0].pk'}, {}), "('AttributeValue', values[0].pk)", False, 'import graphene\n'), ((1660, 22, 1660, 80), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(1660, 49, 1660, 65): '"""AttributeValue"""', (1660, 67, 1660, 79): 'values[2].pk'}, {}), "('AttributeValue', values[2].pk)", False, 'import graphene\n'), ((1924, 12, 1924, 65), 'graphene.Node.from_global_id', 'graphene.Node.from_global_id', ({(1924, 41, 1924, 64): "attr['attribute']['id']"}, {}), "(attr['attribute']['id'])", False, 'import graphene\n')] |
rafacm/aws-serverless-workshop-innovator-island | 3-photos/1-chromakey/app.py | 3f982ef6f70d28dfdc4e1d19103c181609b06b08 | import os
import json
import cv2
import logging
import boto3
import botocore
s3 = boto3.client('s3')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then same as file_name
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = file_name
# Upload the file
s3_client = s3
try:
response = s3_client.upload_file(file_name, bucket, object_name)
except botocore.exceptions.ClientError as e:
logging.error(e)
return False
return True
def scale_image(image):
_image = image
target_height = 800
height, width, channels = _image.shape
logger.info('Original size: {}h x {}w'.format(height, width))
scale = height/target_height
if scale > 1:
_image = cv2.resize(image, (int(width/scale), int(height/scale)))
        height, width, channels = _image.shape
        logger.info('New size: {}h x {}w'.format(height, width))
return _image
def lambda_handler(event, context):
print ("Starting handler")
# get object metadata from event
input_bucket_name = event['Records'][0]['s3']['bucket']['name']
file_key = event['Records'][0]['s3']['object']['key']
output_bucket_name = os.environ['OUTPUT_BUCKET_NAME']
output_file_key = file_key.replace('.jpg', '.png')
print("Input bucket: ", input_bucket_name)
print("Output bucket: ", output_bucket_name)
if output_bucket_name is None:
print("Error: No OUTPUT_BUCKET_NAME environment variable specified.")
return
# set up local temp file names
local_input_temp_file = '/tmp/' + file_key
local_output_temp_file = '/tmp/out_' + file_key.replace('.jpg', '.png')
logger.info('Local input file: {}'.format(local_input_temp_file))
logger.info('Local output file: {}'.format(local_output_temp_file))
# get the object
s3.download_file(input_bucket_name, file_key, local_input_temp_file)
# HSV range
# (36, 25, 25) - most extreme
# (36, 50, 50) - average
# (36, 100, 100) - relaxed
lower_range = eval(os.environ["HSV_LOWER"])
# (70, 255, 255) - default
upper_range = eval(os.environ["HSV_UPPER"])
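    # Note (deployment assumption): HSV_LOWER and HSV_UPPER are plain tuple strings
    # in the Lambda environment, e.g. HSV_LOWER="(36, 50, 50)" and
    # HSV_UPPER="(70, 255, 255)", which eval() turns into Python tuples.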
print('Lower HSV range: ', lower_range)
print('Upper HSV range: ', upper_range)
# Read in the file
image = cv2.imread(local_input_temp_file)
# Resize the image if larger than target size
image = scale_image(image)
    # Swap channel order (cv2.imread returns BGR; this converts the array to RGB)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Convert BGR to HSV color space
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# convert to RGBA
image_alpha = cv2.cvtColor(image, cv2.COLOR_BGR2RGBA)
# Threshold the HSV image to only green colors
mask = cv2.inRange(hsv, lower_range, upper_range)
# Invert the mask (i.e. select everything not green)
mask = ~mask
# Extract the non-green parts of the image
result = cv2.bitwise_and(image_alpha, image_alpha, mask=mask)
#Save the result
cv2.imwrite(local_output_temp_file,result)
#Save to S3
if upload_file(local_output_temp_file, output_bucket_name, output_file_key):
print('Processed file uploaded.')
return True
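# Minimal local-invocation sketch. The bucket and key below are placeholders and
# OUTPUT_BUCKET_NAME / HSV_LOWER / HSV_UPPER must be set in the environment; the
# event dict carries only the fields that lambda_handler actually reads.
if __name__ == "__main__":
    sample_event = {
        "Records": [
            {"s3": {"bucket": {"name": "my-input-bucket"},
                    "object": {"key": "photo.jpg"}}}
        ]
    }
    lambda_handler(sample_event, None)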
| [((8, 5, 8, 23), 'boto3.client', 'boto3.client', ({(8, 18, 8, 22): '"""s3"""'}, {}), "('s3')", False, 'import boto3\n'), ((9, 9, 9, 28), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((85, 12, 85, 45), 'cv2.imread', 'cv2.imread', ({(85, 23, 85, 44): 'local_input_temp_file'}, {}), '(local_input_temp_file)', False, 'import cv2\n'), ((91, 12, 91, 50), 'cv2.cvtColor', 'cv2.cvtColor', ({(91, 25, 91, 30): 'image', (91, 32, 91, 49): 'cv2.COLOR_BGR2RGB'}, {}), '(image, cv2.COLOR_BGR2RGB)', False, 'import cv2\n'), ((94, 10, 94, 48), 'cv2.cvtColor', 'cv2.cvtColor', ({(94, 23, 94, 28): 'image', (94, 30, 94, 47): 'cv2.COLOR_BGR2HSV'}, {}), '(image, cv2.COLOR_BGR2HSV)', False, 'import cv2\n'), ((97, 18, 97, 57), 'cv2.cvtColor', 'cv2.cvtColor', ({(97, 31, 97, 36): 'image', (97, 38, 97, 56): 'cv2.COLOR_BGR2RGBA'}, {}), '(image, cv2.COLOR_BGR2RGBA)', False, 'import cv2\n'), ((100, 11, 100, 53), 'cv2.inRange', 'cv2.inRange', ({(100, 23, 100, 26): 'hsv', (100, 28, 100, 39): 'lower_range', (100, 41, 100, 52): 'upper_range'}, {}), '(hsv, lower_range, upper_range)', False, 'import cv2\n'), ((106, 13, 106, 65), 'cv2.bitwise_and', 'cv2.bitwise_and', (), '', False, 'import cv2\n'), ((109, 4, 109, 46), 'cv2.imwrite', 'cv2.imwrite', ({(109, 16, 109, 38): 'local_output_temp_file', (109, 39, 109, 45): 'result'}, {}), '(local_output_temp_file, result)', False, 'import cv2\n'), ((30, 8, 30, 24), 'logging.error', 'logging.error', ({(30, 22, 30, 23): 'e'}, {}), '(e)', False, 'import logging\n')] |
DEKHTIARJonathan/pyinstrument | metrics/overflow.py | cc4f3f6fc1b493d7cd058ecf41ad012e0030a512 | from pyinstrument import Profiler
p = Profiler(use_signal=False)
p.start()
def func(num):
if num == 0:
return
b = 0
for x in range(1,100000):
b += x
return func(num - 1)
func(900)
p.stop()
print(p.output_text())
with open('overflow_out.html', 'w') as f:
f.write(p.output_html())
| [((3, 4, 3, 30), 'pyinstrument.Profiler', 'Profiler', (), '', False, 'from pyinstrument import Profiler\n')] |
wawang621/optee_os | scripts/gen_tee_bin.py | bf7298044beca7a4501ece95c6146b5987cecaa4 | #!/usr/bin/env python3
# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2019, Linaro Limited
#
from __future__ import print_function
from __future__ import division
import argparse
import sys
import struct
import re
import hashlib
try:
from elftools.elf.elffile import ELFFile
from elftools.elf.constants import SH_FLAGS
from elftools.elf.enums import ENUM_RELOC_TYPE_ARM
from elftools.elf.enums import ENUM_RELOC_TYPE_AARCH64
from elftools.elf.sections import SymbolTableSection
from elftools.elf.relocation import RelocationSection
except ImportError:
print("""
***
Can't find elftools module. Probably it is not installed on your system.
You can install this module with
$ apt install python3-pyelftools
if you are using Ubuntu. Or try to search for "pyelftools" or "elftools" in
your package manager if you are using some other distribution.
***
""")
raise
small_page_size = 4 * 1024
elffile_symbols = None
tee_pageable_bin = None
tee_pager_bin = None
tee_embdata_bin = None
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def round_up(n, m):
if n == 0:
return 0
else:
return (((n - 1) // m) + 1) * m
def get_arch_id(elffile):
e_machine = elffile.header['e_machine']
if e_machine == 'EM_ARM':
return 0
if e_machine == 'EM_AARCH64':
return 1
eprint('Unknown e_machine "%s"' % e_machine)
sys.exit(1)
def get_name(obj):
# Symbol or section .name might be a byte array or a string, we want a
# string
try:
name = obj.name.decode()
except (UnicodeDecodeError, AttributeError):
name = obj.name
return name
def get_symbol(elffile, name):
global elffile_symbols
global lsyms_def
if elffile_symbols is None:
elffile_symbols = dict()
lsyms_def = dict()
symbol_tables = [s for s in elffile.iter_sections()
if isinstance(s, SymbolTableSection)]
for section in symbol_tables:
for symbol in section.iter_symbols():
symbol_name = get_name(symbol)
if symbol['st_info']['bind'] == 'STB_GLOBAL':
elffile_symbols[symbol_name] = symbol
elif symbol['st_info']['bind'] == 'STB_LOCAL':
if symbol_name not in elffile_symbols.keys():
elffile_symbols[symbol_name] = symbol
if symbol_name not in lsyms_def.keys():
lsyms_def[symbol_name] = 1
else:
lsyms_def[symbol_name] += 1
if name in lsyms_def.keys() and lsyms_def[name] > 1:
eprint("Multiple definitions of local symbol %s" % name)
sys.exit(1)
if name not in elffile_symbols.keys():
eprint("Cannot find symbol %s" % name)
sys.exit(1)
return elffile_symbols[name]
def get_sections(elffile, pad_to, dump_names):
last_end = 0
bin_data = bytearray()
for section in elffile.iter_sections():
section_name = get_name(section)
if (section['sh_type'] == 'SHT_NOBITS' or
not (section['sh_flags'] & SH_FLAGS.SHF_ALLOC) or
not dump_names.match(section_name)):
continue
if last_end == 0:
bin_data = section.data()
else:
if section['sh_addr'] > last_end:
bin_data += bytearray(section['sh_addr'] - last_end)
bin_data += section.data()
last_end = section['sh_addr'] + section['sh_size']
if pad_to > last_end:
bin_data += bytearray(pad_to - last_end)
last_end = pad_to
return bin_data
def get_pageable_bin(elffile):
global tee_pageable_bin
if tee_pageable_bin is None:
pad_to = 0
dump_names = re.compile(r'^\..*_(pageable|init)$')
tee_pageable_bin = get_sections(elffile, pad_to, dump_names)
return tee_pageable_bin
def get_pager_bin(elffile):
global tee_pager_bin
if tee_pager_bin is None:
pad_to = get_symbol(elffile, '__data_end')['st_value']
dump_names = re.compile(
r'^\.(text|rodata|got|data|ARM\.exidx|ARM\.extab)$')
tee_pager_bin = get_sections(elffile, pad_to, dump_names)
return tee_pager_bin
def get_reloc_bin(elffile):
if get_arch_id(elffile) == 0:
exp_rel_type = ENUM_RELOC_TYPE_ARM['R_ARM_RELATIVE']
else:
exp_rel_type = ENUM_RELOC_TYPE_AARCH64['R_AARCH64_RELATIVE']
link_address = get_symbol(elffile, '__text_start')['st_value']
addrs = []
for section in elffile.iter_sections():
if not isinstance(section, RelocationSection):
continue
for rel in section.iter_relocations():
if rel['r_info_type'] == 0:
continue
if rel['r_info_type'] != exp_rel_type:
eprint("Unexpected relocation type 0x%x" %
rel['r_info_type'])
sys.exit(1)
addrs.append(rel['r_offset'] - link_address)
addrs.sort()
data = bytearray()
for a in addrs:
data += struct.pack('<I', a)
# Relocations has been reduced to only become the relative type with
# addend at the address (r_offset) of relocation, that is, increase by
# load_offset. The addresses (r_offset) are also sorted. The format is
# then:
# uint32_t: relocation #1
# uint32_t: relocation #2
# ...
# uint32_t: relocation #n
return data
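# Illustrative inverse of the format described above (a sketch, not used by the
# build): unpack a packed relocation blob back into a list of offsets relative
# to __text_start. Each entry is a single little-endian uint32.
def parse_reloc_bin(data):
    return [offs for (offs,) in struct.iter_unpack('<I', data)]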
def get_hashes_bin(elffile):
pageable_bin = get_pageable_bin(elffile)
if len(pageable_bin) % small_page_size != 0:
eprint("pageable size not a multiple of 4K: "
"{}".format(paged_area_size))
sys.exit(1)
data = bytearray()
for n in range(0, len(pageable_bin), small_page_size):
page = pageable_bin[n:n + small_page_size]
data += hashlib.sha256(page).digest()
return data
def get_embdata_bin(elffile):
global tee_embdata_bin
if tee_embdata_bin is None:
hashes_bin = get_hashes_bin(elffile)
reloc_bin = get_reloc_bin(elffile)
num_entries = 2
hash_offs = 2 * 4 + num_entries * (2 * 4)
hash_pad = round_up(len(hashes_bin), 8) - len(hashes_bin)
reloc_offs = hash_offs + len(hashes_bin) + hash_pad
reloc_pad = round_up(len(reloc_bin), 8) - len(reloc_bin)
total_len = reloc_offs + len(reloc_bin) + reloc_pad
tee_embdata_bin = struct.pack('<IIIIII', total_len, num_entries,
hash_offs, len(hashes_bin),
reloc_offs, len(reloc_bin))
tee_embdata_bin += hashes_bin + bytearray(hash_pad)
tee_embdata_bin += reloc_bin + bytearray(reloc_pad)
# The embedded data region is designed to be easy to extend when
# needed, it's formatted as:
# +---------------------------------------------------------+
# | uint32_t: Length of entire area including this field |
# +---------------------------------------------------------+
# | uint32_t: Number of entries "2" |
# +---------------------------------------------------------+
# | uint32_t: Offset of hashes from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of hashes |
# +---------------------------------------------------------+
# | uint32_t: Offset of relocations from beginning of table |
# +---------------------------------------------------------+
# | uint32_t: Length of relocations |
# +---------------------------------------------------------+
# | Data of hashes + eventual padding |
# +---------------------------------------------------------+
# | Data of relocations + eventual padding |
# +---------------------------------------------------------+
return tee_embdata_bin
def output_pager_bin(elffile, outf):
outf.write(get_pager_bin(elffile))
def output_pageable_bin(elffile, outf):
outf.write(get_pageable_bin(elffile))
def get_init_load_addr(elffile):
init_load_addr = get_symbol(elffile, '_start')['st_value']
init_load_addr_hi = init_load_addr >> 32
init_load_addr_lo = init_load_addr & 0xffffffff
return init_load_addr_hi, init_load_addr_lo
def output_header_v1(elffile, outf):
arch_id = get_arch_id(elffile)
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(pager_bin)
paged_area_size = len(pageable_bin)
init_mem_usage = (get_symbol(elffile, '__get_tee_init_end')['st_value'] -
get_symbol(elffile, '__text_start')['st_value'] +
len(embdata_bin))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
len(embdata_bin))
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 1
flags = 0
outf.write(struct.pack('<IBBHIIIII', magic, version, arch_id, flags,
init_size, init_load_addr[0], init_load_addr[1],
init_mem_usage, paged_size))
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
outf.write(pageable_bin[init_bin_size:])
def output_header_v2(elffile, outf):
arch_id = get_arch_id(elffile)
init_load_addr = get_init_load_addr(elffile)
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin_size = len(get_pager_bin(elffile))
paged_area_size = len(get_pageable_bin(elffile))
embdata_bin_size = len(get_embdata_bin(elffile))
init_size = (pager_bin_size + min(init_bin_size, paged_area_size) +
embdata_bin_size)
paged_size = paged_area_size - min(init_bin_size, paged_area_size)
magic = 0x4554504f # 'OPTE'
version = 2
flags = 0
nb_images = 1 if paged_size == 0 else 2
outf.write(struct.pack('<IBBHI', magic, version, arch_id, flags,
nb_images))
outf.write(struct.pack('<IIII', init_load_addr[0], init_load_addr[1],
0, init_size))
if nb_images == 2:
outf.write(struct.pack('<IIII', 0xffffffff, 0xffffffff, 1, paged_size))
def output_pager_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
pager_bin = get_pager_bin(elffile)
pageable_bin = get_pageable_bin(elffile)
embdata_bin = get_embdata_bin(elffile)
outf.write(pager_bin)
outf.write(pageable_bin[:init_bin_size])
outf.write(embdata_bin)
def output_pageable_v2(elffile, outf):
init_bin_size = get_symbol(elffile, '__init_size')['st_value']
outf.write(get_pageable_bin(elffile)[init_bin_size:])
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input',
required=True, type=argparse.FileType('rb'),
help='The input tee.elf')
parser.add_argument('--out_tee_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee.bin')
parser.add_argument('--out_tee_pager_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager.bin')
parser.add_argument('--out_tee_pageable_bin',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable.bin')
parser.add_argument('--out_header_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_header_v2.bin')
parser.add_argument('--out_pager_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pager_v2.bin')
parser.add_argument('--out_pageable_v2',
required=False, type=argparse.FileType('wb'),
help='The output tee_pageable_v2.bin')
return parser.parse_args()
def main():
args = get_args()
elffile = ELFFile(args.input)
if args.out_tee_bin:
output_header_v1(elffile, args.out_tee_bin)
if args.out_tee_pager_bin:
output_pager_bin(elffile, args.out_tee_pager_bin)
if args.out_tee_pageable_bin:
output_pageable_bin(elffile, args.out_tee_pageable_bin)
if args.out_header_v2:
output_header_v2(elffile, args.out_header_v2)
if args.out_pager_v2:
output_pager_v2(elffile, args.out_pager_v2)
if args.out_pageable_v2:
output_pageable_v2(elffile, args.out_pageable_v2)
if __name__ == "__main__":
main()
| [((62, 4, 62, 15), 'sys.exit', 'sys.exit', ({(62, 13, 62, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((334, 13, 334, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((370, 14, 370, 33), 'elftools.elf.elffile.ELFFile', 'ELFFile', ({(370, 22, 370, 32): 'args.input'}, {}), '(args.input)', False, 'from elftools.elf.elffile import ELFFile\n'), ((98, 8, 98, 19), 'sys.exit', 'sys.exit', ({(98, 17, 98, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((101, 8, 101, 19), 'sys.exit', 'sys.exit', ({(101, 17, 101, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((137, 21, 137, 58), 're.compile', 're.compile', ({(137, 32, 137, 57): '"""^\\\\..*_(pageable|init)$"""'}, {}), "('^\\\\..*_(pageable|init)$')", False, 'import re\n'), ((146, 21, 147, 64), 're.compile', 're.compile', ({(147, 12, 147, 63): '"""^\\\\.(text|rodata|got|data|ARM\\\\.exidx|ARM\\\\.extab)$"""'}, {}), "('^\\\\.(text|rodata|got|data|ARM\\\\.exidx|ARM\\\\.extab)$')", False, 'import re\n'), ((177, 16, 177, 36), 'struct.pack', 'struct.pack', ({(177, 28, 177, 32): '"""<I"""', (177, 34, 177, 35): 'a'}, {}), "('<I', a)", False, 'import struct\n'), ((196, 8, 196, 19), 'sys.exit', 'sys.exit', ({(196, 17, 196, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((284, 15, 286, 54), 'struct.pack', 'struct.pack', ({(284, 27, 284, 39): '"""<IBBHIIIII"""', (284, 41, 284, 46): 'magic', (284, 48, 284, 55): 'version', (284, 57, 284, 64): 'arch_id', (284, 66, 284, 71): 'flags', (285, 27, 285, 36): 'init_size', (285, 38, 285, 55): 'init_load_addr[0]', (285, 57, 285, 74): 'init_load_addr[1]', (286, 27, 286, 41): 'init_mem_usage', (286, 43, 286, 53): 'paged_size'}, {}), "('<IBBHIIIII', magic, version, arch_id, flags, init_size,\n init_load_addr[0], init_load_addr[1], init_mem_usage, paged_size)", False, 'import struct\n'), ((309, 15, 310, 37), 'struct.pack', 'struct.pack', ({(309, 27, 309, 35): '"""<IBBHI"""', (309, 37, 309, 42): 'magic', (309, 44, 309, 51): 'version', (309, 53, 309, 60): 'arch_id', (309, 62, 309, 67): 'flags', (310, 27, 310, 36): 'nb_images'}, {}), "('<IBBHI', magic, version, arch_id, flags, nb_images)", False, 'import struct\n'), ((311, 15, 312, 40), 'struct.pack', 'struct.pack', ({(311, 27, 311, 34): '"""<IIII"""', (311, 36, 311, 53): 'init_load_addr[0]', (311, 55, 311, 72): 'init_load_addr[1]', (312, 27, 312, 28): '(0)', (312, 30, 312, 39): 'init_size'}, {}), "('<IIII', init_load_addr[0], init_load_addr[1], 0, init_size)", False, 'import struct\n'), ((314, 19, 314, 78), 'struct.pack', 'struct.pack', ({(314, 31, 314, 38): '"""<IIII"""', (314, 40, 314, 50): '(4294967295)', (314, 52, 314, 62): '(4294967295)', (314, 64, 314, 65): '(1)', (314, 67, 314, 77): 'paged_size'}, {}), "('<IIII', 4294967295, 4294967295, 1, paged_size)", False, 'import struct\n'), ((337, 44, 337, 67), 'argparse.FileType', 'argparse.FileType', ({(337, 62, 337, 66): '"""rb"""'}, {}), "('rb')", False, 'import argparse\n'), ((341, 45, 341, 68), 'argparse.FileType', 'argparse.FileType', ({(341, 63, 341, 67): '"""wb"""'}, {}), "('wb')", False, 'import argparse\n'), ((345, 45, 345, 68), 'argparse.FileType', 'argparse.FileType', ({(345, 63, 345, 67): '"""wb"""'}, {}), "('wb')", False, 'import argparse\n'), ((349, 45, 349, 68), 'argparse.FileType', 'argparse.FileType', ({(349, 63, 349, 67): '"""wb"""'}, {}), "('wb')", False, 'import argparse\n'), ((353, 45, 353, 68), 'argparse.FileType', 'argparse.FileType', ({(353, 63, 353, 67): '"""wb"""'}, {}), "('wb')", False, 'import argparse\n'), ((357, 45, 357, 68), 
'argparse.FileType', 'argparse.FileType', ({(357, 63, 357, 67): '"""wb"""'}, {}), "('wb')", False, 'import argparse\n'), ((361, 45, 361, 68), 'argparse.FileType', 'argparse.FileType', ({(361, 63, 361, 67): '"""wb"""'}, {}), "('wb')", False, 'import argparse\n'), ((171, 16, 171, 27), 'sys.exit', 'sys.exit', ({(171, 25, 171, 26): '(1)'}, {}), '(1)', False, 'import sys\n'), ((201, 16, 201, 36), 'hashlib.sha256', 'hashlib.sha256', ({(201, 31, 201, 35): 'page'}, {}), '(page)', False, 'import hashlib\n')] |
gamblor21/Adafruit_Learning_System_Guides | CircuitPython_JEplayer_mp3/repeat.py | f5dab4a758bc82d0bfc3c299683fe89dc093912a | # The MIT License (MIT)
#
# Copyright (c) 2020 Jeff Epler for Adafruit Industries LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Make a key (button) repeat when held down
"""
import time
class KeyRepeat:
"""Track the state of a button and, while it is held, output a press every
'rate' seconds"""
def __init__(self, getter, rate=0.5):
self.getter = getter
self.rate_ns = round(rate * 1e9)
self.next = -1
@property
def value(self):
"""True when a button is first pressed, or once every 'rate' seconds
thereafter"""
state = self.getter()
if not state:
self.next = -1
return False
now = time.monotonic_ns()
if state and now > self.next:
self.next = now + self.rate_ns
return True
return False
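# Self-contained usage sketch (no hardware): a time-based fake getter stands in for
# a real button poll. KeyRepeat reports one press as soon as the getter goes True
# and then one more every `rate` seconds for as long as it stays True.
if __name__ == "__main__":
    _start = time.monotonic_ns()

    def _held_for_two_seconds():
        return time.monotonic_ns() - _start < 2_000_000_000

    repeater = KeyRepeat(_held_for_two_seconds, rate=0.5)
    while time.monotonic_ns() - _start < 3_000_000_000:
        if repeater.value:
            print("press event")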
| [((43, 14, 43, 33), 'time.monotonic_ns', 'time.monotonic_ns', ({}, {}), '()', False, 'import time\n')] |
Geralonx/Classes_Tutorial | Kapitel_1/_1_public_private.py | 9499db8159efce1e3c38975b66a9c649631c6727 | # --- Klassendeklaration mit Konstruktor --- #
class PC:
def __init__(self, cpu, gpu, ram):
self.cpu = cpu
self.gpu = gpu
self.__ram = ram
# --- Instanziierung einer Klasse ---#
# --- Ich bevorzuge die Initialisierung mit den Keywords --- #
pc_instanz = PC(cpu='Ryzen 7', gpu='RTX2070Super', ram='GSkill')
# --- Zugriff auf normale _public_ Attribute --- #
print(pc_instanz.cpu)
print(pc_instanz.gpu)
# --- Zugriff auf ein _privates_ Attribut --- #
# Auskommentiert, da es einen AttributeError schmeißt.
# print(pc_instanz.__ram)
# --- Zugriff auf das Instanz-Dictionary, um die Inhalte jener Instanz zu erhalten. --- #
print(pc_instanz.__dict__)
# --- Zugriff auf das eigentlich _private_ Attribut. --- #
print(pc_instanz._PC__ram)
| [] |
delaanthonio/hackerrank | algorithm/dynamic_programming/coin_change/solution.py | b1f2e1e93b3260be90eb3b8cb8e86e9a700acf27 | #!/usr/bin/env python3
"""
The Coin Change Problem
:author: Dela Anthonio
:hackerrank: https://hackerrank.com/delaanthonio
:problem: https://www.hackerrank.com/challenges/coin-change/problem
"""
from typing import List
def count_ways(amount: int, coins: List[int]) -> int:
"""Return the number of ways we can count to ``amount`` with values ``coins``."""
ways = [1] + [0] * amount
for coin in coins:
for val in range(coin, amount + 1):
ways[val] += ways[val - coin]
return ways[-1]
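def _self_check():
    """Illustrative sanity checks (not part of the HackerRank I/O flow)."""
    # 4 can be built from {1, 2, 3} as 1+1+1+1, 1+1+2, 2+2 and 1+3: four ways.
    assert count_ways(4, [1, 2, 3]) == 4
    # Sample-style case: 10 from {2, 3, 5, 6} has five combinations.
    assert count_ways(10, [2, 3, 5, 6]) == 5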
def main():
m, n = [int(x) for x in input().strip().split()]
coins = sorted({int(x) for x in input().strip().split()})
print(count_ways(m, coins))
if __name__ == '__main__':
main()
| [] |
javawolfpack/ClimbProject | climbproject/climbapp/admin.py | 508cf822a1eb0b78f7120a3d469ceb65e3b423f7 | from django.contrib import admin
#from .models import *
from . import models
# Register your models here.
admin.site.register(models.ClimbModel)
| [((7, 0, 7, 38), 'django.contrib.admin.site.register', 'admin.site.register', ({(7, 20, 7, 37): 'models.ClimbModel'}, {}), '(models.ClimbModel)', False, 'from django.contrib import admin\n')] |
TheMagicNacho/artemis-nozzle | setup.py | 5c02672feb7b437a4ff0ccc45394de3010bcd5ab | # coding: utf-8
from runpy import run_path
from setuptools import setup
# Get the version from the relevant file
d = run_path('skaero/version.py')
__version__ = d['__version__']
setup(
name="scikit-aero",
version=__version__,
description="Aeronautical engineering calculations in Python.",
author="Juan Luis Cano",
author_email="[email protected]",
url="https://github.com/Juanlu001/scikit-aero",
license="BSD",
keywords=[
"aero", "aeronautical", "aerospace",
"engineering", "atmosphere", "gas"
],
requires=["numpy", "scipy"],
packages=[
"skaero",
"skaero.atmosphere", "skaero.gasdynamics",
"skaero.util"
],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Physics"
],
long_description=open('README.rst').read()
)
| [((6, 4, 6, 33), 'runpy.run_path', 'run_path', ({(6, 13, 6, 32): '"""skaero/version.py"""'}, {}), "('skaero/version.py')", False, 'from runpy import run_path\n')] |
royqh1979/programming_with_python | appendix/AI.by.Search/backtracking.search/3-1.eight.queens.py | 7e1e8f88381151b803b6ae6ebda9809d9cc6664a | """
The eight queens problem
Backtracking search implemented with a stack
"""
def print_board(n,count):
print(f"------解.{count}------")
print(" ",end="")
for j in range(n):
print(f"{j:<2}" ,end="")
print()
for i in range(1,n+1):
print(f"{i:<2}",end="")
for j in range(1,n+1):
if queens[i] == j:
print("Q ",end="")
else:
print(" ",end="")
print()
def set_flags(i,j,n):
col_flags[j]=1
diag_flags[i+j-1]=1
diag2_flags[n+i-j]=1
def clear_flags(i,j,n):
col_flags[j]=0
diag_flags[i+j-1]=0
diag2_flags[n+i-j]=0
def can_stay(i,j,n):
if col_flags[j]==1:
return False
if diag_flags[i+j-1]==1:
return False
if diag2_flags[n+i-j]==1:
return False
return True
def try_queen(n):
global count
i=1
while True:
queens[i]+=1
if queens[i]>n: # backtracking
i-=1
if i<1: # all possible solutions have been tried, quit searching
break
clear_flags(i,queens[i],n)
elif can_stay(i,queens[i],n):
if i==n:
count += 1
print_board(n, count)
else:
set_flags(i, queens[i], n)
i+=1
queens[i] = 0
def queen(n):
    try_queen(n)
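# Sanity check (well-known result): the search prints 2 solutions for n=4 and 92 for n=8.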
n=int(input("请输入n:"))
queens = [0]*(n+1)
# Column occupancy flags
col_flags = [0] * (n + 1)
# Main diagonal occupancy flags (indexed by i+j-1)
diag_flags = [0] * (2 * n)
# Anti-diagonal occupancy flags (indexed by n+i-j)
diag2_flags = [0] * (2*n)
count = 0
queen(n)
print(f"共有{count}种解法\n")
| [] |
amzn/multimodal-affinities | multimodal_affinities/evaluation/analysis/plots_producer.py | 23045eb6a9387ce0c9c6f5a15227cf1cc4282626 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: CC-BY-4.0
import os
import cv2
from collections import namedtuple
import imageio
from PIL import Image
from random import randrange
import numpy as np
from sklearn.decomposition import PCA
from scipy.spatial.distance import pdist, squareform
import torch
import matplotlib
matplotlib.use('Agg') # Required for gif animations
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as image
import matplotlib.patches as patches
from multimodal_affinities.visualization.vis_handler import VisHandler
from multimodal_affinities.visualization.image_utils import resize_image
from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple
class PlotsProducer:
def __init__(self, document, output_path):
# Load background image
self.image_path = document.image_path
self.img = plt.imread(self.image_path)
self.img_opencv = cv2.imread(self.image_path)
dpi = 120
mpl.rcParams['figure.dpi'] = dpi
height = self.img.shape[0]
width = self.img.shape[1]
self.figsize = width / float(dpi), height / float(dpi) # Fig size in inches
self.document = document
self.output_path = output_path
if not os.path.exists(output_path):
os.makedirs(output_path)
def plot_word_boxes_on_image(self):
set_of_words = [[word] for word in self.document.get_words()] # list of singleton word lists
fig, ax = plt.subplots(1, figsize=self.figsize)
monochrome_colors_list = ['#5a5d8f' for _ in self.document.get_words()]
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='',
entity_sets=set_of_words,
colors_list=monochrome_colors_list)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_word_boxes.png'))
plt.close(fig)
def save_phrase_detection_results(self):
set_of_phrases = [[phrase] for phrase in self.document.get_phrases()] # list of singleton phrase lists
fig, ax = plt.subplots(1, figsize=self.figsize)
self._draw_entity_bounding_boxes(fig=fig, ax=ax, bg_img=self.img,
title='Phrase Detection', entity_sets=set_of_phrases)
fig.savefig(os.path.join(self.output_path, self.document.basename + '_phrase_detection.png'))
plt.close(fig)
def save_clustering_results(self, with_title=True, colors_list=None):
set_of_clusters = [cluster.words for cluster in self.document.get_clusters()] # list of list of words (clusters)
self._save_set_of_clusters(set_of_clusters, with_title, colors_list)
def save_clustering_labels(self, clustering_labels, colors_list=None):
cluster_ids = np.unique(np.array(clustering_labels))
cluster_id_to_cluster_idx = {cluster_id: idx for idx, cluster_id in enumerate(cluster_ids)}
# Converts from list of labels to list of list of words (clusters)
set_of_clusters = [[] for _ in range(len(cluster_ids))]
for word_idx, word in enumerate(self.document.get_words()):
cluster_id = clustering_labels[word_idx]
if cluster_id == -1: # Ignore non-clustered words
continue
cluster_idx = cluster_id_to_cluster_idx[cluster_id]
set_of_clusters[cluster_idx].append(word)
        self._save_set_of_clusters(set_of_clusters, colors_list=colors_list)
def _save_set_of_clusters(self, set_of_clusters, with_title=True, colors_list=None):
"""
        :param set_of_clusters: list of list of words (clusters)
        :param with_title: whether to include a title (currently unused by the OpenCV rendering path)
        :param colors_list: optional list of colors, one per cluster
:return:
"""
output_img = self._draw_entity_bounding_boxes_opencv(bg_img=self.img_opencv,
entity_sets=set_of_clusters,
colors_list=colors_list)
cv2.imwrite(os.path.join(self.output_path, self.document.basename + '_clustering.png'), output_img)
@staticmethod
def _draw_entity_bounding_boxes_opencv(bg_img, entity_sets, colors_list=None):
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
output_img = bg_img.copy()
alpha = 0.8
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
# writing the text onto the image and returning it
rgb_color = rgb_hex_to_tuple(face_color)
cv2.rectangle(output_img, (int(x), int(y)), (int(x + width), int(y + height)),
(rgb_color[2], rgb_color[1], rgb_color[0]), cv2.FILLED)
output_img = cv2.addWeighted(output_img, alpha, bg_img, 1 - alpha, 0)
return output_img
@staticmethod
def _draw_entity_bounding_boxes(fig, ax, bg_img, title, entity_sets, colors_list=None):
ax.set_title(title)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
plt.imshow(bg_img)
img_height = bg_img.shape[0]
img_width = bg_img.shape[1]
if colors_list is None:
colors_list = VisHandler.generate_colors_list(amount=len(entity_sets))
face_colors = colors_list
edge_colors = VisHandler.generate_darker_palette(colors_list)
for set_idx, entities_set in enumerate(entity_sets):
face_color = face_colors[set_idx]
edge_color = edge_colors[set_idx]
for entity in entities_set:
x = entity.geometry.left * img_width
y = entity.geometry.top * img_height
width = entity.geometry.width * img_width
height = entity.geometry.height * img_height
rect = patches.Rectangle((x, y), width, height,
linewidth=2,
edgecolor=edge_color,
facecolor=face_color,
alpha=0.4)
ax.add_patch(rect)
@staticmethod
def plot_pca_embedding_space_for_clusters(document, output_path,
embedding_property='embedding',
title=''):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or getattr(words[0], embedding_property) is None:
return
if embedding_property == 'unprojected_embedding':
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
plot_title = embedding_property
        if title != '':
plot_title += ': ' + title
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=1, alpha=0.8)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
@staticmethod
def _find_k_furthest_words_per_cluster(document, embeddings_2d, k=3):
""" Greedy approximation algorithm for finding k furthest neighbour words per cluster.
k is expected to be relatively small (< 100)
"""
words = document.get_words()
word_to_embedding_2d_idx = {word: idx for idx, word in enumerate(words)}
clusters = document.get_clusters()
solution_per_cluster = {}
ClusterSolution = namedtuple('ClusterSolution', ['word_indices', 'words'])
for cluster in clusters:
# Generate cluster pairwise distances matrix
all_cluster_embeddings_indices = [word_to_embedding_2d_idx[word] for word in cluster.words]
all_cluster_embeddings = np.take(embeddings_2d, all_cluster_embeddings_indices, axis=0)
pairwise_distances = pdist(all_cluster_embeddings, metric='euclidean')
distances_matrix = squareform(pairwise_distances)
# Total distance from selected set so far
distances_accumulator = np.zeros(len(cluster.words))
# Sample first point
random_index = randrange(len(cluster.words))
# Indices of selected points
selected_points = [random_index]
# How many points we need to add
points_to_calc_count = min(k - 1, len(words) - 1)
for _ in range(points_to_calc_count):
last_point_selected = selected_points[-1]
# Update accumulator with distance collected from last point
distances_accumulator += distances_matrix[last_point_selected]
# Eliminate last point selected from distance matrix & accumulator
                distances_matrix[:, last_point_selected] = 0
                distances_matrix[last_point_selected, :] = 0
                furthest_point_from_set = np.argmax(distances_accumulator, axis=0)
                selected_points.append(furthest_point_from_set)
selected_words = [cluster.words[point] for point in selected_points]
selected_word_indices = [word_to_embedding_2d_idx[word] for word in selected_words]
solution_per_cluster[cluster] = ClusterSolution(word_indices=selected_word_indices, words=selected_words)
return solution_per_cluster
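    @staticmethod
    def _greedy_farthest_points(points, k):
        """Illustrative sketch only (not used by the pipeline): the same greedy
        max-sum-of-distances heuristic as above, on a bare (n, 2) coordinate array.
        Starts from a random point and repeatedly adds the point whose accumulated
        distance to the already selected set is largest."""
        distances_matrix = squareform(pdist(points, metric='euclidean'))
        selected = [randrange(len(points))]
        accumulator = np.zeros(len(points))
        for _ in range(min(k, len(points)) - 1):
            accumulator += distances_matrix[selected[-1]]
            masked = accumulator.copy()
            masked[selected] = -np.inf  # never re-pick an already selected point
            selected.append(int(np.argmax(masked)))
        return selected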
@staticmethod
def _extract_crops_per_cluster_solution(document, solution_per_cluster):
"""
Extracts crops for each selected word in k-furthest neighbours solution
:param document:
:param solution_per_cluster: Solution of k-furthest neighbours
:return:
"""
word_indices_to_crops = {}
for cluster, cluster_solution in solution_per_cluster.items():
for word_index, word in zip(cluster_solution.word_indices, cluster_solution.words):
bbox = word.get_bbox() # left, top, width, height
y_min = int(round(bbox[1] * document.height))
y_max = int(round((bbox[1] + bbox[3]) * document.height))
x_min = int(round(bbox[0] * document.width))
x_max = int(round((bbox[0] + bbox[2]) * document.width))
image_of_crop = document.image[max(0, y_min):min(y_max, document.height),
max(0, x_min):min(x_max, document.width), :]
pil_image = Image.fromarray(image_of_crop[...,::-1]) # BGR to RGB
pil_image = pil_image.convert('RGB')
word_indices_to_crops[word_index] = pil_image
return word_indices_to_crops
@staticmethod
def _space_out_crops(indices_to_crops, words, x_list, y_list, dist_from_pt=0.01, height=0.02):
"""
Calculates the positions and dimensions of crop images on the embedding space plot.
Makes sure crops don't overlay each other.
This method assumes a small number of crops (< 1000) and performs a naive linear comparison for each crop.
:param indices_to_crops: dict of word index (by order in doc) to PIL crop
:param words: List of words
:param x_list: List of corresponding pt x positions
:param y_list: List of corresponding pt y positions
:param dist_from_pt: How far in (x-y) coords the crop should be placed from the plot
:param height: Height of the crop, in figure axes dimensions (note: for normalized pca space: -1 to 1)
        :return: indices_to_extents: dict of word index to extents describing position and dimensions of each crop.
                 Crops are shifted so they don't cover each other.
"""
indices_to_extents = {}
MatplotExtent = namedtuple('matplot_extent', ['left', 'right', 'bottom', 'top'])
is_extent_x_intersect = lambda e1, e2: not (e1.right < e2.left or e1.left > e2.right)
is_extent_y_intersect = lambda e1, e2: not (e1.top > e2.bottom or e1.bottom < e2.top)
is_extent_intersect = lambda e1, e2: is_extent_x_intersect(e1, e2) and is_extent_y_intersect(e1, e2)
min_x, max_x = min(x_list), max(x_list)
min_y, max_y = min(y_list), max(y_list)
height = (max_y - min_y) * height
dist_from_pt = min(max_y - min_y, max_x - min_x) * dist_from_pt
for point_index, crop in indices_to_crops.items():
word_aspect_ratio = words[point_index].geometry.width / words[point_index].geometry.height
axis_ratio = (max_x-min_x) / (max_y-min_y) / 2
width = height * word_aspect_ratio * axis_ratio
left, right = x_list[point_index] + dist_from_pt, x_list[point_index] + dist_from_pt + width
bottom, top = y_list[point_index] + dist_from_pt + height, y_list[point_index] + dist_from_pt
overlap = True
while overlap:
overlap = False
extent = MatplotExtent(left, right, bottom, top)
for other_crop_extent in indices_to_extents.values():
other_left, other_right, other_bottom, other_top = other_crop_extent
spaceout_margin = dist_from_pt / 2
if is_extent_intersect(extent, other_crop_extent):
overlap = True
# shift below
if other_bottom <= top <= other_top:
top = other_bottom + spaceout_margin
bottom = top + height
else: # shift above
bottom = other_top - spaceout_margin
top = bottom - height
continue
indices_to_extents[point_index] = extent
return indices_to_extents
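    # The extents returned by _space_out_crops are passed to ax.imshow(crop, extent=extent, ...)
    # below, so each crop is drawn next to its embedding point without covering other crops.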
def plot_clusters_and_embedding_space_with_crops(self, document, output_path, crops_per_cluster=3,
embedding_properties=['embedding', 'unprojected_embedding'],
unprojected_caption=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or \
all([getattr(words[0], embedding_property) is None for embedding_property in embedding_properties]):
return
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
# Initially empty, the first embedding property we process will set those for all figures
selected_word_crops_per_cluster = None
indices_to_crops = None
for embedding_property in embedding_properties:
if embedding_property == 'unprojected_embedding': # Can't handle tuples, concat them
embeddings = []
for word in words:
unprojected_embedding = torch.cat(word.unprojected_embedding['embeddings'], dim=1)
unprojected_embedding = unprojected_embedding.detach().cpu().numpy()
embeddings.append(unprojected_embedding)
else:
embeddings = [getattr(word, embedding_property).detach().cpu().numpy() for word in words]
embeddings_array = np.array(embeddings).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
fig, ax = plt.subplots(1)
if crops_per_cluster > 0:
if selected_word_crops_per_cluster is None and indices_to_crops is None: # Calculate per first attribute
selected_word_crops_per_cluster = PlotsProducer._find_k_furthest_words_per_cluster(document, embeddings_2d, k=crops_per_cluster)
indices_to_crops = PlotsProducer._extract_crops_per_cluster_solution(document, selected_word_crops_per_cluster)
indices_to_extents = PlotsProducer._space_out_crops(indices_to_crops, words,
x_list, y_list, dist_from_pt=0.02, height=0.04)
# Plot crop images
for point_index, crop in indices_to_crops.items():
extent = indices_to_extents[point_index]
rect = patches.Rectangle((extent.left, extent.top), extent.right-extent.left, extent.bottom-extent.top,
linewidth=0.5,
edgecolor="black",
facecolor="none",
zorder=5)
ax.imshow(crop, aspect='auto', alpha=0.65, extent=extent, zorder=4)
ax.add_patch(rect)
# Plot points
if embedding_property == 'unprojected_embedding':
plot_title = 'Initial unprojected embeddings, pre training (PCA)'
else:
if unprojected_caption is None:
plot_title = 'Projected embeddings, post training (PCA)'
else:
plot_title = unprojected_caption
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout()
fig.savefig(os.path.join(output_path, document.basename + '_' + embedding_property + '_pca.png'))
plt.close(fig)
# Finally plot clusters on original image
self.save_clustering_results(with_title=False, colors_list=colors_palette)
return colors_palette
@staticmethod
def animate_pca_embedding_space_for_clusters(document, output_path, embeddings_history, colors_palette=None):
"""
Plot 2d PCA visualization of the embedding space according to cluster colors.
:param document: Document with clustering results
:param embedding_property: Embedding property of words - normally 'embedding' or 'unprojected_embedding'
:return:
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
words = document.get_words()
clusters = document.get_clusters()
if len(words) == 0 or embeddings_history is None or len(embeddings_history) == 0:
return
if colors_palette is None:
colors_palette = VisHandler.generate_colors_list(amount=len(clusters))
word_to_color = {word: colors_palette[cluster_idx]
for cluster_idx, cluster in enumerate(clusters)
for word in cluster.words}
colors = [word_to_color[word] for word in words]
scatter_data = []
for state_idx, embeddings_state in enumerate(embeddings_history):
epoch = state_idx + 1
normalized_embeddings_dict = embeddings_state['normalized']
unnormalized_embeddings_dict = embeddings_state['unnormalized']
if len(normalized_embeddings_dict) > 0:
normalized_embeddings = [normalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = normalized_embeddings
elif len(unnormalized_embeddings_dict) > 0:
unnormalized_embeddings = [unnormalized_embeddings_dict[word].detach().cpu().numpy() for word in words]
chosen_embedding = unnormalized_embeddings
else:
return
embeddings_array = np.array(chosen_embedding).squeeze()
num_pca_comp = 2
embeddings_2d = PCA(n_components=num_pca_comp).fit_transform(embeddings_array)
x_list = [embeddings_2d[i, 0] for i in range(embeddings_2d.shape[0])]
y_list = [embeddings_2d[i, 1] for i in range(embeddings_2d.shape[0])]
push_pull_ratio = embeddings_state['push_pull_ratio']
scatter_data.append((epoch, x_list, y_list, push_pull_ratio))
min_x = min(min(scatter_data, key=lambda entry: min(entry[1]))[1])
max_x = max(max(scatter_data, key=lambda entry: max(entry[1]))[1])
min_y = min(min(scatter_data, key=lambda entry: min(entry[2]))[2])
max_y = max(max(scatter_data, key=lambda entry: max(entry[2]))[2])
padding_factor = 0.1
        x_padding = (max_x - min_x) * padding_factor
        y_padding = (max_y - min_y) * padding_factor
        min_x -= x_padding
        max_x += x_padding
        min_y -= y_padding
        max_y += y_padding
frames = []
for epoch, x_list, y_list, push_pull_ratio in scatter_data:
fig, ax = plt.subplots(1)
ax.set_xlim(min_x, max_x)
ax.set_ylim(min_y, max_y)
plot_title = 'Projected embeddings at epoch #' + str(epoch) + ' (PCA)'
plt.title(plot_title)
plt.scatter(x_list, y_list, c=colors, s=18, alpha=1.0, edgecolors='black', linewidth=1.0, zorder=3)
plt.tick_params(axis='both', which='both',
bottom='off', top='off', labelbottom='off', right='off', left='off',
labelleft='off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
            # Used to return the plot as an image array
fig.tight_layout()
fig.canvas.draw() # draw the canvas, cache the renderer
output_frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
output_frame = output_frame.reshape(fig.canvas.get_width_height()[::-1] + (3,))
frames.append(output_frame)
imageio.mimsave(os.path.join(output_path, document.basename + '_embeddings_history.gif'), frames, fps=2)
| [((15, 0, 15, 21), 'matplotlib.use', 'matplotlib.use', ({(15, 15, 15, 20): '"""Agg"""'}, {}), "('Agg')", False, 'import matplotlib\n'), ((29, 19, 29, 46), 'matplotlib.pyplot.imread', 'plt.imread', ({(29, 30, 29, 45): 'self.image_path'}, {}), '(self.image_path)', True, 'import matplotlib.pyplot as plt\n'), ((30, 26, 30, 53), 'cv2.imread', 'cv2.imread', ({(30, 37, 30, 52): 'self.image_path'}, {}), '(self.image_path)', False, 'import cv2\n'), ((45, 18, 45, 55), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((52, 8, 52, 22), 'matplotlib.pyplot.close', 'plt.close', ({(52, 18, 52, 21): 'fig'}, {}), '(fig)', True, 'import matplotlib.pyplot as plt\n'), ((56, 18, 56, 55), 'matplotlib.pyplot.subplots', 'plt.subplots', (), '', True, 'import matplotlib.pyplot as plt\n'), ((60, 8, 60, 22), 'matplotlib.pyplot.close', 'plt.close', ({(60, 18, 60, 21): 'fig'}, {}), '(fig)', True, 'import matplotlib.pyplot as plt\n'), ((102, 22, 102, 69), 'multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette', 'VisHandler.generate_darker_palette', ({(102, 57, 102, 68): 'colors_list'}, {}), '(colors_list)', False, 'from multimodal_affinities.visualization.vis_handler import VisHandler\n'), ((118, 21, 118, 77), 'cv2.addWeighted', 'cv2.addWeighted', ({(118, 37, 118, 47): 'output_img', (118, 49, 118, 54): 'alpha', (118, 56, 118, 62): 'bg_img', (118, 64, 118, 73): '1 - alpha', (118, 75, 118, 76): '0'}, {}), '(output_img, alpha, bg_img, 1 - alpha, 0)', False, 'import cv2\n'), ((126, 8, 128, 40), 'matplotlib.pyplot.tick_params', 'plt.tick_params', (), '', True, 'import matplotlib.pyplot as plt\n'), ((130, 8, 130, 26), 'matplotlib.pyplot.imshow', 'plt.imshow', ({(130, 19, 130, 25): 'bg_img'}, {}), '(bg_img)', True, 'import matplotlib.pyplot as plt\n'), ((138, 22, 138, 69), 'multimodal_affinities.visualization.vis_handler.VisHandler.generate_darker_palette', 'VisHandler.generate_darker_palette', ({(138, 57, 138, 68): 'colors_list'}, {}), '(colors_list)', False, 'from multimodal_affinities.visualization.vis_handler import VisHandler\n'), ((192, 18, 192, 33), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(192, 31, 192, 32): '1'}, {}), '(1)', True, 'import matplotlib.pyplot as plt\n'), ((196, 8, 196, 29), 'matplotlib.pyplot.title', 'plt.title', ({(196, 18, 196, 28): 'plot_title'}, {}), '(plot_title)', True, 'import matplotlib.pyplot as plt\n'), ((197, 8, 197, 61), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((201, 8, 201, 22), 'matplotlib.pyplot.close', 'plt.close', ({(201, 18, 201, 21): 'fig'}, {}), '(fig)', True, 'import matplotlib.pyplot as plt\n'), ((212, 26, 212, 82), 'collections.namedtuple', 'namedtuple', ({(212, 37, 212, 54): '"""ClusterSolution"""', (212, 56, 212, 81): "['word_indices', 'words']"}, {}), "('ClusterSolution', ['word_indices', 'words'])", False, 'from collections import namedtuple\n'), ((291, 24, 291, 88), 'collections.namedtuple', 'namedtuple', ({(291, 35, 291, 51): '"""matplot_extent"""', (291, 53, 291, 87): "['left', 'right', 'bottom', 'top']"}, {}), "('matplot_extent', ['left', 'right', 'bottom', 'top'])", False, 'from collections import namedtuple\n'), ((40, 15, 40, 42), 'os.path.exists', 'os.path.exists', ({(40, 30, 40, 41): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((41, 12, 41, 36), 'os.makedirs', 'os.makedirs', ({(41, 24, 41, 35): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((51, 20, 51, 94), 'os.path.join', 'os.path.join', ({(51, 
33, 51, 49): 'self.output_path', (51, 51, 51, 93): "(self.document.basename + '_word_boxes.png')"}, {}), "(self.output_path, self.document.basename + '_word_boxes.png')", False, 'import os\n'), ((59, 20, 59, 100), 'os.path.join', 'os.path.join', ({(59, 33, 59, 49): 'self.output_path', (59, 51, 59, 99): "(self.document.basename + '_phrase_detection.png')"}, {}), "(self.output_path, self.document.basename + '_phrase_detection.png'\n )", False, 'import os\n'), ((67, 32, 67, 59), 'numpy.array', 'np.array', ({(67, 41, 67, 58): 'clustering_labels'}, {}), '(clustering_labels)', True, 'import numpy as np\n'), ((90, 20, 90, 94), 'os.path.join', 'os.path.join', ({(90, 33, 90, 49): 'self.output_path', (90, 51, 90, 93): "(self.document.basename + '_clustering.png')"}, {}), "(self.output_path, self.document.basename + '_clustering.png')", False, 'import os\n'), ((165, 15, 165, 42), 'os.path.exists', 'os.path.exists', ({(165, 30, 165, 41): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((166, 12, 166, 36), 'os.makedirs', 'os.makedirs', ({(166, 24, 166, 35): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((200, 20, 200, 104), 'os.path.join', 'os.path.join', ({(200, 33, 200, 44): 'output_path', (200, 46, 200, 103): "(document.basename + '_' + embedding_property + '_pca.png')"}, {}), "(output_path, document.basename + '_' + embedding_property +\n '_pca.png')", False, 'import os\n'), ((216, 37, 216, 99), 'numpy.take', 'np.take', (), '', True, 'import numpy as np\n'), ((217, 33, 217, 82), 'scipy.spatial.distance.pdist', 'pdist', (), '', False, 'from scipy.spatial.distance import pdist, squareform\n'), ((218, 31, 218, 61), 'scipy.spatial.distance.squareform', 'squareform', ({(218, 42, 218, 60): 'pairwise_distances'}, {}), '(pairwise_distances)', False, 'from scipy.spatial.distance import pdist, squareform\n'), ((338, 15, 338, 42), 'os.path.exists', 'os.path.exists', ({(338, 30, 338, 41): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((339, 12, 339, 36), 'os.makedirs', 'os.makedirs', ({(339, 24, 339, 35): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((373, 22, 373, 37), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(373, 35, 373, 36): '1'}, {}), '(1)', True, 'import matplotlib.pyplot as plt\n'), ((400, 12, 400, 33), 'matplotlib.pyplot.title', 'plt.title', ({(400, 22, 400, 32): 'plot_title'}, {}), '(plot_title)', True, 'import matplotlib.pyplot as plt\n'), ((401, 12, 401, 111), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((402, 12, 404, 44), 'matplotlib.pyplot.tick_params', 'plt.tick_params', (), '', True, 'import matplotlib.pyplot as plt\n'), ((410, 12, 410, 26), 'matplotlib.pyplot.close', 'plt.close', ({(410, 22, 410, 25): 'fig'}, {}), '(fig)', True, 'import matplotlib.pyplot as plt\n'), ((425, 15, 425, 42), 'os.path.exists', 'os.path.exists', ({(425, 30, 425, 41): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((426, 12, 426, 36), 'os.makedirs', 'os.makedirs', ({(426, 24, 426, 35): 'output_path'}, {}), '(output_path)', False, 'import os\n'), ((474, 22, 474, 37), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(474, 35, 474, 36): '1'}, {}), '(1)', True, 'import matplotlib.pyplot as plt\n'), ((478, 12, 478, 33), 'matplotlib.pyplot.title', 'plt.title', ({(478, 22, 478, 32): 'plot_title'}, {}), '(plot_title)', True, 'import matplotlib.pyplot as plt\n'), ((480, 12, 480, 111), 'matplotlib.pyplot.scatter', 'plt.scatter', (), '', True, 'import matplotlib.pyplot as plt\n'), ((481, 12, 483, 
44), 'matplotlib.pyplot.tick_params', 'plt.tick_params', (), '', True, 'import matplotlib.pyplot as plt\n'), ((494, 24, 494, 96), 'os.path.join', 'os.path.join', ({(494, 37, 494, 48): 'output_path', (494, 50, 494, 95): "(document.basename + '_embeddings_history.gif')"}, {}), "(output_path, document.basename + '_embeddings_history.gif')", False, 'import os\n'), ((114, 28, 114, 56), 'multimodal_affinities.visualization.colors_util.rgb_hex_to_tuple', 'rgb_hex_to_tuple', ({(114, 45, 114, 55): 'face_color'}, {}), '(face_color)', False, 'from multimodal_affinities.visualization.colors_util import rgb_hex_to_tuple\n'), ((148, 23, 152, 51), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (), '', True, 'import matplotlib.patches as patches\n'), ((175, 40, 175, 98), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((186, 27, 186, 47), 'numpy.array', 'np.array', ({(186, 36, 186, 46): 'embeddings'}, {}), '(embeddings)', True, 'import numpy as np\n'), ((188, 24, 188, 54), 'sklearn.decomposition.PCA', 'PCA', (), '', False, 'from sklearn.decomposition import PCA\n'), ((242, 43, 242, 83), 'numpy.argmax', 'np.argmax', (), '', True, 'import numpy as np\n'), ((270, 28, 270, 68), 'PIL.Image.fromarray', 'Image.fromarray', ({(270, 44, 270, 67): 'image_of_crop[(...), ::-1]'}, {}), '(image_of_crop[(...), ::-1])', False, 'from PIL import Image\n'), ((409, 24, 409, 108), 'os.path.join', 'os.path.join', ({(409, 37, 409, 48): 'output_path', (409, 50, 409, 107): "(document.basename + '_' + embedding_property + '_pca.png')"}, {}), "(output_path, document.basename + '_' + embedding_property +\n '_pca.png')", False, 'import os\n'), ((361, 44, 361, 102), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((367, 31, 367, 51), 'numpy.array', 'np.array', ({(367, 40, 367, 50): 'embeddings'}, {}), '(embeddings)', True, 'import numpy as np\n'), ((369, 28, 369, 58), 'sklearn.decomposition.PCA', 'PCA', (), '', False, 'from sklearn.decomposition import PCA\n'), ((384, 27, 388, 54), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (), '', True, 'import matplotlib.patches as patches\n'), ((454, 31, 454, 57), 'numpy.array', 'np.array', ({(454, 40, 454, 56): 'chosen_embedding'}, {}), '(chosen_embedding)', True, 'import numpy as np\n'), ((456, 28, 456, 58), 'sklearn.decomposition.PCA', 'PCA', (), '', False, 'from sklearn.decomposition import PCA\n')] |
Jgorsick/Advocacy_Angular | openstates/openstates-master/openstates/de/legislators.py | 8906af3ba729b2303880f319d52bce0d6595764c | import re
import lxml.html
from openstates.utils import LXMLMixin
from billy.scrape.legislators import LegislatorScraper, Legislator
class DELegislatorScraper(LegislatorScraper,LXMLMixin):
jurisdiction = 'de'
def scrape(self, chamber, term):
url = {
'upper': 'http://legis.delaware.gov/legislature.nsf/sen?openview',
'lower': 'http://legis.delaware.gov/Legislature.nsf/Reps?openview',
}[chamber]
doc = self.lxmlize(url)
if chamber == "upper":
#for the senate, it's the same table
#but the html is hard-coded in js.
table_js = doc.xpath('.//script')[-1].text_content()
table = None
for line in table_js.split("\n"):
if line.strip().startswith("var") and "sen=" in line:
table = line.replace("var","")
table = table.replace('sen="<','<')
table = table.replace('>";','>')
break
assert table is not None, "Senate table could not be found"
table = lxml.html.fromstring(table)
table.make_links_absolute(url)
trs = table.xpath('//tr')
else:
#same table for the house, but kindly in actual html
trs = doc.xpath('//tr')
base_url = "http://legis.delaware.gov"
for tr in trs:
name_and_url = tr.xpath('.//a')[0]
bio_url = name_and_url.attrib["href"]
bio_url = bio_url.replace("JavaScript:window.top.location.href=","")
bio_url = bio_url.replace('"','')
name = name_and_url.text_content()
if name.strip() == "." or name.strip() == "":
continue
if name.strip().lower().startswith("vacant"):
continue
re_spaces=re.compile(r'\s{1,5}')
name = ' '.join(re_spaces.split(name))
district = tr.xpath('.//td')[2].text_content()
district = district.replace("District:","").strip()
leg = self.scrape_bio(term, chamber, district, name, bio_url)
leg.add_source(bio_url, page="legislator detail page")
leg.add_source(url, page="legislator list page")
self.save_legislator(leg)
def scrape_bio(self, term, chamber, district, name, url):
# this opens the committee section without having to do another request
url += '&TableRow=1.5.5'
frame_doc = self.lxmlize(url)
actual_url = frame_doc.xpath("//frame[@name='right']/@src")[0]
doc = self.lxmlize(actual_url)
# party is in one of these
party = doc.xpath('//div[@id="page_header"]')[0].text.strip()[-3:]
if '(D)' in party:
party = 'Democratic'
elif '(R)' in party:
party = 'Republican'
else:
raise AssertionError("No party found for {name}".format(name=name))
leg = Legislator(term, chamber, district, name, party=party)
photo_url = doc.xpath('//img[contains(@src, "jpg")]/@src')
if photo_url:
leg['photo_url'] = photo_url[0]
contact_info = self.scrape_contact_info(doc)
leg.update(contact_info)
return leg
def scrape_contact_info(self, doc):
# Email
email = doc.xpath(".//a[contains(@href,'mailto')]")
email = email[0].text_content().strip()
leg_email = None
dist_email = None
try:
emails = email.split(";")
except AttributeError:
pass
else:
for e in emails:
e = e.strip()
if e:
if "state.de.us" in e:
leg_email = e
else:
dist_email = e
# Offices
leg_office = dict(name="Capitol Office", type="capitol",
phone=None, fax=None, email=leg_email, address=None)
dist_office = dict(name="Outside Office", type="capitol",
phone=None,fax=None, email=dist_email, address=None)
#this is enormously painful, DE.
office_list = doc.xpath("//tr")
for office in office_list:
title_td = 0
#in some trs the photo is the first td
if len(office.xpath("./td/img")) > 0:
title_td = 1
try:
title_text = office.xpath("./td")[title_td].text_content().lower()
content = office.xpath("./td")[title_td+1].text_content()
except IndexError:
continue
leg_office = self.add_contact("legislative",
title_text,content,leg_office)
dist_office = self.add_contact("outside",
title_text,content,dist_office)
offices = [o for o in [leg_office,dist_office] if o["address"]]
assert len(offices) > 0, "No offices with addresses found "\
"make sure we're not losing any data."
return {"offices":offices}
def add_contact(self,office_type,
title_text,content,office):
#office type is the name of the office
#either "legislative" or "outside"
if "{} office".format(office_type) in title_text:
office["address"] = content.strip()
if "{} phone".format(office_type) in title_text:
phones = content.lower().split("\n")
if len(phones) == 1:
phone = self.clean_phone(phones[0])
if phone:
office["phone"] = phone
else:
for line in phones:
if "phone" in line:
phone = self.clean_phone(line)
if phone:
office["phone"] = phone
elif "fax" in line:
phone = self.clean_phone(line)
if phone:
office["fax"] = phone
return office
def clean_phone(self,phone):
if not phone.strip():
return
if not re.search("\d",phone):
return
if not ":" in phone:
return phone
return phone.split(":")[1].strip()
| [((83, 14, 83, 68), 'billy.scrape.legislators.Legislator', 'Legislator', (), '', False, 'from billy.scrape.legislators import LegislatorScraper, Legislator\n'), ((56, 22, 56, 44), 're.compile', 're.compile', ({(56, 33, 56, 43): '"""\\\\s{1,5}"""'}, {}), "('\\\\s{1,5}')", False, 'import re\n'), ((172, 15, 172, 36), 're.search', 're.search', ({(172, 25, 172, 29): '"""\\\\d"""', (172, 30, 172, 35): 'phone'}, {}), "('\\\\d', phone)", False, 'import re\n')] |
gmftbyGMFTBY/SimpleReDial-v1 | simpleredial/dataloader/fine_grained_test_dataloader.py | f45b8eb23d1499ec617b4cc4f417d83d8f2b6bde | from header import *
from .utils import *
from .util_func import *
'''Only for Testing'''
class FineGrainedTestDataset(Dataset):
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.splitext(path)[0]}_fg_test_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
self.data = []
for fix in ['brandenwang', 'lt', 'lt2']:
path = f'{args["root_dir"]}/data/{args["dataset"]}/fg-{fix}-test.txt'
data = read_text_data_utterances(path, lang=self.args['lang'])
for i in tqdm(range(0, len(data), 7)):
batch = data[i:i+7]
rids = []
for label, utterances in batch:
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
cids, rids_ = item[:-1], item[-1]
ids = []
for u in cids:
ids.extend(u + [self.sep])
ids.pop()
ids = ids[-(self.args['max_len']-2):] # ignore [CLS] and [SEP]
rids_ = rids_[:(self.args['res_max_len']-2)]
ids = [self.cls] + ids + [self.sep]
rids_ = [self.cls] + rids_ + [self.sep]
rids.append(rids_)
self.data.append({
'label': [b[0] for b in batch],
'ids': ids,
'rids': rids,
'text': ['\t'.join(b[1]) for b in batch],
'owner': fix,
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = torch.LongTensor(bundle['ids'])
rids = [torch.LongTensor(i) for i in bundle['rids']]
return ids, rids, bundle['label'], bundle['text'], bundle['owner']
def save(self):
        torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
assert len(batch) == 1
ids, rids, label, text, owner = batch[0]
rids = pad_sequence(rids, batch_first=True, padding_value=self.pad)
rids_mask = generate_mask(rids)
label = torch.LongTensor(label)
ids, rids, rids_mask, label = to_cuda(ids, rids, rids_mask, label)
return {
'ids': ids,
'rids': rids,
'rids_mask': rids_mask,
'label': label,
'text': text,
'owner': owner,
}
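# Usage sketch (assumption, not part of the original file): these test datasets are meant to
# be wrapped in a torch DataLoader with batch_size=1 and the dataset's own collate method,
#   test_data = FineGrainedTestDataset(vocab, path, **args)
#   test_iter = DataLoader(test_data, batch_size=1, collate_fn=test_data.collate)
# since every collate method in this file asserts len(batch) == 1.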
class FineGrainedTestPositionWeightDataset(Dataset):
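    # Variant of FineGrainedTestDataset that additionally builds a per-token position weight
    # sequence (starting from args['min_w'], growing by args['w_delta'] per utterance, with
    # args['w_sp_token'] for special tokens) and returns it as 'pos_w' in the collated batch.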
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
self.unk = self.vocab.convert_tokens_to_ids('[UNK]')
self.special_tokens = set([self.unk, self.cls, self.sep])
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.splitext(path)[0]}_fg_test_pw_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
self.data = []
for fix in ['brandenwang', 'lt', 'lt2']:
path = f'{args["root_dir"]}/data/{args["dataset"]}/fg-{fix}-test.txt'
data = read_text_data_utterances(path, lang=self.args['lang'])
for i in tqdm(range(0, len(data), 7)):
batch = data[i:i+7]
rids = []
for label, utterances in batch:
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
cids, rids_ = item[:-1], item[-1]
ids = []
position_w, w = [], self.args['min_w']
for u in cids:
ids.extend(u + [self.sep])
for token in u + [self.sep]:
if token not in self.special_tokens:
position_w.append(w)
else:
position_w.append(self.args['w_sp_token'])
w += self.args['w_delta']
ids.pop()
position_w.pop()
ids = ids[-(self.args['max_len']-2):] # ignore [CLS] and [SEP]
position_w = position_w[-(self.args['max_len']-2):]
rids_ = rids_[:(self.args['res_max_len']-2)]
ids = [self.cls] + ids + [self.sep]
position_w = [w-self.args['w_delta']] + position_w + [self.args['w_sp_token']]
rids_ = [self.cls] + rids_ + [self.sep]
rids.append(rids_)
self.data.append({
'label': [b[0] for b in batch],
'ids': ids,
'rids': rids,
'text': ['\t'.join(b[1]) for b in batch],
'position_w': position_w,
'owner': fix,
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = torch.LongTensor(bundle['ids'])
rids = [torch.LongTensor(i) for i in bundle['rids']]
position_w = torch.tensor(bundle['position_w'])
return ids, rids, position_w, bundle['label'], bundle['text'], bundle['owner']
def save(self):
        torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
assert len(batch) == 1
ids, rids, pos_w, label, text, owner = batch[0]
rids = pad_sequence(rids, batch_first=True, padding_value=self.pad)
rids_mask = generate_mask(rids)
label = torch.LongTensor(label)
ids, rids, pos_w, rids_mask, label = to_cuda(ids, rids, pos_w, rids_mask, label)
return {
'ids': ids,
'rids': rids,
'rids_mask': rids_mask,
'pos_w': pos_w,
'label': label,
'text': text,
'owner': owner,
}
class FineGrainedTestInteractionDataset(Dataset):
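    # Variant for interaction (cross-encoder) evaluation: each context/response pair is packed
    # into a single sequence ([CLS] context [SEP] response [SEP]) with 0/1 token type ids.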
def __init__(self, vocab, path, **args):
self.args = args
self.vocab = vocab
self.vocab.add_tokens(['[EOS]'])
self.pad = self.vocab.convert_tokens_to_ids('[PAD]')
self.sep = self.vocab.convert_tokens_to_ids('[SEP]')
self.eos = self.vocab.convert_tokens_to_ids('[EOS]')
self.cls = self.vocab.convert_tokens_to_ids('[CLS]')
suffix = args['tokenizer'].replace('/', '_')
self.pp_path = f'{os.path.splitext(path)[0]}_fg_interaction_test_{suffix}.pt'
if os.path.exists(self.pp_path):
self.data = torch.load(self.pp_path)
print(f'[!] load preprocessed file from {self.pp_path}')
return None
self.data = []
for fix in ['brandenwang', 'lt', 'lt2']:
path = f'{args["root_dir"]}/data/{args["dataset"]}/fg-{fix}-test.txt'
data = read_text_data_utterances(path, lang=self.args['lang'])
for i in tqdm(range(0, len(data), 7)):
batch = data[i:i+7]
rids = []
ids, tids = [], []
context, responses = [], []
for _, utterances in batch:
item = self.vocab.batch_encode_plus(utterances, add_special_tokens=False)['input_ids']
cids = []
for u in item[:-1]:
cids.extend(u + [self.eos])
cids.pop()
rids = item[-1]
truncate_pair(cids, rids, self.args['max_len'])
ids_ = [self.cls] + cids + [self.sep] + rids + [self.sep]
tids_ = [0] * (len(cids) + 2) + [1] * (len(rids) + 1)
ids.append(ids_)
tids.append(tids_)
responses.append(utterances[-1])
context = ' [SEP] '.join(utterances[:-1])
self.data.append({
'label': [b[0] for b in batch],
'ids': ids,
'tids': tids,
'context': context,
'responses': responses,
'owner': fix,
})
def __len__(self):
return len(self.data)
def __getitem__(self, i):
bundle = self.data[i]
ids = [torch.LongTensor(i) for i in bundle['ids']]
tids = [torch.LongTensor(i) for i in bundle['tids']]
context, responses = bundle['context'], bundle['responses']
return ids, tids, bundle['label'], context, responses, bundle['owner']
def save(self):
        torch.save(self.data, self.pp_path)
print(f'[!] save preprocessed dataset into {self.pp_path}')
def collate(self, batch):
assert len(batch) == 1
ids, tids, label, context, responses, owner = batch[0]
ids = pad_sequence(ids, batch_first=True, padding_value=self.pad)
tids = pad_sequence(tids, batch_first=True, padding_value=self.pad)
label = torch.LongTensor(label)
mask = generate_mask(ids)
ids, tids, mask, label = to_cuda(ids, tids, mask, label)
return {
'ids': ids,
'tids': tids,
'mask': mask,
'label': label,
'owner': owner,
}
| [] |
SvajkaJ/dabing | dabing/DABING-MIB.py | 8ddd8c1056b182b52f76028e23cd2ba8418a0dec | #
# PySNMP MIB module DABING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file://..\DABING-MIB.mib
# Produced by pysmi-0.3.4 at Tue Mar 22 12:53:47 2022
# On host ? platform ? version ? by user ?
# Using Python version 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 22:45:29) [MSC v.1916 32 bit (Intel)]
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, IpAddress, ObjectIdentity, iso, Counter32, Unsigned32, Bits, NotificationType, TimeTicks, Counter64, enterprises, MibIdentifier, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "IpAddress", "ObjectIdentity", "iso", "Counter32", "Unsigned32", "Bits", "NotificationType", "TimeTicks", "Counter64", "enterprises", "MibIdentifier", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
dabing = ModuleIdentity((1, 3, 6, 1, 4, 1, 55532))
dabing.setRevisions(('2022-03-17 00:00',))
if mibBuilder.loadTexts: dabing.setLastUpdated('202203170000Z')
if mibBuilder.loadTexts: dabing.setOrganization('www.stuba.sk')
Parameters = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 1))
Agent = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 2))
Manager = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 3))
Notifications = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4))
NotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 1))
NotificationObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 55532, 4, 2))
channel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 1), OctetString().clone('12C')).setMaxAccess("readonly")
if mibBuilder.loadTexts: channel.setStatus('current')
interval = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 2), Integer32().clone(960)).setMaxAccess("readonly")
if mibBuilder.loadTexts: interval.setStatus('current')
trapEnabled = MibScalar((1, 3, 6, 1, 4, 1, 55532, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapEnabled.setStatus('current')
agentIdentifier = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentIdentifier.setStatus('current')
agentLabel = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 2), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: agentLabel.setStatus('current')
agentStatus = MibScalar((1, 3, 6, 1, 4, 1, 55532, 2, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: agentStatus.setStatus('current')
managerHostname = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 1), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerHostname.setStatus('current')
managerPort = MibScalar((1, 3, 6, 1, 4, 1, 55532, 3, 2), Integer32().clone(162)).setMaxAccess("readonly")
if mibBuilder.loadTexts: managerPort.setStatus('current')
genericPayload = MibScalar((1, 3, 6, 1, 4, 1, 55532, 4, 2, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: genericPayload.setStatus('current')
malfunctionTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 1)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: malfunctionTrap.setStatus('current')
testTrap = NotificationType((1, 3, 6, 1, 4, 1, 55532, 4, 1, 2)).setObjects(("DABING-MIB", "genericPayload"))
if mibBuilder.loadTexts: testTrap.setStatus('current')
mibBuilder.exportSymbols("DABING-MIB", Notifications=Notifications, channel=channel, PYSNMP_MODULE_ID=dabing, testTrap=testTrap, malfunctionTrap=malfunctionTrap, Parameters=Parameters, agentLabel=agentLabel, managerPort=managerPort, trapEnabled=trapEnabled, managerHostname=managerHostname, Manager=Manager, NotificationPrefix=NotificationPrefix, Agent=Agent, genericPayload=genericPayload, NotificationObjects=NotificationObjects, agentIdentifier=agentIdentifier, dabing=dabing, agentStatus=agentStatus, interval=interval)
| [] |
kharris/allen-voxel-network | parameter_setup/run_setup_extra_vis.py | 3c39cf7e7400c09f78ebe9d1d9f8a6d7b9ef6d7b | import os
import numpy as np
save_stem='extra_vis_friday_harbor'
data_dir='../../data/sdk_new_100'
resolution=100
cre=False
source_acronyms=['VISal','VISam','VISl','VISp','VISpl','VISpm',
'VISli','VISpor','VISrl','VISa']
lambda_list = np.logspace(3,12,10)
scale_lambda=True
min_vox=0
# save_file_name='visual_output.hdf5'
#source_coverage=0.90
source_coverage=0.95
#source_shell = 1
source_shell=None
save_dir=os.path.join('../../data/connectivities',save_stem)
experiments_fn=None
target_acronyms=source_acronyms
solver=os.path.abspath('../smoothness_c/solve')
cmdfile=os.path.join(save_dir,'model_fitting_cmds')
selected_fit_cmds=os.path.join(save_dir,'model_fitting_after_selection_cmds')
save_mtx=True
cross_val_matrices=True
cross_val=5
fit_gaussian=False
select_one_lambda=False
if select_one_lambda:
lambda_fn='lambda_opt'
else:
lambda_fn='lambda_ipsi_contra_opt'
laplacian='free'
shuffle_seed=666
max_injection_volume=0.7
| [((10, 14, 10, 34), 'numpy.logspace', 'np.logspace', ({(10, 26, 10, 27): '3', (10, 28, 10, 30): '12', (10, 31, 10, 33): '10'}, {}), '(3, 12, 10)', True, 'import numpy as np\n'), ((18, 9, 18, 60), 'os.path.join', 'os.path.join', ({(18, 22, 18, 49): '"""../../data/connectivities"""', (18, 50, 18, 59): 'save_stem'}, {}), "('../../data/connectivities', save_stem)", False, 'import os\n'), ((21, 7, 21, 47), 'os.path.abspath', 'os.path.abspath', ({(21, 23, 21, 46): '"""../smoothness_c/solve"""'}, {}), "('../smoothness_c/solve')", False, 'import os\n'), ((22, 8, 22, 51), 'os.path.join', 'os.path.join', ({(22, 21, 22, 29): 'save_dir', (22, 30, 22, 50): '"""model_fitting_cmds"""'}, {}), "(save_dir, 'model_fitting_cmds')", False, 'import os\n'), ((23, 18, 23, 77), 'os.path.join', 'os.path.join', ({(23, 31, 23, 39): 'save_dir', (23, 40, 23, 76): '"""model_fitting_after_selection_cmds"""'}, {}), "(save_dir, 'model_fitting_after_selection_cmds')", False, 'import os\n')] |
GNiklas/MOSSEPy | examples/runall.py | fbae1294beefe48f321bc5dbbc70e6c72d3ffe1f | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 09:42:39 2020
@author: niklas
"""
from mossepy.mosse_tracker import MOSSE
# choose position of object in first frame
# that should be done by mouse click
objPos = [256, 256]
# choose tracker type
tracker = MOSSE()
# initialize object position in first frame
tracker.setObjPos(objPos)
# start tracking
tracker.trackImg() | [((16, 10, 16, 17), 'mossepy.mosse_tracker.MOSSE', 'MOSSE', ({}, {}), '()', False, 'from mossepy.mosse_tracker import MOSSE\n')] |
mike006322/PolynomialCalculator | core/formulas.py | bf56b0e773a3461ab2aa958d0d90e08f80a4d201 | def solve(polynomial):
"""
input is polynomial
if more than one variable, returns 'too many variables'
looks for formula to apply to coefficients
returns solution or 'I cannot solve yet...'
"""
if len(polynomial.term_matrix[0]) > 2:
return 'too many variables'
elif len(polynomial.term_matrix[0]) == 1:
return polynomial.term_matrix[1][0]
elif len(polynomial.term_matrix[0]) == 2:
degree = polynomial.term_matrix[1][1]
if degree == 1:
if len(polynomial.term_matrix) == 2:
return 0
else:
return -polynomial.term_matrix[2][0]/polynomial.term_matrix[1][0]
if degree == 2:
ans = quadratic_formula(polynomial)
return ans
if degree > 2:
return Durand_Kerner(polynomial)
def quadratic_formula(polynomial):
"""
input is single-variable polynomial of degree 2
returns zeros
"""
if len(polynomial.term_matrix) == 3:
if polynomial.term_matrix[2][1] == 1:
a, b = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return 0, -b/a
a, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0]
return (-c/a)**.5, -(-c/a)**.5
if len(polynomial.term_matrix) == 2:
a, b, c, = polynomial.term_matrix[1][0], 0, 0
elif len(polynomial.term_matrix) == 3:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], 0
else:
a, b, c = polynomial.term_matrix[1][0], polynomial.term_matrix[2][0], polynomial.term_matrix[3][0]
    ans1 = (-b + (b**2 - 4*a*c)**.5)/(2*a)
    ans2 = (-b - (b**2 - 4*a*c)**.5)/(2*a)
if ans1 == ans2:
return ans1
return ans1, ans2
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0001):
"""
    returns boolean whether abs(a-b) is at most max(rel_tol*max(abs(a), abs(b)), abs_tol)
"""
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def Durand_Kerner(f):
"""
input polynomial
returns numerical approximation of all complex roots
"""
roots = []
for i in range(f.degree()):
roots.append((0.4 + 0.9j)**i)
diff = 1
diff_temp = 0
def iterate():
nonlocal roots
new_roots = roots[:]
for i in range(len(roots)):
q = 1
for j, root in enumerate(roots):
if j != i:
q *= roots[i] - root
new_roots[i] = roots[i] - f(roots[i])/q
nonlocal diff
nonlocal diff_temp
diff_temp = diff
diff = 0
for i in range(len(roots)):
diff += abs(roots[i] - new_roots[i])
roots = new_roots
while diff > .00000001 and not isclose(diff_temp, diff):
iterate()
for i in range(len(roots)):
if isclose(roots[i].real, round(roots[i].real)):
temp = round(roots[i].real)
roots[i] -= roots[i].real
roots[i] += temp
if isclose(roots[i].imag, round(roots[i].imag)):
temp = round(roots[i].imag)
roots[i] -= roots[i].imag*1j
roots[i] += temp*1j
return roots
if __name__ == '__main__':
pass
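    # Minimal sketch (assumption): the project's real Polynomial class is not shown in this
    # file, so the stand-in below only provides what the helpers above actually use
    # (term_matrix as a header row plus [coefficient, exponent] rows, degree(), __call__)
    # and exercises solve/Durand_Kerner on x**2 - 3*x + 2.
    class _QuadraticStandIn:
        term_matrix = [['constant', 'x'], [1, 2], [-3, 1], [2, 0]]
        def degree(self):
            return 2
        def __call__(self, x):
            return x ** 2 - 3 * x + 2
    print(solve(_QuadraticStandIn()))          # closed-form roots: (2.0, 1.0)
    print(Durand_Kerner(_QuadraticStandIn()))  # numerical approximation of the same roots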
| [] |
5Points7Edges/manim | manim/mobject/svg/style_utils.py | 1c2a5099133dbf0abdd5517b2ac93cfc8275b842 | """Utility functions for parsing SVG styles."""
__all__ = ["cascade_element_style", "parse_style", "parse_color_string"]
from xml.dom.minidom import Element as MinidomElement
from colour import web2hex
from ...utils.color import rgb_to_hex
from typing import Dict, List
CASCADING_STYLING_ATTRIBUTES: List[str] = [
"fill",
"stroke",
"fill-opacity",
"stroke-opacity",
]
# The default styling specifications for SVG images,
# according to https://www.w3.org/TR/SVG/painting.html
# (ctrl-F for "initial")
SVG_DEFAULT_ATTRIBUTES: Dict[str, str] = {
"fill": "black",
"fill-opacity": "1",
"stroke": "none",
"stroke-opacity": "1",
}
def cascade_element_style(
element: MinidomElement, inherited: Dict[str, str]
) -> Dict[str, str]:
"""Collect the element's style attributes based upon both its inheritance and its own attributes.
SVG uses cascading element styles. A closer ancestor's style takes precedence over a more distant ancestor's
style. In order to correctly calculate the styles, the attributes are passed down through the inheritance tree,
updating where necessary.
    Note that this method only copies the values and does not parse them. See :meth:`parse_style` for converting
    from SVG attributes to manim keyword arguments.
Parameters
----------
element : :class:`MinidomElement`
Element of the SVG parse tree
inherited : :class:`dict`
Dictionary of SVG attributes inherited from the parent element.
Returns
-------
:class:`dict`
Dictionary mapping svg attributes to values with `element`'s values overriding inherited values.
"""
style = inherited.copy()
# cascade the regular elements.
for attr in CASCADING_STYLING_ATTRIBUTES:
entry = element.getAttribute(attr)
if entry:
style[attr] = entry
# the style attribute should be handled separately in order to
# break it up nicely. furthermore, style takes priority over other
# attributes in the same element.
style_specs = element.getAttribute("style")
if style_specs:
for style_spec in style_specs.split(";"):
try:
key, value = style_spec.split(":")
except ValueError as e:
if not style_spec.strip():
# there was just a stray semicolon at the end, producing an emptystring
pass
else:
raise e
else:
style[key.strip()] = value.strip()
return style
def parse_color_string(color_spec: str) -> str:
"""Handle the SVG-specific color strings and convert them to HTML #rrggbb format.
Parameters
----------
color_spec : :class:`str`
String in any web-compatible format
Returns
-------
:class:`str`
Hexadecimal color string in the format `#rrggbb`
"""
if color_spec[0:3] == "rgb":
# these are only in integer form, but the Colour module wants them in floats.
splits = color_spec[4:-1].split(",")
if splits[0][-1] == "%":
# if the last character of the first number is a percentage,
# then interpret the number as a percentage
parsed_rgbs = [float(i[:-1]) / 100.0 for i in splits]
else:
parsed_rgbs = [int(i) / 255.0 for i in splits]
hex_color = rgb_to_hex(parsed_rgbs)
elif color_spec[0] == "#":
# its OK, parse as hex color standard.
hex_color = color_spec
else:
# attempt to convert color names like "red" to hex color
hex_color = web2hex(color_spec, force_long=True)
return hex_color
def fill_default_values(svg_style: Dict) -> None:
"""
Fill in the default values for properties of SVG elements,
if they are not currently set in the style dictionary.
Parameters
----------
svg_style : :class:`dict`
Style dictionary with SVG property names. Some may be missing.
Returns
-------
    None
        ``svg_style`` is updated in place so that none of the default attributes are missing.
"""
for key in SVG_DEFAULT_ATTRIBUTES:
if key not in svg_style:
svg_style[key] = SVG_DEFAULT_ATTRIBUTES[key]
def parse_style(svg_style: Dict[str, str]) -> Dict:
"""Convert a dictionary of SVG attributes to Manim VMobject keyword arguments.
Parameters
----------
svg_style : :class:`dict`
Style attributes as a string-to-string dictionary. Keys are valid SVG element attributes (fill, stroke, etc)
Returns
-------
:class:`dict`
Style attributes, but in manim kwargs form, e.g., keys are fill_color, stroke_color
"""
manim_style = {}
fill_default_values(svg_style)
if "fill-opacity" in svg_style:
manim_style["fill_opacity"] = float(svg_style["fill-opacity"])
if "stroke-opacity" in svg_style:
manim_style["stroke_opacity"] = float(svg_style["stroke-opacity"])
# nones need to be handled specially
if "fill" in svg_style:
if svg_style["fill"] == "none":
manim_style["fill_opacity"] = 0
else:
manim_style["fill_color"] = parse_color_string(svg_style["fill"])
if "stroke" in svg_style:
if svg_style["stroke"] == "none":
# In order to not break animations.creation.Write,
# we interpret no stroke as stroke-width of zero and
# color the same as the fill color, if it exists.
manim_style["stroke_width"] = 0
if "fill_color" in manim_style:
manim_style["stroke_color"] = manim_style["fill_color"]
else:
manim_style["stroke_color"] = parse_color_string(svg_style["stroke"])
return manim_style
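# Minimal usage sketch (not part of the original module): feeding parse_style a hand-written
# attribute dict of the kind cascade_element_style returns, e.g.
#   parse_style({"fill": "rgb(255, 0, 0)", "stroke": "none", "fill-opacity": "0.5"})
# yields fill_opacity=0.5, a red fill_color hex string and, because stroke is "none",
# stroke_width=0 with stroke_color reusing the fill color.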
| [((118, 20, 118, 56), 'colour.web2hex', 'web2hex', (), '', False, 'from colour import web2hex\n')] |
mm011106/iotrigger | iotrigger.py | 643ced0440a8c4fb95ade56399f813c88ac8ddd6 | #!/usr/bin/env python
#coding:utf-8
import os
import RPi.GPIO as GPIO #
import json
from time import sleep #
from twython import Twython
f=open("tw_config.json",'r')
config=json.load(f)
f.close()
CONSUMER_KEY =config['consumer_key']
CONSUMER_SECRET =config['consumer_secret']
ACCESS_TOKEN =config['access_token']
ACCESS_SECRET =config['access_secret']
dist=config['dist']
def on_positive_edge(channel):
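    # Edge callback registered via GPIO.add_event_detect below: builds a timestamped CPU
    # temperature reading from vcgencmd and, for events on the trigger input, toggles the
    # LED on GPIO 25 and sends the reading as a Twitter direct message to `dist`.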
#time stamp
timestamp = 'date +%F_%H:%M:%S'
current_time=os.popen(timestamp).readline().strip()
# get CPU temperature
cmd = '/opt/vc/bin/vcgencmd measure_temp'
line = os.popen(cmd).readline().strip()
temp = line.split('=')[1].split("'")[0]
direct_message='CPU:'+temp+'deg @'+current_time+' : by Python script'
global ledstate
if channel == trigger_input:
ledstate = not ledstate
GPIO.output(25, ledstate)
api.send_direct_message(text=direct_message ,screen_name=dist)
api = Twython(CONSUMER_KEY,CONSUMER_SECRET,ACCESS_TOKEN,ACCESS_SECRET)
trigger_input=21
GPIO.setmode(GPIO.BCM)
GPIO.setup(25, GPIO.OUT)
GPIO.setup(trigger_input, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.add_event_detect(trigger_input, GPIO.RISING, callback=on_positive_edge, bouncetime=1000)
ledstate = GPIO.LOW
try:
while True:
sleep(0.01)
except KeyboardInterrupt: #
pass
GPIO.cleanup() #
| [((11, 7, 11, 19), 'json.load', 'json.load', ({(11, 17, 11, 18): 'f'}, {}), '(f)', False, 'import json\n'), ((42, 6, 42, 70), 'twython.Twython', 'Twython', ({(42, 14, 42, 26): 'CONSUMER_KEY', (42, 27, 42, 42): 'CONSUMER_SECRET', (42, 43, 42, 55): 'ACCESS_TOKEN', (42, 56, 42, 69): 'ACCESS_SECRET'}, {}), '(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET)', False, 'from twython import Twython\n'), ((46, 0, 46, 22), 'RPi.GPIO.setmode', 'GPIO.setmode', ({(46, 13, 46, 21): 'GPIO.BCM'}, {}), '(GPIO.BCM)', True, 'import RPi.GPIO as GPIO\n'), ((47, 0, 47, 24), 'RPi.GPIO.setup', 'GPIO.setup', ({(47, 11, 47, 13): '(25)', (47, 15, 47, 23): 'GPIO.OUT'}, {}), '(25, GPIO.OUT)', True, 'import RPi.GPIO as GPIO\n'), ((48, 0, 48, 60), 'RPi.GPIO.setup', 'GPIO.setup', (), '', True, 'import RPi.GPIO as GPIO\n'), ((50, 0, 50, 93), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (), '', True, 'import RPi.GPIO as GPIO\n'), ((61, 0, 61, 14), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ({}, {}), '()', True, 'import RPi.GPIO as GPIO\n'), ((38, 8, 38, 33), 'RPi.GPIO.output', 'GPIO.output', ({(38, 20, 38, 22): '(25)', (38, 24, 38, 32): 'ledstate'}, {}), '(25, ledstate)', True, 'import RPi.GPIO as GPIO\n'), ((56, 8, 56, 19), 'time.sleep', 'sleep', ({(56, 14, 56, 18): '(0.01)'}, {}), '(0.01)', False, 'from time import sleep\n'), ((24, 17, 24, 36), 'os.popen', 'os.popen', ({(24, 26, 24, 35): 'timestamp'}, {}), '(timestamp)', False, 'import os\n'), ((29, 11, 29, 24), 'os.popen', 'os.popen', ({(29, 20, 29, 23): 'cmd'}, {}), '(cmd)', False, 'import os\n')] |
nxsofsys/wheezy.template | src/wheezy/template/tests/test_utils.py | b65b70b2927974790ff2413843ec752dd9c6c609 |
""" Unit tests for ``wheezy.templates.utils``.
"""
import unittest
class FindAllBalancedTestCase(unittest.TestCase):
""" Test the ``find_all_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``([`` return.
"""
from wheezy.template.utils import find_all_balanced
assert 0 == find_all_balanced('test([', 0)
assert 3 == find_all_balanced('test([', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 4 == find_all_balanced('test(a, b', 4)
assert 4 == find_all_balanced('test[a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_all_balanced
assert 10 == find_all_balanced('test(a, b)', 4)
assert 13 == find_all_balanced('test(a, b)[0]', 4)
assert 12 == find_all_balanced('test(a, b())', 4)
assert 17 == find_all_balanced('test(a, b())[0]()', 4)
class FindBalancedTestCase(unittest.TestCase):
""" Test the ``find_balanced``.
"""
def test_start_out(self):
""" The start index is out of range.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test', 10)
def test_start_separator(self):
""" If text doesn't start with ``start_sep`` return.
"""
from wheezy.template.utils import find_balanced
assert 0 == find_balanced('test(', 0)
assert 3 == find_balanced('test(', 3)
def test_not_balanced(self):
""" Separators are not balanced.
"""
from wheezy.template.utils import find_balanced
assert 4 == find_balanced('test(a, b', 4)
assert 4 == find_balanced('test(a, b()', 4)
def test_balanced(self):
""" Separators are balanced.
"""
from wheezy.template.utils import find_balanced
assert 10 == find_balanced('test(a, b)', 4)
assert 12 == find_balanced('test(a, b())', 4)
| [((16, 21, 16, 50), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(16, 39, 16, 45): '"""test"""', (16, 47, 16, 49): '(10)'}, {}), "('test', 10)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((22, 20, 22, 50), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(22, 38, 22, 46): '"""test(["""', (22, 48, 22, 49): '(0)'}, {}), "('test([', 0)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((23, 20, 23, 50), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(23, 38, 23, 46): '"""test(["""', (23, 48, 23, 49): '(3)'}, {}), "('test([', 3)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((29, 20, 29, 53), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(29, 38, 29, 49): '"""test(a, b"""', (29, 51, 29, 52): '(4)'}, {}), "('test(a, b', 4)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((30, 20, 30, 55), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(30, 38, 30, 51): '"""test[a, b()"""', (30, 53, 30, 54): '(4)'}, {}), "('test[a, b()', 4)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((36, 21, 36, 55), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(36, 39, 36, 51): '"""test(a, b)"""', (36, 53, 36, 54): '(4)'}, {}), "('test(a, b)', 4)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((37, 21, 37, 58), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(37, 39, 37, 54): '"""test(a, b)[0]"""', (37, 56, 37, 57): '(4)'}, {}), "('test(a, b)[0]', 4)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((38, 21, 38, 57), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(38, 39, 38, 53): '"""test(a, b())"""', (38, 55, 38, 56): '(4)'}, {}), "('test(a, b())', 4)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((39, 21, 39, 62), 'wheezy.template.utils.find_all_balanced', 'find_all_balanced', ({(39, 39, 39, 58): '"""test(a, b())[0]()"""', (39, 60, 39, 61): '(4)'}, {}), "('test(a, b())[0]()', 4)", False, 'from wheezy.template.utils import find_all_balanced\n'), ((50, 21, 50, 46), 'wheezy.template.utils.find_balanced', 'find_balanced', ({(50, 35, 50, 41): '"""test"""', (50, 43, 50, 45): '(10)'}, {}), "('test', 10)", False, 'from wheezy.template.utils import find_balanced\n'), ((56, 20, 56, 45), 'wheezy.template.utils.find_balanced', 'find_balanced', ({(56, 34, 56, 41): '"""test("""', (56, 43, 56, 44): '(0)'}, {}), "('test(', 0)", False, 'from wheezy.template.utils import find_balanced\n'), ((57, 20, 57, 45), 'wheezy.template.utils.find_balanced', 'find_balanced', ({(57, 34, 57, 41): '"""test("""', (57, 43, 57, 44): '(3)'}, {}), "('test(', 3)", False, 'from wheezy.template.utils import find_balanced\n'), ((63, 20, 63, 49), 'wheezy.template.utils.find_balanced', 'find_balanced', ({(63, 34, 63, 45): '"""test(a, b"""', (63, 47, 63, 48): '(4)'}, {}), "('test(a, b', 4)", False, 'from wheezy.template.utils import find_balanced\n'), ((64, 20, 64, 51), 'wheezy.template.utils.find_balanced', 'find_balanced', ({(64, 34, 64, 47): '"""test(a, b()"""', (64, 49, 64, 50): '(4)'}, {}), "('test(a, b()', 4)", False, 'from wheezy.template.utils import find_balanced\n'), ((70, 21, 70, 51), 'wheezy.template.utils.find_balanced', 'find_balanced', ({(70, 35, 70, 47): '"""test(a, b)"""', (70, 49, 70, 50): '(4)'}, {}), "('test(a, b)', 4)", False, 'from wheezy.template.utils import find_balanced\n'), ((71, 21, 71, 53), 
'wheezy.template.utils.find_balanced', 'find_balanced', ({(71, 35, 71, 49): '"""test(a, b())"""', (71, 51, 71, 52): '(4)'}, {}), "('test(a, b())', 4)", False, 'from wheezy.template.utils import find_balanced\n')] |
peterrosetu/akshare | akshare/economic/macro_constitute.py | 9eac9ccb531b6e07d39140830d65349ea9441dc3 | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2019/10/21 12:08
Desc: Fetch Jin10 data - Data Center - Major Institutions - Macroeconomics
"""
import json
import time
import pandas as pd
import requests
from tqdm import tqdm
from akshare.economic.cons import (
JS_CONS_GOLD_ETF_URL,
JS_CONS_SLIVER_ETF_URL,
JS_CONS_OPEC_URL,
)
def macro_cons_gold_volume():
"""
    Holdings report of the world's largest gold ETF, SPDR Gold Trust; data available from 20041118 to date
:return: pandas.Series
2004-11-18 8.09
2004-11-19 57.85
2004-11-22 87.09
2004-11-23 87.09
2004-11-24 96.42
...
2019-10-20 924.64
2019-10-21 924.64
2019-10-22 919.66
2019-10-23 918.48
2019-10-24 918.48
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总库存(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
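    # Merge the legacy JS feed with the newer datacenter API data and keep the latest record for each date.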
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_volume"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_gold_change():
"""
    SPDR Gold Trust (the world's largest gold ETF) holdings report; data available from 20041118 to date
:return: pandas.Series
2004-11-18 0
2004-11-19 49.76
2004-11-22 29.24
2004-11-23 0.00
2004-11-24 9.33
...
2019-10-20 0.00
2019-10-21 0.00
2019-10-22 -4.98
2019-10-23 -1.18
2019-10-24 0.00
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["增持/减持(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_change"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_gold_amount():
"""
    SPDR Gold Trust (the world's largest gold ETF) holdings report; data available from 20041118 to date
:return: pandas.Series
2004-11-18 114920000.00
2004-11-19 828806907.20
2004-11-22 1253785205.50
2004-11-23 1254751438.19
2004-11-24 1390568824.08
...
2019-10-20 44286078486.23
2019-10-21 44333677232.68
2019-10-22 43907962483.56
2019-10-23 44120217405.82
2019-10-24 44120217405.82
"""
t = time.time()
res = requests.get(
JS_CONS_GOLD_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["黄金"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "gold_amount"
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_volume():
"""
    iShares Silver Trust (the world's largest silver ETF) holdings report; data available from 20060429 to date
:return: pandas.Series
2006-04-29 653.17
2006-05-02 653.17
2006-05-03 995.28
2006-05-04 1197.43
2006-05-05 1306.29
...
2019-10-17 11847.91
2019-10-18 11847.91
2019-10-21 11813.02
2019-10-22 11751.96
2019-10-23 11751.96
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总库存(吨)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 1]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_volume"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总库存"]
temp_append_df.name = "silver_volume"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_change():
"""
    iShares Silver Trust (the world's largest silver ETF) holdings report; data available from 20060429 to date
:return: pandas.Series
2006-04-29 0
2006-05-02 0.00
2006-05-03 342.11
2006-05-04 202.15
2006-05-05 108.86
...
2019-10-17 -58.16
2019-10-18 0.00
2019-10-21 -34.89
2019-10-22 -61.06
2019-10-23 0.00
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["增持/减持(吨)"]
temp_df.name = "silver_change"
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 2]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_change"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["增持/减持"]
temp_append_df.name = "silver_change"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_silver_amount():
"""
    iShares Silver Trust (the world's largest silver ETF) holdings report; data available from 20060429 to date
:return: pandas.Series
2006-04-29 263651152
2006-05-02 263651152
2006-05-03 445408550
2006-05-04 555123947
2006-05-05 574713264
...
2019-10-17 Show All
2019-10-18 Show All
2019-10-21 Show All
2019-10-22 Show All
2019-10-23 Show All
"""
t = time.time()
res = requests.get(
JS_CONS_SLIVER_ETF_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["白银"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["总价值(美元)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "etf",
"attr_id": "2",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, [0, 3]]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
temp_df = temp_df.append(temp_se)
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", keep="last", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "silver_amount"
url = "https://cdn.jin10.com/data_center/reports/etf_2.json"
r = requests.get(url)
data_json = r.json()
append_temp_df = pd.DataFrame(data_json["values"]).T
append_temp_df.columns = [item["name"] for item in data_json["keys"]]
temp_append_df = append_temp_df["总价值"]
temp_append_df.name = "silver_amount"
temp_df = temp_df.reset_index()
temp_df["index"] = temp_df["index"].astype(str)
temp_df = temp_df.append(temp_append_df.reset_index())
temp_df.drop_duplicates(subset=["index"], keep="last", inplace=True)
temp_df.index = pd.to_datetime(temp_df["index"])
del temp_df["index"]
temp_df = temp_df[temp_df != 'Show All']
temp_df.sort_index(inplace=True)
temp_df = temp_df.astype(float)
return temp_df
def macro_cons_opec_near_change():
"""
    OPEC report - month-over-month changes; data available from 20170118 to date
    :return: pandas.DataFrame
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 -0.87 3.56 -0.25 -0.87 0.95 4.26 0.20 3.13 -11.35
2017-02-13 -4.17 -2.32 -1.67 -1.00 5.02 -16.57 -14.12 6.47 10.18
2017-03-14 -0.02 -1.82 -0.44 -0.69 3.61 -6.20 -0.93 -1.11 5.80
2017-04-12 0.45 -1.87 -0.28 0.19 -2.87 -0.85 -0.95 -6.08 -2.98
2017-05-11 -0.75 9.71 -0.06 0.88 -3.47 -3.91 0.03 -6.16 5.08
2017-06-13 0.96 -5.42 0.22 -0.13 0.45 4.44 0.00 17.82 17.42
2017-07-12 -0.09 6.60 -0.21 -0.77 1.67 6.06 -0.02 12.70 9.67
2017-08-10 -0.10 -1.93 0.85 0.71 0.69 -3.31 -0.74 15.43 3.43
2017-09-12 0.41 0.83 -0.03 -3.23 -0.23 -2.31 0.01 -11.23 13.83
2017-10-11 -0.85 -0.29 -0.05 1.44 0.09 3.16 -0.17 5.39 5.08
2017-11-13 -3.84 6.98 0.71 0.18 -1.13 -13.10 -0.37 4.23 -5.44
2017-12-13 1.41 -10.87 -0.51 -0.47 -0.22 0.10 -0.53 0.61 9.58
2018-01-18 3.03 4.48 -0.72 -0.01 1.32 0.79 -0.25 -0.70 7.57
2018-04-12 -4.95 -8.17 0.26 -0.91 0.33 -1.31 0.23 -3.72 1.82
2018-05-14 1.77 -0.78 0.31 -0.93 1.00 -0.07 0.08 0.69 -0.83
2018-06-12 3.90 1.40 0.06 0.18 0.56 2.77 -0.57 -2.43 -5.35
2018-07-11 0.46 -8.83 -0.09 0.35 -2.27 7.15 2.73 -25.43 2.78
2018-08-13 1.38 1.17 0.42 -0.34 -5.63 2.41 7.85 -5.67 7.05
2018-09-12 -1.40 -0.80 0.40 18.80 -15.00 9.00 0.80 25.60 7.40
2018-10-11 -0.80 5.70 53.10 -0.10 -15.00 0.80 0.60 10.30 2.60
2018-11-13 -0.40 2.20 -0.30 0.30 -15.60 465.30 -3.30 6.00 -1.70
2018-12-12 -0.50 0.30 0.10 -1.10 -38.00 -2.30 4.50 -1.10 -3.00
2019-03-14 0.20 2.20 0.50 0.70 1.20 -7.00 -1.40 2.30 1.00
2019-04-10 -0.70 0.70 52.40 0.90 -2.80 -12.60 -0.10 19.60 1.10
2019-06-13 0.60 7.40 -0.10 2.30 -22.70 9.40 1.30 -0.30 -9.20
沙特 阿联酋 委内瑞拉 欧佩克产量
2017-01-18 -14.93 -0.63 -4.52 -22.09
2017-02-13 -49.62 -15.93 -3.05 -89.02
2017-03-14 -6.81 -3.69 -1.60 -13.95
2017-04-12 4.16 -3.27 -2.59 -15.27
2017-05-11 4.92 -6.23 -2.60 -1.82
2017-06-13 0.23 -1.80 -0.77 33.61
2017-07-12 5.13 -0.07 -1.36 39.35
2017-08-10 3.18 -0.67 -1.58 17.26
2017-09-12 -1.03 -2.02 -3.19 -7.91
2017-10-11 -0.07 -0.84 -5.19 8.85
2017-11-13 1.69 -0.60 -4.36 -15.09
2017-12-13 -4.54 -3.55 -4.16 -13.35
2018-01-18 -1.09 -0.70 -8.22 4.24
2018-04-12 -4.69 4.49 -5.53 -20.14
2018-05-14 4.65 0.61 -4.17 1.21
2018-06-12 8.55 -0.63 -4.25 3.54
2018-07-11 40.54 3.51 -4.75 17.34
2018-08-13 -5.28 6.92 -4.77 4.07
2018-09-12 3.80 1.20 -3.60 27.80
2018-10-11 10.80 3.00 -4.20 13.20
2018-11-13 12.70 14.20 -4.00 12.70
2018-12-12 37.70 7.10 -5.20 -1.10
2019-03-14 -8.60 -0.40 -14.20 -22.10
2019-04-10 -32.40 -0.90 -28.90 -53.40
2019-06-13 -7.60 0.30 -3.50 -23.60
"""
t = time.time()
big_df = pd.DataFrame()
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
                       headers=headers)  # date sequence
all_date_list = res.json()["data"]
bar = tqdm(reversed(all_date_list[:-1]))
for item in bar:
        bar.set_description(f"Please wait for a moment, now downloading {item}'s data")
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
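        # "厄瓜多尔" (Ecuador) is left out of the selected columns since it is no longer reported regularly (see macro_cons_opec_month).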
try:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-1, :]
except:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-1, :]
big_df[temp_df.name] = temp_df
big_df = big_df.T
big_df.columns.name = "日期"
big_df = big_df.astype(float)
return big_df
def _macro_cons_opec_month():
"""
    OPEC report - monthly; data available from 20170118 to date
    The value at each index date refers to the previous month; because some countries
    have missing data, only countries with data are returned
    :return: pandas.DataFrame
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6
2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8
2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5
2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8
2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0
2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3
2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8
2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1
2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5
2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8
2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0
2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1
2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0
2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1
2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1
2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0
2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7
2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5
2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8
2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1
2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6
2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1
2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3
2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3
沙特 阿联酋 委内瑞拉 欧佩克产量
2017-01-18 1047.4 307.1 202.1 3308.5
2017-02-13 994.6 293.1 200.4 3213.9
2017-03-14 979.7 292.5 198.7 3195.8
2017-04-12 999.4 289.5 197.2 3192.8
2017-05-11 995.4 284.2 195.6 3173.2
2017-06-13 994.0 288.5 196.3 3213.9
2017-07-12 995.0 289.8 193.8 3261.1
2017-08-10 1006.7 290.5 193.2 3286.9
2017-09-12 1002.2 290.1 191.8 3275.5
2017-10-11 997.5 290.5 189.0 3274.8
2017-11-13 1000.0 291.1 186.3 3258.9
2017-12-13 999.6 288.3 183.4 3244.8
2018-01-18 991.8 287.8 174.5 3241.6
2018-04-12 993.4 286.4 148.8 3195.8
2018-05-14 995.9 287.2 143.6 3193.0
2018-06-12 998.7 286.5 139.2 3186.9
2018-07-11 1042.0 289.7 134.0 3232.7
2018-08-13 1038.7 295.9 127.8 3232.3
2018-09-12 1040.1 297.2 123.5 3256.5
2018-10-11 1051.2 300.4 119.7 3276.1
2018-11-13 1063.0 316.0 117.1 3290.0
2018-12-12 1101.6 324.6 113.7 3296.5
2019-03-14 1008.7 307.2 100.8 3054.9
2019-04-10 979.4 305.9 73.2 3002.2
2019-06-13 969.0 306.1 74.1 2987.6
"""
t = time.time()
res = requests.get(
JS_CONS_OPEC_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{"): res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
big_df = pd.DataFrame()
for country in [item["datas"] for item in json_data["list"]][0].keys():
try:
value_list = [item["datas"][country] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["上个月"]
temp_df.name = country
big_df = big_df.append(temp_df)
except:
continue
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
                       headers=headers)  # date sequence
all_date_list = res.json()["data"]
need_date_list = [item for item in all_date_list if
item.split("-")[0] + item.split("-")[1] + item.split("-")[2] not in date_list]
for item in reversed(need_date_list):
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df[['阿尔及利亚', '安哥拉', '厄瓜多尔', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-2, :]
big_df[item] = temp_df
return big_df.T
def macro_cons_opec_month():
"""
    OPEC report - monthly; data available from 20170118 to date
    The value at each index date refers to the previous month; because some countries have missing data,
    only countries with data are returned
    20200312 fix: "厄瓜多尔" (Ecuador) has not updated its data for several months, so it is excluded here
https://datacenter.jin10.com/reportType/dc_opec_report
    :return: pandas.DataFrame
阿尔及利亚 安哥拉 厄瓜多尔 加蓬 伊朗 伊拉克 科威特 利比亚 尼日利亚 \
2017-01-18 108.0 172.4 54.5 21.3 372.0 463.2 281.2 60.8 154.2
2017-02-13 104.5 165.1 52.7 19.9 377.5 447.6 271.8 67.5 157.6
2017-03-14 105.3 164.1 52.6 19.4 381.4 441.4 270.9 66.9 160.8
2017-04-12 105.6 161.4 52.6 19.8 379.0 440.2 270.2 62.2 154.5
2017-05-11 104.7 169.2 52.4 20.6 375.9 437.3 270.2 55.0 150.8
2017-06-13 105.9 161.3 52.8 20.4 379.5 442.4 270.5 73.0 168.0
2017-07-12 106.0 166.8 52.7 19.7 379.0 450.2 270.9 85.2 173.3
2017-08-10 105.9 164.6 53.6 20.5 382.4 446.8 270.3 100.1 174.8
2017-09-12 106.5 164.6 53.7 17.3 382.8 444.8 270.2 89.0 186.1
2017-10-11 104.6 164.1 53.6 20.1 382.7 449.4 270.0 92.3 185.5
2017-11-13 101.2 171.1 54.1 20.3 382.3 438.3 270.8 96.2 173.8
2017-12-13 101.3 158.1 53.3 19.7 381.8 439.6 270.3 97.3 179.0
2018-01-18 103.7 163.3 52.6 19.7 382.9 440.5 270.0 96.2 186.1
2018-04-12 98.4 152.4 51.8 18.3 381.4 442.6 270.4 96.8 181.0
2018-05-14 99.7 151.5 52.0 18.3 382.3 442.9 270.5 98.2 179.1
2018-06-12 103.1 152.5 51.9 18.9 382.9 445.5 270.1 95.5 171.1
2018-07-11 103.9 143.1 51.9 19.0 379.9 453.3 273.1 70.8 166.0
2018-08-13 106.2 145.6 52.5 18.8 373.7 455.6 279.1 66.4 166.7
2018-09-12 104.5 144.8 52.9 18.7 358.4 464.9 280.2 92.6 172.5
2018-10-11 104.9 151.9 53.1 18.7 344.7 465.0 281.2 105.3 174.8
2018-11-13 105.4 153.3 52.5 18.6 329.6 465.4 276.4 111.4 175.1
2018-12-12 105.2 152.1 52.5 17.6 295.4 463.1 280.9 110.4 173.6
2019-03-14 102.6 145.7 52.2 20.3 274.3 463.3 270.9 90.6 174.1
2019-04-10 101.8 145.4 52.4 21.4 269.8 452.2 270.9 109.8 173.3
2019-06-13 102.9 147.1 52.9 21.1 237.0 472.4 271.0 117.4 173.3
沙特 阿联酋 委内瑞拉 欧佩克产量
2017-01-18 1047.4 307.1 202.1 3308.5
2017-02-13 994.6 293.1 200.4 3213.9
2017-03-14 979.7 292.5 198.7 3195.8
2017-04-12 999.4 289.5 197.2 3192.8
2017-05-11 995.4 284.2 195.6 3173.2
2017-06-13 994.0 288.5 196.3 3213.9
2017-07-12 995.0 289.8 193.8 3261.1
2017-08-10 1006.7 290.5 193.2 3286.9
2017-09-12 1002.2 290.1 191.8 3275.5
2017-10-11 997.5 290.5 189.0 3274.8
2017-11-13 1000.0 291.1 186.3 3258.9
2017-12-13 999.6 288.3 183.4 3244.8
2018-01-18 991.8 287.8 174.5 3241.6
2018-04-12 993.4 286.4 148.8 3195.8
2018-05-14 995.9 287.2 143.6 3193.0
2018-06-12 998.7 286.5 139.2 3186.9
2018-07-11 1042.0 289.7 134.0 3232.7
2018-08-13 1038.7 295.9 127.8 3232.3
2018-09-12 1040.1 297.2 123.5 3256.5
2018-10-11 1051.2 300.4 119.7 3276.1
2018-11-13 1063.0 316.0 117.1 3290.0
2018-12-12 1101.6 324.6 113.7 3296.5
2019-03-14 1008.7 307.2 100.8 3054.9
2019-04-10 979.4 305.9 73.2 3002.2
2019-06-13 969.0 306.1 74.1 2987.6
"""
t = time.time()
big_df = pd.DataFrame()
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_opec_report",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
res = requests.get(f"https://datacenter-api.jin10.com/reports/dates?category=opec&_={str(int(round(t * 1000)))}",
                       headers=headers)  # date sequence
all_date_list = res.json()["data"]
bar = tqdm(reversed(all_date_list))
for item in bar:
        bar.set_description(f"Please wait for a moment, now downloading {item}'s data")
res = requests.get(
f"https://datacenter-api.jin10.com/reports/list?category=opec&date={item}&_={str(int(round(t * 1000)))}",
headers=headers)
temp_df = pd.DataFrame(res.json()["data"]["values"],
columns=pd.DataFrame(res.json()["data"]["keys"])["name"].tolist()).T
temp_df.columns = temp_df.iloc[0, :]
temp_df = temp_df.iloc[1:, :]
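        # Prefer the second-to-last row (the previous month's figures) and fall back to the last row
        # when the report for that date only contains a single data row.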
try:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-2, :]
except:
temp_df = temp_df[['阿尔及利亚', '安哥拉', '加蓬', '伊朗', '伊拉克', '科威特', '利比亚', '尼日利亚', '沙特',
'阿联酋', '委内瑞拉', '欧佩克产量']].iloc[-1, :]
big_df[temp_df.name] = temp_df
big_df = big_df.T
big_df.columns.name = "日期"
big_df = big_df.astype(float)
return big_df
if __name__ == "__main__":
macro_cons_gold_volume_df = macro_cons_gold_volume()
print(macro_cons_gold_volume_df)
macro_cons_gold_change_df = macro_cons_gold_change()
print(macro_cons_gold_change_df)
macro_cons_gold_amount_df = macro_cons_gold_amount()
print(macro_cons_gold_amount_df)
print(pd.concat([macro_cons_gold_volume_df, macro_cons_gold_change_df, macro_cons_gold_amount_df], axis=1))
macro_cons_silver_volume_df = macro_cons_silver_volume()
print(macro_cons_silver_volume_df)
macro_cons_silver_change_df = macro_cons_silver_change()
print(macro_cons_silver_change_df)
macro_cons_silver_amount_df = macro_cons_silver_amount()
print(macro_cons_silver_amount_df)
print(pd.concat([macro_cons_silver_volume_df, macro_cons_silver_change_df, macro_cons_silver_amount_df], axis=1))
macro_cons_opec_near_change_df = macro_cons_opec_near_change()
print(macro_cons_opec_near_change_df)
macro_cons_opec_month_df = macro_cons_opec_month()
print(macro_cons_opec_month_df)
| [((38, 8, 38, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((47, 15, 47, 39), 'pandas.DataFrame', 'pd.DataFrame', ({(47, 28, 47, 38): 'value_list'}, {}), '(value_list)', True, 'import pandas as pd\n'), ((49, 21, 49, 46), 'pandas.to_datetime', 'pd.to_datetime', ({(49, 36, 49, 45): 'date_list'}, {}), '(date_list)', True, 'import pandas as pd\n'), ((74, 8, 74, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((76, 20, 76, 54), 'pandas.to_datetime', 'pd.to_datetime', ({(76, 35, 76, 53): 'temp_se.iloc[:, (0)]'}, {}), '(temp_se.iloc[:, (0)])', True, 'import pandas as pd\n'), ((107, 8, 107, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((116, 15, 116, 39), 'pandas.DataFrame', 'pd.DataFrame', ({(116, 28, 116, 38): 'value_list'}, {}), '(value_list)', True, 'import pandas as pd\n'), ((118, 21, 118, 46), 'pandas.to_datetime', 'pd.to_datetime', ({(118, 36, 118, 45): 'date_list'}, {}), '(date_list)', True, 'import pandas as pd\n'), ((143, 8, 143, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((145, 20, 145, 54), 'pandas.to_datetime', 'pd.to_datetime', ({(145, 35, 145, 53): 'temp_se.iloc[:, (0)]'}, {}), '(temp_se.iloc[:, (0)])', True, 'import pandas as pd\n'), ((176, 8, 176, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((185, 15, 185, 39), 'pandas.DataFrame', 'pd.DataFrame', ({(185, 28, 185, 38): 'value_list'}, {}), '(value_list)', True, 'import pandas as pd\n'), ((187, 21, 187, 46), 'pandas.to_datetime', 'pd.to_datetime', ({(187, 36, 187, 45): 'date_list'}, {}), '(date_list)', True, 'import pandas as pd\n'), ((212, 8, 212, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((214, 20, 214, 54), 'pandas.to_datetime', 'pd.to_datetime', ({(214, 35, 214, 53): 'temp_se.iloc[:, (0)]'}, {}), '(temp_se.iloc[:, (0)])', True, 'import pandas as pd\n'), ((245, 8, 245, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((254, 15, 254, 39), 'pandas.DataFrame', 'pd.DataFrame', ({(254, 28, 254, 38): 'value_list'}, {}), '(value_list)', True, 'import pandas as pd\n'), ((256, 21, 256, 46), 'pandas.to_datetime', 'pd.to_datetime', ({(256, 36, 256, 45): 'date_list'}, {}), '(date_list)', True, 'import pandas as pd\n'), ((281, 8, 281, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((283, 20, 283, 54), 'pandas.to_datetime', 'pd.to_datetime', ({(283, 35, 283, 53): 'temp_se.iloc[:, (0)]'}, {}), '(temp_se.iloc[:, (0)])', True, 'import pandas as pd\n'), ((296, 8, 296, 25), 'requests.get', 'requests.get', ({(296, 21, 296, 24): 'url'}, {}), '(url)', False, 'import requests\n'), ((307, 20, 307, 52), 'pandas.to_datetime', 'pd.to_datetime', ({(307, 35, 307, 51): "temp_df['index']"}, {}), "(temp_df['index'])", True, 'import pandas as pd\n'), ((332, 8, 332, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((341, 15, 341, 39), 'pandas.DataFrame', 'pd.DataFrame', ({(341, 28, 341, 38): 'value_list'}, {}), '(value_list)', True, 'import pandas as pd\n'), ((343, 21, 343, 46), 'pandas.to_datetime', 'pd.to_datetime', ({(343, 36, 343, 45): 'date_list'}, {}), '(date_list)', True, 'import pandas as pd\n'), ((369, 8, 369, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((371, 20, 371, 54), 'pandas.to_datetime', 'pd.to_datetime', ({(371, 35, 371, 53): 'temp_se.iloc[:, (0)]'}, {}), '(temp_se.iloc[:, (0)])', True, 'import pandas as pd\n'), ((384, 8, 384, 25), 'requests.get', 'requests.get', ({(384, 21, 
384, 24): 'url'}, {}), '(url)', False, 'import requests\n'), ((395, 20, 395, 52), 'pandas.to_datetime', 'pd.to_datetime', ({(395, 35, 395, 51): "temp_df['index']"}, {}), "(temp_df['index'])", True, 'import pandas as pd\n'), ((420, 8, 420, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((429, 15, 429, 39), 'pandas.DataFrame', 'pd.DataFrame', ({(429, 28, 429, 38): 'value_list'}, {}), '(value_list)', True, 'import pandas as pd\n'), ((431, 21, 431, 46), 'pandas.to_datetime', 'pd.to_datetime', ({(431, 36, 431, 45): 'date_list'}, {}), '(date_list)', True, 'import pandas as pd\n'), ((456, 8, 456, 57), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((458, 20, 458, 54), 'pandas.to_datetime', 'pd.to_datetime', ({(458, 35, 458, 53): 'temp_se.iloc[:, (0)]'}, {}), '(temp_se.iloc[:, (0)])', True, 'import pandas as pd\n'), ((471, 8, 471, 25), 'requests.get', 'requests.get', ({(471, 21, 471, 24): 'url'}, {}), '(url)', False, 'import requests\n'), ((482, 20, 482, 52), 'pandas.to_datetime', 'pd.to_datetime', ({(482, 35, 482, 51): "temp_df['index']"}, {}), "(temp_df['index'])", True, 'import pandas as pd\n'), ((547, 8, 547, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((548, 13, 548, 27), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((650, 8, 650, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((658, 13, 658, 27), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((766, 8, 766, 19), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((767, 13, 767, 27), 'pandas.DataFrame', 'pd.DataFrame', ({}, {}), '()', True, 'import pandas as pd\n'), ((298, 21, 298, 54), 'pandas.DataFrame', 'pd.DataFrame', ({(298, 34, 298, 53): "data_json['values']"}, {}), "(data_json['values'])", True, 'import pandas as pd\n'), ((386, 21, 386, 54), 'pandas.DataFrame', 'pd.DataFrame', ({(386, 34, 386, 53): "data_json['values']"}, {}), "(data_json['values'])", True, 'import pandas as pd\n'), ((473, 21, 473, 54), 'pandas.DataFrame', 'pd.DataFrame', ({(473, 34, 473, 53): "data_json['values']"}, {}), "(data_json['values'])", True, 'import pandas as pd\n'), ((817, 10, 817, 110), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((825, 10, 825, 116), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((662, 23, 662, 47), 'pandas.DataFrame', 'pd.DataFrame', ({(662, 36, 662, 46): 'value_list'}, {}), '(value_list)', True, 'import pandas as pd\n'), ((664, 29, 664, 54), 'pandas.to_datetime', 'pd.to_datetime', ({(664, 44, 664, 53): 'date_list'}, {}), '(date_list)', True, 'import pandas as pd\n')] |
ABEMBARKA/monoUI | test/testers/winforms/scrollbar/__init__.py | 5fda266ad2db8f89580a40b525973d86cd8de939 |
##############################################################################
# Written by: Cachen Chen <[email protected]>
# Date: 08/06/2008
# Description: Application wrapper for scrollbar.py
# Used by the scrollbar-*.py tests
##############################################################################
'Application wrapper for scrollbar'
from strongwind import *
from os.path import exists
from sys import path
def launchScrollBar(exe=None):
    'Launch ScrollBar with accessibility enabled and return a ScrollBar object. Raise an IOError if the sample application cannot be found'
if exe is None:
# make sure we can find the sample application
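        # the UIAQA root is taken to be two directory levels above the harness directory (sys.path[0])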
harness_dir = path[0]
i = harness_dir.rfind("/")
j = harness_dir[:i].rfind("/")
uiaqa_path = harness_dir[:j]
if uiaqa_path is None:
raise IOError, "When launching an application you must provide the "\
"full path or set the\nUIAQA_HOME environment "\
"variable."
exe = '%s/samples/winforms/scrollbar.py' % uiaqa_path
    if not exists(exe):
raise IOError, "%s does not exist" % exe
args = [exe]
(app, subproc) = cache.launchApplication(args=args, name='ipy', wait=config.LONG_DELAY)
scrollbar = ScrollBar(app, subproc)
cache.addApplication(scrollbar)
scrollbar.scrollBarFrame.app = scrollbar
return scrollbar
# class to represent the application
class ScrollBar(accessibles.Application):
#checkShowing=False
def __init__(self, accessible, subproc=None):
'Get a reference to the scrollBar window'
super(ScrollBar, self).__init__(accessible, subproc)
self.findFrame(re.compile('^ScrollBar control'), logName='Scroll Bar')
| [] |
iglesiasmanu/data_analysis | save_tweets.py | 61127c91ad0eb11ecdc7258e186e430e9dddb0b6 | import json
from os import path
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener
from sqlalchemy.orm.exc import NoResultFound
from database import session, Tweet, Hashtag, User
consumer_key = "0qFf4T2xPWVIycLmAwk3rDQ55"
consumer_secret = "LcHpujASn4fIIrQ8sikbCTQ3oyU6T6opchFVWBBqwICahzSE64"
access_token = "4271002872-XLo7TNnE3qvYevqLmT1RBuiJ5CJ3o0DCr3WReAT"
acces_token_secret = "ulZ3dA25zuC6BGJgaFowCSTIm6gKVtOa4x9y7tO0IUDIx"
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, acces_token_secret)
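# NOTE: save_tweets() below calls _get_dir_absolute_path(), which the original module never defines.
# The helper here is an assumed, minimal sketch rather than the original implementation: it simply
# returns the directory containing this script so the call resolves and the module is runnable.
def _get_dir_absolute_path():
    return path.dirname(path.abspath(__file__))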
def save_tweets():
directory = _get_dir_absolute_path()
filepath = path.join(directory, "tweets.json")
listener = DatabaseListener(number_tweets_to_save = 1000, filepath=filepath)
stream = Stream(auth, listener)
languages = ("en",)
try:
stream.sample(languages = languages)
except KeyboardInterrupt:
listener.file.close()
class DatabaseListener(StreamListener):
def __init__(self, number_tweets_to_save, filepath = None):
self._final_count = number_tweets_to_save
self._current_count = 0
if filepath is None:
filepath = "tweets.txt"
self.file = open(filepath,"w")
    # Slightly dangerous due to circular references
def __del__(self):
self.file.close()
def on_data(self, raw_data):
data = json.loads(raw_data)
json.dump(raw_data, self.file)
self.file.write("\n")
if "in_reply_to_status_id" in data:
return self.on_status(data)
def on_status(self, data):
        # save_to_database is defined later in this file
save_to_database(data)
self._current_count += 1
print("status count: {}".format(self._current_count))
if self._current_count >= self._final_count:
return False
def create_user_helper(user_data):
#alias to shorten calls
u = user_data
    user = User(uid = u["id_str"],
name = u["name"],
screen_name = u["screen_name"],
created_at = u["created_at"],
description = u.get("description"),
followers_count = u["followers_count"],
statuses_count = u["statuses_count"],
favourites_count = u["favourites_count"],
listed_count = u["listed_count"],
geo_enabled = u["geo_enabled"],
lang = u.get("lang"))
return user
def create_tweet_helper(tweet_data, user):
#alias for shorten calls
t = tweet_data
retweet = True if t["text"][:3] == "RT " else False
coordinates = json.dumps(t["coordinates"])
tweet = Tweet(tid=t["id_str"],
tweet=t["text"],
user=user,
coordinates=coordinates,
created_at = t["created_at"],
favorite_count = t["favorite_count"],
in_reply_to_screen_name = t["in_reply_to_screen_name"],
in_reply_to_status_id = t["in_reply_to_status_id"],
in_reply_to_user_id = t["in_reply_to_user_id"],
lang = t.get("lang"),
quoted_status_id = t.get("quoted_status_id"),
retweet_count = t["retweet_count"],
source = t["source"],
is_retweet = retweet)
return tweet
def save_to_database(data):
try:
user = session.query(User).filter_by(id=str(data["user"]["id"])).one()
except NoResultFound:
user = create_user_helper(data["user"])
session.add(user)
hashtag_results = []
hashtags = data["entities"]["hashtags"]
for hashtag in hashtags:
hashtag = hashtag["text"].lower()
try:
            hashtag_obj = session.query(Hashtag).filter_by(text=hashtag).one()
        except NoResultFound:
hashtag_obj = Hashtag(text = hashtag)
session.add(hashtag_obj)
hashtag_results.append(hashtag_obj)
tweet = create_tweet_helper(data, user)
for hashtag in hashtag_results:
tweet.hashtags.append(hashtag)
session.add(tweet)
session.commit()
| [((16, 7, 16, 50), 'tweepy.OAuthHandler', 'OAuthHandler', ({(16, 20, 16, 32): 'consumer_key', (16, 34, 16, 49): 'consumer_secret'}, {}), '(consumer_key, consumer_secret)', False, 'from tweepy import OAuthHandler, Stream\n'), ((21, 15, 21, 50), 'os.path.join', 'path.join', ({(21, 25, 21, 34): 'directory', (21, 36, 21, 49): '"""tweets.json"""'}, {}), "(directory, 'tweets.json')", False, 'from os import path\n'), ((25, 13, 25, 35), 'tweepy.Stream', 'Stream', ({(25, 20, 25, 24): 'auth', (25, 26, 25, 34): 'listener'}, {}), '(auth, listener)', False, 'from tweepy import OAuthHandler, Stream\n'), ((82, 18, 82, 46), 'json.dumps', 'json.dumps', ({(82, 29, 82, 45): "t['coordinates']"}, {}), "(t['coordinates'])", False, 'import json\n'), ((46, 15, 46, 35), 'json.loads', 'json.loads', ({(46, 26, 46, 34): 'raw_data'}, {}), '(raw_data)', False, 'import json\n'), ((47, 8, 47, 38), 'json.dump', 'json.dump', ({(47, 18, 47, 26): 'raw_data', (47, 28, 47, 37): 'self.file'}, {}), '(raw_data, self.file)', False, 'import json\n'), ((125, 8, 125, 26), 'database.session.add', 'session.add', ({(125, 20, 125, 25): 'tweet'}, {}), '(tweet)', False, 'from database import session, Tweet, Hashtag, User\n'), ((126, 8, 126, 24), 'database.session.commit', 'session.commit', ({}, {}), '()', False, 'from database import session, Tweet, Hashtag, User\n'), ((105, 8, 105, 25), 'database.session.add', 'session.add', ({(105, 20, 105, 24): 'user'}, {}), '(user)', False, 'from database import session, Tweet, Hashtag, User\n'), ((115, 26, 115, 49), 'database.Hashtag', 'Hashtag', (), '', False, 'from database import session, Tweet, Hashtag, User\n'), ((116, 12, 116, 36), 'database.session.add', 'session.add', ({(116, 24, 116, 35): 'hashtag_obj'}, {}), '(hashtag_obj)', False, 'from database import session, Tweet, Hashtag, User\n'), ((102, 15, 102, 34), 'database.session.query', 'session.query', ({(102, 29, 102, 33): 'User'}, {}), '(User)', False, 'from database import session, Tweet, Hashtag, User\n'), ((112, 24, 112, 46), 'database.session.query', 'session.query', ({(112, 38, 112, 45): 'Hashtag'}, {}), '(Hashtag)', False, 'from database import session, Tweet, Hashtag, User\n')] |
chrisjws-harness/flaskSaaS | app/views/main.py | f42558c523de23f03a098044df164ead3539a4dd | from flask import render_template, jsonify
from app import app
import random
@app.route('/')
@app.route('/index')
def index():
# Feature flags init goes here!
#
# noinspection PyDictCreation
flags = {
"welcome_text": "welcome to my python FF tutorial!"
}
# Flag goes here!
#
flags["alternate_homescreen"] = False
return render_template(
'index.html',
**flags,
title='Home'
)
@app.route('/map')
def map():
return render_template('map.html', title='Map')
@app.route('/map/refresh', methods=['POST'])
def map_refresh():
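    # Generate a handful of random points roughly around central Paris (lat ~48.85, lon ~2.35) for the demo map.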
points = [(random.uniform(48.8434100, 48.8634100),
random.uniform(2.3388000, 2.3588000))
for _ in range(random.randint(2, 9))]
return jsonify({'points': points})
@app.route('/contact')
def contact():
return render_template('contact.html', title='Contact')
| [((7, 1, 7, 15), 'app.app.route', 'app.route', ({(7, 11, 7, 14): '"""/"""'}, {}), "('/')", False, 'from app import app\n'), ((8, 1, 8, 20), 'app.app.route', 'app.route', ({(8, 11, 8, 19): '"""/index"""'}, {}), "('/index')", False, 'from app import app\n'), ((30, 1, 30, 18), 'app.app.route', 'app.route', ({(30, 11, 30, 17): '"""/map"""'}, {}), "('/map')", False, 'from app import app\n'), ((35, 1, 35, 44), 'app.app.route', 'app.route', (), '', False, 'from app import app\n'), ((43, 1, 43, 22), 'app.app.route', 'app.route', ({(43, 11, 43, 21): '"""/contact"""'}, {}), "('/contact')", False, 'from app import app\n'), ((23, 11, 27, 5), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template, jsonify\n'), ((32, 11, 32, 51), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template, jsonify\n'), ((40, 11, 40, 38), 'flask.jsonify', 'jsonify', ({(40, 19, 40, 37): "{'points': points}"}, {}), "({'points': points})", False, 'from flask import render_template, jsonify\n'), ((45, 11, 45, 59), 'flask.render_template', 'render_template', (), '', False, 'from flask import render_template, jsonify\n'), ((37, 15, 37, 53), 'random.uniform', 'random.uniform', ({(37, 30, 37, 40): '(48.84341)', (37, 42, 37, 52): '(48.86341)'}, {}), '(48.84341, 48.86341)', False, 'import random\n'), ((38, 15, 38, 51), 'random.uniform', 'random.uniform', ({(38, 30, 38, 39): '(2.3388)', (38, 41, 38, 50): '(2.3588)'}, {}), '(2.3388, 2.3588)', False, 'import random\n'), ((39, 29, 39, 49), 'random.randint', 'random.randint', ({(39, 44, 39, 45): '(2)', (39, 47, 39, 48): '(9)'}, {}), '(2, 9)', False, 'import random\n')] |
keepangry/ai_algorithm | base_sample/numpy_mat.py | 21d8024296a2f2d2797448ed34eb383359259684 | # encoding: utf-8
'''
@author: yangsen
@license:
@contact:
@software:
@file: numpy_mat.py
@time: 18-8-25 9:56 PM
@desc:
'''
import numpy as np
a = np.arange(9).reshape(3,3)
# rows
a[1]
a[[1,2]]
a[np.array([1,2])]
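# Both a plain list and an ndarray of row indices select whole rows (rows 1 and 2 -> a 2x3 array);
# note these bare expressions only display their result in an interactive session.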
# columns
a[:,1]
a[:,[1,2]]
a[:,np.array([1,2])] | [((12, 4, 12, 16), 'numpy.arange', 'np.arange', ({(12, 14, 12, 15): '9'}, {}), '(9)', True, 'import numpy as np\n'), ((18, 2, 18, 17), 'numpy.array', 'np.array', ({(18, 11, 18, 16): '[1, 2]'}, {}), '([1, 2])', True, 'import numpy as np\n'), ((23, 4, 23, 19), 'numpy.array', 'np.array', ({(23, 13, 23, 18): '[1, 2]'}, {}), '([1, 2])', True, 'import numpy as np\n')] |
laszukdawid/ai-traineree | ai_traineree/agents/rainbow.py | af32940eba8e11012de87b60d78f10f5a3b96c79 | import copy
from typing import Callable, Dict, List, Optional
import torch
import torch.nn as nn
import torch.optim as optim
from ai_traineree import DEVICE
from ai_traineree.agents import AgentBase
from ai_traineree.agents.agent_utils import soft_update
from ai_traineree.buffers import NStepBuffer, PERBuffer
from ai_traineree.buffers.buffer_factory import BufferFactory
from ai_traineree.loggers import DataLogger
from ai_traineree.networks.heads import RainbowNet
from ai_traineree.types import ActionType, AgentState, BufferState, DoneType, NetworkState, ObsType, RewardType
from ai_traineree.types.dataspace import DataSpace
from ai_traineree.utils import to_numbers_seq, to_tensor
class RainbowAgent(AgentBase):
"""Rainbow agent as described in [1].
    Rainbow is a DQN agent with a number of improvements that were suggested before 2017.
    As mentioned by the authors it is not an exhaustive set of improvements, but all changes are in
relatively separate areas so their connection makes sense. These improvements are:
* Priority Experience Replay
* Multi-step
* Double Q net
* Dueling nets
* NoisyNet
* CategoricalNet for Q estimate
Consider this class as a particular version of the DQN agent.
[1] "Rainbow: Combining Improvements in Deep Reinforcement Learning" by Hessel et al. (DeepMind team)
https://arxiv.org/abs/1710.02298
"""
model = "Rainbow"
def __init__(
self,
obs_space: DataSpace,
action_space: DataSpace,
state_transform: Optional[Callable]=None,
reward_transform: Optional[Callable]=None,
**kwargs
):
"""
        A wrapper over the DQN agent, thus the majority of the logic is in the DQNAgent.
Special treatment is required because the Rainbow agent uses categorical nets
which operate on probability distributions. Each action is taken as the estimate
from such distributions.
Parameters:
obs_space (DataSpace): Dataspace describing the input.
action_space (DataSpace): Dataspace describing the output.
state_transform (optional func):
reward_transform (optional func):
Keyword parameters:
pre_network_fn (function that takes input_shape and returns network):
Used to preprocess state before it is used in the value- and advantage-function in the dueling nets.
hidden_layers (tuple of ints): Shape of the hidden layers in fully connected network. Default: (100, 100).
            lr (float): Learning rate value. Default: 3e-4.
gamma (float): Discount factor. Default: 0.99.
tau (float): Soft-copy factor. Default: 0.002.
update_freq (int): Number of steps between each learning step. Default 1.
batch_size (int): Number of samples to use at each learning step. Default: 80.
buffer_size (int): Number of most recent samples to keep in memory for learning. Default: 1e5.
warm_up (int): Number of samples to observe before starting any learning step. Default: 0.
number_updates (int): How many times to use learning step in the learning phase. Default: 1.
max_grad_norm (float): Maximum norm of the gradient used in learning. Default: 10.
using_double_q (bool): Whether to use Double Q Learning network. Default: True.
n_steps (int): Number of lookahead steps when estimating reward. See :ref:`NStepBuffer`. Default: 3.
v_min (float): Lower bound for distributional value V. Default: -10.
v_max (float): Upper bound for distributional value V. Default: 10.
num_atoms (int): Number of atoms (discrete states) in the value V distribution. Default: 21.
"""
super().__init__(**kwargs)
self.device = self._register_param(kwargs, "device", DEVICE, update=True)
self.obs_space = obs_space
self.action_space = action_space
self._config['obs_space'] = self.obs_space
self._config['action_space'] = self.action_space
self.action_size = action_space.to_feature()
self.lr = float(self._register_param(kwargs, 'lr', 3e-4))
self.gamma = float(self._register_param(kwargs, 'gamma', 0.99))
self.tau = float(self._register_param(kwargs, 'tau', 0.002))
self.update_freq = int(self._register_param(kwargs, 'update_freq', 1))
self.batch_size = int(self._register_param(kwargs, 'batch_size', 80, update=True))
self.buffer_size = int(self._register_param(kwargs, 'buffer_size', int(1e5), update=True))
self.warm_up = int(self._register_param(kwargs, 'warm_up', 0))
self.number_updates = int(self._register_param(kwargs, 'number_updates', 1))
self.max_grad_norm = float(self._register_param(kwargs, 'max_grad_norm', 10))
self.iteration: int = 0
self.using_double_q = bool(self._register_param(kwargs, "using_double_q", True))
self.state_transform = state_transform if state_transform is not None else lambda x: x
self.reward_transform = reward_transform if reward_transform is not None else lambda x: x
v_min = float(self._register_param(kwargs, "v_min", -10))
v_max = float(self._register_param(kwargs, "v_max", 10))
self.num_atoms = int(self._register_param(kwargs, "num_atoms", 21, drop=True))
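        # Fixed support of the categorical value distribution: `num_atoms` points evenly spaced
        # over [v_min, v_max]; `z_delta` is the spacing between neighbouring atoms.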
self.z_atoms = torch.linspace(v_min, v_max, self.num_atoms, device=self.device)
self.z_delta = self.z_atoms[1] - self.z_atoms[0]
self.buffer = PERBuffer(**kwargs)
self.__batch_indices = torch.arange(self.batch_size, device=self.device)
self.n_steps = int(self._register_param(kwargs, "n_steps", 3))
self.n_buffer = NStepBuffer(n_steps=self.n_steps, gamma=self.gamma)
# Note that in case a pre_network is provided, e.g. a shared net that extracts pixels values,
# it should be explicitly passed in kwargs
kwargs["hidden_layers"] = to_numbers_seq(self._register_param(kwargs, "hidden_layers", (100, 100)))
self.net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.target_net = RainbowNet(obs_space.shape, self.action_size, num_atoms=self.num_atoms, **kwargs)
self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
self.dist_probs = None
self._loss = float('nan')
@property
def loss(self):
return {'loss': self._loss}
@loss.setter
def loss(self, value):
if isinstance(value, dict):
value = value['loss']
self._loss = value
def step(self, obs: ObsType, action: ActionType, reward: RewardType, next_obs: ObsType, done: DoneType) -> None:
"""Letting the agent to take a step.
On some steps the agent will initiate learning step. This is dependent on
the `update_freq` value.
Parameters:
obs (ObservationType): Observation.
action (int): Discrete action associated with observation.
reward (float): Reward obtained for taking action at state.
            next_obs (ObservationType): Observation of the state reached after taking the action.
done: (bool) Whether in terminal (end of episode) state.
"""
assert isinstance(action, int), "Rainbow expects discrete action (int)"
self.iteration += 1
t_obs = to_tensor(self.state_transform(obs)).float().to("cpu")
t_next_obs = to_tensor(self.state_transform(next_obs)).float().to("cpu")
reward = self.reward_transform(reward)
# Delay adding to buffer to account for n_steps (particularly the reward)
self.n_buffer.add(
state=t_obs.numpy(), action=[int(action)], reward=[reward], done=[done], next_state=t_next_obs.numpy()
)
if not self.n_buffer.available:
return
self.buffer.add(**self.n_buffer.get().get_dict())
if self.iteration < self.warm_up:
return
if len(self.buffer) >= self.batch_size and (self.iteration % self.update_freq) == 0:
for _ in range(self.number_updates):
self.learn(self.buffer.sample())
# Update networks only once - sync local & target
soft_update(self.target_net, self.net, self.tau)
def act(self, obs: ObsType, eps: float = 0.) -> int:
"""
Returns actions for given state as per current policy.
Parameters:
            obs: Current observation from the environment.
            eps: Epsilon value for the epsilon-greedy policy.
"""
# Epsilon-greedy action selection
if self._rng.random() < eps:
# TODO: Update with action_space.sample() once implemented
assert len(self.action_space.shape) == 1, "Only 1D is supported right now"
return self._rng.randint(self.action_space.low, self.action_space.high)
t_obs = to_tensor(self.state_transform(obs)).float().unsqueeze(0).to(self.device)
self.dist_probs = self.net.act(t_obs)
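        # The expected Q(s, a) is the mean of the categorical distribution over the fixed support z_atoms.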
q_values = (self.dist_probs * self.z_atoms).sum(-1)
return int(q_values.argmax(-1)) # Action maximizes state-action value Q(s, a)
def learn(self, experiences: Dict[str, List]) -> None:
"""
Parameters:
experiences: Contains all experiences for the agent. Typically sampled from the memory buffer.
Five keys are expected, i.e. `state`, `action`, `reward`, `next_state`, `done`.
                Each key contains an array and all arrays have to have the same length.
"""
rewards = to_tensor(experiences['reward']).float().to(self.device)
dones = to_tensor(experiences['done']).type(torch.int).to(self.device)
states = to_tensor(experiences['state']).float().to(self.device)
next_states = to_tensor(experiences['next_state']).float().to(self.device)
actions = to_tensor(experiences['action']).type(torch.long).to(self.device)
assert rewards.shape == dones.shape == (self.batch_size, 1)
assert states.shape == next_states.shape == (self.batch_size,) + self.obs_space.shape
assert actions.shape == (self.batch_size, 1) # Discrete domain
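        # Build the target distribution: pick next actions greedily (using the online net when
        # Double-Q is enabled), then project the discounted return distribution onto the fixed atoms.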
with torch.no_grad():
prob_next = self.target_net.act(next_states)
q_next = (prob_next * self.z_atoms).sum(-1) * self.z_delta
if self.using_double_q:
duel_prob_next = self.net.act(next_states)
a_next = torch.argmax((duel_prob_next * self.z_atoms).sum(-1), dim=-1)
else:
a_next = torch.argmax(q_next, dim=-1)
prob_next = prob_next[self.__batch_indices, a_next, :]
m = self.net.dist_projection(rewards, 1 - dones, self.gamma ** self.n_steps, prob_next)
assert m.shape == (self.batch_size, self.num_atoms)
log_prob = self.net(states, log_prob=True)
assert log_prob.shape == (self.batch_size,) + self.action_size + (self.num_atoms,)
log_prob = log_prob[self.__batch_indices, actions.squeeze(), :]
assert log_prob.shape == m.shape == (self.batch_size, self.num_atoms)
# Cross-entropy loss error and the loss is batch mean
error = -torch.sum(m * log_prob, 1)
assert error.shape == (self.batch_size,)
loss = error.mean()
assert loss >= 0
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.net.parameters(), self.max_grad_norm)
self.optimizer.step()
self._loss = float(loss.item())
if hasattr(self.buffer, 'priority_update'):
assert (~torch.isnan(error)).any()
self.buffer.priority_update(experiences['index'], error.detach().cpu().numpy())
# Update networks - sync local & target
soft_update(self.target_net, self.net, self.tau)
def state_dict(self) -> Dict[str, dict]:
"""Returns agent's state dictionary.
Returns:
            State dictionary for internal networks.
"""
return {"net": self.net.state_dict(), "target_net": self.target_net.state_dict()}
def log_metrics(self, data_logger: DataLogger, step: int, full_log: bool=False):
data_logger.log_value("loss/agent", self._loss, step)
if full_log and self.dist_probs is not None:
assert len(self.action_space.shape) == 1, "Only 1D actions currently supported"
action_size = self.action_size[0]
for action_idx in range(action_size):
dist = self.dist_probs[0, action_idx]
data_logger.log_value(f'dist/expected_{action_idx}', (dist*self.z_atoms).sum().item(), step)
data_logger.add_histogram(
f'dist/Q_{action_idx}', min=self.z_atoms[0], max=self.z_atoms[-1], num=len(self.z_atoms),
sum=dist.sum(), sum_squares=dist.pow(2).sum(), bucket_limits=self.z_atoms+self.z_delta,
bucket_counts=dist, global_step=step
)
# This method, `log_metrics`, isn't executed on every iteration but just in case we delay plotting weights.
        # It simply might be quite costly. Tread wisely.
if full_log:
for idx, layer in enumerate(self.net.value_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"value_net/layer_weights_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"value_net/layer_bias_{idx}", layer.bias.cpu(), step)
for idx, layer in enumerate(self.net.advantage_net.layers):
if hasattr(layer, "weight"):
data_logger.create_histogram(f"advantage_net/layer_{idx}", layer.weight.cpu(), step)
if hasattr(layer, "bias") and layer.bias is not None:
data_logger.create_histogram(f"advantage_net/layer_bias_{idx}", layer.bias.cpu(), step)
def get_state(self) -> AgentState:
"""Provides agent's internal state."""
return AgentState(
model=self.model,
obs_space=self.obs_space,
action_space=self.action_space,
config=self._config,
buffer=copy.deepcopy(self.buffer.get_state()),
network=copy.deepcopy(self.get_network_state()),
)
def get_network_state(self) -> NetworkState:
return NetworkState(net=dict(net=self.net.state_dict(), target_net=self.target_net.state_dict()))
@staticmethod
def from_state(state: AgentState) -> AgentBase:
config = copy.copy(state.config)
config.update({'obs_space': state.obs_space, 'action_space': state.action_space})
agent = RainbowAgent(**config)
if state.network is not None:
agent.set_network(state.network)
if state.buffer is not None:
agent.set_buffer(state.buffer)
return agent
def set_network(self, network_state: NetworkState) -> None:
self.net.load_state_dict(network_state.net['net'])
self.target_net.load_state_dict(network_state.net['target_net'])
def set_buffer(self, buffer_state: BufferState) -> None:
self.buffer = BufferFactory.from_state(buffer_state)
def save_state(self, path: str) -> None:
"""Saves agent's state into a file.
Parameters:
path: String path where to write the state.
"""
agent_state = self.get_state()
torch.save(agent_state, path)
def load_state(self, path: str) -> None:
"""Loads state from a file under provided path.
Parameters:
path: String path indicating where the state is stored.
"""
agent_state = torch.load(path)
self._config = agent_state.get('config', {})
self.__dict__.update(**self._config)
self.net.load_state_dict(agent_state['net'])
self.target_net.load_state_dict(agent_state['target_net'])
def save_buffer(self, path: str) -> None:
"""Saves data from the buffer into a file under provided path.
Parameters:
path: String path where to write the buffer.
"""
import json
dump = self.buffer.dump_buffer(serialize=True)
with open(path, 'w') as f:
json.dump(dump, f)
def load_buffer(self, path: str) -> None:
"""Loads data into the buffer from provided file path.
Parameters:
path: String path indicating where the buffer is stored.
"""
import json
with open(path, 'r') as f:
buffer_dump = json.load(f)
self.buffer.load_buffer(buffer_dump)
def __eq__(self, o: object) -> bool:
return super().__eq__(o) \
and isinstance(o, type(self)) \
and self._config == o._config \
and self.buffer == o.buffer \
and self.get_network_state() == o.get_network_state()
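# Hedged usage sketch (not part of the original file): assumes `agent` is an already
# configured RainbowAgent instance.
#
# agent.save_state("rainbow_checkpoint.pt")           # torch.save of the AgentState
# clone = RainbowAgent.from_state(agent.get_state())  # rebuild an equivalent agent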
| [((109, 23, 109, 87), 'torch.linspace', 'torch.linspace', (), '', False, 'import torch\n'), ((112, 22, 112, 41), 'ai_traineree.buffers.PERBuffer', 'PERBuffer', ({}, {}), '(**kwargs)', False, 'from ai_traineree.buffers import NStepBuffer, PERBuffer\n'), ((113, 31, 113, 80), 'torch.arange', 'torch.arange', (), '', False, 'import torch\n'), ((116, 24, 116, 75), 'ai_traineree.buffers.NStepBuffer', 'NStepBuffer', (), '', False, 'from ai_traineree.buffers import NStepBuffer, PERBuffer\n'), ((121, 19, 121, 100), 'ai_traineree.networks.heads.RainbowNet', 'RainbowNet', (), '', False, 'from ai_traineree.networks.heads import RainbowNet\n'), ((122, 26, 122, 107), 'ai_traineree.networks.heads.RainbowNet', 'RainbowNet', (), '', False, 'from ai_traineree.networks.heads import RainbowNet\n'), ((250, 8, 250, 56), 'ai_traineree.agents.agent_utils.soft_update', 'soft_update', ({(250, 20, 250, 35): 'self.target_net', (250, 37, 250, 45): 'self.net', (250, 47, 250, 55): 'self.tau'}, {}), '(self.target_net, self.net, self.tau)', False, 'from ai_traineree.agents.agent_utils import soft_update\n'), ((306, 17, 306, 40), 'copy.copy', 'copy.copy', ({(306, 27, 306, 39): 'state.config'}, {}), '(state.config)', False, 'import copy\n'), ((320, 22, 320, 60), 'ai_traineree.buffers.buffer_factory.BufferFactory.from_state', 'BufferFactory.from_state', ({(320, 47, 320, 59): 'buffer_state'}, {}), '(buffer_state)', False, 'from ai_traineree.buffers.buffer_factory import BufferFactory\n'), ((330, 8, 330, 37), 'torch.save', 'torch.save', ({(330, 19, 330, 30): 'agent_state', (330, 32, 330, 36): 'path'}, {}), '(agent_state, path)', False, 'import torch\n'), ((339, 22, 339, 38), 'torch.load', 'torch.load', ({(339, 33, 339, 37): 'path'}, {}), '(path)', False, 'import torch\n'), ((175, 12, 175, 60), 'ai_traineree.agents.agent_utils.soft_update', 'soft_update', ({(175, 24, 175, 39): 'self.target_net', (175, 41, 175, 49): 'self.net', (175, 51, 175, 59): 'self.tau'}, {}), '(self.target_net, self.net, self.tau)', False, 'from ai_traineree.agents.agent_utils import soft_update\n'), ((214, 13, 214, 28), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((234, 17, 234, 43), 'torch.sum', 'torch.sum', ({(234, 27, 234, 39): '(m * log_prob)', (234, 41, 234, 42): '(1)'}, {}), '(m * log_prob, 1)', False, 'import torch\n'), ((356, 12, 356, 30), 'json.dump', 'json.dump', ({(356, 22, 356, 26): 'dump', (356, 28, 356, 29): 'f'}, {}), '(dump, f)', False, 'import json\n'), ((367, 26, 367, 38), 'json.load', 'json.load', ({(367, 36, 367, 37): 'f'}, {}), '(f)', False, 'import json\n'), ((221, 25, 221, 53), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n'), ((205, 18, 205, 50), 'ai_traineree.utils.to_tensor', 'to_tensor', ({(205, 28, 205, 49): "experiences['reward']"}, {}), "(experiences['reward'])", False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((206, 16, 206, 46), 'ai_traineree.utils.to_tensor', 'to_tensor', ({(206, 26, 206, 45): "experiences['done']"}, {}), "(experiences['done'])", False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((207, 17, 207, 48), 'ai_traineree.utils.to_tensor', 'to_tensor', ({(207, 27, 207, 47): "experiences['state']"}, {}), "(experiences['state'])", False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((208, 22, 208, 58), 'ai_traineree.utils.to_tensor', 'to_tensor', ({(208, 32, 208, 57): "experiences['next_state']"}, {}), "(experiences['next_state'])", False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((209, 
18, 209, 50), 'ai_traineree.utils.to_tensor', 'to_tensor', ({(209, 28, 209, 49): "experiences['action']"}, {}), "(experiences['action'])", False, 'from ai_traineree.utils import to_numbers_seq, to_tensor\n'), ((246, 21, 246, 39), 'torch.isnan', 'torch.isnan', ({(246, 33, 246, 38): 'error'}, {}), '(error)', False, 'import torch\n')] |
uchytilc/PyCu | nvvm/core/nvvm.py | 9ba25281611bf4dbd70d37f4eba0574f817d6928 | from pycu.nvvm import (get_libdevice, ir_version, version, add_module_to_program, compile_program,
create_program, destroy_program, get_compiled_result, get_compiled_result_size,
get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program)
import os
import sys
from ctypes import c_char_p
import weakref
class NVVMPtr:
# #key: arch associated with libdevice (None indicates libdevice is not arch specific)
# #value: libdevice source
# libdevice = {}
# #key:given arch
# #value: closest available arch found
# searched_arch = {}
def __init__(self, handle, arch = 20):
self.get_libdevice(arch)
self.handle = handle
def get_libdevice(self, arch = 20):
return get_libdevice(arch)
# libdevice = self.libdevice.get(arch, None)
# if libdevice is None:
# #note: use False instead of None in searched_arch.get when indicating failure to prevent getting None key from libdevice (libdevice with no "compute_" is stored under None key)
# libdevice = self.libdevice.get(self.searched_arch.get(arch, False), None)
# if libdevice is None:
# found_arch, libdevice = next(iter(get_libdevice(arch).items()))
# self.searched_arch[arch] = found_arch
# self.libdevice[arch] = libdevice
# return libdevice
def get_version(self):
return version()
def get_ir_version(self):
return ir_version()
def add_module(self, buff, name = "<unnamed>"):
if isinstance(buff, str):
buff = buff.encode('utf8')
if isinstance(name, str):
name = name.encode('utf8')
size = len(buff)
add_module_to_program(self.handle, buff, size, name)
def compile(self, options = {}):
"""
https://docs.nvidia.com/cuda/libnvvm-api/group__compilation.html#group__compilation_1g76ac1e23f5d0e2240e78be0e63450346
Valid compiler options are
-g (enable generation of debugging information, valid only with -opt=0)
-generate-line-info (generate line number information)
-opt=
0 (disable optimizations)
3 (default, enable optimizations)
-arch=
compute_35
compute_37
compute_50
compute_52 (default)
compute_53
compute_60
compute_61
compute_62
compute_70
compute_72
compute_75
compute_80
-ftz=
0 (default, preserve denormal values, when performing single-precision floating-point operations)
1 (flush denormal values to zero, when performing single-precision floating-point operations)
-prec-sqrt=
0 (use a faster approximation for single-precision floating-point square root)
1 (default, use IEEE round-to-nearest mode for single-precision floating-point square root)
-prec-div=
0 (use a faster approximation for single-precision floating-point division and reciprocals)
1 (default, use IEEE round-to-nearest mode for single-precision floating-point division and reciprocals)
-fma=
0 (disable FMA contraction)
1 (default, enable FMA contraction)
-g (enable generation of debugging information, valid only with -opt=0)
-generate-line-info (generate line number information)
"""
opt = options.get("opt", 3)
arch = options.get("arch", 52)
ftz = options.get("ftz", 0)
prec_sqrt = options.get("prec_sqrt", 1)
prec_div = options.get("prec_div", 1)
fma = options.get("fma", 0)
opts = [f"-opt={opt}",
f"-arch=compute_{arch}",
f"-ftz={ftz}",
f"-prec-sqrt={prec_sqrt}",
f"-prec-div={prec_div}",
f"-fma={fma}",]
if options.get("g", False):
if opt == 0:
opts.append("-g")
else:
#raise warning (g is only valid when -opt=0)
pass
if options.get("generate-line-info", True):
opts.append("-generate-line-info")
options = (c_char_p * len(opts))(*[c_char_p(opt.encode('utf8')) for opt in opts])
compile_program(self.handle, options)
ptx = get_compiled_result(self.handle)
#TO DO
#Apply Numba's debug patch to ptx
return ptx
def verify_program(self, options = {}):
pass
# verify_program(self.handle, )
class NVVM(NVVMPtr):
def __init__(self, arch = 20):
# self.handle = handle = create_program()
handle = create_program()
weakref.finalize(self, destroy_program, handle)
super().__init__(handle, arch)
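# Hedged usage sketch (not part of the original module): assumes libnvvm and libdevice
# are discoverable by pycu.nvvm and that `llvm_ir` holds NVVM IR source text.
#
# nvvm = NVVM(arch=52)
# nvvm.add_module(llvm_ir, name="kernel_module")
# ptx = nvvm.compile({"opt": 3, "arch": 52})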
| [((25, 9, 25, 28), 'pycu.nvvm.get_libdevice', 'get_libdevice', ({(25, 23, 25, 27): 'arch'}, {}), '(arch)', False, 'from pycu.nvvm import get_libdevice, ir_version, version, add_module_to_program, compile_program, create_program, destroy_program, get_compiled_result, get_compiled_result_size, get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program\n'), ((37, 9, 37, 18), 'pycu.nvvm.version', 'version', ({}, {}), '()', False, 'from pycu.nvvm import get_libdevice, ir_version, version, add_module_to_program, compile_program, create_program, destroy_program, get_compiled_result, get_compiled_result_size, get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program\n'), ((40, 9, 40, 21), 'pycu.nvvm.ir_version', 'ir_version', ({}, {}), '()', False, 'from pycu.nvvm import get_libdevice, ir_version, version, add_module_to_program, compile_program, create_program, destroy_program, get_compiled_result, get_compiled_result_size, get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program\n'), ((49, 2, 49, 54), 'pycu.nvvm.add_module_to_program', 'add_module_to_program', ({(49, 24, 49, 35): 'self.handle', (49, 37, 49, 41): 'buff', (49, 43, 49, 47): 'size', (49, 49, 49, 53): 'name'}, {}), '(self.handle, buff, size, name)', False, 'from pycu.nvvm import get_libdevice, ir_version, version, add_module_to_program, compile_program, create_program, destroy_program, get_compiled_result, get_compiled_result_size, get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program\n'), ((123, 2, 123, 39), 'pycu.nvvm.compile_program', 'compile_program', ({(123, 18, 123, 29): 'self.handle', (123, 31, 123, 38): 'options'}, {}), '(self.handle, options)', False, 'from pycu.nvvm import get_libdevice, ir_version, version, add_module_to_program, compile_program, create_program, destroy_program, get_compiled_result, get_compiled_result_size, get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program\n'), ((125, 8, 125, 40), 'pycu.nvvm.get_compiled_result', 'get_compiled_result', ({(125, 28, 125, 39): 'self.handle'}, {}), '(self.handle)', False, 'from pycu.nvvm import get_libdevice, ir_version, version, add_module_to_program, compile_program, create_program, destroy_program, get_compiled_result, get_compiled_result_size, get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program\n'), ((140, 11, 140, 27), 'pycu.nvvm.create_program', 'create_program', ({}, {}), '()', False, 'from pycu.nvvm import get_libdevice, ir_version, version, add_module_to_program, compile_program, create_program, destroy_program, get_compiled_result, get_compiled_result_size, get_program_log, get_program_log_size, lazy_add_module_to_program, verify_program\n'), ((141, 2, 141, 49), 'weakref.finalize', 'weakref.finalize', ({(141, 19, 141, 23): 'self', (141, 25, 141, 40): 'destroy_program', (141, 42, 141, 48): 'handle'}, {}), '(self, destroy_program, handle)', False, 'import weakref\n')] |
itsdaveit/fieldservice | fieldservice/fieldservice/doctype/fieldservice_settings/test_fieldservice_settings.py | 90bd813fb01f23a18df3b24fc67ec86c4d8be5a5 | # Copyright (c) 2022, itsdve GmbH and Contributors
# See license.txt
# import frappe
import unittest
class TestFieldserviceSettings(unittest.TestCase):
pass
| [] |
BrickerP/Investment- | Codes/Data Processing.py | 8b57c0d157a7eaa38d693c8d42ce1bc7dc7bdde9 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 21 14:51:01 2021
@author: 75638
"""
import pandas as pd
import numpy as np
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 10000)
def process_data(path1,path2):
'''
1. path1: file path of the factor data
2. path2: file path of the SP500 members list
3. remove anomalies
4. normalize data
5. fill NaN with the cross-sectional median
'''
#read factor.xlsx
factor=pd.read_excel(path1,index_col=0)
#remove anomalies that are greater than median+5*std or less than median-5*std
for date in factor:
median=factor[date].quantile(0.5)
std=factor[date].std()
min=median-5*std
max=median+5*std
factor[date]=factor[date].clip(min,max)
#normalize data
for date in factor:
mean=factor[date].mean()
std=factor[date].std()
factor[date]=(factor[date]-mean)/std
# fill NAN
for date in factor:
median=factor[date].quantile(0.5)
factor.fillna(median,inplace=True)
#read SP500 member datas
member=pd.read_excel(path2,index_col=0)
#merge industry data
factor=pd.merge(member,factor,left_index=True,right_index=True)
# save processed data
factor.to_csv('C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\data\\volatility.csv')
return factor
def remove_dates(data):
columns = []
for i in data:
if '20' in i:
columns.append(i[:7])
else:
columns.append(i)
data.columns = columns
return data
def Seasonal_data_fill(path):
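# Comment added for clarity: expands quarterly columns to monthly by inserting two
# NaN-filled month columns after each dated column, forward-filling across columns,
# and then reusing the (date-trimmed) headers taken from PE.csv.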
data = pd.read_csv('{}'.format(path))
order = 2
for j in data:
if '20' in j:
year = j.split('/')[2]
month = j.split('/')[0]
month =(int)(month)
time_1 = year + '-' +str(month+1)
time_2 = year + '-' +str(month+2)
data.insert(order+1, '{}'.format(time_1), np.nan)
data.insert(order+2, '{}'.format(time_2), np.nan)
order += 3
temp = data.iloc[:,:2]
data = data.iloc[:,2:]
data = data.ffill(axis = 1)
data = pd.concat([temp, data], axis = 1)
data.columns = remove_dates(pd.read_csv('PE.csv')).columns
data = data.set_index(data.columns[0])
return data.to_csv('New {}'.format(path))
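# Hedged usage sketch (added): assumes a quarterly CSV laid out like PE.csv.
# Seasonal_data_fill('PE.csv')  # writes 'New PE.csv' with monthly, forward-filled columns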
if __name__ == '__main__':
path1='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\original_data\\volatility.xlsx'
path2='C:\\Users\\75638\\OneDrive - UW\\Desktop\\703project\\SP500\\SP500members.xlsx'
data=process_data(path1,path2) | [((9, 0, 9, 42), 'pandas.set_option', 'pd.set_option', ({(9, 14, 9, 35): '"""display.max_columns"""', (9, 37, 9, 41): 'None'}, {}), "('display.max_columns', None)", True, 'import pandas as pd\n'), ((10, 0, 10, 37), 'pandas.set_option', 'pd.set_option', ({(10, 14, 10, 29): '"""display.width"""', (10, 31, 10, 36): '(10000)'}, {}), "('display.width', 10000)", True, 'import pandas as pd\n'), ((20, 11, 20, 43), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((38, 11, 38, 43), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((40, 11, 40, 67), 'pandas.merge', 'pd.merge', (), '', True, 'import pandas as pd\n'), ((70, 11, 70, 44), 'pandas.concat', 'pd.concat', (), '', True, 'import pandas as pd\n'), ((71, 32, 71, 53), 'pandas.read_csv', 'pd.read_csv', ({(71, 44, 71, 52): '"""PE.csv"""'}, {}), "('PE.csv')", True, 'import pandas as pd\n')] |