repo_name (stringlengths 7-94) | repo_path (stringlengths 4-237) | repo_head_hexsha (stringlengths 40) | content (stringlengths 10-680k) | apis (stringlengths 2-840k)
---|---|---|---|---
garlicbutter/Jonathan-Tom | mojoco trivial/mujocoSim/UR5/simple_example/Mujoco_py_example.py | c1696f0a94da46911b3566a3d4f49791e877373f | import numpy as np
import mujoco_py as mj
from mujoco_py_renderer import SimulationError, XMLError, MujocoPyRenderer
from mujoco_py import (MjSim, load_model_from_xml,functions,
load_model_from_path, MjSimState,
ignore_mujoco_warnings,
load_model_from_mjb)
from matplotlib import pyplot as plt
import time
xml = """
<mujoco model="example">
<compiler coordinate="global"/>
<default>
<geom rgba=".8 .6 .4 1"/>
</default>
<asset>
<texture type="skybox" builtin="gradient" rgb1="1 1 1" rgb2=".6 .8 1"
width="256" height="256"/>
</asset>
<worldbody>
<light pos="0 1 1" dir="0 -1 -1" diffuse="1 1 1"/>
<geom name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="10 10 10" type="plane"/>
<body>
<site name="world" size="0.1" pos="0 0 0" />
<geom name="first_pole" type="capsule" fromto="0 0 0 0 0 0.5" size="0.04"/>
<joint name='a' type="hinge" pos="0 0 0" axis="0 0 1" />
<body name="second_pole">
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0 0 0.5 0.5 0 0.5" size="0.04" name="second_pole"/>
<joint name='b' type="hinge" pos="0 0 0.5" axis="0 1 0"/>
<body name='third_pole'>
<inertial pos="0 0 0" mass="0.00000001" diaginertia="1e-008 1e-008 1e-008" />
<geom type="capsule" fromto="0.5 0 0.5 1 0 0.5" size="0.04" name="third_pole"/>
<joint name='c' type="hinge" pos="0.5 0 0.5" axis="0 1 0"/>
<site name="target" size="0.1" pos="1 0 0.5" />
<body name="mass">
<inertial pos="1 0 0.5" mass="1e-2" diaginertia="1e-008 1e-008 1e-008" />
<geom type="sphere" pos="1 0 0.5" size="0.2" name="mass"/>
</body>
</body>
</body>
</body>
</worldbody>
<actuator>
<motor joint="a"/>
<motor joint="b"/>
<motor joint="c"/>
</actuator>
</mujoco>
"""
model = load_model_from_xml(xml)
sim = MjSim(model)
viewer = MujocoPyRenderer(sim)
sim.reset()
# After reset jacobians are all zeros
sim.forward()
target_jacp = np.zeros(3 * sim.model.nv)
target_jacr= np.zeros(3 * sim.model.nv)
F=np.array([0,0,-9.81*1e-2,0,0,0]).T
#np.testing.assert_allclose(target_jacp, np.zeros(3 * sim.model.nv))
# After first forward, jacobians are real
#sim.forward()
K_diag=2000
C_diag=100
A_diag=1e-3
K=np.identity(3)*K_diag
C=np.identity(3)*C_diag
A=np.identity(3)*A_diag
#K_diag=0.3
#C_diag=0.05
for i in range(3):
K[i, i]=K_diag
C[i,i]=C_diag
A[i, i] = A_diag
x_intial=sim.data.site_xpos[1]
print(x_intial)
x_desired=np.array([0,1,0.3])
v_intial=sim.data.site_xvelp[1]
v_desired=np.array([0,0,0])
a_desired=np.array([0,0,0])
a_intial=np.array([0,0,0])
dt=sim.model.opt.timestep
#sim.data.get_site_jacp('target', jacp=target_jacp)
# Should be unchanged after steps (zero action)
graph=[]
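# Control loop: task-space impedance law F = K(x_des - x) + C(v_des - v) + A(a_des - a),
# mapped to joint torques through the site Jacobian (the reshape(3,3) below assumes nv == 3).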
for _ in range(100000):
F[:3]=np.dot(K,x_desired-x_intial)+np.dot(C,v_desired-v_intial)+np.dot(A,a_desired-a_intial)
H = np.zeros(sim.model.nv* sim.model.nv)
functions.mj_fullM(sim.model, H, sim.data.qM)
sim.data.get_site_jacp('target', jacp=target_jacp)
sim.data.get_site_jacr('target', jacr=target_jacr)
J_L = target_jacp.reshape((3, sim.model.nv))
J_A = target_jacr.reshape((3, sim.model.nv))
J = np.concatenate((J_L, J_A), axis=0)
H_L =np.dot(np.linalg.pinv(J_L.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J_L)))
H_all=np.dot(np.linalg.pinv(J.T),np.dot(H.reshape(sim.model.nv, sim.model.nv), np.linalg.pinv(J)))
#F_a=np.dot(A,0.3-sim.data.qacc)
#action = np.dot(J_L.T, np.dot(H_L, F[:3]))+sim.data.qfrc_bias
action = sim.data.qfrc_bias+np.dot(H.reshape(3,3),np.dot(J_L.T,F[:3]))
#print(action)
#action = np.dot(J.T, F)
sim.data.ctrl[:] = action
sim.step()
sim.forward()
#print(np.max(action))
#print(sim.data.qacc)
viewer.render()
x_intial = sim.data.site_xpos[1]
a_intial=(v_intial-sim.data.site_xvelp[1])/dt
print(a_intial)
v_intial = sim.data.site_xvelp[1]
normal=np.linalg.norm(x_intial-x_desired)
#print(normal)
if normal<0.1:
print("in")
if x_desired[0]==0:
x_desired = np.array([-1, 0, 0.5])
elif x_desired[0]==1:
x_desired = np.array([0, 1, 0.3])
elif x_desired[0] == -1:
x_desired = np.array([1, 0, 0.5])
graph.append(np.abs(x_intial-x_desired))
# sim.forward()
print("the desired is {} and the intial is{}".format(x_desired,x_intial))
plt.plot(graph)
plt.show() | [((58, 8, 58, 32), 'mujoco_py.load_model_from_xml', 'load_model_from_xml', ({(58, 28, 58, 31): 'xml'}, {}), '(xml)', False, 'from mujoco_py import MjSim, load_model_from_xml, functions, load_model_from_path, MjSimState, ignore_mujoco_warnings, load_model_from_mjb\n'), ((60, 6, 60, 18), 'mujoco_py.MjSim', 'MjSim', ({(60, 12, 60, 17): 'model'}, {}), '(model)', False, 'from mujoco_py import MjSim, load_model_from_xml, functions, load_model_from_path, MjSimState, ignore_mujoco_warnings, load_model_from_mjb\n'), ((61, 9, 61, 30), 'mujoco_py_renderer.MujocoPyRenderer', 'MujocoPyRenderer', ({(61, 26, 61, 29): 'sim'}, {}), '(sim)', False, 'from mujoco_py_renderer import SimulationError, XMLError, MujocoPyRenderer\n'), ((67, 14, 67, 40), 'numpy.zeros', 'np.zeros', ({(67, 23, 67, 39): '3 * sim.model.nv'}, {}), '(3 * sim.model.nv)', True, 'import numpy as np\n'), ((68, 13, 68, 39), 'numpy.zeros', 'np.zeros', ({(68, 22, 68, 38): '3 * sim.model.nv'}, {}), '(3 * sim.model.nv)', True, 'import numpy as np\n'), ((98, 10, 98, 29), 'numpy.array', 'np.array', ({(98, 19, 98, 28): '[0, 1, 0.3]'}, {}), '([0, 1, 0.3])', True, 'import numpy as np\n'), ((101, 10, 101, 27), 'numpy.array', 'np.array', ({(101, 19, 101, 26): '[0, 0, 0]'}, {}), '([0, 0, 0])', True, 'import numpy as np\n'), ((103, 10, 103, 27), 'numpy.array', 'np.array', ({(103, 19, 103, 26): '[0, 0, 0]'}, {}), '([0, 0, 0])', True, 'import numpy as np\n'), ((104, 9, 104, 26), 'numpy.array', 'np.array', ({(104, 18, 104, 25): '[0, 0, 0]'}, {}), '([0, 0, 0])', True, 'import numpy as np\n'), ((155, 0, 155, 15), 'matplotlib.pyplot.plot', 'plt.plot', ({(155, 9, 155, 14): 'graph'}, {}), '(graph)', True, 'from matplotlib import pyplot as plt\n'), ((156, 0, 156, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'from matplotlib import pyplot as plt\n'), ((72, 2, 72, 34), 'numpy.array', 'np.array', ({(72, 11, 72, 33): '[0, 0, -9.81 * 0.01, 0, 0, 0]'}, {}), '([0, 0, -9.81 * 0.01, 0, 0, 0])', True, 'import numpy as np\n'), ((82, 2, 82, 16), 'numpy.identity', 'np.identity', ({(82, 14, 82, 15): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((83, 2, 83, 16), 'numpy.identity', 'np.identity', ({(83, 14, 83, 15): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((84, 2, 84, 16), 'numpy.identity', 'np.identity', ({(84, 14, 84, 15): '(3)'}, {}), '(3)', True, 'import numpy as np\n'), ((113, 8, 113, 44), 'numpy.zeros', 'np.zeros', ({(113, 17, 113, 43): 'sim.model.nv * sim.model.nv'}, {}), '(sim.model.nv * sim.model.nv)', True, 'import numpy as np\n'), ((114, 4, 114, 49), 'mujoco_py.functions.mj_fullM', 'functions.mj_fullM', ({(114, 23, 114, 32): 'sim.model', (114, 34, 114, 35): 'H', (114, 37, 114, 48): 'sim.data.qM'}, {}), '(sim.model, H, sim.data.qM)', False, 'from mujoco_py import MjSim, load_model_from_xml, functions, load_model_from_path, MjSimState, ignore_mujoco_warnings, load_model_from_mjb\n'), ((120, 8, 120, 42), 'numpy.concatenate', 'np.concatenate', (), '', True, 'import numpy as np\n'), ((138, 11, 138, 45), 'numpy.linalg.norm', 'np.linalg.norm', ({(138, 26, 138, 44): 'x_intial - x_desired'}, {}), '(x_intial - x_desired)', True, 'import numpy as np\n'), ((112, 68, 112, 96), 'numpy.dot', 'np.dot', ({(112, 75, 112, 76): 'A', (112, 77, 112, 95): '(a_desired - a_intial)'}, {}), '(A, a_desired - a_intial)', True, 'import numpy as np\n'), ((121, 16, 121, 37), 'numpy.linalg.pinv', 'np.linalg.pinv', ({(121, 31, 121, 36): 'J_L.T'}, {}), '(J_L.T)', True, 'import numpy as np\n'), ((122, 17, 122, 36), 'numpy.linalg.pinv', 'np.linalg.pinv', 
({(122, 32, 122, 35): 'J.T'}, {}), '(J.T)', True, 'import numpy as np\n'), ((150, 17, 150, 43), 'numpy.abs', 'np.abs', ({(150, 24, 150, 42): '(x_intial - x_desired)'}, {}), '(x_intial - x_desired)', True, 'import numpy as np\n'), ((112, 10, 112, 38), 'numpy.dot', 'np.dot', ({(112, 17, 112, 18): 'K', (112, 19, 112, 37): '(x_desired - x_intial)'}, {}), '(K, x_desired - x_intial)', True, 'import numpy as np\n'), ((112, 39, 112, 67), 'numpy.dot', 'np.dot', ({(112, 46, 112, 47): 'C', (112, 48, 112, 66): '(v_desired - v_intial)'}, {}), '(C, v_desired - v_intial)', True, 'import numpy as np\n'), ((121, 84, 121, 103), 'numpy.linalg.pinv', 'np.linalg.pinv', ({(121, 99, 121, 102): 'J_L'}, {}), '(J_L)', True, 'import numpy as np\n'), ((122, 83, 122, 100), 'numpy.linalg.pinv', 'np.linalg.pinv', ({(122, 98, 122, 99): 'J'}, {}), '(J)', True, 'import numpy as np\n'), ((125, 54, 125, 73), 'numpy.dot', 'np.dot', ({(125, 61, 125, 66): 'J_L.T', (125, 67, 125, 72): 'F[:3]'}, {}), '(J_L.T, F[:3])', True, 'import numpy as np\n'), ((143, 24, 143, 46), 'numpy.array', 'np.array', ({(143, 33, 143, 45): '[-1, 0, 0.5]'}, {}), '([-1, 0, 0.5])', True, 'import numpy as np\n'), ((145, 24, 145, 45), 'numpy.array', 'np.array', ({(145, 33, 145, 44): '[0, 1, 0.3]'}, {}), '([0, 1, 0.3])', True, 'import numpy as np\n'), ((147, 24, 147, 45), 'numpy.array', 'np.array', ({(147, 33, 147, 44): '[1, 0, 0.5]'}, {}), '([1, 0, 0.5])', True, 'import numpy as np\n')] |
seveirbian/gear-old | evaluation/wordpress/pull_docker_images_from_private_registry.py | 8d3529a9bf42e652a9d7475c9d14e9a6afc69a76 | import sys
# package needs to be installed: pip install docker
import docker
import time
import yaml
import os
import xlwt
auto = False
private_registry = "202.114.10.146:9999/"
# result
result = [["tag", "finishTime", "size", "data"], ]
class Puller:
def __init__(self, images):
self.images_to_pull = images
def check(self):
# detect whether the file exists, if true, delete it
if os.path.exists("./images_pulled.txt"):
os.remove("./images_pulled.txt")
def pull(self):
self.check()
client = docker.from_env()
# if no tag is given, then all images under this registry will be pulled
repos = self.images_to_pull[0]["repo"]
for repo in repos:
tags = self.images_to_pull[1][repo]
for tag in tags:
print "start pulling: ", private_registry+repo, ":", tag
# get present time
startTime = time.time()
# get present net data
cnetdata = get_net_data()
# pull images
try:
image_pulled = client.images.pull(repository=private_registry+repo, tag=str(tag))
# print pull time
finishTime = time.time() - startTime
print "finished in " , finishTime, "s"
# get image's size
size = image_pulled.attrs[u'Size'] / 1000000.0
print "image size: ", size
data = get_net_data() - cnetdata
print "pull data: ", data
print "\n"
# record the image and its pulling time
result.append([tag, finishTime, size, data])
except docker.errors.NotFound:
print private_registry+repo + " not found...\n\n"
except docker.errors.ImageNotFound:
print private_registry+repo + " image not fount...\n\n"
if auto != True:
raw_input("Next?")
class Generator:
def __init__(self, profilePath=""):
self.profilePath = profilePath
def generateFromProfile(self):
if self.profilePath == "":
print "Error: profile path is null"
with open(self.profilePath, 'r') as f:
self.images = yaml.load(f, Loader=yaml.FullLoader)
return self.images
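# Reads /proc/net/dev and returns the cumulative received bytes (in MiB) of the enp0s3 interface.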
def get_net_data():
netCard = "/proc/net/dev"
fd = open(netCard, "r")
for line in fd.readlines():
if line.find("enp0s3") >= 0:
field = line.split()
data = float(field[1]) / 1024.0 / 1024.0
fd.close()
return data
if __name__ == "__main__":
if len(sys.argv) == 2:
auto = True
generator = Generator(os.path.split(os.path.realpath(__file__))[0]+"/image_versions.yaml")
images = generator.generateFromProfile()
puller = Puller(images)
puller.pull()
# create a workbook sheet
workbook = xlwt.Workbook()
sheet = workbook.add_sheet("run_time")
for row in range(len(result)):
for column in range(len(result[row])):
sheet.write(row, column, result[row][column])
workbook.save(os.path.split(os.path.realpath(__file__))[0]+"/pull.xls") | [] |
sibeshkar/jiminy | jiminy/envs/vnc_wog.py | 7754f86fb0f246e7d039ea0cbfd9950fcae4adfb | from jiminy.envs import vnc_env
from jiminy.spaces import VNCActionSpace
class WorldOfGooEnv(vnc_env.VNCEnv):
def __init__(self):
super(WorldOfGooEnv, self).__init__()
# TODO: set action space screen shape to match
# HACK: empty keys list fails for some weird reason, give it an 'a'
self.action_space = VNCActionSpace(keys=['a'], buttonmasks=[1])
| [((10, 28, 10, 71), 'jiminy.spaces.VNCActionSpace', 'VNCActionSpace', (), '', False, 'from jiminy.spaces import VNCActionSpace\n')] |
arj119/FedML | fedml_api/standalone/federated_sgan/fedssgan_api.py | 5b7c098659f3e61f9e44583965300d8d0829f7a8 | import copy
import logging
import random
from typing import List, Tuple
import numpy as np
import torch
import wandb
from torch.utils.data import ConcatDataset
from fedml_api.standalone.fedavg.my_model_trainer import MyModelTrainer
from fedml_api.standalone.federated_sgan.ac_gan_model_trainer import ACGANModelTrainer
from fedml_api.standalone.federated_sgan.client import FedSSGANClient
from fedml_api.standalone.federated_sgan.model_trainer import FedSSGANModelTrainer
from fedml_api.standalone.utils.HeterogeneousModelBaseTrainerAPI import HeterogeneousModelBaseTrainerAPI
class FedSSGANAPI(HeterogeneousModelBaseTrainerAPI):
def __init__(self, dataset, device, args, adapter_model, client_models: List[Tuple[torch.nn.Module, int]]):
"""
Args:
dataset: Dataset presplit into data loaders
device: Device to run training on
args: Additional args
            client_models: List of client models and how frequently each participates (assuming a stateful algorithm for simplicity)
"""
super().__init__(dataset, device, args)
self.global_model = MyModelTrainer(adapter_model)
self._setup_clients(self.train_data_local_num_dict, self.train_data_local_dict, self.test_data_local_dict,
client_models)
self._plot_client_training_data_distribution()
def _setup_clients(self, train_data_local_num_dict, train_data_local_dict, test_data_local_dict,
client_models):
logging.info("############setup_clients (START)#############")
c_idx = 0
for local_model, freq in client_models:
for i in range(freq):
model_trainer = ACGANModelTrainer(
copy.deepcopy(self.global_model.model),
copy.deepcopy(local_model)
)
c = FedSSGANClient(c_idx, train_data_local_dict[c_idx], test_data_local_dict[c_idx],
train_data_local_num_dict[c_idx], self.test_global, self.args, self.device,
model_trainer)
c_idx += 1
self.client_list.append(c)
logging.info("############setup_clients (END)#############")
def train(self):
logging.info('\n###############Pre-Training clients#############\n')
for i, c in enumerate(self.client_list):
            logging.info(f'Pre-training client: {i}')
c.pre_train()
logging.info('###############Pre-Training clients (END)###########\n')
unlabelled_synthesised_data = None
w_global = self.global_model.get_model_params()
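        # FedAvg-style outer loop: each round every client trains from the current global
        # weights and the server aggregates the sample-count-weighted local updates.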
for round_idx in range(self.args.comm_round):
logging.info("################Communication round : {}".format(round_idx))
w_locals = []
synthesised_data_locals = []
client_synthesised_data_lens = {'round': round_idx}
client: FedSSGANClient
for idx, client in enumerate(self.client_list):
# Update client synthetic datasets
# client.set_synthetic_dataset(unlabelled_synthesised_data)
# Local round
w = client.train(copy.deepcopy(w_global), round_idx)
# self.logger.info("local weights = " + str(w))
w_locals.append((client.get_sample_number(), copy.deepcopy(w)))
# synthetic_data = client.generate_synthetic_dataset()
# if synthetic_data is not None:
# synthesised_data_locals.append(synthetic_data)
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = len(synthetic_data)
# else:
# client_synthesised_data_lens[f'Client_{idx}: Synthetic Dataset Size'] = 0
#
# if len(synthesised_data_locals) > 0:
# unlabelled_synthesised_data = ConcatDataset(synthesised_data_locals)
# logging.info(f'\n Synthetic Unlabelled Dataset Size: {len(unlabelled_synthesised_data)}\n')
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = len(unlabelled_synthesised_data)
# else:
# unlabelled_synthesised_data = None
# client_synthesised_data_lens['Total Synthetic Dataset Size'] = 0
# wandb.log(client_synthesised_data_lens)
# update global weights
w_global = self._aggregate(w_locals)
self.global_model.set_model_params(w_global)
# test results
# at last round
if round_idx == self.args.comm_round - 1:
self._local_test_on_all_clients(round_idx)
# per {frequency_of_the_test} round
elif round_idx % self.args.frequency_of_the_test == 0:
if self.args.dataset.startswith("stackoverflow"):
self._local_test_on_validation_set(round_idx)
else:
self._local_test_on_all_clients(round_idx)
| [((29, 28, 29, 57), 'fedml_api.standalone.fedavg.my_model_trainer.MyModelTrainer', 'MyModelTrainer', ({(29, 43, 29, 56): 'adapter_model'}, {}), '(adapter_model)', False, 'from fedml_api.standalone.fedavg.my_model_trainer import MyModelTrainer\n'), ((38, 8, 38, 70), 'logging.info', 'logging.info', ({(38, 21, 38, 69): '"""############setup_clients (START)#############"""'}, {}), "('############setup_clients (START)#############')", False, 'import logging\n'), ((53, 8, 53, 68), 'logging.info', 'logging.info', ({(53, 21, 53, 67): '"""############setup_clients (END)#############"""'}, {}), "('############setup_clients (END)#############')", False, 'import logging\n'), ((56, 8, 56, 76), 'logging.info', 'logging.info', ({(56, 21, 56, 75): '"""\n###############Pre-Training clients#############\n"""'}, {}), '("""\n###############Pre-Training clients#############\n""")', False, 'import logging\n'), ((60, 8, 60, 78), 'logging.info', 'logging.info', ({(60, 21, 60, 77): '"""###############Pre-Training clients (END)###########\n"""'}, {}), "('###############Pre-Training clients (END)###########\\n')", False, 'import logging\n'), ((58, 12, 58, 53), 'logging.info', 'logging.info', ({(58, 25, 58, 52): 'f"""Pre=training client: {i}"""'}, {}), "(f'Pre=training client: {i}')", False, 'import logging\n'), ((47, 20, 49, 49), 'fedml_api.standalone.federated_sgan.client.FedSSGANClient', 'FedSSGANClient', ({(47, 35, 47, 40): 'c_idx', (47, 42, 47, 70): 'train_data_local_dict[c_idx]', (47, 72, 47, 99): 'test_data_local_dict[c_idx]', (48, 35, 48, 67): 'train_data_local_num_dict[c_idx]', (48, 69, 48, 85): 'self.test_global', (48, 87, 48, 96): 'self.args', (48, 98, 48, 109): 'self.device', (49, 35, 49, 48): 'model_trainer'}, {}), '(c_idx, train_data_local_dict[c_idx], test_data_local_dict[\n c_idx], train_data_local_num_dict[c_idx], self.test_global, self.args,\n self.device, model_trainer)', False, 'from fedml_api.standalone.federated_sgan.client import FedSSGANClient\n'), ((44, 20, 44, 58), 'copy.deepcopy', 'copy.deepcopy', ({(44, 34, 44, 57): 'self.global_model.model'}, {}), '(self.global_model.model)', False, 'import copy\n'), ((45, 20, 45, 46), 'copy.deepcopy', 'copy.deepcopy', ({(45, 34, 45, 45): 'local_model'}, {}), '(local_model)', False, 'import copy\n'), ((78, 33, 78, 56), 'copy.deepcopy', 'copy.deepcopy', ({(78, 47, 78, 55): 'w_global'}, {}), '(w_global)', False, 'import copy\n'), ((80, 61, 80, 77), 'copy.deepcopy', 'copy.deepcopy', ({(80, 75, 80, 76): 'w'}, {}), '(w)', False, 'import copy\n')] |
arjun-sai-krishnan/tamil-morpho-embeddings | pytorch-word2vec-master/csv.py | a33bcb427d635dba3b1857f26ea7ab287e1a44c5 | #!/usr/bin/env python3
import argparse
from collections import Counter
import pdb
import pickle
import re
import sys
import time
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
import torch.nn.functional as F
import torch.multiprocessing as mp
import data_producer
from multiprocessing import set_start_method
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default="", help="training file")
parser.add_argument("--vocab", type=str, default="", help="vocab pickle file")
parser.add_argument("--save", type=str, default="csv.pth.tar", help="saved model filename")
parser.add_argument("--size", type=int, default=300, help="word embedding dimension")
parser.add_argument("--window", type=int, default=5, help="context window size")
parser.add_argument("--sample", type=float, default=1e-5, help="subsample threshold")
parser.add_argument("--negative", type=int, default=10, help="number of negative samples")
parser.add_argument("--delta", type=float, default=0.15, help="create new sense for a type if similarity lower than this value.")
parser.add_argument("--min_count", type=int, default=5, help="minimum frequency of a word")
parser.add_argument("--processes", type=int, default=4, help="number of processes")
parser.add_argument("--num_workers", type=int, default=6, help="number of workers for data processsing")
parser.add_argument("--iter", type=int, default=3, help="number of iterations")
parser.add_argument("--lr", type=float, default=-1.0, help="initial learning rate")
parser.add_argument("--batch_size", type=int, default=100, help="(max) batch size")
parser.add_argument("--cuda", action='store_true', default=False, help="enable cuda")
parser.add_argument("--multi_proto", action='store_true', default=False, help="True: multi-prototype, False:single-prototype")
MAX_SENT_LEN = 1000
# Build the vocabulary.
def file_split(f, delim=' \t\n', bufsize=1024):
prev = ''
while True:
s = f.read(bufsize)
if not s:
break
tokens = re.split('['+delim+']{1,}', s)
if len(tokens) > 1:
yield prev + tokens[0]
prev = tokens[-1]
for x in tokens[1:-1]:
yield x
else:
prev += s
if prev:
yield prev
def build_vocab(args):
vocab = Counter()
word_count = 0
for word in file_split(open(args.train)):
vocab[word] += 1
word_count += 1
if word_count % 10000 == 0:
sys.stdout.write('%d\r' % len(vocab))
freq = {k:v for k,v in vocab.items() if v >= args.min_count}
word_count = sum([freq[k] for k in freq])
word_list = sorted(freq, key=freq.get, reverse=True)
word2idx = {}
for i,w in enumerate(word_list):
word2idx[w] = i
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
return word2idx, word_list, freq
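# CSV model: a shared "global" type-embedding table plus a bank of sense embeddings
# (up to 5 per word); a context feature is a learned weighted sum of the 2*window
# context type embeddings, trained with negative sampling.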
class CSV(nn.Module):
def __init__(self, args):
super(CSV, self).__init__()
self.global_embs = nn.Embedding(args.vocab_size+1, args.size, padding_idx=args.vocab_size, sparse=True)
self.sense_embs = nn.Embedding(args.vocab_size*5, args.size, sparse=True)
self.ctx_weight = torch.nn.Parameter(torch.ones(2*args.window, args.size))
self.word2sense = [ [i] for i in range(args.vocab_size) ]
'''
word2sense = np.zeros((args.vocab_size, 5), dtype='int32')
for i in range(args.vocab_size):
word2sense[i, 0] = i
self.word2sense = torch.nn.Parameter(torch.from_numpy(word2sense).int())
self.word_sense_cnts = torch.nn.Parameter(torch.ones((args.vocab_size,)).int())
'''
self.global_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.sense_embs.weight.data.uniform_(-0.5/args.size, 0.5/args.size)
self.n_senses = args.vocab_size
self.sense_capacity = args.vocab_size*5
self.batch_size = args.batch_size
self.size = args.size
self.window = args.window
self.negative = args.negative
self.pad_idx = args.vocab_size
def get_context_feats(self, ctx_type_indices):
ctx_type_embs = self.global_embs(ctx_type_indices)
return torch.sum(ctx_type_embs * self.ctx_weight, 1).cpu().data.numpy()
def get_possible_sense_embs(self, type_indices, cuda=True):
sense_indices = []
sense2idx = {}
for type_id in type_indices:
for s_id in self.word2sense[type_id]:
if s_id not in sense2idx:
sense2idx[s_id] = len(sense_indices)
sense_indices.append( s_id )
sense_indices = np.array(sense_indices)
if cuda:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices).cuda()))
return sense2idx, sense_embs.cpu().data.numpy()
else:
sense_embs = self.sense_embs(Variable(torch.LongTensor(sense_indices)))
return sense2idx, sense_embs.data.numpy()
def forward(self, data):
ctx_type_indices = data[:, 0:2*self.window]
pos_sense_idx = data[:, 2*self.window+1]
neg_sense_indices = data[:, 2*self.window+2:2*self.window+2+self.negative]
neg_mask = data[:, 2*self.window+2+self.negative:].float()
ctx_type_embs = self.global_embs(ctx_type_indices)
pos_sense_embs = self.sense_embs(pos_sense_idx)
neg_sense_embs = self.sense_embs(neg_sense_indices)
ctx_feats = torch.sum(ctx_type_embs * self.ctx_weight, 1, keepdim=True)
# Neg Log Likelihood
pos_ips = torch.sum(ctx_feats[:,0,:] * pos_sense_embs, 1)
pos_loss = torch.sum( -F.logsigmoid(torch.clamp(pos_ips,max=10,min=-10)))
neg_ips = torch.bmm(neg_sense_embs, ctx_feats.permute(0,2,1))[:,:,0]
neg_loss = torch.sum( -F.logsigmoid(torch.clamp(-neg_ips,max=10,min=-10)) * neg_mask )
return pos_loss + neg_loss
# Initialize model.
def init_net(args):
if args.lr == -1.0:
vars(args)['lr'] = 0.05
return CSV(args)
def save_model(filename, model, args, word2idx):
torch.save({
'word2idx':word2idx,
'args':args,
#'word2sense': model.word2sense,
'n_senses': model.n_senses,
'params': model.state_dict()
}, filename)
def load_model(filename):
checkpoint = torch.load(filename)
word2idx = checkpoint['word2idx']
args = checkpoint['args']
model = CSV(args)
if args.cuda:
model.cuda()
model.global_embs.weight.data = checkpoint['params']['global_embs.weight']
model.sense_embs.weight.data = checkpoint['params']['sense_embs.weight']
model.ctx_weight.data = checkpoint['params']['ctx_weight']
model.word2sense = checkpoint['word2sense']
#model.word2sense.data = checkpoint['params']['word2sense']
#model.word_sense_cnts.data = checkpoint['params']['word_sense_cnts']
model.n_senses = checkpoint['n_senses']
return model, word2idx
# Training
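# Each producer process reads its own slice of the training file, applies subsampling,
# and pushes fixed-size CBOW batches onto a queue consumed by the training process.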
def train_process_sent_producer(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args):
n_proc = 1 if args.stage == 2 else args.processes
N = 1 if args.stage == 2 else args.iter
neg = 0 if args.stage == 2 else args.negative
if args.negative > 0:
table_ptr_val = data_producer.init_unigram_table(word_list, freq, args.train_words)
train_file = open(args.train)
file_pos = args.file_size * p_id // n_proc
train_file.seek(file_pos, 0)
while True:
try:
train_file.read(1)
except UnicodeDecodeError:
file_pos -= 1
train_file.seek(file_pos, 0)
else:
train_file.seek(file_pos, 0)
break
batch_count = 0
batch_placeholder = np.zeros((args.batch_size, 2*args.window+2+2*neg), 'int64')
for it in range(N):
train_file.seek(file_pos, 0)
last_word_cnt = 0
word_cnt = 0
sentence = []
prev = ''
eof = False
while True:
if eof or train_file.tell() > file_pos + args.file_size / n_proc:
break
while True:
s = train_file.read(1)
if not s:
eof = True
break
elif s == ' ' or s == '\t':
if prev in word2idx:
sentence.append(prev)
prev = ''
if len(sentence) >= MAX_SENT_LEN:
break
elif s == '\n':
if prev in word2idx:
sentence.append(prev)
prev = ''
break
else:
prev += s
if len(sentence) > 0:
# subsampling
sent_id = []
if args.sample != 0:
sent_len = len(sentence)
i = 0
while i < sent_len:
word = sentence[i]
f = freq[word] / args.train_words
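                        # word2vec-style subsampling: keep probability (sqrt(f/t) + 1) * t / f, with t = args.sample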
pb = (np.sqrt(f / args.sample) + 1) * args.sample / f;
if pb > np.random.random_sample():
sent_id.append( word2idx[word] )
i += 1
if len(sent_id) < 2:
word_cnt += len(sentence)
sentence.clear()
continue
next_random = (2**24) * np.random.randint(0, 2**24) + np.random.randint(0, 2**24)
chunk = data_producer.cbow_producer(sent_id, len(sent_id), table_ptr_val, args.window,
neg, args.vocab_size, args.batch_size, next_random)
chunk_pos = 0
while chunk_pos < chunk.shape[0]:
remain_space = args.batch_size - batch_count
remain_chunk = chunk.shape[0] - chunk_pos
if remain_chunk < remain_space:
take_from_chunk = remain_chunk
else:
take_from_chunk = remain_space
batch_placeholder[batch_count:batch_count+take_from_chunk, :] = chunk[chunk_pos:chunk_pos+take_from_chunk, :]
batch_count += take_from_chunk
if batch_count == args.batch_size:
data_queue.put(batch_placeholder)
batch_count = 0
chunk_pos += take_from_chunk
word_cnt += len(sentence)
if word_cnt - last_word_cnt > 10000:
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
last_word_cnt = word_cnt
sentence.clear()
with word_count_actual.get_lock():
word_count_actual.value += word_cnt - last_word_cnt
print(p_id, it, file_pos, train_file.tell(), args.file_size)
if batch_count > 0:
data_queue.put(batch_placeholder[:batch_count,:])
data_queue.put(None)
print(p_id, file_pos, train_file.tell(), args.file_size)
def train_process(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
lr = args.lr
#optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
optimizer = optim.Adagrad(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
#n_iter = 1 if args.stage == 2 else args.iter
n_iter = args.iter
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
# lr anneal & output
if word_count_actual.value - prev_word_cnt > 10000:
#if args.lr_anneal:
# lr = args.lr * (1 - word_count_actual.value / (n_iter * args.train_words))
# if lr < 0.0001 * args.lr:
# lr = 0.0001 * args.lr
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
#sys.stdout.write("\rAlpha: %0.8f, Progess: %0.2f, Words/sec: %f, word_cnt: %d" % (lr, word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.stage == 1:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
elif args.stage == 3:
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
#type_ids = chunk[:, 2*args.window+1:2*args.window+2+2*args.negative]
type_ids = chunk[:, 2*args.window+1:2*args.window+2+args.negative]
type_ids = np.reshape(type_ids, (type_ids.shape[0] * type_ids.shape[1]))
sense2idx, sense_embs = model.get_possible_sense_embs(type_ids.tolist())
# get type_idx from chunk, and do sense selection here.
context_feats = model.get_context_feats(data[:, :2*args.window])
chunk = data_producer.select_sense(chunk, context_feats, sense2idx, sense_embs,
model.word2sense, chunk.shape[0], args.size, args.window, args.negative)
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
optimizer.zero_grad()
loss = model(data)
loss.backward()
optimizer.step()
model.global_embs.weight.data[args.vocab_size].fill_(0)
t.join()
def train_process_stage2(p_id, word_count_actual, word2idx, word_list, freq, args, model):
data_queue = mp.SimpleQueue()
sense_embs = model.sense_embs.weight.data.numpy()
counter_list = np.zeros((model.sense_capacity), dtype='float32')
t = mp.Process(target=train_process_sent_producer, args=(p_id, data_queue, word_count_actual, word_list, word2idx, freq, args))
t.start()
n_iter = 1
# get from data_queue and feed to model
prev_word_cnt = 0
while True:
chunk = data_queue.get()
if chunk is None:
break
else:
if word_count_actual.value - prev_word_cnt > 10000:
sys.stdout.write("\rProgess: %0.2f, Words/sec: %f, word_cnt: %d" % (word_count_actual.value / (n_iter * args.train_words) * 100, word_count_actual.value / (time.monotonic() - args.t_start), word_count_actual.value))
sys.stdout.flush()
prev_word_cnt = word_count_actual.value
if args.cuda:
data = Variable(torch.LongTensor(chunk).cuda(), requires_grad=False)
else:
data = Variable(torch.LongTensor(chunk), requires_grad=False)
context_feats = model.get_context_feats(data[:, :2*args.window])
# update sense_embs
create_cnt = data_producer.create_n_update_sense(chunk[:, 2*args.window+1], context_feats, sense_embs, model.word2sense, counter_list, chunk.shape[0], args.size, args.delta, model.n_senses)
model.n_senses += create_cnt
#if model.n_senses + args.batch_size > model.sense_capacity:
# new_capacity = model.sense_capacity * 3 // 2
# counter_list = np.concatenate( (counter_list, np.ones((new_capacity - model.sense_capacity),dtype='float32')), axis=0)
# zero = np.zeros((new_capacity - model.sense_capacity, args.size), 'float32')
# sense_embs = np.concatenate((sense_embs, zero), 0)
# model.sense_capacity = new_capacity
# print("\nexapnded sense_embs: %d" % model.n_senses)
t.join()
sense_embs[:model.n_senses, :] = sense_embs[:model.n_senses, :] / counter_list[:model.n_senses, None]
if __name__ == '__main__':
set_start_method('forkserver')
args = parser.parse_args()
print("Starting training using file %s" % args.train)
train_file = open(args.train)
train_file.seek(0, 2)
vars(args)['file_size'] = train_file.tell()
word_count_actual = mp.Value('L', 0)
if args.vocab == '':
word2idx, word_list, freq = build_vocab(args)
else:
with open(args.vocab, 'rb') as f:
word2idx, word_list, freq, pos2idx, dep2id = pickle.load(f)
word_count = sum([freq[k] for k in freq])
vars(args)['vocab_size'] = len(word2idx)
vars(args)['train_words'] = word_count
print("Vocab size: %ld" % len(word2idx))
print("Words in train file: %ld" % word_count)
model = init_net(args)
model.share_memory()
if args.cuda:
model.cuda()
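    # Training runs in three stages: (1) learn context representations with one sense per type,
    # (2) non-parametrically create new senses from context features (multi-prototype),
    # (3) fine-tune with sense selection and no further sense creation.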
# stage 1, learn robust context representation.
vars(args)['stage'] = 1
print("Stage 1")
vars(args)['lr_anneal'] = True
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
del processes
print("\nStage 1, ", time.monotonic() - args.t_start, " secs ", word_count_actual.value)
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage1.pth.tar'
save_model(filename, model, args, word2idx)
if args.multi_proto:
# stage 2, create new sense in a non-parametric way.
# Freeze model paramters except sense_embs, and use only 1 process to prevent race condition
old_batch_size = vars(args)['batch_size']
model.global_embs.requires_grad = False
model.ctx_weight.requires_grad = False
model.sense_embs = model.sense_embs.cpu()
vars(args)['stage'] = 2
vars(args)['batch_size'] = 5000
print("\nStage 2")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
train_process_stage2(0, word_count_actual, word2idx, word_list, freq, args, model)
if args.cuda:
model.cuda()
print("\nStage 2, ", time.monotonic() - args.t_start, " secs")
print("Current # of senses: %d" % model.n_senses)
pdb.set_trace()
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage2.pth.tar'
save_model(filename, model, args, word2idx)
# stage 3, no more sense creation.
vars(args)['lr'] = args.lr * 0.01
vars(args)['batch_size'] = old_batch_size
model.global_embs.requires_grad = True
model.ctx_weight.requires_grad = True
vars(args)['stage'] = 3
print("\nBegin stage 3")
word_count_actual.value = 0
vars(args)['t_start'] = time.monotonic()
processes = []
for p_id in range(args.processes):
p = mp.Process(target=train_process, args=(p_id, word_count_actual, word2idx, word_list, freq, args, model))
p.start()
processes.append(p)
for p in processes:
p.join()
print("\nStage 3, ", time.monotonic() - args.t_start, " secs")
# save model
filename = args.save
if not filename.endswith('.pth.tar'):
filename += '.stage3.pth.tar'
save_model(filename, model, args, word2idx)
print("")
| [((23, 9, 23, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((62, 12, 62, 21), 'collections.Counter', 'Counter', ({}, {}), '()', False, 'from collections import Counter\n'), ((168, 17, 168, 37), 'torch.load', 'torch.load', ({(168, 28, 168, 36): 'filename'}, {}), '(filename)', False, 'import torch\n'), ((208, 24, 208, 83), 'numpy.zeros', 'np.zeros', ({(208, 33, 208, 73): '(args.batch_size, 2 * args.window + 2 + 2 * neg)', (208, 75, 208, 82): '"""int64"""'}, {}), "((args.batch_size, 2 * args.window + 2 + 2 * neg), 'int64')", True, 'import numpy as np\n'), ((300, 17, 300, 33), 'torch.multiprocessing.SimpleQueue', 'mp.SimpleQueue', ({}, {}), '()', True, 'import torch.multiprocessing as mp\n'), ((306, 8, 306, 131), 'torch.multiprocessing.Process', 'mp.Process', (), '', True, 'import torch.multiprocessing as mp\n'), ((374, 17, 374, 33), 'torch.multiprocessing.SimpleQueue', 'mp.SimpleQueue', ({}, {}), '()', True, 'import torch.multiprocessing as mp\n'), ((377, 19, 377, 68), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((379, 8, 379, 131), 'torch.multiprocessing.Process', 'mp.Process', (), '', True, 'import torch.multiprocessing as mp\n'), ((419, 4, 419, 34), 'multiprocessing.set_start_method', 'set_start_method', ({(419, 21, 419, 33): '"""forkserver"""'}, {}), "('forkserver')", False, 'from multiprocessing import set_start_method\n'), ((427, 24, 427, 40), 'torch.multiprocessing.Value', 'mp.Value', ({(427, 33, 427, 36): '"""L"""', (427, 38, 427, 39): '0'}, {}), "('L', 0)", True, 'import torch.multiprocessing as mp\n'), ((449, 28, 449, 44), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((50, 17, 50, 47), 're.split', 're.split', ({(50, 26, 50, 43): "'[' + delim + ']{1,}'", (50, 45, 50, 46): 's'}, {}), "('[' + delim + ']{1,}', s)", False, 'import re\n'), ((87, 27, 87, 111), 'torch.nn.Embedding', 'nn.Embedding', (), '', True, 'import torch.nn as nn\n'), ((88, 26, 88, 81), 'torch.nn.Embedding', 'nn.Embedding', (), '', True, 'import torch.nn as nn\n'), ((122, 24, 122, 47), 'numpy.array', 'np.array', ({(122, 33, 122, 46): 'sense_indices'}, {}), '(sense_indices)', True, 'import numpy as np\n'), ((141, 20, 141, 79), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((144, 18, 144, 65), 'torch.sum', 'torch.sum', ({(144, 28, 144, 61): 'ctx_feats[:, (0), :] * pos_sense_embs', (144, 63, 144, 64): '1'}, {}), '(ctx_feats[:, (0), :] * pos_sense_embs, 1)', False, 'import torch\n'), ((192, 24, 192, 91), 'data_producer.init_unigram_table', 'data_producer.init_unigram_table', ({(192, 57, 192, 66): 'word_list', (192, 68, 192, 72): 'freq', (192, 74, 192, 90): 'args.train_words'}, {}), '(word_list, freq, args.train_words)', False, 'import data_producer\n'), ((452, 12, 452, 116), 'torch.multiprocessing.Process', 'mp.Process', (), '', True, 'import torch.multiprocessing as mp\n'), ((476, 32, 476, 48), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((483, 8, 483, 23), 'pdb.set_trace', 'pdb.set_trace', ({}, {}), '()', False, 'import pdb\n'), ((497, 32, 497, 48), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((89, 45, 89, 81), 'torch.ones', 'torch.ones', ({(89, 56, 89, 69): '2 * args.window', (89, 71, 89, 80): 'args.size'}, {}), '(2 * args.window, args.size)', False, 'import torch\n'), ((403, 25, 403, 201), 'data_producer.create_n_update_sense', 'data_producer.create_n_update_sense', ({(403, 61, 403, 86): 'chunk[:, (2 * args.window + 1)]', (403, 
88, 403, 101): 'context_feats', (403, 103, 403, 113): 'sense_embs', (403, 115, 403, 131): 'model.word2sense', (403, 133, 403, 145): 'counter_list', (403, 147, 403, 161): 'chunk.shape[0]', (403, 163, 403, 172): 'args.size', (403, 174, 403, 184): 'args.delta', (403, 186, 403, 200): 'model.n_senses'}, {}), '(chunk[:, (2 * args.window + 1)],\n context_feats, sense_embs, model.word2sense, counter_list, chunk.shape[\n 0], args.size, args.delta, model.n_senses)', False, 'import data_producer\n'), ((433, 57, 433, 71), 'pickle.load', 'pickle.load', ({(433, 69, 433, 70): 'f'}, {}), '(f)', False, 'import pickle\n'), ((459, 25, 459, 41), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((500, 16, 500, 120), 'torch.multiprocessing.Process', 'mp.Process', (), '', True, 'import torch.multiprocessing as mp\n'), ((329, 16, 329, 34), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((392, 16, 392, 34), 'sys.stdout.flush', 'sys.stdout.flush', ({}, {}), '()', False, 'import sys\n'), ((481, 29, 481, 45), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((507, 29, 507, 45), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((128, 50, 128, 81), 'torch.LongTensor', 'torch.LongTensor', ({(128, 67, 128, 80): 'sense_indices'}, {}), '(sense_indices)', False, 'import torch\n'), ((145, 44, 145, 79), 'torch.clamp', 'torch.clamp', (), '', False, 'import torch\n'), ((261, 70, 261, 97), 'numpy.random.randint', 'np.random.randint', ({(261, 88, 261, 89): '(0)', (261, 91, 261, 96): '(2 ** 24)'}, {}), '(0, 2 ** 24)', True, 'import numpy as np\n'), ((352, 27, 352, 88), 'numpy.reshape', 'np.reshape', ({(352, 38, 352, 46): 'type_ids', (352, 49, 352, 86): 'type_ids.shape[0] * type_ids.shape[1]'}, {}), '(type_ids, type_ids.shape[0] * type_ids.shape[1])', True, 'import numpy as np\n'), ((358, 24, 359, 100), 'data_producer.select_sense', 'data_producer.select_sense', ({(358, 51, 358, 56): 'chunk', (358, 58, 358, 71): 'context_feats', (358, 73, 358, 82): 'sense2idx', (358, 84, 358, 94): 'sense_embs', (359, 28, 359, 44): 'model.word2sense', (359, 46, 359, 60): 'chunk.shape[0]', (359, 62, 359, 71): 'args.size', (359, 73, 359, 84): 'args.window', (359, 86, 359, 99): 'args.negative'}, {}), '(chunk, context_feats, sense2idx, sense_embs,\n model.word2sense, chunk.shape[0], args.size, args.window, args.negative)', False, 'import data_producer\n'), ((398, 32, 398, 55), 'torch.LongTensor', 'torch.LongTensor', ({(398, 49, 398, 54): 'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((147, 44, 147, 80), 'torch.clamp', 'torch.clamp', (), '', False, 'import torch\n'), ((261, 40, 261, 67), 'numpy.random.randint', 'np.random.randint', ({(261, 58, 261, 59): '(0)', (261, 61, 261, 66): '(2 ** 24)'}, {}), '(0, 2 ** 24)', True, 'import numpy as np\n'), ((336, 36, 336, 59), 'torch.LongTensor', 'torch.LongTensor', ({(336, 53, 336, 58): 'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((112, 15, 112, 60), 'torch.sum', 'torch.sum', ({(112, 25, 112, 56): '(ctx_type_embs * self.ctx_weight)', (112, 58, 112, 59): '(1)'}, {}), '(ctx_type_embs * self.ctx_weight, 1)', False, 'import torch\n'), ((125, 50, 125, 81), 'torch.LongTensor', 'torch.LongTensor', ({(125, 67, 125, 80): 'sense_indices'}, {}), '(sense_indices)', False, 'import torch\n'), ((252, 32, 252, 57), 'numpy.random.random_sample', 'np.random.random_sample', ({}, {}), '()', True, 'import numpy as np\n'), ((348, 36, 348, 59), 'torch.LongTensor', 'torch.LongTensor', ({(348, 53, 348, 58): 
'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((364, 36, 364, 59), 'torch.LongTensor', 'torch.LongTensor', ({(364, 53, 364, 58): 'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((396, 32, 396, 55), 'torch.LongTensor', 'torch.LongTensor', ({(396, 49, 396, 54): 'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((334, 36, 334, 59), 'torch.LongTensor', 'torch.LongTensor', ({(334, 53, 334, 58): 'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((250, 30, 250, 54), 'numpy.sqrt', 'np.sqrt', ({(250, 38, 250, 53): '(f / args.sample)'}, {}), '(f / args.sample)', True, 'import numpy as np\n'), ((328, 172, 328, 188), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n'), ((346, 36, 346, 59), 'torch.LongTensor', 'torch.LongTensor', ({(346, 53, 346, 58): 'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((362, 36, 362, 59), 'torch.LongTensor', 'torch.LongTensor', ({(362, 53, 362, 58): 'chunk'}, {}), '(chunk)', False, 'import torch\n'), ((391, 172, 391, 188), 'time.monotonic', 'time.monotonic', ({}, {}), '()', False, 'import time\n')] |
ProEgitim/Python-Dersleri-BEM | Ogrenciler/Varol/buyuksayi.py | b25e9fdb1fa3026925a46b2fcbcba348726b775c | sayi1 = int(input("1. Sayı: "))
sayi2 = int(input("2. Sayı: "))
sayi3 = int(input("3. Sayı: "))
sayi4 = int(input("4. Sayı: "))
sayi5 = int(input("5. Sayı: "))
sayilar=[];
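# Collect the five numbers ("sayı" = "number"), sort them, and print the largest
# ("En büyük sayımız" = "our largest number").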
sayilar.append(sayi1)
sayilar.append(sayi2)
sayilar.append(sayi3)
sayilar.append(sayi4)
sayilar.append(sayi5)
sayilar.sort()
print("En büyük sayimiz..",sayilar[-1])
| [] |
MouseHu/emdqn | baselines/deepq/build_graph_mfec.py | ba907e959f21dd0b5a17117accccae9c82a79a3b | """Deep Q learning graph
The functions in this file can be used to create the following functions:
======= act ========
Function to chose an action given an observation
Parameters
----------
observation: object
Observation that can be feed into the output of make_obs_ph
stochastic: bool
if set to False all the actions are always deterministic (default False)
update_eps_ph: float
update epsilon a new value, if negative not update happens
(default: no update)
Returns
-------
Tensor of dtype tf.int64 and shape (BATCH_SIZE,) with an action to be performed for
every element of the batch.
======= train =======
Function that takes a transition (s,a,r,s') and optimizes Bellman equation's error:
td_error = Q(s,a) - (r + gamma * max_a' Q(s', a'))
loss = huber_loss[td_error]
Parameters
----------
obs_t: object
a batch of observations
action: np.array
actions that were selected upon seeing obs_t.
dtype must be int32 and shape must be (batch_size,)
reward: np.array
immediate reward attained after executing those actions
dtype must be float32 and shape must be (batch_size,)
obs_tp1: object
observations that followed obs_t
done: np.array
1 if obs_t was the last observation in the episode and 0 otherwise
obs_tp1 gets ignored, but must be of the valid shape.
dtype must be float32 and shape must be (batch_size,)
weight: np.array
        importance weights for every element of the batch (gradient is multiplied
by the importance weight) dtype must be float32 and shape must be (batch_size,)
Returns
-------
td_error: np.array
a list of differences between Q(s,a) and the target in Bellman's equation.
dtype is float32 and shape is (batch_size,)
======= update_target ========
copy the parameters from optimized Q function to the target Q function.
In Q learning we actually optimize the following error:
Q(s,a) - (r + gamma * max_a' Q'(s', a'))
Where Q' is lagging behind Q to stablize the learning. For example for Atari
Q' is set to Q once every 10000 updates training steps.
"""
import tensorflow as tf
import baselines.common.tf_util as U
import numpy as np
def build_act_mf(make_obs_ph, q_func, z_noise, num_actions, scope="deepq", reuse=None):
with tf.variable_scope(scope, reuse=reuse):
observations_ph = U.ensure_tf_input(make_obs_ph("observation"))
q, q_deterministic, v_mean, v_logvar, z_mean, z_logvar, recon_obs = q_func(observations_ph.get(), z_noise,
num_actions,
scope="q_func",
reuse=tf.AUTO_REUSE)
act = U.function(inputs=[observations_ph,z_noise],
outputs=[z_mean, z_logvar])
return act
def build_train_mf(make_obs_ph, q_func, num_actions, optimizer, grad_norm_clipping=None, gamma=1.0, scope="mfec",
alpha=1.0, beta=1.0, theta=1.0, latent_dim=32, ib=True, reuse=None):
"""Creates the train function:
Parameters
----------
make_obs_ph: str -> tf.placeholder or TfInput
a function that takes a name and creates a placeholder of input with that name
q_func: (tf.Variable, int, str, bool) -> tf.Variable
the model that takes the following inputs:
observation_in: object
the output of observation placeholder
num_actions: int
number of actions
scope: str
reuse: bool
should be passed to outer variable scope
and returns a tensor of shape (batch_size, num_actions) with values of every action.
num_actions: int
number of actions
reuse: bool
whether or not to reuse the graph variables
optimizer: tf.train.Optimizer
optimizer to use for the Q-learning objective.
grad_norm_clipping: float or None
clip gradient norms to this value. If None no clipping is performed.
gamma: float
discount rate.
double_q: bool
if true will use Double Q Learning (https://arxiv.org/abs/1509.06461).
In general it is a good idea to keep it enabled.
scope: str or VariableScope
optional scope for variable_scope.
reuse: bool or None
whether or not the variables should be reused. To be able to reuse the scope must be given.
Returns
-------
act: (tf.Variable, bool, float) -> tf.Variable
function to select and action given observation.
` See the top of the file for details.
train: (object, np.array, np.array, object, np.array, np.array) -> np.array
optimize the error in Bellman's equation.
` See the top of the file for details.
update_target: () -> ()
copy the parameters from optimized Q function to the target Q function.
` See the top of the file for details.
debug: {str: function}
a bunch of functions to print debug data like q_values.
"""
act_noise = tf.placeholder(tf.float32, [None, latent_dim], name="act_noise")
act_f = build_act_mf(make_obs_ph, q_func, act_noise, num_actions, scope=scope, reuse=reuse)
with tf.variable_scope(scope, reuse=reuse):
# set up placeholders
# EMDQN
obs_vae_input = U.ensure_tf_input(make_obs_ph("obs_vae"))
z_noise_vae = tf.placeholder(tf.float32, [None, latent_dim], name="z_noise_vae")
inputs = [obs_vae_input,z_noise_vae]
if ib:
qec_input = tf.placeholder(tf.float32, [None], name='qec')
inputs.append(qec_input)
outputs = []
q_vae, q_deterministic_vae, v_mean_vae, v_logvar_vae, z_mean_vae, z_logvar_vae, recon_obs = q_func(obs_vae_input.get(),
z_noise_vae, num_actions,
scope="q_func",
reuse=True)
q_func_vars = U.scope_vars(U.absolute_scope_name("q_func"))
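        # (Unscaled) Gaussian KL term of the VAE encoder: mu^2 + exp(logvar) - logvar - 1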
encoder_loss = -1 + z_mean_vae ** 2 + tf.exp(z_logvar_vae) - z_logvar_vae
total_loss = tf.reduce_mean(beta * encoder_loss)
decoder_loss = tf.keras.losses.binary_crossentropy(tf.reshape(recon_obs, [-1]), tf.reshape(
tf.dtypes.cast(obs_vae_input._placeholder, tf.float32), [-1]))
print("here", z_mean_vae.shape, z_logvar_vae.shape, encoder_loss.shape, decoder_loss.shape)
vae_loss = beta * encoder_loss + theta * decoder_loss
outputs.append(encoder_loss)
outputs.append(decoder_loss)
outputs.append(vae_loss)
total_loss += tf.reduce_mean(theta * decoder_loss)
if ib:
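            # Gaussian negative log-likelihood (up to constants) of the episodic return
            # qec_input under N(v_mean_vae, exp(v_logvar_vae))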
ib_loss = (v_mean_vae - tf.stop_gradient(tf.expand_dims(qec_input, 1))) ** 2 / tf.exp(
v_logvar_vae) + v_logvar_vae
print("here2", v_mean_vae.shape, tf.expand_dims(qec_input, 1).shape, v_logvar_vae.shape, ib_loss.shape)
total_ib_loss = alpha * ib_loss + beta * encoder_loss
outputs.append(total_ib_loss)
total_loss += tf.reduce_mean(alpha * ib_loss)
if grad_norm_clipping is not None:
optimize_expr = U.minimize_and_clip(optimizer,
total_loss,
var_list=q_func_vars,
clip_val=grad_norm_clipping)
else:
optimize_expr = optimizer.minimize(total_loss, var_list=q_func_vars)
# Create callable functions
# EMDQN
total_loss_summary = tf.summary.scalar("total loss", total_loss)
z_var_summary = tf.summary.scalar("z_var", tf.reduce_mean(tf.exp(z_logvar_vae)))
encoder_loss_summary = tf.summary.scalar("encoder loss", tf.reduce_mean(encoder_loss))
decoder_loss_summary = tf.summary.scalar("decoder loss", tf.reduce_mean(decoder_loss))
summaries = [total_loss_summary, z_var_summary, encoder_loss_summary, decoder_loss_summary]
if ib:
ib_loss_summary = tf.summary.scalar("ib loss", tf.reduce_mean(ib_loss))
total_ib_loss_summary = tf.summary.scalar("total ib loss", tf.reduce_mean(total_ib_loss))
summaries.append(ib_loss_summary)
summaries.append(total_ib_loss_summary)
summary = tf.summary.merge(summaries)
outputs.append(summary)
train = U.function(
inputs=inputs,
outputs=[total_loss,summary],
updates=[optimize_expr]
)
return act_f, train
| [((139, 16, 139, 80), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((76, 9, 76, 46), 'tensorflow.variable_scope', 'tf.variable_scope', (), '', True, 'import tensorflow as tf\n'), ((83, 14, 84, 52), 'baselines.common.tf_util.function', 'U.function', (), '', True, 'import baselines.common.tf_util as U\n'), ((142, 9, 142, 46), 'tensorflow.variable_scope', 'tf.variable_scope', (), '', True, 'import tensorflow as tf\n'), ((148, 22, 148, 88), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((163, 21, 163, 56), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(163, 36, 163, 55): 'beta * encoder_loss'}, {}), '(beta * encoder_loss)', True, 'import tensorflow as tf\n'), ((171, 22, 171, 58), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(171, 37, 171, 57): '(theta * decoder_loss)'}, {}), '(theta * decoder_loss)', True, 'import tensorflow as tf\n'), ((189, 29, 189, 72), 'tensorflow.summary.scalar', 'tf.summary.scalar', ({(189, 47, 189, 59): '"""total loss"""', (189, 61, 189, 71): 'total_loss'}, {}), "('total loss', total_loss)", True, 'import tensorflow as tf\n'), ((200, 18, 200, 45), 'tensorflow.summary.merge', 'tf.summary.merge', ({(200, 35, 200, 44): 'summaries'}, {}), '(summaries)', True, 'import tensorflow as tf\n'), ((203, 16, 207, 9), 'baselines.common.tf_util.function', 'U.function', (), '', True, 'import baselines.common.tf_util as U\n'), ((151, 24, 151, 70), 'tensorflow.placeholder', 'tf.placeholder', (), '', True, 'import tensorflow as tf\n'), ((159, 35, 159, 66), 'baselines.common.tf_util.absolute_scope_name', 'U.absolute_scope_name', ({(159, 57, 159, 65): '"""q_func"""'}, {}), "('q_func')", True, 'import baselines.common.tf_util as U\n'), ((164, 59, 164, 86), 'tensorflow.reshape', 'tf.reshape', ({(164, 70, 164, 79): 'recon_obs', (164, 81, 164, 85): '[-1]'}, {}), '(recon_obs, [-1])', True, 'import tensorflow as tf\n'), ((178, 26, 178, 57), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(178, 41, 178, 56): '(alpha * ib_loss)'}, {}), '(alpha * ib_loss)', True, 'import tensorflow as tf\n'), ((181, 28, 184, 76), 'baselines.common.tf_util.minimize_and_clip', 'U.minimize_and_clip', (), '', True, 'import baselines.common.tf_util as U\n'), ((191, 65, 191, 93), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(191, 80, 191, 92): 'encoder_loss'}, {}), '(encoder_loss)', True, 'import tensorflow as tf\n'), ((192, 65, 192, 93), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(192, 80, 192, 92): 'decoder_loss'}, {}), '(decoder_loss)', True, 'import tensorflow as tf\n'), ((161, 46, 161, 66), 'tensorflow.exp', 'tf.exp', ({(161, 53, 161, 65): 'z_logvar_vae'}, {}), '(z_logvar_vae)', True, 'import tensorflow as tf\n'), ((165, 12, 165, 66), 'tensorflow.dtypes.cast', 'tf.dtypes.cast', ({(165, 27, 165, 53): 'obs_vae_input._placeholder', (165, 55, 165, 65): 'tf.float32'}, {}), '(obs_vae_input._placeholder, tf.float32)', True, 'import tensorflow as tf\n'), ((190, 66, 190, 86), 'tensorflow.exp', 'tf.exp', ({(190, 73, 190, 85): 'z_logvar_vae'}, {}), '(z_logvar_vae)', True, 'import tensorflow as tf\n'), ((195, 59, 195, 82), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(195, 74, 195, 81): 'ib_loss'}, {}), '(ib_loss)', True, 'import tensorflow as tf\n'), ((196, 71, 196, 100), 'tensorflow.reduce_mean', 'tf.reduce_mean', ({(196, 86, 196, 99): 'total_ib_loss'}, {}), '(total_ib_loss)', True, 'import tensorflow as tf\n'), ((173, 91, 174, 29), 'tensorflow.exp', 'tf.exp', ({(174, 16, 174, 28): 'v_logvar_vae'}, {}), '(v_logvar_vae)', True, 'import 
tensorflow as tf\n'), ((175, 45, 175, 73), 'tensorflow.expand_dims', 'tf.expand_dims', ({(175, 60, 175, 69): 'qec_input', (175, 71, 175, 72): '(1)'}, {}), '(qec_input, 1)', True, 'import tensorflow as tf\n'), ((173, 53, 173, 81), 'tensorflow.expand_dims', 'tf.expand_dims', ({(173, 68, 173, 77): 'qec_input', (173, 79, 173, 80): '(1)'}, {}), '(qec_input, 1)', True, 'import tensorflow as tf\n')] |
frodre/LMR | tests/test_prior.py | 4c00d3f9db96447e69bd3f426d59524f7b5f3ef5 | import sys
sys.path.append('../')
import LMR_config as cfg
import LMR_prior
import numpy as np
import pytest
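# These tests check that a fixed core.seed makes the sampled prior ensemble reproducible,
# and that Nens=None loads the full prior as the ensemble.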
def test_prior_seed():
cfg_obj = cfg.Config(**{'core':{'seed': 2}})
prior_cfg = cfg_obj.prior
prior_source = '20cr'
datadir_prior = 'data'
datafile_prior = '[vardef_template]_gridded_dat.nc'
state_variables = {'air': 'anom'}
state_kind = 'anom'
X = LMR_prior.prior_assignment(prior_source)
X.prior_datadir = datadir_prior
X.prior_datafile = datafile_prior
X.statevars = state_variables
X.Nens = 1
X.detrend = False
X.kind = state_kind
X.avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X.populate_ensemble(prior_source, prior_cfg)
X2 = LMR_prior.prior_assignment(prior_source)
X2.prior_datadir = datadir_prior
X2.prior_datafile = datafile_prior
X2.statevars = state_variables
X2.Nens = 1
X2.detrend = False
X2.kind = state_kind
X2.avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X2.populate_ensemble(prior_source, prior_cfg)
np.testing.assert_equal(X2.ens, X.ens)
def test_prior_use_full_prior():
cfg_obj = cfg.Config(**{'core': {'seed': None}})
prior_cfg = cfg_obj.prior
prior_source = '20cr'
datadir_prior = 'data'
datafile_prior = '[vardef_template]_gridded_dat.nc'
state_variables = {'air': 'anom'}
state_kind = 'anom'
avgInterval = [1,2,3,4,5,6,7,8,9,10,11,12]
X = LMR_prior.prior_assignment(prior_source)
X.prior_datadir = datadir_prior
X.prior_datafile = datafile_prior
X.statevars = state_variables
X.Nens = None
X.detrend = False
X.kind = state_kind
X.avgInterval = avgInterval
X.populate_ensemble(prior_source, prior_cfg)
X2 = LMR_prior.prior_assignment(prior_source)
X2.prior_datadir = datadir_prior
X2.prior_datafile = datafile_prior
X2.statevars = state_variables
X2.Nens = None
X2.detrend = False
X2.kind = state_kind
X2.avgInterval = avgInterval
X2.read_prior()
# Transform full prior into ensemble-like shape
prior_vals = X2.prior_dict['air']['value']
prior_vals = prior_vals.reshape(prior_vals.shape[0], -1)
prior_vals = prior_vals.T
np.testing.assert_equal(X.ens, prior_vals)
| [((3, 0, 3, 22), 'sys.path.append', 'sys.path.append', ({(3, 16, 3, 21): '"""../"""'}, {}), "('../')", False, 'import sys\n'), ((12, 14, 12, 48), 'LMR_config.Config', 'cfg.Config', ({}, {}), "(**{'core': {'seed': 2}})", True, 'import LMR_config as cfg\n'), ((20, 8, 20, 48), 'LMR_prior.prior_assignment', 'LMR_prior.prior_assignment', ({(20, 35, 20, 47): 'prior_source'}, {}), '(prior_source)', False, 'import LMR_prior\n'), ((32, 9, 32, 49), 'LMR_prior.prior_assignment', 'LMR_prior.prior_assignment', ({(32, 36, 32, 48): 'prior_source'}, {}), '(prior_source)', False, 'import LMR_prior\n'), ((44, 4, 44, 42), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(44, 28, 44, 34): 'X2.ens', (44, 36, 44, 41): 'X.ens'}, {}), '(X2.ens, X.ens)', True, 'import numpy as np\n'), ((48, 14, 48, 52), 'LMR_config.Config', 'cfg.Config', ({}, {}), "(**{'core': {'seed': None}})", True, 'import LMR_config as cfg\n'), ((57, 8, 57, 48), 'LMR_prior.prior_assignment', 'LMR_prior.prior_assignment', ({(57, 35, 57, 47): 'prior_source'}, {}), '(prior_source)', False, 'import LMR_prior\n'), ((69, 9, 69, 49), 'LMR_prior.prior_assignment', 'LMR_prior.prior_assignment', ({(69, 36, 69, 48): 'prior_source'}, {}), '(prior_source)', False, 'import LMR_prior\n'), ((85, 4, 85, 46), 'numpy.testing.assert_equal', 'np.testing.assert_equal', ({(85, 28, 85, 33): 'X.ens', (85, 35, 85, 45): 'prior_vals'}, {}), '(X.ens, prior_vals)', True, 'import numpy as np\n')] |
juanjo3ns/SalGAN2 | src/salgan_dhf1k/train_bce.py | ac52af743b94961cdb44c5d89774b72fc8acfd3e | import os
from dataloader.datasetDHF1K import DHF1K
from torch.utils.data import DataLoader
from utils.salgan_utils import save_model, get_lr_optimizer
from utils.sendTelegram import send
from utils.printer import param_print
from utils.salgan_generator import create_model, add_bn
from evaluation.fast_evaluation import compute_metrics
import numpy as np
import torch
from torch.nn import AvgPool2d
from torch.nn.modules.loss import BCELoss
import torch.backends.cudnn as cudnn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR
from time import time
from IPython import embed
from tensorboard_logger import configure, log_value, log_histogram
TRAIN = 'train'
VAL = 'val'
TEST = 'test'
def add_layer_weights(vgg_weights):
# Mean of RGB weights of first layer with size [64,1,3,3]
layer1 = vgg_weights['0.weight']
mean_rgb = layer1.mean(dim=1,keepdim=True)
vgg_weights['0.weight'] = torch.cat([layer1.cuda(),mean_rgb.cuda()],1)
# We could do it easily accessing to the weights trought model[0].weight and change dimension 1, but as we
# already have the 4th channel we'd be doing the mean of all of the channels, inicializing it in the wrong way.
return vgg_weights
def train_eval(mode, model, optimizer, dataloader):
if mode == TRAIN:
N = len(ds_train)/batch_size
model.train()
else:
N = len(ds_validate)/batch_size
model.eval()
total_loss = []
#iterate epoch...
#iterate epoch...
for i, X in enumerate(dataloader[mode]):
inputs = X[0].cuda()
# noramlize saliency maps values between [0,1]
gt_maps = X[1].cuda()/255
embed()
predictions = model.forward(inputs).squeeze()
# reduce size for loss
reduce_size = AvgPool2d((4,4))
pred_ = reduce_size(predictions)
gt_maps_ = reduce_size(gt_maps)
pred_ = pred_.view(pred_.size()[0], -1)
gt_maps_ = gt_maps_.view(gt_maps_.size()[0], -1)
loss = bce_loss(pred_, gt_maps_)
# make actual step update
if mode==TRAIN:
# compute gradients
loss.backward()
# step optimizer
optimizer.step()
# reset grads for next step
optimizer.zero_grad()
print("\t{}/{} loss:{}".format(i, int(N), loss.item()), end="\r")
total_loss.append(loss.item())
total_loss=np.mean(total_loss)
return total_loss
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--path_out", default='sal_dhf1k_adamdepthcoordaugm2_frombestsaldepth',
type=str,
help="""set output path for the trained model""")
parser.add_argument("--batch_size", default=12,
type=int,
help="""Set batch size""")
parser.add_argument("--n_epochs", default=10, type=int,
help="""Set total number of epochs""")
parser.add_argument("--depth", default=False, type=bool,
help="""Enable 4th channel with depth""")
parser.add_argument("--augment", default=False, type=bool,
help="""Enable data augmentation""")
parser.add_argument("--coord", default=False, type=bool,
help="""Enable coordconv""")
parser.add_argument("--flow", default=False, type=bool,
help="""Enable opticalflow""")
parser.add_argument("--lr", type=float, default=0.00001,
help="""Learning rate for training""")
parser.add_argument("--patience", type=int, default=3,
help="""Patience for learning rate scheduler (default 10)""")
args = parser.parse_args()
# set output path ==========================================================
path_out = '../trained_models/batch12_/' + args.path_out
if not os.path.exists(path_out):
# create output path
os.makedirs(path_out)
# create output for models
path_models = os.path.join(path_out, 'models')
if not os.path.exists(path_models):
os.makedirs(path_models)
# tensorboard
configure("{}".format(path_out), flush_secs=5)
# data =====================================================================
batch_size = args.batch_size
n_epochs = args.n_epochs
lr = args.lr
DEPTH = args.depth
AUGMENT = args.augment
COORD = args.coord
FLOW = args.flow
# Datasets for DHF1K
ds_train = DHF1K(mode=TRAIN, transformation=True, depth=DEPTH, d_augm=AUGMENT, coord=COORD)
ds_validate = DHF1K(mode=VAL, transformation=False, depth=DEPTH, d_augm=False, coord=COORD)
# Dataloaders
dataloader = {
TRAIN: DataLoader(ds_train, batch_size=batch_size,
shuffle=True, num_workers=2),
VAL: DataLoader(ds_validate, batch_size=batch_size,
shuffle=False, num_workers=2)
}
# POSSIBILITY OF CHOOSING GPU
torch.cuda.set_device(1)
# MODEL INITIALIZATION
print("Init model...")
vgg_weights = torch.load('../trained_models/salgan_baseline.pt')['state_dict']
model = create_model(3)
# if DEPTH and COORD:
# model = create_model(6)
# for i in range(0,3):
# vgg_weights = add_layer_weights(vgg_weights)
# elif DEPTH:
# model = create_model(4)
# add_layer_weights(vgg_weights)
# elif COORD:
# model = create_model(5)
# for i in range(0,2):
# vgg_weights = add_layer_weights(vgg_weights)
# else: model = create_model(3)
# Instead of adding manually the layer of new weights, we could use strict=False
model.load_state_dict(vgg_weights)
# Add batch normalization to current model if needed
model = add_bn(model)
model.train()
model.cuda()
cudnn.benchmark = True
# NOT WORKING UNMOUNTED DISK
# If we have the two GPU's available we are going to use both
# if torch.cuda.device_count() > 1:
# print("Using ", torch.cuda.device_count(), "GPUs!")
# model = torch.nn.DataParallel(model)
# LOSS FUNCTION
bce_loss = BCELoss()
# FINE-TUNE WHOLE NETWORK OR JUST DECODER => uncomment / or different lr for each part
# decoder_parameters = []
# base_params = []
# for i, (a, p) in enumerate(model.named_parameters()):
# embed()
# if i>25:
# # print(i, a, p.shape)
# decoder_parameters.append(p)
# else:
# base_params.append(p)
# If you wanna train just the decoder put this
# p.requires_grad = False
# ADAM OPTIMIZER
optimizer = Adam(model.parameters(),
lr = lr,
weight_decay=0.000001)
# STOCHASTIC GRADIENT DESCENT OPTIMIZER
# optimizer = SGD(model.parameters(),
# lr = 0.00001,
# momentum=0.9,
# weight_decay=0.00001,
# nesterov=True)
# NUMBER OF TOTAL PARAMETERS
# pytorch_total_params = sum(p.numel() for p in model.parameters())
# NUMBER OF TRAINABLE PARAMETERS
trainable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Trainable parameters: ", trainable_parameters)
send("Trainable parameters: " + str(trainable_parameters))
send("Experiment: " + args.path_out)
# PRINT TABLE OF PARAMETERS
param_print([path_out,"",DEPTH,AUGMENT,COORD,FLOW,batch_size,lr,n_epochs, trainable_parameters])
# set learning rate scheduler
# ReduceLROnPlateau(
# optimizer,
# mode (str) 'min':lr es reduira quan la metrica no es redueixi mes, 'max' al contrari,
# factor (float) factor de reduccio de la lr,
# patience (int) num epochs sense millora a partir dels quals es redueix lr,
# verbose (bool),
# )
# scheduler = ReduceLROnPlateau(optimizer,
# 'min',
# patience=args.patience,
# verbose=True)
scheduler = StepLR(optimizer, step_size=3, gamma=0.1)
best_loss=9999999
# main loop training =======================================================
for id_epoch in range(n_epochs):
for mode in [VAL, TRAIN]:
# select dataloader
data_iterator = dataloader[mode]
#
# # saliency metrics
# if mode ==VAL:
# print("Evaluating metrics....")
# # only do 100 images from validation
# metrics = compute_metrics(model, 100, DEPTH, COORD)
#
# # log metric values
# for metric in metrics.keys():
# log_value("Metrics/{}".format(metric),
# metrics[metric], id_epoch)
#
# # get epoch loss
# print("--> {} epoch {}".format(mode, id_epoch))
epoch_loss = train_eval(mode, model, optimizer, dataloader)
lr = list(get_lr_optimizer(optimizer))[0]
print("-----------")
print("Done! {} epoch {} loss {} lr {}".format(mode, id_epoch, epoch_loss, lr))
send("{} epoch {}/{} loss {}".format(mode, id_epoch, n_epochs, epoch_loss))
print("\n")
# record loss
log_value("loss/{}".format(mode), epoch_loss, id_epoch)
log_value("lr/{}".format(mode), lr, id_epoch)
# for v in model.state_dict():
# log_histogram("Layer {}".format(v), model.state_dict()[v], id_epoch)
if (id_epoch%2)==0:
save_model(model, optimizer, id_epoch, path_out, name_model='{:03d}'.format(id_epoch))
# store model if val loss improves
if mode==VAL:
if best_loss > epoch_loss:
# update loss
best_loss = epoch_loss
save_model(model, optimizer, id_epoch, path_out, name_model='best')
# scheduler.step(epoch_loss)
scheduler.step()
| [((78, 12, 78, 31), 'numpy.mean', 'np.mean', ({(78, 20, 78, 30): 'total_loss'}, {}), '(total_loss)', True, 'import numpy as np\n'), ((85, 10, 85, 35), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((133, 12, 133, 92), 'dataloader.datasetDHF1K.DHF1K', 'DHF1K', (), '', False, 'from dataloader.datasetDHF1K import DHF1K\n'), ((134, 15, 134, 92), 'dataloader.datasetDHF1K.DHF1K', 'DHF1K', (), '', False, 'from dataloader.datasetDHF1K import DHF1K\n'), ((146, 1, 146, 25), 'torch.cuda.set_device', 'torch.cuda.set_device', ({(146, 23, 146, 24): '(1)'}, {}), '(1)', False, 'import torch\n'), ((150, 9, 150, 24), 'utils.salgan_generator.create_model', 'create_model', ({(150, 22, 150, 23): '3'}, {}), '(3)', False, 'from utils.salgan_generator import create_model, add_bn\n'), ((167, 9, 167, 22), 'utils.salgan_generator.add_bn', 'add_bn', ({(167, 16, 167, 21): 'model'}, {}), '(model)', False, 'from utils.salgan_generator import create_model, add_bn\n'), ((181, 12, 181, 21), 'torch.nn.modules.loss.BCELoss', 'BCELoss', ({}, {}), '()', False, 'from torch.nn.modules.loss import BCELoss\n'), ((215, 1, 215, 37), 'utils.sendTelegram.send', 'send', ({(215, 6, 215, 36): "('Experiment: ' + args.path_out)"}, {}), "('Experiment: ' + args.path_out)", False, 'from utils.sendTelegram import send\n'), ((218, 1, 218, 97), 'utils.printer.param_print', 'param_print', ({(218, 13, 218, 96): "[path_out, '', DEPTH, AUGMENT, COORD, FLOW, batch_size, lr, n_epochs,\n trainable_parameters]"}, {}), "([path_out, '', DEPTH, AUGMENT, COORD, FLOW, batch_size, lr,\n n_epochs, trainable_parameters])", False, 'from utils.printer import param_print\n'), ((232, 13, 232, 54), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (), '', False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau, StepLR\n'), ((53, 2, 53, 9), 'IPython.embed', 'embed', ({}, {}), '()', False, 'from IPython import embed\n'), ((56, 16, 56, 32), 'torch.nn.AvgPool2d', 'AvgPool2d', ({(56, 26, 56, 31): '(4, 4)'}, {}), '((4, 4))', False, 'from torch.nn import AvgPool2d\n'), ((112, 8, 112, 32), 'os.path.exists', 'os.path.exists', ({(112, 23, 112, 31): 'path_out'}, {}), '(path_out)', False, 'import os\n'), ((114, 2, 114, 23), 'os.makedirs', 'os.makedirs', ({(114, 14, 114, 22): 'path_out'}, {}), '(path_out)', False, 'import os\n'), ((117, 16, 117, 48), 'os.path.join', 'os.path.join', ({(117, 29, 117, 37): 'path_out', (117, 39, 117, 47): '"""models"""'}, {}), "(path_out, 'models')", False, 'import os\n'), ((138, 9, 139, 36), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((140, 7, 141, 37), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((149, 15, 149, 65), 'torch.load', 'torch.load', ({(149, 26, 149, 64): '"""../trained_models/salgan_baseline.pt"""'}, {}), "('../trained_models/salgan_baseline.pt')", False, 'import torch\n'), ((118, 9, 118, 36), 'os.path.exists', 'os.path.exists', ({(118, 24, 118, 35): 'path_models'}, {}), '(path_models)', False, 'import os\n'), ((119, 3, 119, 27), 'os.makedirs', 'os.makedirs', ({(119, 15, 119, 26): 'path_models'}, {}), '(path_models)', False, 'import os\n'), ((258, 13, 258, 40), 'utils.salgan_utils.get_lr_optimizer', 'get_lr_optimizer', ({(258, 30, 258, 39): 'optimizer'}, {}), '(optimizer)', False, 'from utils.salgan_utils import save_model, get_lr_optimizer\n'), ((277, 5, 277, 72), 'utils.salgan_utils.save_model', 'save_model', (), '', False, 'from utils.salgan_utils import 
save_model, get_lr_optimizer\n')] |
tracon/dragontail | dragontail/content/models/basicpage.py | aae860acb5fe400015557f659b6d4221b939747a | # encoding: utf-8
from django.db import models
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore.fields import StreamField
from wagtail.wagtailcore import blocks
from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel
from wagtail.wagtailimages.blocks import ImageChooserBlock
class BasicPage(Page):
body = StreamField([
('paragraph', blocks.RichTextBlock()),
('image', ImageChooserBlock()),
])
content_panels = Page.content_panels + [
StreamFieldPanel('body'),
]
def get_template(self, request, *args, **kwargs):
from .templatesettings import TemplateSettings
template_settings = TemplateSettings.for_site(request.site)
return template_settings.basic_page_template | [((19, 8, 19, 32), 'wagtail.wagtailadmin.edit_handlers.StreamFieldPanel', 'StreamFieldPanel', ({(19, 25, 19, 31): '"""body"""'}, {}), "('body')", False, 'from wagtail.wagtailadmin.edit_handlers import FieldPanel, StreamFieldPanel\n'), ((14, 22, 14, 44), 'wagtail.wagtailcore.blocks.RichTextBlock', 'blocks.RichTextBlock', ({}, {}), '()', False, 'from wagtail.wagtailcore import blocks\n'), ((15, 18, 15, 37), 'wagtail.wagtailimages.blocks.ImageChooserBlock', 'ImageChooserBlock', ({}, {}), '()', False, 'from wagtail.wagtailimages.blocks import ImageChooserBlock\n')] |
infapy/infapy | infapy/v3/agentService.py | 0cb11310130be70ce1b647aa5ede929c1eb9b2ce | # Copyright (c) 2021-Present (Prashanth Pradeep)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests as re
import infapy
from infapy.exceptions import InvalidDetailsProvided
class AgentService():
def __init__(self,v3,v3BaseURL,v3SessionID):
self._v3 = v3
self._v3BaseURL = v3BaseURL
self._v3SessionID = v3SessionID
def updateAgentService(self,serviceName, serviceAction, agentId):
url=self._v3BaseURL + "/public/core/v3/agent/service"
headers = {'Content-Type': "application/json", 'Accept': "application/json","INFA-SESSION-ID":self._v3SessionID}
body = {
'serviceName':serviceName,
'serviceAction':serviceAction,
'agentId':agentId}
infapy.log.info("agentService API URL - " + url)
infapy.log.info("API Headers: " + str(headers))
infapy.log.info("Body: " + str(body))
try:
response = re.post(url=url, json=body, headers=headers)
data = response.json()
infapy.log.debug(str(data))
try:
if ("error" in data):
infapy.log.error("Please validate the details passed")
infapy.log.error(str(data))
raise InvalidDetailsProvided
except Exception as e:
infapy.log.exception(e)
raise
except Exception as e:
infapy.log.exception(e)
raise
infapy.log.info(data["message"])
return data | [((33, 8, 33, 56), 'infapy.log.info', 'infapy.log.info', ({(33, 24, 33, 55): "('agentService API URL - ' + url)"}, {}), "('agentService API URL - ' + url)", False, 'import infapy\n'), ((52, 8, 52, 40), 'infapy.log.info', 'infapy.log.info', ({(52, 24, 52, 39): "data['message']"}, {}), "(data['message'])", False, 'import infapy\n'), ((38, 23, 38, 67), 'requests.post', 're.post', (), '', True, 'import requests as re\n'), ((50, 12, 50, 35), 'infapy.log.exception', 'infapy.log.exception', ({(50, 33, 50, 34): 'e'}, {}), '(e)', False, 'import infapy\n'), ((43, 20, 43, 74), 'infapy.log.error', 'infapy.log.error', ({(43, 37, 43, 73): '"""Please validate the details passed"""'}, {}), "('Please validate the details passed')", False, 'import infapy\n'), ((47, 16, 47, 39), 'infapy.log.exception', 'infapy.log.exception', ({(47, 37, 47, 38): 'e'}, {}), '(e)', False, 'import infapy\n')] |
pengwow/test-demo | home_application/views.py | 9d5c460b534d93d84f39ae24db82aa101027d199 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from common.mymako import render_mako_context, render_json
from blueking.component.shortcuts import get_client_by_request
from django.views.decorators.csrf import csrf_exempt
from models import TEST, HostDisk, ScriptExecInfo
import json
import base64
def home(request):
"""
    Home page
"""
# yewu = [
# {'id': 1, "name": u"业务1"},
# {'id': 2, "name": u"业务2"},
# {'id': 3, "name": u"业务3"},
# ]
    # Get the app info from the environment config and the current user from the request
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
print(result)
yewu = result['data']['info']
return render_mako_context(request, '/home_application/home.html',
{
"yewu": yewu,
"AAA": u"业务列表"
})
def submit_template(request):
"""
    Submit template
"""
print(request.body)
return render_json({"1111111": "dddddddddd"})
def dev_guide(request):
"""
    Development guide
"""
return render_mako_context(request, '/home_application/dev_guide.html')
def contactus(request):
"""
    Contact us
"""
return render_mako_context(request, '/home_application/contact.html')
def tijiao(request):
data = json.loads(request.body)
print(type(data))
sss = TEST(**data)
sss.save()
return render_json({"DATA": "AAAAAAAA"})
def host_disk(request):
host_list = HostDisk.objects.all()
re_list = list()
for item in host_list:
temp_dict = dict()
temp_dict['os'] = item.os
temp_dict['host_ip'] = item.host_ip
temp_dict['host_name'] = item.host_name
temp_dict['host_path'] = item.host_path
temp_dict['create_time'] = item.create_time
re_list.append(temp_dict)
print(re_list)
return render_mako_context(request,
'/home_application/host_disk.html',
{'host_all': re_list}
)
def host_tijiao(request):
data = request.body
print(type(data))
data = json.loads(data)
host = HostDisk(**data)
host.save()
return render_json({"status": "OK"})
def host_script(request):
    # Query execution logs by job instance id
data = ScriptExecInfo.objects.all()
client = get_client_by_request(request)
script_all = list()
for item in data:
temp_dict = dict()
kwargs = {}
kwargs['bk_biz_id'] = item.bk_biz_id
kwargs['job_instance_id'] = item.job_instance_id
result = client.job.get_job_instance_log(kwargs)
log_content = result['data'][0]['step_results'][0]['ip_logs'][0]['log_content']
temp_dict['host_ip'] = item.host_ip
temp_dict['log_content'] = log_content
temp_dict['script_content'] = item.script_content
temp_dict['create_time'] = item.create_time
script_all.append(temp_dict)
return render_mako_context(request,
'/home_application/host_script.html',
{'script_all': script_all},
)
def script_tijiao(request):
try:
print(request.user.username)
except Exception as e:
print(str(e))
data = json.loads(request.body)
client = get_client_by_request(request)
kwargs = {}
result = client.cc.search_business(kwargs)
bk_biz_id = result['data']['info'][0]['bk_biz_id']
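    # Base64-encode the script body before passing it to the JOB fast-execute API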
script_content = base64.b64encode(data['script_content'])
kwargs = dict()
kwargs['bk_biz_id'] = bk_biz_id
kwargs['script_content'] = script_content
kwargs["account"] = "root"
kwargs['ip_list'] = [{'bk_cloud_id': 0, "ip": data['host_ip']}]
result = client.job.fast_execute_script(kwargs)
script_dict = dict()
script_dict["host_ip"] = data['host_ip']
script_dict["script_content"] = data['script_content']
script_dict["job_instance_id"] = result['data']['job_instance_id']
script_dict['bk_biz_id'] = bk_biz_id
scriptexecinfo = ScriptExecInfo(**script_dict)
scriptexecinfo.save()
return render_json({"status": "OK"})
# #################### Other
def other(request):
return render_mako_context(request, '/home_application/other.html')
@csrf_exempt # Note: this decorator is required
def upload_file(request):
    # The request carries a list of files, so iterate over them
files = request.FILES
for item in files:
_file = files.get(item)
print(_file.name)
print(_file.size)
with open('./' + str(_file.name), 'wb') as fd:
fd.write(_file.file.read())
return render_json({"status": "OK"})
def download_file(request):
"""
    File download
    :param request:
    :return: file response
"""
from django.http import FileResponse
    # Read the requested file name from the query string
file_name = request.GET.get('filename')
fd = open('./' + file_name, 'rb')
response = FileResponse(fd)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="%s"' % file_name
return response
| [((32, 13, 32, 43), 'blueking.component.shortcuts.get_client_by_request', 'get_client_by_request', ({(32, 35, 32, 42): 'request'}, {}), '(request)', False, 'from blueking.component.shortcuts import get_client_by_request\n'), ((38, 11, 42, 33), 'common.mymako.render_mako_context', 'render_mako_context', ({(38, 31, 38, 38): 'request', (38, 40, 38, 69): '"""/home_application/home.html"""', (39, 31, 42, 32): "{'yewu': yewu, 'AAA': u'业务列表'}"}, {}), "(request, '/home_application/home.html', {'yewu': yewu,\n 'AAA': u'业务列表'})", False, 'from common.mymako import render_mako_context, render_json\n'), ((50, 11, 50, 49), 'common.mymako.render_json', 'render_json', ({(50, 23, 50, 48): "{'1111111': 'dddddddddd'}"}, {}), "({'1111111': 'dddddddddd'})", False, 'from common.mymako import render_mako_context, render_json\n'), ((57, 11, 57, 75), 'common.mymako.render_mako_context', 'render_mako_context', ({(57, 31, 57, 38): 'request', (57, 40, 57, 74): '"""/home_application/dev_guide.html"""'}, {}), "(request, '/home_application/dev_guide.html')", False, 'from common.mymako import render_mako_context, render_json\n'), ((64, 11, 64, 73), 'common.mymako.render_mako_context', 'render_mako_context', ({(64, 31, 64, 38): 'request', (64, 40, 64, 72): '"""/home_application/contact.html"""'}, {}), "(request, '/home_application/contact.html')", False, 'from common.mymako import render_mako_context, render_json\n'), ((68, 11, 68, 35), 'json.loads', 'json.loads', ({(68, 22, 68, 34): 'request.body'}, {}), '(request.body)', False, 'import json\n'), ((70, 10, 70, 22), 'models.TEST', 'TEST', ({}, {}), '(**data)', False, 'from models import TEST, HostDisk, ScriptExecInfo\n'), ((72, 11, 72, 44), 'common.mymako.render_json', 'render_json', ({(72, 23, 72, 43): "{'DATA': 'AAAAAAAA'}"}, {}), "({'DATA': 'AAAAAAAA'})", False, 'from common.mymako import render_mako_context, render_json\n'), ((76, 16, 76, 38), 'models.HostDisk.objects.all', 'HostDisk.objects.all', ({}, {}), '()', False, 'from models import TEST, HostDisk, ScriptExecInfo\n'), ((88, 11, 91, 32), 'common.mymako.render_mako_context', 'render_mako_context', ({(88, 31, 88, 38): 'request', (89, 31, 89, 65): '"""/home_application/host_disk.html"""', (90, 31, 90, 52): "{'host_all': re_list}"}, {}), "(request, '/home_application/host_disk.html', {\n 'host_all': re_list})", False, 'from common.mymako import render_mako_context, render_json\n'), ((97, 11, 97, 27), 'json.loads', 'json.loads', ({(97, 22, 97, 26): 'data'}, {}), '(data)', False, 'import json\n'), ((99, 11, 99, 27), 'models.HostDisk', 'HostDisk', ({}, {}), '(**data)', False, 'from models import TEST, HostDisk, ScriptExecInfo\n'), ((101, 11, 101, 40), 'common.mymako.render_json', 'render_json', ({(101, 23, 101, 39): "{'status': 'OK'}"}, {}), "({'status': 'OK'})", False, 'from common.mymako import render_mako_context, render_json\n'), ((106, 11, 106, 39), 'models.ScriptExecInfo.objects.all', 'ScriptExecInfo.objects.all', ({}, {}), '()', False, 'from models import TEST, HostDisk, ScriptExecInfo\n'), ((107, 13, 107, 43), 'blueking.component.shortcuts.get_client_by_request', 'get_client_by_request', ({(107, 35, 107, 42): 'request'}, {}), '(request)', False, 'from blueking.component.shortcuts import get_client_by_request\n'), ((122, 11, 125, 32), 'common.mymako.render_mako_context', 'render_mako_context', ({(122, 31, 122, 38): 'request', (123, 31, 123, 67): '"""/home_application/host_script.html"""', (124, 31, 124, 57): "{'script_all': script_all}"}, {}), "(request, '/home_application/host_script.html', {\n 'script_all': 
script_all})", False, 'from common.mymako import render_mako_context, render_json\n'), ((133, 11, 133, 35), 'json.loads', 'json.loads', ({(133, 22, 133, 34): 'request.body'}, {}), '(request.body)', False, 'import json\n'), ((134, 13, 134, 43), 'blueking.component.shortcuts.get_client_by_request', 'get_client_by_request', ({(134, 35, 134, 42): 'request'}, {}), '(request)', False, 'from blueking.component.shortcuts import get_client_by_request\n'), ((139, 21, 139, 61), 'base64.b64encode', 'base64.b64encode', ({(139, 38, 139, 60): "data['script_content']"}, {}), "(data['script_content'])", False, 'import base64\n'), ((152, 21, 152, 50), 'models.ScriptExecInfo', 'ScriptExecInfo', ({}, {}), '(**script_dict)', False, 'from models import TEST, HostDisk, ScriptExecInfo\n'), ((155, 11, 155, 40), 'common.mymako.render_json', 'render_json', ({(155, 23, 155, 39): "{'status': 'OK'}"}, {}), "({'status': 'OK'})", False, 'from common.mymako import render_mako_context, render_json\n'), ((160, 11, 160, 71), 'common.mymako.render_mako_context', 'render_mako_context', ({(160, 31, 160, 38): 'request', (160, 40, 160, 70): '"""/home_application/other.html"""'}, {}), "(request, '/home_application/other.html')", False, 'from common.mymako import render_mako_context, render_json\n'), ((172, 11, 172, 40), 'common.mymako.render_json', 'render_json', ({(172, 23, 172, 39): "{'status': 'OK'}"}, {}), "({'status': 'OK'})", False, 'from common.mymako import render_mako_context, render_json\n'), ((185, 15, 185, 31), 'django.http.FileResponse', 'FileResponse', ({(185, 28, 185, 30): 'fd'}, {}), '(fd)', False, 'from django.http import FileResponse\n')] |
moseskim/Expert-Python-Programming-Fourth-Edition | Chapter 6/09 - The built-in multiprocessing module/basic_multiprocessing.py | 5160f974deb2365597b7be9cc032f24bfa13471a | """
"멀티프로세싱"절 예시
`multiprocessing` 모듈을 이용해 새로운 프로세스들을
생성하는 방법을 설명한다.
"""
from multiprocessing import Process
import os
def work(identifier):
print(f'Hey, I am the process ' f'{identifier}, pid: {os.getpid()}')
def main():
processes = [Process(target=work, args=(number,)) for number in range(5)]
for process in processes:
process.start()
while processes:
processes.pop().join()
if __name__ == "__main__":
main()
| [((15, 17, 15, 53), 'multiprocessing.Process', 'Process', (), '', False, 'from multiprocessing import Process\n'), ((11, 11, 11, 22), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os\n')] |
dominoFire/sweeper | sweeper/cloud/localhost/manager.py | 26c5497b81c8d0c50671f8ab75c1cf5c4c8191c9 | __author__ = '@dominofire'
import os
from sweeper.cloud import resource_config_combinations
from sweeper.cloud.localhost import resource_config_factory as config_factory
from sweeper.resource import Resource
def possible_configs(num):
configs = config_factory.list_configs()
combs = resource_config_combinations(num, configs)
return combs
def create_resource(name, config_object):
res = Resource(config_object, name, 'localhost', None, None)
return res
def mount_distributed_file_system(name, vm_resources):
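    # Localhost implementation: the "shared" file system is just a directory under the current working directory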
vm_first = vm_resources[0]
vm_first.execute_command('mkdir ./fileshare')
return os.path.join(os.getcwd(), 'fileshare')
| [((11, 14, 11, 43), 'sweeper.cloud.localhost.resource_config_factory.list_configs', 'config_factory.list_configs', ({}, {}), '()', True, 'from sweeper.cloud.localhost import resource_config_factory as config_factory\n'), ((12, 12, 12, 54), 'sweeper.cloud.resource_config_combinations', 'resource_config_combinations', ({(12, 41, 12, 44): 'num', (12, 46, 12, 53): 'configs'}, {}), '(num, configs)', False, 'from sweeper.cloud import resource_config_combinations\n'), ((18, 10, 18, 64), 'sweeper.resource.Resource', 'Resource', ({(18, 19, 18, 32): 'config_object', (18, 34, 18, 38): 'name', (18, 40, 18, 51): '"""localhost"""', (18, 53, 18, 57): 'None', (18, 59, 18, 63): 'None'}, {}), "(config_object, name, 'localhost', None, None)", False, 'from sweeper.resource import Resource\n'), ((27, 24, 27, 35), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n')] |
BACtaki/tfx | tfx/orchestration/experimental/core/service_jobs_test.py | 29db845200beccbb0ffa1e1e1a091e314a3a470f | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.service_jobs."""
from absl.testing.absltest import mock
import tensorflow as tf
from tfx.orchestration.experimental.core import service_jobs
from tfx.orchestration.experimental.core import test_utils
class ExceptionHandlingServiceJobManagerWrapperTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self._mock_service_job_manager = mock.create_autospec(
service_jobs.ServiceJobManager, instance=True)
self._mock_service_job_manager.ensure_node_services.return_value = (
service_jobs.ServiceStatus.SUCCESS)
self._mock_service_job_manager.stop_node_services.return_value = True
self._mock_service_job_manager.is_pure_service_node.return_value = True
self._mock_service_job_manager.is_mixed_service_node.return_value = False
self._wrapper = service_jobs.ExceptionHandlingServiceJobManagerWrapper(
self._mock_service_job_manager)
def test_calls_forwarded_to_underlying_instance(self):
self.assertEqual(service_jobs.ServiceStatus.SUCCESS,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self.assertTrue(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self.assertTrue(self._wrapper.is_pure_service_node(mock.Mock(), 'node3'))
self.assertFalse(self._wrapper.is_mixed_service_node(mock.Mock(), 'node4'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
self._mock_service_job_manager.is_pure_service_node.assert_called_once_with(
mock.ANY, 'node3')
self._mock_service_job_manager.is_mixed_service_node.assert_called_once_with(
mock.ANY, 'node4')
def test_ensure_node_services_exception_handling(self):
self._mock_service_job_manager.ensure_node_services.side_effect = RuntimeError(
'test error')
self.assertEqual(service_jobs.ServiceStatus.FAILED,
self._wrapper.ensure_node_services(mock.Mock(), 'node1'))
self._mock_service_job_manager.ensure_node_services.assert_called_once_with(
mock.ANY, 'node1')
def test_stop_node_services_exception_handling(self):
self._mock_service_job_manager.stop_node_services.side_effect = RuntimeError(
'test error')
self.assertFalse(self._wrapper.stop_node_services(mock.Mock(), 'node2'))
self._mock_service_job_manager.stop_node_services.assert_called_once_with(
mock.ANY, 'node2')
if __name__ == '__main__':
tf.test.main()
| [((68, 2, 68, 16), 'tensorflow.test.main', 'tf.test.main', ({}, {}), '()', True, 'import tensorflow as tf\n'), ((26, 37, 27, 54), 'absl.testing.absltest.mock.create_autospec', 'mock.create_autospec', (), '', False, 'from absl.testing.absltest import mock\n'), ((33, 20, 34, 39), 'tfx.orchestration.experimental.core.service_jobs.ExceptionHandlingServiceJobManagerWrapper', 'service_jobs.ExceptionHandlingServiceJobManagerWrapper', ({(34, 8, 34, 38): 'self._mock_service_job_manager'}, {}), '(self.\n _mock_service_job_manager)', False, 'from tfx.orchestration.experimental.core import service_jobs\n'), ((38, 56, 38, 67), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from absl.testing.absltest import mock\n'), ((39, 53, 39, 64), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from absl.testing.absltest import mock\n'), ((40, 55, 40, 66), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from absl.testing.absltest import mock\n'), ((41, 57, 41, 68), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from absl.testing.absltest import mock\n'), ((55, 56, 55, 67), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from absl.testing.absltest import mock\n'), ((62, 54, 62, 65), 'absl.testing.absltest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from absl.testing.absltest import mock\n')] |
kundajelab/dragonn | dragonn/models.py | 431e7c6b94a82972ac0fc3ef76d76e9ce8ba67fc | from __future__ import absolute_import, division, print_function
import matplotlib
import numpy as np
import os
import subprocess
import sys
import tempfile
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from abc import abstractmethod, ABCMeta
from dragonn.metrics import ClassificationResult
from sklearn.svm import SVC as scikit_SVC
from sklearn.tree import DecisionTreeClassifier as scikit_DecisionTree
from sklearn.ensemble import RandomForestClassifier
from keras.models import load_model
from dragonn.runtime_metrics import *
from dragonn.custom_losses import *
import warnings
warnings.filterwarnings('ignore')
def load_dragonn_model(model_string):
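    # Register custom metrics/losses so keras.models.load_model can deserialize saved DragoNN models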
custom_objects={"recall":recall,
"sensitivity":recall,
"specificity":specificity,
"fpr":fpr,
"fnr":fnr,
"fdr":fdr,
"precision":precision,
"f1":f1,
"spearman_corr":spearman_corr,
"ambig_binary_crossentropy":ambig_binary_crossentropy,
"ambig_mean_squared_error":ambig_mean_squared_error}
model=load_model(model_string,custom_objects=custom_objects)
return model
class Model(object):
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self, **hyperparameters):
pass
@abstractmethod
def train(self, X, y, validation_data):
pass
@abstractmethod
def predict(self, X):
pass
def test(self, X, y):
return ClassificationResult(y, self.predict(X))
def score(self, X, y, metric):
return self.test(X, y)[metric]
class SequenceDNN(Model):
"""
Sequence DNN models.
Parameters
----------
seq_length : int, optional
length of input sequence.
keras_model : instance of keras.models.Sequential, optional
seq_length or keras_model must be specified.
num_tasks : int, optional
number of tasks. Default: 1.
num_filters : list[int] | tuple[int]
        number of convolutional filters in each layer. Default: (15, 15, 15).
conv_width : list[int] | tuple[int]
        width of each layer's convolutional filters. Default: (15, 15, 15).
pool_width : int
width of max pooling after the last layer. Default: 35.
L1 : float
strength of L1 penalty.
dropout : float
dropout probability in every convolutional layer. Default: 0.
verbose: int
        Verbosity level during training. Valid values: 0, 1, 2.
Returns
-------
Compiled DNN model.
"""
def __init__(self, seq_length=None, keras_model=None,
use_RNN=False, num_tasks=1,
num_filters=(15, 15, 15), conv_width=(15, 15, 15),
pool_width=35, GRU_size=35, TDD_size=15,
L1=0, dropout=0.0, num_epochs=100, verbose=1):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Dropout, Flatten,
            Permute, Reshape, TimeDistributedDense
)
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.layers.recurrent import GRU
from keras.regularizers import l1
self.num_tasks = num_tasks
self.num_epochs = num_epochs
self.verbose = verbose
self.train_metrics = []
self.valid_metrics = []
if keras_model is not None and seq_length is None:
self.model = keras_model
self.num_tasks = keras_model.layers[-1].output_shape[-1]
elif seq_length is not None and keras_model is None:
self.model = Sequential()
assert len(num_filters) == len(conv_width)
for i, (nb_filter, nb_col) in enumerate(zip(num_filters, conv_width)):
conv_height = 4 if i == 0 else 1
self.model.add(Convolution2D(
nb_filter=nb_filter, nb_row=conv_height,
nb_col=nb_col, activation='linear',
init='he_normal', input_shape=(1, 4, seq_length),
W_regularizer=l1(L1), b_regularizer=l1(L1)))
self.model.add(Activation('relu'))
self.model.add(Dropout(dropout))
self.model.add(MaxPooling2D(pool_size=(1, pool_width)))
if use_RNN:
num_max_pool_outputs = self.model.layers[-1].output_shape[-1]
self.model.add(Reshape((num_filters[-1], num_max_pool_outputs)))
self.model.add(Permute((2, 1)))
self.model.add(GRU(GRU_size, return_sequences=True))
self.model.add(TimeDistributedDense(TDD_size, activation='relu'))
self.model.add(Flatten())
self.model.add(Dense(output_dim=self.num_tasks))
self.model.add(Activation('sigmoid'))
self.model.compile(optimizer='adam', loss='binary_crossentropy')
else:
raise ValueError("Exactly one of seq_length or keras_model must be specified!")
def train(self, X, y, validation_data, early_stopping_metric='Loss',
early_stopping_patience=5, save_best_model_to_prefix=None):
if y.dtype != bool:
assert set(np.unique(y)) == {0, 1}
y = y.astype(bool)
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
if self.verbose >= 1:
print('Training model (* indicates new best result)...')
X_valid, y_valid = validation_data
early_stopping_wait = 0
best_metric = np.inf if early_stopping_metric == 'Loss' else -np.inf
for epoch in range(1, self.num_epochs + 1):
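            # For single-task models, weight each class inversely to its frequency to balance the loss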
self.model.fit(X, y, batch_size=128, nb_epoch=1,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None, verbose=self.verbose >= 2)
epoch_train_metrics = self.test(X, y)
epoch_valid_metrics = self.test(X_valid, y_valid)
self.train_metrics.append(epoch_train_metrics)
self.valid_metrics.append(epoch_valid_metrics)
if self.verbose >= 1:
print('Epoch {}:'.format(epoch))
print('Train {}'.format(epoch_train_metrics))
print('Valid {}'.format(epoch_valid_metrics), end='')
current_metric = epoch_valid_metrics[early_stopping_metric].mean()
if (early_stopping_metric == 'Loss') == (current_metric <= best_metric):
if self.verbose >= 1:
print(' *')
best_metric = current_metric
best_epoch = epoch
early_stopping_wait = 0
if save_best_model_to_prefix is not None:
self.save(save_best_model_to_prefix)
else:
if self.verbose >= 1:
print()
if early_stopping_wait >= early_stopping_patience:
break
early_stopping_wait += 1
if self.verbose >= 1:
print('Finished training after {} epochs.'.format(epoch))
if save_best_model_to_prefix is not None:
print("The best model's architecture and weights (from epoch {0}) "
'were saved to {1}.arch.json and {1}.weights.h5'.format(
best_epoch, save_best_model_to_prefix))
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
def get_sequence_filters(self):
"""
Returns 3D array of 2D sequence filters.
"""
return self.model.layers[0].get_weights()[0].squeeze(axis=1)
@staticmethod
def _plot_scores(X, output_directory, peak_width, score_func, score_name):
from dragonn.plot import plot_bases_on_ax
scores = score_func(X).squeeze(axis=2) # (num_task, num_samples, num_bases, sequence_length)
try:
os.makedirs(output_directory)
except OSError:
pass
num_tasks = len(scores)
for task_index, task_scores in enumerate(scores):
for sequence_index, sequence_scores in enumerate(task_scores):
# sequence_scores is num_bases x sequence_length
basewise_max_sequence_scores = sequence_scores.max(axis=0)
plt.clf()
figure, (top_axis, bottom_axis) = plt.subplots(2)
top_axis.plot(range(1, len(basewise_max_sequence_scores) + 1),
basewise_max_sequence_scores)
top_axis.set_title('{} scores (motif highlighted)'.format(score_name))
peak_position = basewise_max_sequence_scores.argmax()
top_axis.axvspan(peak_position - peak_width, peak_position + peak_width,
color='grey', alpha=0.1)
peak_sequence_scores = sequence_scores[:, peak_position - peak_width :
peak_position + peak_width].T
# Set non-max letter_heights to zero
letter_heights = np.zeros_like(peak_sequence_scores)
letter_heights[np.arange(len(letter_heights)),
peak_sequence_scores.argmax(axis=1)] = \
basewise_max_sequence_scores[peak_position - peak_width :
peak_position + peak_width]
plot_bases_on_ax(letter_heights, bottom_axis)
bottom_axis.set_xticklabels(tuple(map(
str, np.arange(peak_position - peak_width, peak_position + peak_width + 1))))
bottom_axis.tick_params(axis='x', labelsize='small')
plt.xlabel('Position')
plt.ylabel('Score')
plt.savefig(os.path.join(output_directory, 'sequence_{}{}'.format(
sequence_index, '_task_{}'.format(task_index) if num_tasks > 1 else '')))
plt.close()
def plot_deeplift(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.deeplift, score_name='DeepLift')
def plot_in_silico_mutagenesis(self, X, output_directory, peak_width=10):
self._plot_scores(X, output_directory, peak_width,
score_func=self.in_silico_mutagenesis, score_name='ISM')
def plot_architecture(self, output_file):
from dragonn.visualize_util import plot as plot_keras_model
plot_keras_model(self.model, output_file, show_shape=True)
def save(self, save_best_model_to_prefix):
arch_fname = save_best_model_to_prefix + '.arch.json'
weights_fname = save_best_model_to_prefix + '.weights.h5'
open(arch_fname, 'w').write(self.model.to_json())
self.model.save_weights(weights_fname, overwrite=True)
@staticmethod
def load(model_hdf5_fname=None, arch_fname=None, weights_fname=None):
if model_hdf5_fname!=None:
from keras.models import load_model
sequence_dnn=SequenceDNN(keras_model=load_model(model_hdf5_fname))
else:
from keras.models import model_from_json
model_json_string = open(arch_fname).read()
sequence_dnn = SequenceDNN(keras_model=model_from_json(model_json_string))
if weights_fname is not None:
sequence_dnn.model.load_weights(weights_fname)
return sequence_dnn
class MotifScoreRNN(Model):
def __init__(self, input_shape, gru_size=10, tdd_size=4):
from keras.models import Sequential
from keras.layers.core import (
Activation, Dense, Flatten, TimeDistributedDense
)
from keras.layers.recurrent import GRU
self.model = Sequential()
self.model.add(GRU(gru_size, return_sequences=True,
input_shape=input_shape))
if tdd_size is not None:
self.model.add(TimeDistributedDense(tdd_size))
self.model.add(Flatten())
self.model.add(Dense(1))
self.model.add(Activation('sigmoid'))
print('Compiling model...')
self.model.compile(optimizer='adam', loss='binary_crossentropy')
def train(self, X, y, validation_data):
from keras.callbacks import EarlyStopping
print('Training model...')
multitask = y.shape[1] > 1
if not multitask:
num_positives = y.sum()
num_sequences = len(y)
num_negatives = num_sequences - num_positives
self.model.fit(
X, y, batch_size=128, nb_epoch=100,
validation_data=validation_data,
class_weight={True: num_sequences / num_positives,
False: num_sequences / num_negatives}
if not multitask else None,
callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
verbose=True)
def predict(self, X):
return self.model.predict(X, batch_size=128, verbose=False)
class gkmSVM(Model):
def __init__(self, prefix='./gkmSVM', word_length=11, mismatches=3, C=1,
threads=1, cache_memory=100, verbosity=4):
self.word_length = word_length
self.mismatches = mismatches
self.C = C
self.threads = threads
self.prefix = '_'.join(map(str, (prefix, word_length, mismatches, C)))
options_list = zip(
['-l', '-d', '-c', '-T', '-m', '-v'],
map(str, (word_length, mismatches, C, threads, cache_memory, verbosity)))
self.options = ' '.join([' '.join(option) for option in options_list])
@property
def model_file(self):
model_fname = '{}.model.txt'.format(self.prefix)
return model_fname if os.path.isfile(model_fname) else None
@staticmethod
def encode_sequence_into_fasta_file(sequence_iterator, ofname):
"""writes sequences into fasta file
"""
with open(ofname, "w") as wf:
for i, seq in enumerate(sequence_iterator):
print('>{}'.format(i), file=wf)
print(seq, file=wf)
def train(self, X, y, validation_data=None):
"""
Trains gkm-svm, saves model file.
"""
y = y.squeeze()
pos_sequence = X[y]
neg_sequence = X[~y]
pos_fname = "%s.pos_seq.fa" % self.prefix
neg_fname = "%s.neg_seq.fa" % self.prefix
# create temporary fasta files
self.encode_sequence_into_fasta_file(pos_sequence, pos_fname)
self.encode_sequence_into_fasta_file(neg_sequence, neg_fname)
# run command
command = ' '.join(
('gkmtrain', self.options, pos_fname, neg_fname, self.prefix))
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
process.wait() # wait for it to finish
# remove fasta files
os.system("rm %s" % pos_fname)
os.system("rm %s" % neg_fname)
def predict(self, X):
if self.model_file is None:
raise RuntimeError("GkmSvm hasn't been trained!")
# write test fasta file
test_fname = "%s.test.fa" % self.prefix
self.encode_sequence_into_fasta_file(X, test_fname)
# test gkmsvm
temp_ofp = tempfile.NamedTemporaryFile()
threads_option = '-T %s' % (str(self.threads))
command = ' '.join(['gkmpredict',
test_fname,
self.model_file,
temp_ofp.name,
threads_option])
process = subprocess.Popen(command, shell=True)
process.wait() # wait for it to finish
os.system("rm %s" % test_fname) # remove fasta file
# get classification results
temp_ofp.seek(0)
y = np.array([line.split()[-1] for line in temp_ofp], dtype=float)
temp_ofp.close()
return np.expand_dims(y, 1)
class SVC(Model):
def __init__(self):
self.classifier = scikit_SVC(probability=True, kernel='linear')
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
return self.classifier.predict_proba(X)[:, 1:]
class DecisionTree(Model):
def __init__(self):
self.classifier = scikit_DecisionTree()
def train(self, X, y, validation_data=None):
self.classifier.fit(X, y)
def predict(self, X):
predictions = np.asarray(self.classifier.predict_proba(X))[..., 1]
if len(predictions.shape) == 2: # multitask
predictions = predictions.T
else: # single-task
predictions = np.expand_dims(predictions, 1)
return predictions
class RandomForest(DecisionTree):
def __init__(self):
self.classifier = RandomForestClassifier(n_estimators=100)
| [((8, 0, 8, 21), 'matplotlib.use', 'matplotlib.use', ({(8, 15, 8, 20): '"""pdf"""'}, {}), "('pdf')", False, 'import matplotlib\n'), ((19, 0, 19, 33), 'warnings.filterwarnings', 'warnings.filterwarnings', ({(19, 24, 19, 32): '"""ignore"""'}, {}), "('ignore')", False, 'import warnings\n'), ((33, 10, 33, 64), 'keras.models.load_model', 'load_model', (), '', False, 'from keras.models import load_model\n'), ((246, 8, 246, 66), 'dragonn.visualize_util.plot', 'plot_keras_model', (), '', True, 'from dragonn.visualize_util import plot as plot_keras_model\n'), ((275, 21, 275, 33), 'keras.models.Sequential', 'Sequential', ({}, {}), '()', False, 'from keras.models import Sequential\n'), ((350, 18, 350, 79), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((353, 8, 353, 38), 'os.system', 'os.system', ({(353, 18, 353, 37): "('rm %s' % pos_fname)"}, {}), "('rm %s' % pos_fname)", False, 'import os\n'), ((354, 8, 354, 38), 'os.system', 'os.system', ({(354, 18, 354, 37): "('rm %s' % neg_fname)"}, {}), "('rm %s' % neg_fname)", False, 'import os\n'), ((363, 19, 363, 48), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ({}, {}), '()', False, 'import tempfile\n'), ((370, 18, 370, 55), 'subprocess.Popen', 'subprocess.Popen', (), '', False, 'import subprocess\n'), ((372, 8, 372, 39), 'os.system', 'os.system', ({(372, 18, 372, 38): "('rm %s' % test_fname)"}, {}), "('rm %s' % test_fname)", False, 'import os\n'), ((377, 15, 377, 35), 'numpy.expand_dims', 'np.expand_dims', ({(377, 30, 377, 31): 'y', (377, 33, 377, 34): '(1)'}, {}), '(y, 1)', True, 'import numpy as np\n'), ((383, 26, 383, 71), 'sklearn.svm.SVC', 'scikit_SVC', (), '', True, 'from sklearn.svm import SVC as scikit_SVC\n'), ((395, 26, 395, 47), 'sklearn.tree.DecisionTreeClassifier', 'scikit_DecisionTree', ({}, {}), '()', True, 'from sklearn.tree import DecisionTreeClassifier as scikit_DecisionTree\n'), ((412, 26, 412, 66), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (), '', False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((202, 12, 202, 41), 'os.makedirs', 'os.makedirs', ({(202, 24, 202, 40): 'output_directory'}, {}), '(output_directory)', False, 'import os\n'), ((276, 23, 277, 51), 'keras.layers.recurrent.GRU', 'GRU', (), '', False, 'from keras.layers.recurrent import GRU\n'), ((280, 23, 280, 32), 'keras.layers.core.Flatten', 'Flatten', ({}, {}), '()', False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((281, 23, 281, 31), 'keras.layers.core.Dense', 'Dense', ({(281, 29, 281, 30): '(1)'}, {}), '(1)', False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((282, 23, 282, 44), 'keras.layers.core.Activation', 'Activation', ({(282, 34, 282, 43): '"""sigmoid"""'}, {}), "('sigmoid')", False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((324, 30, 324, 57), 'os.path.isfile', 'os.path.isfile', ({(324, 45, 324, 56): 'model_fname'}, {}), '(model_fname)', False, 'import os\n'), ((405, 26, 405, 56), 'numpy.expand_dims', 'np.expand_dims', ({(405, 41, 405, 52): 'predictions', (405, 54, 405, 55): '1'}, {}), '(predictions, 1)', True, 'import numpy as np\n'), ((111, 25, 111, 37), 'keras.models.Sequential', 'Sequential', ({}, {}), '()', False, 'from keras.models import Sequential\n'), ((210, 16, 210, 25), 'matplotlib.pyplot.clf', 'plt.clf', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((211, 50, 211, 65), 'matplotlib.pyplot.subplots', 'plt.subplots', 
({(211, 63, 211, 64): '2'}, {}), '(2)', True, 'import matplotlib.pyplot as plt\n'), ((221, 33, 221, 68), 'numpy.zeros_like', 'np.zeros_like', ({(221, 47, 221, 67): 'peak_sequence_scores'}, {}), '(peak_sequence_scores)', True, 'import numpy as np\n'), ((226, 16, 226, 61), 'dragonn.plot.plot_bases_on_ax', 'plot_bases_on_ax', ({(226, 33, 226, 47): 'letter_heights', (226, 49, 226, 60): 'bottom_axis'}, {}), '(letter_heights, bottom_axis)', False, 'from dragonn.plot import plot_bases_on_ax\n'), ((230, 16, 230, 38), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(230, 27, 230, 37): '"""Position"""'}, {}), "('Position')", True, 'import matplotlib.pyplot as plt\n'), ((231, 16, 231, 35), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(231, 27, 231, 34): '"""Score"""'}, {}), "('Score')", True, 'import matplotlib.pyplot as plt\n'), ((234, 16, 234, 27), 'matplotlib.pyplot.close', 'plt.close', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((279, 27, 279, 57), 'keras.layers.core.TimeDistributedDense', 'TimeDistributedDense', ({(279, 48, 279, 56): 'tdd_size'}, {}), '(tdd_size)', False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((122, 27, 122, 66), 'keras.layers.convolutional.MaxPooling2D', 'MaxPooling2D', (), '', False, 'from keras.layers.convolutional import Convolution2D, MaxPooling2D\n'), ((129, 27, 129, 36), 'keras.layers.core.Flatten', 'Flatten', ({}, {}), '()', False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((130, 27, 130, 59), 'keras.layers.core.Dense', 'Dense', (), '', False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((131, 27, 131, 48), 'keras.layers.core.Activation', 'Activation', ({(131, 38, 131, 47): '"""sigmoid"""'}, {}), "('sigmoid')", False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((139, 23, 139, 35), 'numpy.unique', 'np.unique', ({(139, 33, 139, 34): 'y'}, {}), '(y)', True, 'import numpy as np\n'), ((258, 49, 258, 77), 'keras.models.load_model', 'load_model', ({(258, 60, 258, 76): 'model_hdf5_fname'}, {}), '(model_hdf5_fname)', False, 'from keras.models import load_model\n'), ((262, 51, 262, 85), 'keras.models.model_from_json', 'model_from_json', ({(262, 67, 262, 84): 'model_json_string'}, {}), '(model_json_string)', False, 'from keras.models import model_from_json\n'), ((300, 23, 300, 69), 'keras.callbacks.EarlyStopping', 'EarlyStopping', (), '', False, 'from keras.callbacks import EarlyStopping\n'), ((120, 31, 120, 49), 'keras.layers.core.Activation', 'Activation', ({(120, 42, 120, 48): '"""relu"""'}, {}), "('relu')", False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((121, 31, 121, 47), 'keras.layers.core.Dropout', 'Dropout', ({(121, 39, 121, 46): 'dropout'}, {}), '(dropout)', False, 'from keras.layers.core import Activation, Dense, Dropout, Flatten, Permute, Reshape\n'), ((125, 31, 125, 79), 'keras.layers.core.Reshape', 'Reshape', ({(125, 39, 125, 78): '(num_filters[-1], num_max_pool_outputs)'}, {}), '((num_filters[-1], num_max_pool_outputs))', False, 'from keras.layers.core import Activation, Dense, Dropout, Flatten, Permute, Reshape\n'), ((126, 31, 126, 46), 'keras.layers.core.Permute', 'Permute', ({(126, 39, 126, 45): '(2, 1)'}, {}), '((2, 1))', False, 'from keras.layers.core import Activation, Dense, Dropout, Flatten, Permute, Reshape\n'), ((127, 31, 127, 67), 'keras.layers.recurrent.GRU', 'GRU', (), '', False, 'from keras.layers.recurrent import GRU\n'), 
((128, 31, 128, 80), 'keras.layers.core.TimeDistributedDense', 'TimeDistributedDense', (), '', False, 'from keras.layers.core import Activation, Dense, Flatten, TimeDistributedDense\n'), ((228, 25, 228, 94), 'numpy.arange', 'np.arange', ({(228, 35, 228, 61): '(peak_position - peak_width)', (228, 63, 228, 93): '(peak_position + peak_width + 1)'}, {}), '(peak_position - peak_width, peak_position + peak_width + 1)', True, 'import numpy as np\n'), ((119, 34, 119, 40), 'keras.regularizers.l1', 'l1', ({(119, 37, 119, 39): 'L1'}, {}), '(L1)', False, 'from keras.regularizers import l1\n'), ((119, 56, 119, 62), 'keras.regularizers.l1', 'l1', ({(119, 59, 119, 61): 'L1'}, {}), '(L1)', False, 'from keras.regularizers import l1\n')] |
haltu/velmu-mpass-demo | src/mpass/mpass/migrations/0001_initial.py | 19eb0e14fa6710e4aee5d47c898cf570bf7621e5 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-20 08:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import parler.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AuthenticationSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('auth_id', models.CharField(max_length=128)),
('icon_url', models.CharField(blank=True, max_length=2048, null=True)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationSourceTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationSource')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationsource_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication source Translation',
},
),
migrations.CreateModel(
name='AuthenticationTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('tag_id', models.CharField(max_length=128)),
],
options={
'abstract': False,
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='AuthenticationTagTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('title', models.CharField(max_length=2048)),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='mpass.AuthenticationTag')),
],
options={
'managed': True,
'db_table': 'mpass_authenticationtag_translation',
'db_tablespace': '',
'default_permissions': (),
'verbose_name': 'authentication tag Translation',
},
),
migrations.AddField(
model_name='authenticationsource',
name='tags',
field=models.ManyToManyField(blank=True, to='mpass.AuthenticationTag'),
),
migrations.AlterUniqueTogether(
name='authenticationtagtranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='authenticationsourcetranslation',
unique_together=set([('language_code', 'master')]),
),
]
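    # Illustrative note (not from the generated file): this initial migration creates the
    # translatable AuthenticationSource/AuthenticationTag models, their django-parler
    # translation tables, the tags many-to-many link and the per-language uniqueness
    # constraints; it would typically be applied with `python manage.py migrate mpass`.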
| [((80, 18, 80, 82), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import migrations, models\n'), ((21, 23, 21, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((22, 31, 22, 70), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((23, 32, 23, 67), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((24, 28, 24, 60), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((25, 29, 25, 85), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((35, 23, 35, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((36, 34, 36, 105), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((37, 26, 37, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((38, 27, 38, 178), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((51, 23, 51, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((52, 31, 52, 70), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((53, 32, 53, 67), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((54, 27, 54, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((64, 23, 64, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((65, 34, 65, 105), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((66, 26, 66, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((67, 27, 67, 175), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n')] |
fractalego/dgt | dgt/inference/forward_inference.py | 6781b9445d93c4a1680ab3d5636803c81062cc67 | import logging
import random
from dgt.graph.graph_matcher import GraphWeightedMatch
from dgt.utils import graph_iterations
_logger = logging.getLogger(__name__)
def find_weight_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
return s[start:end]
except ValueError:
return 1
def clean_between(s, first, last):
try:
start = s.index(first) + len(first)
end = s.index(last, start)
new_s = s[:start - 1] + s[end + 1:]
return new_s
except ValueError:
return s
def eliminate_spaces(line):
line = line.replace(' ', '')
line = line.replace('\t', '')
line = line.replace('\n', '')
return line
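# Illustrative examples for the helpers above (not part of the original module):
#   find_weight_between('a|0.5|b', '|', '|')   -> '0.5'   (falls back to the integer 1 if a marker is missing)
#   clean_between('a|0.5|b', '|', '|')         -> 'ab'    (drops the delimited span and both markers)
#   eliminate_spaces(' a b\tc\n')              -> 'abc'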
class UniqueNamesModifier:
def apply(self, g):
from ..auxiliary import get_random_name
substitution_dict = {}
for v in g.vs:
random_name = get_random_name()
old_name = v['name']
new_name = old_name + random_name
v['name'] = new_name
substitution_dict[old_name] = new_name
try:
for v in g.vs:
referring_name = v['refers_to']
if referring_name:
v['refers_to'] = substitution_dict[referring_name]
except Exception as e:
_logger.warning("Exception while substituting refers_to ID: " + str(e))
for e in g.es:
e['name'] += get_random_name()
class BaseForwardInference:
def compute(self):
return None
class ForwardInference(BaseForwardInference):
_unique = UniqueNamesModifier()
def __init__(self, data, knowledge, permutation_shift, max_depth=1):
self.permutations = permutation_shift
self.data = data
self.knowledge = knowledge
self._max_depth = max_depth
self.permutation_shift = permutation_shift
def __apply_clause_to_graph(self, rule, data, i):
drs = data.copy()
drs.visit(self._unique)
w = 1
iterations = graph_iterations(drs._g)
if not iterations:
return drs, 0
drs._g = iterations[self.permutations[i] % len(iterations)]
if not rule.gradient:
weighted_match = GraphWeightedMatch(rule.get_hypothesis(), self.knowledge._metric,
self.knowledge._relations_metric)
w = drs.visit(weighted_match)
is_match = drs.visit(rule)
if not is_match:
return drs, 0
return drs, w
def _compute_step(self, data_tuple, i):
"""
Applies all the rules to a drs
        :return: all the variants of the drs after a rule match, as triples (<NEW_DRS>, <WEIGHT>, <PRIOR_RULES>)
"""
data = data_tuple[0]
prior_w = data_tuple[1]
clauses = self.knowledge.ask_rule(data)
results = []
for clause_tuple in clauses:
rule = clause_tuple[0]
rule_weight = rule.weight
prior_rules = list(data_tuple[2])
if rule in prior_rules: # A rule can be used only once per path
continue
drs, w = self.__apply_clause_to_graph(rule, data, i)
if w > 0:
prior_rules.append(rule)
prior_rules.append(drs)
results.append((drs, prior_w * w * rule_weight, prior_rules))
return results
def compute(self):
results = []
to_process = [(self.data, 1, [self.data])]
for i in range(self._max_depth):
new_results = []
for data_tuple in to_process:
new_results += self._compute_step(data_tuple, i)
if not new_results:
break
to_process = sorted(new_results, key=lambda x: -x[1])
results += to_process
results = sorted(results, key=lambda x: -x[1])
return results
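# Illustrative driver for ForwardInference (a sketch; `drs` and `knowledge` are assumed
# dgt graph/knowledge objects, not constructed here):
#
#   engine = ForwardInference(drs, knowledge, permutation_shift=[0, 0], max_depth=2)
#   for derived_drs, weight, applied_rules in engine.compute():
#       # tuples come back ordered by descending accumulated weight
#       print(weight, derived_drs)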
| [((7, 10, 7, 37), 'logging.getLogger', 'logging.getLogger', ({(7, 28, 7, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((77, 21, 77, 45), 'dgt.utils.graph_iterations', 'graph_iterations', ({(77, 38, 77, 44): 'drs._g'}, {}), '(drs._g)', False, 'from dgt.utils import graph_iterations\n')] |
ikekilinc/dnnSuperBinoculars | serverPythonClient/client.py | b0fc584b1d449961bdbab37cf9d72c0b466f197f | import argparse
import cv2
import common
# from .utils.cropAtCenter import cropImageCenter
# from cropAtCenter import cropImageCenter
from gabriel_client.websocket_client import WebsocketClient
from gabriel_client.opencv_adapter import OpencvAdapter
DEFAULT_SERVER_HOST = '128.2.212.50'
DEFAULT_ZOOM_FACTOR = 10
def preprocess(frame):
# return frame
print(type(frame), frame.shape)
width, height = frame.shape[1], frame.shape[0]
left = int(width/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
top = int(height/2 * (1 - 1/DEFAULT_ZOOM_FACTOR))
right = int(width/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
bottom = int(height/2 * (1 + 1/DEFAULT_ZOOM_FACTOR))
cropped_frame = frame[top:bottom, left:right]
return cropped_frame
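# Rough numbers for the crop above (illustrative): with a 1920x1080 frame and
# DEFAULT_ZOOM_FACTOR = 10, left/right become 864/1056 and top/bottom 486/594,
# so only the central 192x108 region (1/10 of each dimension) is sent to the server.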
def produce_extras():
return None
def consume_frame(frame, _):
cv2.imshow('Image from server', frame)
cv2.waitKey(1)
def main():
common.configure_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
'source_name', nargs='?', default=common.DEFAULT_SOURCE_NAME)
parser.add_argument('server_host', nargs='?', default=DEFAULT_SERVER_HOST)
args = parser.parse_args()
capture = cv2.VideoCapture(0)
opencv_adapter = OpencvAdapter(
preprocess, produce_extras, consume_frame, capture, args.source_name)
client = WebsocketClient(
args.server_host, common.WEBSOCKET_PORT,
opencv_adapter.get_producer_wrappers(), opencv_adapter.consumer)
client.launch()
if __name__ == '__main__':
main()
| [((35, 4, 35, 42), 'cv2.imshow', 'cv2.imshow', ({(35, 15, 35, 34): '"""Image from server"""', (35, 36, 35, 41): 'frame'}, {}), "('Image from server', frame)", False, 'import cv2\n'), ((36, 4, 36, 18), 'cv2.waitKey', 'cv2.waitKey', ({(36, 16, 36, 17): '(1)'}, {}), '(1)', False, 'import cv2\n'), ((40, 4, 40, 30), 'common.configure_logging', 'common.configure_logging', ({}, {}), '()', False, 'import common\n'), ((41, 13, 41, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((47, 14, 47, 33), 'cv2.VideoCapture', 'cv2.VideoCapture', ({(47, 31, 47, 32): '0'}, {}), '(0)', False, 'import cv2\n'), ((48, 21, 49, 77), 'gabriel_client.opencv_adapter.OpencvAdapter', 'OpencvAdapter', ({(49, 8, 49, 18): 'preprocess', (49, 20, 49, 34): 'produce_extras', (49, 36, 49, 49): 'consume_frame', (49, 51, 49, 58): 'capture', (49, 60, 49, 76): 'args.source_name'}, {}), '(preprocess, produce_extras, consume_frame, capture, args.\n source_name)', False, 'from gabriel_client.opencv_adapter import OpencvAdapter\n')] |
SharsDela/BankCardRecognize | src/DeepCard.API/batch.py | ce80589bc5a5afaba2b97b1ccab35354fb99b548 | from api import get_result
import os
import shutil
from glob import glob
from PIL import Image
if __name__ == '__main__':
image_files = glob('./test_images/*.*')
result_dir = './test_results'
if os.path.exists(result_dir):
shutil.rmtree(result_dir)
os.mkdir(result_dir)
txt_file = os.path.join(result_dir, 'result.txt')
txt_f = open(txt_file, 'w')
for image_file in sorted(image_files):
if ".gitkeep" in image_files:
continue
print("Finded file", image_file, end=" ")
result = get_result(Image.open(image_file))
print(":", result)
txt_f.write(image_file.split('/')[-1].split('.')[0] + ':' + result + '\n')
txt_f.close() | [((8, 18, 8, 43), 'glob.glob', 'glob', ({(8, 23, 8, 42): '"""./test_images/*.*"""'}, {}), "('./test_images/*.*')", False, 'from glob import glob\n'), ((10, 7, 10, 33), 'os.path.exists', 'os.path.exists', ({(10, 22, 10, 32): 'result_dir'}, {}), '(result_dir)', False, 'import os\n'), ((12, 4, 12, 24), 'os.mkdir', 'os.mkdir', ({(12, 13, 12, 23): 'result_dir'}, {}), '(result_dir)', False, 'import os\n'), ((14, 15, 14, 53), 'os.path.join', 'os.path.join', ({(14, 28, 14, 38): 'result_dir', (14, 40, 14, 52): '"""result.txt"""'}, {}), "(result_dir, 'result.txt')", False, 'import os\n'), ((11, 8, 11, 33), 'shutil.rmtree', 'shutil.rmtree', ({(11, 22, 11, 32): 'result_dir'}, {}), '(result_dir)', False, 'import shutil\n'), ((21, 28, 21, 50), 'PIL.Image.open', 'Image.open', ({(21, 39, 21, 49): 'image_file'}, {}), '(image_file)', False, 'from PIL import Image\n')] |
MaximeBaudette/PyCIM | CIM14/ENTSOE/Equipment/Core/Curve.py | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Equipment.Core.IdentifiedObject import IdentifiedObject
class Curve(IdentifiedObject):
"""A multi-purpose curve or functional relationship between an independent variable (X-axis) and dependent (Y-axis) variables.
"""
def __init__(self, y1Unit="A", curveStyle="straightLineYValues", xUnit="A", CurveDatas=None, *args, **kw_args):
"""Initialises a new 'Curve' instance.
@param y1Unit: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param curveStyle: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
@param xUnit: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
@param CurveDatas: The point data values that define a curve
"""
#: The Y1-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.y1Unit = y1Unit
#: The style or shape of the curve. Values are: "straightLineYValues", "rampYValue", "constantYValue", "formula"
self.curveStyle = curveStyle
#: The X-axis units of measure. Values are: "A", "rad", "none", "g", "W/Hz", "V", "m2", "VA", "VArh", "N", "Pa", "VAh", "F", "H", "Hz-1", "W/s", "J", "m", "S", "min", "deg", "J/s", "s", "Wh", "m3", "oC", "V/VAr", "s-1", "h", "W", "ohm", "Hz", "VAr", "kg/J"
self.xUnit = xUnit
self._CurveDatas = []
self.CurveDatas = [] if CurveDatas is None else CurveDatas
super(Curve, self).__init__(*args, **kw_args)
_attrs = ["y1Unit", "curveStyle", "xUnit"]
_attr_types = {"y1Unit": str, "curveStyle": str, "xUnit": str}
_defaults = {"y1Unit": "A", "curveStyle": "straightLineYValues", "xUnit": "A"}
_enums = {"y1Unit": "UnitSymbol", "curveStyle": "CurveStyle", "xUnit": "UnitSymbol"}
_refs = ["CurveDatas"]
_many_refs = ["CurveDatas"]
def getCurveDatas(self):
"""The point data values that define a curve
"""
return self._CurveDatas
def setCurveDatas(self, value):
for x in self._CurveDatas:
x.Curve = None
for y in value:
y._Curve = self
self._CurveDatas = value
CurveDatas = property(getCurveDatas, setCurveDatas)
def addCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = self
def removeCurveDatas(self, *CurveDatas):
for obj in CurveDatas:
obj.Curve = None
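# Illustrative usage (a sketch; `CurveData` is assumed to be the companion CIM14 class
# that holds the individual x/y points and maintains the reverse `Curve` link):
#
#   curve = Curve(y1Unit="W", xUnit="s", curveStyle="straightLineYValues")
#   curve.addCurveDatas(point_a, point_b)   # sets point.Curve = curve on each point
#   curve.CurveDatas                        # the property exposes the attached points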
| [] |
unclenachoduh/python-fluent | fluent/syntax/errors.py | 1d15bdc94a37ecb488a80aefcdd37b8cb5535f73 | from __future__ import unicode_literals
class ParseError(Exception):
def __init__(self, code, *args):
self.code = code
self.args = args
self.message = get_error_message(code, args)
def get_error_message(code, args):
    if code == 'E0001':
return 'Generic error'
if code == 'E0002':
return 'Expected an entry start'
if code == 'E0003':
return 'Expected token: "{}"'.format(args[0])
if code == 'E0004':
return 'Expected a character from range: "{}"'.format(args[0])
if code == 'E0005':
msg = 'Expected message "{}" to have a value or attributes'
return msg.format(args[0])
if code == 'E0006':
msg = 'Expected term "{}" to have a value'
return msg.format(args[0])
if code == 'E0007':
return 'Keyword cannot end with a whitespace'
if code == 'E0008':
return 'The callee has to be a simple, upper-case identifier'
if code == 'E0009':
return 'The key has to be a simple identifier'
if code == 'E0010':
return 'Expected one of the variants to be marked as default (*)'
if code == 'E0011':
return 'Expected at least one variant after "->"'
if code == 'E0012':
return 'Expected value'
if code == 'E0013':
return 'Expected variant key'
if code == 'E0014':
return 'Expected literal'
if code == 'E0015':
return 'Only one variant can be marked as default (*)'
if code == 'E0016':
return 'Message references cannot be used as selectors'
if code == 'E0017':
return 'Variants cannot be used as selectors'
if code == 'E0018':
return 'Attributes of messages cannot be used as selectors'
if code == 'E0019':
return 'Attributes of terms cannot be used as placeables'
if code == 'E0020':
return 'Unterminated string expression'
if code == 'E0021':
return 'Positional arguments must not follow named arguments'
if code == 'E0022':
return 'Named arguments must be unique'
if code == 'E0023':
return 'VariantLists are only allowed inside of other VariantLists.'
if code == 'E0024':
return 'Cannot access variants of a message.'
if code == 'E0025':
return 'Unknown escape sequence: {}'.format(args[0])
if code == 'E0026':
return 'Invalid Unicode escape sequence: {}'.format(args[0])
return code
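# Illustrative examples (not part of the original module):
#   get_error_message('E0003', ('}',))  -> 'Expected token: "}"'
#   ParseError('E0010').message         -> 'Expected one of the variants to be marked as default (*)'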
| [] |
jdddog/mag-archiver | tests/test_mag.py | 079e735e610d6b81b3ac8dc479d4f93bb0aacb11 | # Copyright 2020 Curtin University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: James Diprose
import os
import unittest
from unittest.mock import patch
import pendulum
from azure.common import AzureMissingResourceHttpError
from azure.cosmosdb.table.tableservice import TableService
from azure.storage.blob import ContainerProperties
from mag_archiver.azure import create_table
from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, \
hide_if_not_none
class TestMag(unittest.TestCase):
def test_hide_if_not_none(self):
# Test that None is returned for None
value = hide_if_not_none(None)
self.assertEqual(value, None)
# Test that 'hidden' is returned: string
value = hide_if_not_none('hello world')
self.assertEqual(value, 'hidden')
# Test that 'hidden' is returned: integer
value = hide_if_not_none(123)
self.assertEqual(value, 'hidden')
def test_make_mag_query(self):
start_date = pendulum.datetime(year=2020, month=4, day=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
# No parameters
query = make_mag_query()
self.assertEqual(query, '')
# State parameter
query = make_mag_query(state=MagState.discovered)
self.assertEqual(query, "State eq 'discovered'")
query = make_mag_query(state=MagState.archived)
self.assertEqual(query, "State eq 'archived'")
query = make_mag_query(state=MagState.done)
self.assertEqual(query, "State eq 'done'")
# Start date parameter
query = make_mag_query(start_date=start_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z'")
query = make_mag_query(start_date=start_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z'")
# End date parameter
query = make_mag_query(end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate lt datetime'2020-05-01T00:00Z'")
# Start date, end date and date type
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.release)
self.assertEqual(query, "ReleaseDate ge datetime'2020-04-01T00:00Z' and ReleaseDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.discovered)
self.assertEqual(query, "DiscoveredDate ge datetime'2020-04-01T00:00Z' and DiscoveredDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.archived)
self.assertEqual(query, "ArchivedDate ge datetime'2020-04-01T00:00Z' and ArchivedDate lt "
"datetime'2020-05-01T00:00Z'")
query = make_mag_query(start_date=start_date, end_date=end_date, date_type=MagDateType.done)
self.assertEqual(query, "DoneDate ge datetime'2020-04-01T00:00Z' and DoneDate lt "
"datetime'2020-05-01T00:00Z'")
# State, start date, end date and date type
query = make_mag_query(state=MagState.discovered, start_date=start_date, end_date=end_date,
date_type=MagDateType.discovered)
self.assertEqual(query, "State eq 'discovered' and DiscoveredDate ge datetime'2020-04-01T00:00Z' "
"and DiscoveredDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.archived, start_date=start_date, end_date=end_date,
date_type=MagDateType.archived)
self.assertEqual(query, "State eq 'archived' and ArchivedDate ge datetime'2020-04-01T00:00Z' "
"and ArchivedDate lt datetime'2020-05-01T00:00Z'")
query = make_mag_query(state=MagState.done, start_date=start_date, end_date=end_date,
date_type=MagDateType.done)
self.assertEqual(query, "State eq 'done' and DoneDate ge datetime'2020-04-01T00:00Z' "
"and DoneDate lt datetime'2020-05-01T00:00Z'")
def make_mag_release(account_name: str, account_key: str, year: int, month: int, day: int):
min_date = pendulum.datetime(1601, 1, 1)
partition_key_ = 'mag'
row_key_ = f'mag-{year:0>4d}-{month:0>2d}-{day:0>2d}'
state_ = MagState.discovered
task_ = MagTask.not_started
release_date_ = pendulum.datetime(year=year, month=month, day=day)
source_container_ = row_key_
source_container_last_modified_ = pendulum.datetime(year=year, month=month, day=day, hour=1)
release_container_ = ''
release_path_ = ''
discovered_date_ = pendulum.datetime(year=year, month=month, day=day, hour=2)
archived_date_ = min_date
done_date_ = min_date
return MagRelease(partition_key_, row_key_, state_, task_, release_date_, source_container_,
source_container_last_modified_, release_container_, release_path_, discovered_date_,
archived_date_, done_date_, account_name=account_name, account_key=account_key)
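# Note: 1601-01-01 is the minimum DateTime accepted by Azure Table storage, so it serves
# here as a sentinel for "not yet archived / not yet done".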
class TestMagRelease(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagRelease, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
release = make_mag_release(account_name, secret, 2020, 1, 1)
self.assertIn('account_key=hidden', release.__repr__())
self.assertNotIn(secret, release.__str__())
self.assertNotIn(secret, release.__repr__())
# Check that account_key is None
release = make_mag_release(account_name, None, 2020, 1, 1)
self.assertIn('account_key=None', release.__repr__())
def test_create(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
success = release.create()
self.assertTrue(success)
finally:
release.delete()
def test_delete(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
# Check that we can create and then delete
release.create()
release.delete()
# Check that second delete fails
with self.assertRaises(AzureMissingResourceHttpError):
release.delete()
def test_update(self):
release = make_mag_release(self.account_name, self.account_key, 2019, 6, 1)
try:
release.create()
# Update release
release.state = MagState.archived
release.archived_date = pendulum.utcnow().microsecond_(0)
release.update()
# Verify that release is updated
service = TableService(account_name=self.account_name, account_key=self.account_key)
entity = service.get_entity(MagRelease.TABLE_NAME, release.partition_key, release.row_key)
updated_release = MagRelease.from_entity(entity)
self.assertEqual(release.state, updated_release.state)
self.assertEqual(release.archived_date, updated_release.archived_date)
finally:
release.delete()
def make_containers():
containers = []
cp1 = ContainerProperties()
cp1.name = 'mag-2020-04-17'
cp1.last_modified = pendulum.datetime(year=2020, month=4, day=18)
containers.append(cp1)
cp3 = ContainerProperties()
cp3.name = 'mag-2020-05-01'
cp3.last_modified = pendulum.datetime(year=2020, month=5, day=1)
containers.append(cp3)
cp2 = ContainerProperties()
cp2.name = 'mag-2020-04-24'
cp2.last_modified = pendulum.datetime(year=2020, month=4, day=25)
containers.append(cp2)
return containers
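# Note: the containers are appended out of chronological order (17 Apr, 01 May, 24 Apr)
# so the listing tests below exercise sorting by last_modified in both directions.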
class TestMagArchiverClient(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestMagArchiverClient, self).__init__(*args, **kwargs)
self.account_name = os.getenv('STORAGE_ACCOUNT_NAME')
self.account_key = os.getenv('STORAGE_ACCOUNT_KEY')
create_table(self.account_name, self.account_key, MagRelease.TABLE_NAME)
def test_secrets_hidden(self):
# Check that account key is hidden
account_name = 'myaccountname'
secret = 'secret'
# Check that account_key and sas_token are hidden
client = MagArchiverClient(account_name=account_name, account_key=secret, sas_token=secret)
expected = f'MagArchiverClient(account_name={account_name}, account_key=hidden, sas_token=hidden)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
self.assertNotIn(secret, client.__str__())
self.assertNotIn(secret, client.__repr__())
# Check that account_key and sas_token are None
client = MagArchiverClient(account_name=account_name)
expected = f'MagArchiverClient(account_name={account_name}, account_key=None, sas_token=None)'
self.assertEqual(client.__str__(), expected)
self.assertEqual(client.__repr__(), expected)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_containers(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Test that 2 containers are returned when last_modified_thresh=1
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers_out = client.list_containers(last_modified_thresh=1)
self.assertEqual(len(containers_out), 2)
# Test that 3 containers are returned when last_modified_thresh=0
containers_out = client.list_containers(last_modified_thresh=0)
self.assertEqual(len(containers_out), 3)
# Test sort order reverse=False
self.assertEqual(containers_in[0].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[1].name, containers_out[2].name)
# Test sort order reverse=True
containers_out = client.list_containers(last_modified_thresh=0, reverse=True)
self.assertEqual(len(containers_out), 3)
self.assertEqual(containers_in[1].name, containers_out[0].name)
self.assertEqual(containers_in[2].name, containers_out[1].name)
self.assertEqual(containers_in[0].name, containers_out[2].name)
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_update_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, minute=10)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 2)
self.assertEqual(num_errors, 0)
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
@patch('mag_archiver.mag.list_containers')
@patch('pendulum.datetime.now')
def test_list_releases(self, mock_now, mock_list_containers):
# Mock time
mock_now.return_value = pendulum.datetime(year=2020, month=5, day=1, hour=1)
# Mock containers
containers_in = make_containers()
mock_list_containers.return_value = containers_in
# Mock fetching of containers
client = MagArchiverClient(account_name=self.account_name, account_key=self.account_key)
containers = client.list_containers(last_modified_thresh=1)
try:
# Update releases based on containers
num_updated, num_errors = client.update_releases(containers)
self.assertEqual(num_updated, 3)
self.assertEqual(num_errors, 0)
# Two releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 2)
# 1 release
start_date = pendulum.datetime(year=2020, month=4, day=17, minute=1)
end_date = pendulum.datetime(year=2020, month=5, day=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release)
self.assertEqual(len(releases), 1)
# Three releases
start_date = pendulum.datetime(year=2020, month=4, day=17)
end_date = pendulum.datetime(year=2020, month=5, day=1, minute=1)
releases = client.list_releases(start_date=start_date, end_date=end_date, state=MagState.discovered,
date_type=MagDateType.release, reverse=False)
self.assertEqual(len(releases), 3)
# Sorting reverse=False
self.assertEqual(releases[0].row_key, '2020-04-17')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-05-01')
# Sorting reverse=True
releases = client.list_releases(start_date=start_date, end_date=end_date,
state=MagState.discovered, date_type=MagDateType.release,
reverse=True)
self.assertEqual(releases[0].row_key, '2020-05-01')
self.assertEqual(releases[1].row_key, '2020-04-24')
self.assertEqual(releases[2].row_key, '2020-04-17')
finally:
# Clean up
service = TableService(account_name=self.account_name, account_key=self.account_key)
for container in containers:
service.delete_entity(MagRelease.TABLE_NAME, 'mag', container.name.replace("mag-", ""))
| [((126, 15, 126, 44), 'pendulum.datetime', 'pendulum.datetime', ({(126, 33, 126, 37): '1601', (126, 39, 126, 40): '1', (126, 42, 126, 43): '1'}, {}), '(1601, 1, 1)', False, 'import pendulum\n'), ((131, 20, 131, 70), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((133, 38, 133, 96), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((136, 23, 136, 81), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((139, 11, 141, 101), 'mag_archiver.mag.MagRelease', 'MagRelease', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((207, 10, 207, 31), 'azure.storage.blob.ContainerProperties', 'ContainerProperties', ({}, {}), '()', False, 'from azure.storage.blob import ContainerProperties\n'), ((209, 24, 209, 69), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((212, 10, 212, 31), 'azure.storage.blob.ContainerProperties', 'ContainerProperties', ({}, {}), '()', False, 'from azure.storage.blob import ContainerProperties\n'), ((214, 24, 214, 68), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((217, 10, 217, 31), 'azure.storage.blob.ContainerProperties', 'ContainerProperties', ({}, {}), '()', False, 'from azure.storage.blob import ContainerProperties\n'), ((219, 24, 219, 69), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((252, 5, 252, 46), 'unittest.mock.patch', 'patch', ({(252, 11, 252, 45): '"""mag_archiver.mag.list_containers"""'}, {}), "('mag_archiver.mag.list_containers')", False, 'from unittest.mock import patch\n'), ((253, 5, 253, 35), 'unittest.mock.patch', 'patch', ({(253, 11, 253, 34): '"""pendulum.datetime.now"""'}, {}), "('pendulum.datetime.now')", False, 'from unittest.mock import patch\n'), ((283, 5, 283, 46), 'unittest.mock.patch', 'patch', ({(283, 11, 283, 45): '"""mag_archiver.mag.list_containers"""'}, {}), "('mag_archiver.mag.list_containers')", False, 'from unittest.mock import patch\n'), ((284, 5, 284, 35), 'unittest.mock.patch', 'patch', ({(284, 11, 284, 34): '"""pendulum.datetime.now"""'}, {}), "('pendulum.datetime.now')", False, 'from unittest.mock import patch\n'), ((308, 5, 308, 46), 'unittest.mock.patch', 'patch', ({(308, 11, 308, 45): '"""mag_archiver.mag.list_containers"""'}, {}), "('mag_archiver.mag.list_containers')", False, 'from unittest.mock import patch\n'), ((309, 5, 309, 35), 'unittest.mock.patch', 'patch', ({(309, 11, 309, 34): '"""pendulum.datetime.now"""'}, {}), "('pendulum.datetime.now')", False, 'from unittest.mock import patch\n'), ((36, 16, 36, 38), 'mag_archiver.mag.hide_if_not_none', 'hide_if_not_none', ({(36, 33, 36, 37): 'None'}, {}), '(None)', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((40, 16, 40, 47), 'mag_archiver.mag.hide_if_not_none', 'hide_if_not_none', ({(40, 33, 40, 46): '"""hello world"""'}, {}), "('hello world')", False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((44, 16, 44, 37), 'mag_archiver.mag.hide_if_not_none', 'hide_if_not_none', ({(44, 33, 44, 36): '123'}, {}), '(123)', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((48, 21, 48, 65), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 
'import pendulum\n'), ((49, 19, 49, 63), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((52, 16, 52, 32), 'mag_archiver.mag.make_mag_query', 'make_mag_query', ({}, {}), '()', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((56, 16, 56, 57), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((59, 16, 59, 55), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((62, 16, 62, 51), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((66, 16, 66, 84), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((69, 16, 69, 87), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((72, 16, 72, 85), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((75, 16, 75, 81), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((79, 16, 79, 80), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((82, 16, 82, 83), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((85, 16, 85, 81), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((88, 16, 88, 77), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((92, 16, 92, 103), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((96, 16, 96, 106), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((100, 16, 100, 104), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((104, 16, 104, 100), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, 
MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((109, 16, 110, 64), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((114, 16, 115, 62), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((119, 16, 120, 58), 'mag_archiver.mag.make_mag_query', 'make_mag_query', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((147, 28, 147, 61), 'os.getenv', 'os.getenv', ({(147, 38, 147, 60): '"""STORAGE_ACCOUNT_NAME"""'}, {}), "('STORAGE_ACCOUNT_NAME')", False, 'import os\n'), ((148, 27, 148, 59), 'os.getenv', 'os.getenv', ({(148, 37, 148, 58): '"""STORAGE_ACCOUNT_KEY"""'}, {}), "('STORAGE_ACCOUNT_KEY')", False, 'import os\n'), ((149, 8, 149, 80), 'mag_archiver.azure.create_table', 'create_table', ({(149, 21, 149, 38): 'self.account_name', (149, 40, 149, 56): 'self.account_key', (149, 58, 149, 79): 'MagRelease.TABLE_NAME'}, {}), '(self.account_name, self.account_key, MagRelease.TABLE_NAME)', False, 'from mag_archiver.azure import create_table\n'), ((229, 28, 229, 61), 'os.getenv', 'os.getenv', ({(229, 38, 229, 60): '"""STORAGE_ACCOUNT_NAME"""'}, {}), "('STORAGE_ACCOUNT_NAME')", False, 'import os\n'), ((230, 27, 230, 59), 'os.getenv', 'os.getenv', ({(230, 37, 230, 58): '"""STORAGE_ACCOUNT_KEY"""'}, {}), "('STORAGE_ACCOUNT_KEY')", False, 'import os\n'), ((231, 8, 231, 80), 'mag_archiver.azure.create_table', 'create_table', ({(231, 21, 231, 38): 'self.account_name', (231, 40, 231, 56): 'self.account_key', (231, 58, 231, 79): 'MagRelease.TABLE_NAME'}, {}), '(self.account_name, self.account_key, MagRelease.TABLE_NAME)', False, 'from mag_archiver.azure import create_table\n'), ((239, 17, 239, 99), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((247, 17, 247, 61), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((256, 32, 256, 87), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((263, 17, 263, 96), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((287, 32, 287, 87), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((294, 17, 294, 96), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((312, 32, 312, 84), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((319, 17, 319, 96), 'mag_archiver.mag.MagArchiverClient', 'MagArchiverClient', (), '', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((196, 22, 196, 96), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', (), '', False, 'from 
azure.cosmosdb.table.tableservice import TableService\n'), ((198, 30, 198, 60), 'mag_archiver.mag.MagRelease.from_entity', 'MagRelease.from_entity', ({(198, 53, 198, 59): 'entity'}, {}), '(entity)', False, 'from mag_archiver.mag import make_mag_query, MagState, MagDateType, MagRelease, MagTask, MagArchiverClient, hide_if_not_none\n'), ((304, 22, 304, 96), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', (), '', False, 'from azure.cosmosdb.table.tableservice import TableService\n'), ((329, 25, 329, 70), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((330, 23, 330, 67), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((336, 25, 336, 80), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((337, 23, 337, 67), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((343, 25, 343, 70), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((344, 23, 344, 77), 'pendulum.datetime', 'pendulum.datetime', (), '', False, 'import pendulum\n'), ((364, 22, 364, 96), 'azure.cosmosdb.table.tableservice.TableService', 'TableService', (), '', False, 'from azure.cosmosdb.table.tableservice import TableService\n'), ((192, 36, 192, 53), 'pendulum.utcnow', 'pendulum.utcnow', ({}, {}), '()', False, 'import pendulum\n')] |
jacob-heglund/socialsensing-jh | twitterinfrastructure/CH-Data-Public.py | fd6d2d749f40fee46bee749ff868212bf117a747 | '''
Created on Mar 22, 2018
Edited on Jan 11, 2019
@author: npvance2
@author: curtisd2
Variables that will need to be edited/personalized:
monitorID in Variables() (line 27)
projectStartDate in Variables() (line 28)
projectEndDate in Variables() (line 29)
authToken in getAuthToken() (line 49)
consumer_key in twitterAPI() (line 62)
consumer_secret in twitterAPI() (line 63)
access_token in twitterAPI() (line 64)
access_secret in twitterAPI() (line 65)
'''
from datetime import date, timedelta
import urllib.request
import json
import csv
import tweepy
from tweepy import OAuthHandler
def Variables():
monitorID = "9926183772" # The numerical ID for your Crimson Hexagon monitor
startDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
endDate = "yyyy-mm-dd" # Date must be in yyyy-mm-dd format
variableMap = {}
variableMap['monitorID'] = monitorID
variableMap['startDate'] = startDate
variableMap['endDate'] = endDate
return variableMap
def getURL(): #provides URL for Crimson API
urlStart = "https://api.crimsonhexagon.com/api"
return urlStart
###########
#
# You'll need to generate your own Crimson API key/token from here:
# https://apidocs.crimsonhexagon.com/reference
#
###########
def getAuthToken(): #provides auth token needed to access Crimson API
authToken = ''
authToken = "&auth="+authToken
return authToken
###########
#
# You'll need to add your own Twitter API keys here.
# Instructions on generating API keys: https://developer.twitter.com/en/docs/basics/authentication/guides/access-tokens.html
# API reference guide: https://developer.twitter.com/en/docs/api-reference-index.html
#
###########
def twitterAPI(): #Provides access keys for Twitter API
consumer_key = '2S1Z7Giq0oOf3w0R0sJUPnLFx'
consumer_secret = '9IPOE8dqWzUPseAPHeNxTTv1jAr9BNj8mF2ryw8DIud8Ot8VCe'
access_token = '998275516892409858-hQ1pk5wKg1YyxUrbiFkuFHKHqztPMNE'
access_secret = 'gsXqGx1gU93HkKNDupTPt56ZnAmmalsaSNBUuoBToraBw'
if (consumer_key == '') or (consumer_secret =='') or (access_token =='') or (access_secret ==''):
print("Not all Twitter keys have been entered, please add them to the script and try again")
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
return api
def getTwitterURL(): #provides URL for Twitter api
urlStart = "https://api.twitter.com/1.1/statuses/lookup.json?id="
return urlStart
def DatePull(startdate, enddate):
listArray = []
startdate = date(int(startdate[0:4]), int(startdate[5:7]), int(startdate[8:10]))
enddate = date(int(enddate[0:4]), int(enddate[5:7]), int(enddate[8:10]))
while startdate <= enddate:
listArray.append(str(startdate))
startdate += timedelta(days=1)
return listArray
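# Illustrative example (not part of the original script):
#   DatePull('2020-01-01', '2020-01-03') -> ['2020-01-01', '2020-01-02', '2020-01-03']
# main() then walks consecutive pairs of these dates, so each Crimson API call covers
# a single day.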
def main():
monitorID = Variables()['monitorID']
projectStartDate = Variables()['startDate']
projectEndDate = Variables()['endDate']
fPath = "Monitor-"+monitorID+'-from-'+projectStartDate+'-to-'+projectEndDate+'.csv'
lineArray = DatePull(projectStartDate, projectEndDate)
print("------------------------------")
print("MonitorID is "+monitorID)
print(lineArray[0],lineArray[-1])
with open(fPath, 'w', newline = '', encoding = 'utf-8') as f:
writer = csv.writer(f)
header = ["PostType","PostDate","PostTime","URL","TweetID","Contents","RetweetCount","FavoriteCount","Location","Language","Sentiment","NeutralScore","PositiveScore","NegativeScore","Followers","Friends","Author","AuthorGender","AuthorTweets"]
writer.writerow(header)
for i in range(len(lineArray)-1):
print(lineArray[i])
startDate = lineArray[i]
endDate = lineArray[i+1]
dates = "&start="+startDate+"&end="+endDate #Combines start and end date into format needed for API call
urlStart = getURL() #Gets URL
authToken = getAuthToken() #Gets auth token
endpoint = "/monitor/posts?id="; #endpoint needed for this query
extendLimit = "&extendLimit=true" #extends call number from 500 to 10,000
fullContents = "&fullContents=true" #Brings back full contents for Blog and Tumblr posts which are usually truncated around search keywords. This can occasionally disrupt CSV formatting.
urlData = urlStart+endpoint+monitorID+authToken+dates+extendLimit+fullContents #Combines all API calls parts into full URL
webURL = urllib.request.urlopen(urlData)
if (webURL.getcode() == 200):
with open(fPath, 'a', newline='', encoding='utf-8') as f:
writer = csv.writer(f)
data = webURL.read().decode('utf8')
theJSON = json.loads(data)
postDates = [] #These initialize the attributes of the final output
postTimes = []
urls = []
contents = []
authors = []
authorGenders = []
locations = []
languages = []
postTypes = []
sentiments = []
neutralScore = []
positiveScore = []
negativeScore = []
tweetIDs = []
followers = []
friends = []
retweetCounts = []
favoritesCount = []
statusesCount = []
tweetCount = 0
tempTweetIDs = []
api = twitterAPI()
c = 0
for i in theJSON["posts"]:
postDates.append("")
postTimes.append("")
if ('date' in i): #identifies date posted
tempDate = str(i["date"])
dateTime = tempDate.split("T")
postDates[c] = dateTime[0]
postTimes[c] = dateTime[1]
urls.append(i["url"])
contents.append("")
if ('contents' in i): #identifies post contents
contents[c] = i["contents"].replace(",","").replace("\n"," ") #replaces commas and new lines to facilitate CSV formatting, this occasionally missed new lines in some blog posts which I'm working to fix
authors.append("")
if ('author' in i): #identifies author
authors[c] = i["author"].replace(",","")
authorGenders.append("")
if ('authorGender' in i): #identifies author gender
authorGenders[c] = i["authorGender"]
locations.append("")
if ('location' in i): #identifies location
locations[c] = i["location"].replace(",","")
languages.append("")
if ('language' in i): #identifies language specified in the author's profile
languages[c] = i["language"]
postTypes.append(i["type"]) #identifies the type of post, i.e. Twitter, Tumblr, Blog
tweetIDs.append("")
followers.append("")
friends.append("")
retweetCounts.append("")
favoritesCount.append("")
statusesCount.append("")
if postTypes[c] == "Twitter": #if the post type is Twitter it goes through more processing
tweetCount = tweetCount + 1 #counts number of tweets
tweetSplit = urls[c].split("status/") #splits URL to get tweetID
tweetIDs[c] = tweetSplit[1]
tempTweetIDs.append(tweetIDs[c])
if tweetCount == 100: #the max number of TweetIDs in one API call is 100 so a call is run every 100 tweets identified
tweepys = api.statuses_lookup(id_=tempTweetIDs) #call to Twitter API
for tweet in tweepys:
tempID = tweet.id_str #finds tweetsID
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID: #matches tweetID in Twitter API call to tweetID stored from Crimson API
tempDate = str(tweet.created_at).replace(" "," ") #These all fill the matching Crimson attributes to those found in the Twitter API
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0 #clears tweet count for a new 100
tempTweetIDs = [] #clears tweetIDs for next call
sentiments.append("")
neutralScore.append("")
positiveScore.append("")
negativeScore.append("")
if ('categoryScores' in i): #finds sentiment value and matching attribute
for l in i["categoryScores"]:
catName = l["categoryName"]
if catName == "Basic Neutral":
neutralScore[c] = l["score"]
elif catName =="Basic Positive":
positiveScore[c] = l["score"]
elif catName == "Basic Negative":
negativeScore[c] = l["score"]
if neutralScore[c] > positiveScore[c] and neutralScore[c] > negativeScore[c]:
sentiments[c] = "Basic Neutral"
if positiveScore[c] > neutralScore[c] and positiveScore[c] > negativeScore[c]:
sentiments[c] = "Basic Positive"
if negativeScore[c] > positiveScore[c] and negativeScore[c] > neutralScore[c]:
sentiments[c] = "Basic Negative"
c = c + 1
if len(tempTweetIDs) != 0: #after loop the Twitter API call must run one more time to clean up all the tweets since the last 100
try:
tweepys = api.statuses_lookup(id_=tempTweetIDs)
for tweet in tweepys:
tempID = tweet.id_str
postMatch = 0
for idMatch in tweetIDs:
if idMatch==tempID:
tempDate = str(tweet.created_at).replace(" "," ")
dateTime = tempDate.split(" ")
postDates[postMatch] = dateTime[0]
postTimes[postMatch] = dateTime[1]
contents[postMatch] = tweet.text.replace(",","")
authors[postMatch] = tweet.author.screen_name
followers[postMatch] = str(tweet.author.followers_count)
friends[postMatch] = str(tweet.author.friends_count)
retweetCounts[postMatch] = str(tweet.retweet_count)
favoritesCount[postMatch] = str(tweet.favorite_count)
statusesCount[postMatch] = str(tweet.author.statuses_count)
postMatch = postMatch + 1
tweetCount = 0
except:
print("Tweepy error: skipping cleanup")
pC = 0
for pDate in postDates: #iterates through the word lists and prints matching posts to CSV
csvRow=[postTypes[pC], pDate, postTimes[pC], urls[pC], str(tweetIDs[pC]), contents[pC].replace("\n"," "), retweetCounts[pC], favoritesCount[pC], locations[pC], languages[pC], sentiments[pC], str(neutralScore[pC]), str(positiveScore[pC]), str(negativeScore[pC]), followers[pC], friends[pC], authors[pC], authorGenders[pC], statusesCount[pC]]
writer.writerow(csvRow)
pC = pC + 1
else:
print("Server Error, No Data" + str(webURL.getcode())) #displays error if Crimson URL fails
if __name__ == '__main__':
main()
| [((69, 11, 69, 54), 'tweepy.OAuthHandler', 'OAuthHandler', ({(69, 24, 69, 36): 'consumer_key', (69, 38, 69, 53): 'consumer_secret'}, {}), '(consumer_key, consumer_secret)', False, 'from tweepy import OAuthHandler\n'), ((71, 10, 71, 83), 'tweepy.API', 'tweepy.API', (), '', False, 'import tweepy\n'), ((86, 21, 86, 38), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import date, timedelta\n'), ((101, 17, 101, 30), 'csv.writer', 'csv.writer', ({(101, 28, 101, 29): 'f'}, {}), '(f)', False, 'import csv\n'), ((123, 25, 123, 38), 'csv.writer', 'csv.writer', ({(123, 36, 123, 37): 'f'}, {}), '(f)', False, 'import csv\n'), ((126, 26, 126, 42), 'json.loads', 'json.loads', ({(126, 37, 126, 41): 'data'}, {}), '(data)', False, 'import json\n')] |
danhnguyen48/slurm-elastic-computing | roles/slurm/files/startnode.py | 0793cf23677169a6d9dceea0793118bc00c0913e | #! /opt/cloud_sdk/bin/python
import asyncio
import logging
import subprocess
import sys
import citc_cloud
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
log.critical("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
async def main() -> None:
nodespace = citc_cloud.get_nodespace()
keys_file = "/home/slurm/opc_authorized_keys"
with open(keys_file) as kf:
ssh_keys = kf.read()
hosts = subprocess.run(["scontrol", "show", "hostnames", sys.argv[1]], stdout=subprocess.PIPE).stdout.decode().split()
await asyncio.gather(*(
citc_cloud.start_node( log, host, nodespace, ssh_keys)
for host in hosts
))
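# Note: sys.argv[1] is a Slurm hostlist expression; `scontrol show hostnames` expands it
# (e.g. "node[1-3]" -> node1, node2, node3) and one start_node() task is launched per host.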
sys.excepthook = handle_exception
if __name__ == "__main__":
log = logging.getLogger("startnode")
log.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/slurm/elastic.log')
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
finally:
loop.close()
| [((20, 16, 20, 42), 'citc_cloud.get_nodespace', 'citc_cloud.get_nodespace', ({}, {}), '()', False, 'import citc_cloud\n'), ((37, 10, 37, 40), 'logging.getLogger', 'logging.getLogger', ({(37, 28, 37, 39): '"""startnode"""'}, {}), "('startnode')", False, 'import logging\n'), ((39, 14, 39, 63), 'logging.FileHandler', 'logging.FileHandler', ({(39, 34, 39, 62): '"""/var/log/slurm/elastic.log"""'}, {}), "('/var/log/slurm/elastic.log')", False, 'import logging\n'), ((40, 16, 40, 88), 'logging.Formatter', 'logging.Formatter', ({(40, 34, 40, 87): '"""%(asctime)s %(name)-10s %(levelname)-8s %(message)s"""'}, {}), "('%(asctime)s %(name)-10s %(levelname)-8s %(message)s')", False, 'import logging\n'), ((44, 11, 44, 35), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((13, 8, 13, 62), 'sys.__excepthook__', 'sys.__excepthook__', ({(13, 27, 13, 35): 'exc_type', (13, 37, 13, 46): 'exc_value', (13, 48, 13, 61): 'exc_traceback'}, {}), '(exc_type, exc_value, exc_traceback)', False, 'import sys\n'), ((30, 8, 30, 62), 'citc_cloud.start_node', 'citc_cloud.start_node', ({(30, 31, 30, 34): 'log', (30, 36, 30, 40): 'host', (30, 42, 30, 51): 'nodespace', (30, 53, 30, 61): 'ssh_keys'}, {}), '(log, host, nodespace, ssh_keys)', False, 'import citc_cloud\n'), ((27, 12, 27, 98), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n')] |
BryanRiel/pyre | tests/pyre/components/component_class_registration_model.py | 179359634a7091979cced427b6133dd0ec4726ea | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2018 all rights reserved
#
"""
Verify that component registration interacts correctly with the pyre configurator model
"""
# access
# print(" -- importing pyre")
import pyre
# print(" -- done")
def declare():
# declare a protocol
class protocol(pyre.protocol):
"""a protocol"""
# properties
p1 = pyre.properties.str()
p2 = pyre.properties.str()
# behavior
@pyre.provides
def do(self):
"""behave"""
# declare a component
class component(pyre.component, family="test", implements=protocol):
"""a component"""
# traits
p1 = pyre.properties.str(default="p1")
p2 = pyre.properties.str(default="p2")
@pyre.export
def do(self):
"""behave"""
return "component"
return component
def test():
# and the model
model = pyre.executive.nameserver
# model.dump(pattern='test')
# print(" -- making some configuration changes")
# add an assignment
model['test.p1'] = 'step 1'
# an alias
model.alias(alias='p1', target='test.p1')
# and a reference to the alias
model['ref'] = '{p1}'
# check that they point to the same slot
assert model.retrieve(name='p1') == model.retrieve(name='test.p1')
# save the nodes
ref = model.retrieve(name='ref')
step_0 = model.retrieve(name='test.p1')
# now declare the component and its protocol
# print(" -- declaring components")
component = declare()
# print(" -- done")
# model.dump(pattern='')
assert component.p1 == 'step 1'
assert component.p2 == 'p2'
# check that the model is as we expect
# model.dump()
assert model['test.p1'] == component.p1
assert model['test.p2'] == component.p2
# how about the alias and the reference?
assert model['ref'] == component.p1
assert model['p1'] == component.p1
# make a late registration to what is now the component trait
model['test.p2'] = 'step 2'
# model.dump(pattern='test')
# and check
assert component.p1 == 'step 1'
assert component.p2 == 'step 2'
return
# main
if __name__ == "__main__":
test()
# end of file
| [((26, 13, 26, 34), 'pyre.properties.str', 'pyre.properties.str', ({}, {}), '()', False, 'import pyre\n'), ((27, 13, 27, 34), 'pyre.properties.str', 'pyre.properties.str', ({}, {}), '()', False, 'import pyre\n'), ((37, 13, 37, 46), 'pyre.properties.str', 'pyre.properties.str', (), '', False, 'import pyre\n'), ((38, 13, 38, 46), 'pyre.properties.str', 'pyre.properties.str', (), '', False, 'import pyre\n')] |
carlmontanari/nssh | tests/unit/transport/plugins/asyncssh/test_asyncssh_transport.py | fa2277ea0b8fdb81de3064e1d48bad9264f0cd64 | import asyncio
from io import BytesIO
import pytest
from asyncssh.connection import SSHClientConnection
from asyncssh.stream import SSHReader
from scrapli.exceptions import ScrapliConnectionNotOpened, ScrapliTimeout
class DumbContainer:
def __init__(self):
self.preferred_auth = ()
def __getattr__(self, item):
# options has a billion attributes, just return None, doesnt matter for this test
return None
def test_close(monkeypatch, asyncssh_transport):
def _close(cls):
pass
monkeypatch.setattr(
"asyncssh.connection.SSHClientConnection.close",
_close,
)
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
asyncssh_transport.close()
assert asyncssh_transport.session is None
assert asyncssh_transport.stdin is None
assert asyncssh_transport.stdout is None
def test_close_catch_brokenpipe(monkeypatch, asyncssh_transport):
def _close(cls):
raise BrokenPipeError
monkeypatch.setattr(
"asyncssh.connection.SSHClientConnection.close",
_close,
)
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
asyncssh_transport.close()
assert asyncssh_transport.session is None
assert asyncssh_transport.stdin is None
assert asyncssh_transport.stdout is None
def test_isalive_no_session(asyncssh_transport):
assert asyncssh_transport.isalive() is False
def test_isalive(asyncssh_transport):
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
# lie and tell asyncssh auth is done
asyncssh_transport.session._auth_complete = True
# also have to lie and create a transport and have it return False when is_closing is called
asyncssh_transport.session._transport = DumbContainer()
asyncssh_transport.session._transport.is_closing = lambda: False
assert asyncssh_transport.isalive() is True
def test_isalive_attribute_error(asyncssh_transport):
# lie and pretend the session is already assigned
options = DumbContainer()
asyncssh_transport.session = SSHClientConnection(
loop=asyncio.get_event_loop_policy().get_event_loop(), options=options
)
# lie and tell asyncssh auth is done
asyncssh_transport.session._auth_complete = True
assert asyncssh_transport.isalive() is False
async def test_read(monkeypatch, asyncssh_transport):
async def _read(cls, _):
return b"somebytes"
monkeypatch.setattr(
"asyncssh.stream.SSHReader.read",
_read,
)
# lie and pretend the session is already assigned/stdout is already a thing
asyncssh_transport.stdout = SSHReader("", "")
assert await asyncssh_transport.read() == b"somebytes"
async def test_read_exception_not_open(asyncssh_transport):
with pytest.raises(ScrapliConnectionNotOpened):
await asyncssh_transport.read()
async def test_read_exception_timeout(monkeypatch, asyncssh_transport):
async def _read(cls, _):
await asyncio.sleep(0.5)
monkeypatch.setattr(
"asyncssh.stream.SSHReader.read",
_read,
)
# lie and pretend the session is already assigned/stdout is already a thing
asyncssh_transport.stdout = SSHReader("", "")
asyncssh_transport._base_transport_args.timeout_transport = 0.1
with pytest.raises(ScrapliTimeout):
await asyncssh_transport.read()
def test_write(asyncssh_transport):
asyncssh_transport.stdin = BytesIO()
asyncssh_transport.write(b"blah")
asyncssh_transport.stdin.seek(0)
assert asyncssh_transport.stdin.read() == b"blah"
def test_write_exception(asyncssh_transport):
with pytest.raises(ScrapliConnectionNotOpened):
asyncssh_transport.write("blah")
| [((108, 32, 108, 49), 'asyncssh.stream.SSHReader', 'SSHReader', ({(108, 42, 108, 44): '""""""', (108, 46, 108, 48): '""""""'}, {}), "('', '')", False, 'from asyncssh.stream import SSHReader\n'), ((128, 32, 128, 49), 'asyncssh.stream.SSHReader', 'SSHReader', ({(128, 42, 128, 44): '""""""', (128, 46, 128, 48): '""""""'}, {}), "('', '')", False, 'from asyncssh.stream import SSHReader\n'), ((136, 31, 136, 40), 'io.BytesIO', 'BytesIO', ({}, {}), '()', False, 'from io import BytesIO\n'), ((114, 9, 114, 50), 'pytest.raises', 'pytest.raises', ({(114, 23, 114, 49): 'ScrapliConnectionNotOpened'}, {}), '(ScrapliConnectionNotOpened)', False, 'import pytest\n'), ((131, 9, 131, 38), 'pytest.raises', 'pytest.raises', ({(131, 23, 131, 37): 'ScrapliTimeout'}, {}), '(ScrapliTimeout)', False, 'import pytest\n'), ((143, 9, 143, 50), 'pytest.raises', 'pytest.raises', ({(143, 23, 143, 49): 'ScrapliConnectionNotOpened'}, {}), '(ScrapliConnectionNotOpened)', False, 'import pytest\n'), ((120, 14, 120, 32), 'asyncio.sleep', 'asyncio.sleep', ({(120, 28, 120, 31): '(0.5)'}, {}), '(0.5)', False, 'import asyncio\n'), ((32, 13, 32, 44), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ({}, {}), '()', False, 'import asyncio\n'), ((54, 13, 54, 44), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ({}, {}), '()', False, 'import asyncio\n'), ((72, 13, 72, 44), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ({}, {}), '()', False, 'import asyncio\n'), ((89, 13, 89, 44), 'asyncio.get_event_loop_policy', 'asyncio.get_event_loop_policy', ({}, {}), '()', False, 'import asyncio\n')] |
Mozilla-GitHub-Standards/93f18f14efcf5fdfc0e04f9bf247f66baf46663f37b1d2087ab8d850abc90803 | apps/ignite/views.py | 4e374b4d52dfb9039ebe543e7f27682189022307 | from django.shortcuts import get_object_or_404
import jingo
import waffle
from django.contrib.auth.models import User
from challenges.models import Submission, Category
from projects.models import Project
from blogs.models import BlogEntry
from events.models import Event
def splash(request, project, slug, template_name='ignite/splash.html'):
"""Show an individual project challenge."""
project = get_object_or_404(Project, slug=project)
challenge = get_object_or_404(project.challenge_set, slug=slug)
num_blogs = 3
# have we announced the winners yet - switch template
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/homepage-winners.html'
num_blogs = 5
blogs = BlogEntry.objects.filter(
page='splash'
).order_by("-updated",)[:num_blogs]
# if the dev challenge is open we want to only show dev entries
if request.development.is_open:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Development")
.order_by("?"))
num_entries = len(entries)
entries_from = 'apps'
if num_entries < 5:
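            # not enough development submissions to feature; fall back to ideation entries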
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
else:
entries = (Submission.objects.visible()
.filter(phase__challenge=challenge)
.filter(phase__name="Ideation")
.order_by("?"))
entries_from = 'ideas'
event_list = Event.objects.get_featured()[:5]
return jingo.render(request, template_name, {
'challenge': challenge,
'project': project,
'phases': list(enumerate(challenge.phases.all(), start=1)),
'entries': entries[:5],
'categories': Category.objects.all(),
'blogs': blogs,
'event_list': event_list,
'entries_from': entries_from,
})
def about(request, project, slug, template_name='ignite/about.html'):
if waffle.switch_is_active('announce_winners'):
template_name = 'ignite/about-winners.html'
return jingo.render(request, template_name)
def judges(request, project, slug, template_name='challenges/all_judges.html'):
""" List all judges we have in the system """
profiles = []
for judge in User.objects.filter(groups__name='Judges'):
profile = judge.get_profile()
# we only want to show featured profiles
if profile.featured == True:
profiles.append(profile)
return jingo.render(request, 'ignite/judges.html', {
'profiles': profiles
})
def terms(request, project, slug, template_name='static/terms_conditions.html'):
return jingo.render(request, template_name, {})
def terms_development(request, project, slug, template_name='static/terms_conditions_development.html'):
return jingo.render(request, template_name, {})
def fail(request, template_name='404.html'):
return jingo.render(request, template_name, {}, status=404)
def app_fail(request, template_name='500.html'):
return jingo.render(request, template_name, {}, status=500)
def action_unavailable_response(request, message=None,
template_name="action_unavailable.html"):
"""Generic page for unavailable actions"""
context = {'message': message}
return jingo.render(request, template_name, context, status=403)
| [((14, 14, 14, 54), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404\n'), ((15, 16, 15, 67), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (), '', False, 'from django.shortcuts import get_object_or_404\n'), ((18, 7, 18, 50), 'waffle.switch_is_active', 'waffle.switch_is_active', ({(18, 31, 18, 49): '"""announce_winners"""'}, {}), "('announce_winners')", False, 'import waffle\n'), ((59, 7, 59, 50), 'waffle.switch_is_active', 'waffle.switch_is_active', ({(59, 31, 59, 49): '"""announce_winners"""'}, {}), "('announce_winners')", False, 'import waffle\n'), ((61, 11, 61, 47), 'jingo.render', 'jingo.render', ({(61, 24, 61, 31): 'request', (61, 33, 61, 46): 'template_name'}, {}), '(request, template_name)', False, 'import jingo\n'), ((67, 17, 67, 59), 'django.contrib.auth.models.User.objects.filter', 'User.objects.filter', (), '', False, 'from django.contrib.auth.models import User\n'), ((74, 11, 76, 6), 'jingo.render', 'jingo.render', ({(74, 24, 74, 31): 'request', (74, 33, 74, 53): '"""ignite/judges.html"""', (74, 55, 76, 5): "{'profiles': profiles}"}, {}), "(request, 'ignite/judges.html', {'profiles': profiles})", False, 'import jingo\n'), ((80, 11, 80, 51), 'jingo.render', 'jingo.render', ({(80, 24, 80, 31): 'request', (80, 33, 80, 46): 'template_name', (80, 48, 80, 50): '{}'}, {}), '(request, template_name, {})', False, 'import jingo\n'), ((84, 11, 84, 51), 'jingo.render', 'jingo.render', ({(84, 24, 84, 31): 'request', (84, 33, 84, 46): 'template_name', (84, 48, 84, 50): '{}'}, {}), '(request, template_name, {})', False, 'import jingo\n'), ((88, 11, 88, 63), 'jingo.render', 'jingo.render', (), '', False, 'import jingo\n'), ((92, 11, 92, 63), 'jingo.render', 'jingo.render', (), '', False, 'import jingo\n'), ((99, 11, 99, 68), 'jingo.render', 'jingo.render', (), '', False, 'import jingo\n'), ((45, 17, 45, 45), 'events.models.Event.objects.get_featured', 'Event.objects.get_featured', ({}, {}), '()', False, 'from events.models import Event\n'), ((51, 22, 51, 44), 'challenges.models.Category.objects.all', 'Category.objects.all', ({}, {}), '()', False, 'from challenges.models import Submission, Category\n'), ((21, 12, 23, 5), 'blogs.models.BlogEntry.objects.filter', 'BlogEntry.objects.filter', (), '', False, 'from blogs.models import BlogEntry\n'), ((26, 19, 26, 47), 'challenges.models.Submission.objects.visible', 'Submission.objects.visible', ({}, {}), '()', False, 'from challenges.models import Submission, Category\n'), ((39, 19, 39, 47), 'challenges.models.Submission.objects.visible', 'Submission.objects.visible', ({}, {}), '()', False, 'from challenges.models import Submission, Category\n'), ((33, 23, 33, 51), 'challenges.models.Submission.objects.visible', 'Submission.objects.visible', ({}, {}), '()', False, 'from challenges.models import Submission, Category\n')] |
thebouv/IUS-Hacktoberfest | dataPresenter.py | 084634ec2feff3e81862d85b3938e1ae2c5aadff | from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.io as pio
from dataProcessor import parseLabels, parseLangs
import os
years = parseLabels()
langs = parseLangs()
# build the plotly figure: a bar chart of language counts and a pie chart of years
fig = make_subplots(
rows=1, cols=2,
specs=[[{"type": "xy"}, {"type": "domain"}]],
)
fig.add_trace(go.Bar(y = list(langs.values()), x = list(langs.keys()), showlegend=False),
row=1, col=1)
fig.add_trace(go.Pie(values = list(years.values()), labels = list(years.keys())),
row=1, col=2)
fig.update_layout(height=600)
pio.write_html(fig, 'index.html', auto_open=True)
| [((8, 8, 8, 21), 'dataProcessor.parseLabels', 'parseLabels', ({}, {}), '()', False, 'from dataProcessor import parseLabels, parseLangs\n'), ((9, 8, 9, 20), 'dataProcessor.parseLangs', 'parseLangs', ({}, {}), '()', False, 'from dataProcessor import parseLabels, parseLangs\n'), ((13, 6, 16, 1), 'plotly.subplots.make_subplots', 'make_subplots', (), '', False, 'from plotly.subplots import make_subplots\n'), ((28, 0, 28, 49), 'plotly.io.write_html', 'pio.write_html', (), '', True, 'import plotly.io as pio\n')] |
Sairam954/bdl-benchmarks | bdlb/diabetic_retinopathy_diagnosis/benchmark.py | 6fbc855ca51403ad8f64b6be30ed92f6118c6cae | # Copyright 2019 BDL Benchmarks Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Diabetic retinopathy diagnosis BDL Benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Sequence
from typing import Text
from typing import Tuple
from typing import Union
import numpy as np
import pandas as pd
import tensorflow as tf
from absl import logging
from ..core import transforms
from ..core.benchmark import Benchmark
from ..core.benchmark import BenchmarkInfo
from ..core.benchmark import DataSplits
from ..core.constants import DATA_DIR
from ..core.levels import Level
tfk = tf.keras
_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR = os.path.join(
DATA_DIR, "downloads", "manual", "diabetic_retinopathy_diagnosis")
class DiabeticRetinopathyDiagnosisBecnhmark(Benchmark):
"""Diabetic retinopathy diagnosis benchmark class."""
def __init__(
self,
level: Union[Text, Level],
batch_size: int = 64,
data_dir: Optional[Text] = None,
download_and_prepare: bool = False,
):
"""Constructs a benchmark object.
Args:
      level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
download_and_prepare: (optional) `bool`, if the data is not available
it downloads and preprocesses it.
"""
self.__level = level if isinstance(level, Level) else Level.from_str(level)
try:
self.__ds = self.load(level=level,
batch_size=batch_size,
data_dir=data_dir or DATA_DIR)
except AssertionError:
if not download_and_prepare:
raise
else:
logging.info(
"Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()`"
" is now running...")
self.download_and_prepare()
@classmethod
def evaluate(
cls,
estimator: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
dataset: tf.data.Dataset,
output_dir: Optional[Text] = None,
name: Optional[Text] = None,
) -> Dict[Text, float]:
"""Evaluates an `estimator` on the `mode` benchmark dataset.
Args:
      estimator: `lambda x: mean_x, uncertainty_x`, an uncertainty estimation
        function, which returns predictive `mean_x` and `uncertainty_x`.
      dataset: `tf.data.Dataset`, the dataset on which to perform the evaluation.
output_dir: (optional) `str`, directory to save figures.
name: (optional) `str`, the name of the method.
"""
import inspect
import tqdm
import tensorflow_datasets as tfds
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Containers used for caching performance evaluation
y_true = list()
y_pred = list()
y_uncertainty = list()
# Convert to NumPy iterator if necessary
ds = dataset if inspect.isgenerator(dataset) else tfds.as_numpy(dataset)
for x, y in tqdm.tqdm(ds):
# Sample from probabilistic model
mean, uncertainty = estimator(x)
# Cache predictions
y_true.append(y)
y_pred.append(mean)
y_uncertainty.append(uncertainty)
# Use vectorized NumPy containers
y_true = np.concatenate(y_true).flatten()
y_pred = np.concatenate(y_pred).flatten()
y_uncertainty = np.concatenate(y_uncertainty).flatten()
fractions = np.asarray([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
# Metrics for evaluation
metrics = zip(["accuracy", "auc"], cls.metrics())
return {
metric: cls._evaluate_metric(
y_true,
y_pred,
y_uncertainty,
fractions,
lambda y_true, y_pred: metric_fn(y_true, y_pred).numpy(),
name,
) for (metric, metric_fn) in metrics
}
@staticmethod
def _evaluate_metric(
y_true: np.ndarray,
y_pred: np.ndarray,
y_uncertainty: np.ndarray,
fractions: Sequence[float],
metric_fn: Callable[[np.ndarray, np.ndarray], float],
name=None,
) -> pd.DataFrame:
"""Evaluate model predictive distribution on `metric_fn` at data retain
`fractions`.
Args:
y_true: `numpy.ndarray`, the ground truth labels, with shape [N].
y_pred: `numpy.ndarray`, the model predictions, with shape [N].
y_uncertainty: `numpy.ndarray`, the model uncertainties,
with shape [N].
fractions: `iterable`, the percentages of data to retain for
calculating `metric_fn`.
metric_fn: `lambda(y_true, y_pred) -> float`, a metric
function that provides a score given ground truths
and predictions.
name: (optional) `str`, the name of the method.
Returns:
      A `pandas.DataFrame` with columns ["retained_data", "mean", "std"]
      that summarizes the scores at the different data-retention fractions.
"""
N = y_true.shape[0]
# Sorts indexes by ascending uncertainty
I_uncertainties = np.argsort(y_uncertainty)
# Score containers
mean = np.empty_like(fractions)
# TODO(filangel): do bootstrap sampling and estimate standard error
std = np.zeros_like(fractions)
for i, frac in enumerate(fractions):
# Keep only the %-frac of lowest uncertainties
I = np.zeros(N, dtype=bool)
I[I_uncertainties[:int(N * frac)]] = True
mean[i] = metric_fn(y_true[I], y_pred[I])
# Store
df = pd.DataFrame(dict(retained_data=fractions, mean=mean, std=std))
df.name = name
return df
@property
def datasets(self) -> tf.data.Dataset:
"""Pointer to the processed datasets."""
return self.__ds
@property
def info(self) -> BenchmarkInfo:
"""Text description of the benchmark."""
return BenchmarkInfo(description="", urls="", setup="", citation="")
@property
def level(self) -> Level:
"""The downstream task level."""
return self.__level
@staticmethod
def loss() -> tfk.losses.Loss:
"""Loss used for training binary classifiers."""
return tfk.losses.BinaryCrossentropy()
@staticmethod
def metrics() -> tfk.metrics.Metric:
"""Evaluation metrics used for monitoring training."""
return [tfk.metrics.BinaryAccuracy(), tfk.metrics.AUC()]
@staticmethod
def class_weight() -> Sequence[float]:
"""Class weights used for rebalancing the dataset, by skewing the `loss`
accordingly."""
return [1.0, 4.0]
@classmethod
def load(
cls,
level: Union[Text, Level] = "realworld",
batch_size: int = 64,
data_dir: Optional[Text] = None,
as_numpy: bool = False,
) -> DataSplits:
"""Loads the datasets for the benchmark.
Args:
      level: `Level` or `str`, downstream task level.
batch_size: (optional) `int`, number of datapoints
per mini-batch.
data_dir: (optional) `str`, path to parent data directory.
as_numpy: (optional) `bool`, if True returns python generators
with `numpy.ndarray` outputs.
Returns:
A namedtuple with properties:
* train: `tf.data.Dataset`, train dataset.
* validation: `tf.data.Dataset`, validation dataset.
* test: `tf.data.Dataset`, test dataset.
"""
import tensorflow_datasets as tfds
from .tfds_adapter import DiabeticRetinopathyDiagnosis
# Fetch datasets
try:
ds_train, ds_validation, ds_test = DiabeticRetinopathyDiagnosis(
data_dir=data_dir or DATA_DIR,
config=level).as_dataset(split=["train", "validation", "test"],
shuffle_files=True,
batch_size=batch_size)
except AssertionError as ae:
raise AssertionError(
str(ae) +
" Run DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()"
" first and then retry.")
# Parse task level
level = level if isinstance(level, Level) else Level.from_str(level)
    # Dataset transformations
transforms_train, transforms_eval = cls._preprocessors()
# Apply transformations
ds_train = ds_train.map(transforms_train,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.map(
transforms_eval, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(transforms_eval,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Prefetches datasets to memory
ds_train = ds_train.prefetch(tf.data.experimental.AUTOTUNE)
ds_validation = ds_validation.prefetch(tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(tf.data.experimental.AUTOTUNE)
if as_numpy:
# Convert to NumPy iterators
ds_train = tfds.as_numpy(ds_train)
ds_validation = tfds.as_numpy(ds_validation)
ds_test = tfds.as_numpy(ds_test)
return DataSplits(ds_train, ds_validation, ds_test)
@classmethod
def download_and_prepare(cls, levels=None) -> None:
"""Downloads dataset from Kaggle, extracts zip files and processes it using
`tensorflow_datasets`.
Args:
levels: (optional) `iterable` of `str`, specifies which
levels from {'medium', 'realworld'} to prepare,
if None it prepares all the levels.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
# Disable GPU for data download, extraction and preparation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
cls._download()
# cls._extract()
#cls._prepare(levels)
@staticmethod
def _download() -> None:
"""Downloads data from Kaggle using `tensorflow_datasets`.
Raises:
OSError: if `~/.kaggle/kaggle.json` is not set up.
"""
import subprocess as sp
import tensorflow_datasets as tfds
# Append `/home/$USER/.local/bin` to path
os.environ["PATH"] += ":/home/{}/.local/bin/".format(os.environ["USER"])
# Download all files from Kaggle
drd = tfds.download.kaggle.KaggleCompetitionDownloader(
"diabetic-retinopathy-detection")
try:
for dfile in drd.competition_files:
drd.download_file(dfile,
output_dir=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
except sp.CalledProcessError as cpe:
raise OSError(
str(cpe) + "." +
" Make sure you have ~/.kaggle/kaggle.json setup, fetched from the Kaggle website"
" https://www.kaggle.com/<username>/account -> 'Create New API Key'."
" Also accept the dataset license by going to"
" https://www.kaggle.com/c/diabetic-retinopathy-detection/rules"
" and look for the button 'I Understand and Accept' (make sure when reloading the"
" page that the button does not pop up again).")
@staticmethod
def _extract() -> None:
"""Extracts zip files downloaded from Kaggle."""
import glob
import tqdm
import zipfile
import tempfile
# Extract train and test original images
for split in ["train", "test"]:
# Extract "<split>.zip.00*"" files to "<split>"
with tempfile.NamedTemporaryFile() as tmp:
# Concatenate "<split>.zip.00*" to "<split>.zip"
for fname in tqdm.tqdm(
sorted(
glob.glob(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{split}.zip.00*".format(split=split))))):
# Unzip "<split>.zip" to "<split>"
with open(fname, "rb") as ztmp:
tmp.write(ztmp.read())
with zipfile.ZipFile(tmp) as zfile:
for image in tqdm.tqdm(iterable=zfile.namelist(),
total=len(zfile.namelist())):
zfile.extract(member=image,
path=_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
# Delete "<split>.zip.00*" files
for splitzip in os.listdir(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR):
if "{split}.zip.00".format(split=split) in splitzip:
os.remove(
os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip))
# Extract "sample.zip", "trainLabels.csv.zip"
for fname in ["sample", "trainLabels.csv"]:
zfname = os.path.join(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR,
"{fname}.zip".format(fname=fname))
with zipfile.ZipFile(zfname) as zfile:
zfile.extractall(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)
os.remove(zfname)
@staticmethod
def _prepare(levels=None) -> None:
"""Generates the TFRecord objects for medium and realworld experiments."""
import multiprocessing
from absl import logging
from .tfds_adapter import DiabeticRetinopathyDiagnosis
    # Handle each level individually
for level in levels or ["medium", "realworld"]:
dtask = DiabeticRetinopathyDiagnosis(data_dir=DATA_DIR, config=level)
logging.debug("=== Preparing TFRecords for {} ===".format(level))
dtask.download_and_prepare()
@classmethod
def _preprocessors(cls) -> Tuple[transforms.Transform, transforms.Transform]:
"""Applies transformations to the raw data."""
import tensorflow_datasets as tfds
# Transformation hyperparameters
mean = np.asarray([0.42606387, 0.29752496, 0.21309826])
stddev = np.asarray([0.27662534, 0.20280295, 0.1687619])
class Parse(transforms.Transform):
"""Parses datapoints from raw `tf.data.Dataset`."""
def __call__(self, x, y=None):
"""Returns `as_supervised` tuple."""
return x["image"], x["label"]
class CastX(transforms.Transform):
"""Casts image to `dtype`."""
def __init__(self, dtype):
"""Constructs a type caster."""
self.dtype = dtype
def __call__(self, x, y):
"""Returns casted image (to `dtype`) and its (unchanged) label as
tuple."""
return tf.cast(x, self.dtype), y
class To01X(transforms.Transform):
"""Rescales image to [min, max]=[0, 1]."""
def __call__(self, x, y):
"""Returns rescaled image and its (unchanged) label as tuple."""
return x / 255.0, y
# Get augmentation schemes
[augmentation_config,
no_augmentation_config] = cls._ImageDataGenerator_config()
# Transformations for train dataset
transforms_train = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**augmentation_config),
])
# Transformations for validation/test dataset
transforms_eval = transforms.Compose([
Parse(),
CastX(tf.float32),
To01X(),
transforms.Normalize(mean, stddev),
        # TODO(filangel): handle batch with ImageDataGenerator
# transforms.RandomAugment(**no_augmentation_config),
])
return transforms_train, transforms_eval
@staticmethod
def _ImageDataGenerator_config():
"""Returns the configs for the
`tensorflow.keras.preprocessing.image.ImageDataGenerator`, used for the
random augmentation of the dataset, following the implementation of
https://github.com/chleibig/disease-detection/blob/f3401b26aa9b832ff77afe93
e3faa342f7d088e5/scripts/inspect_data_augmentation.py."""
augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=180.0,
width_shift_range=0.05,
height_shift_range=0.05,
shear_range=0.,
zoom_range=0.10,
channel_shift_range=0.,
fill_mode="constant",
cval=0.,
horizontal_flip=True,
vertical_flip=True,
data_format="channels_last",
)
no_augmentation_config = dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0.0,
width_shift_range=0.0,
height_shift_range=0.0,
shear_range=0.,
zoom_range=0.0,
channel_shift_range=0.,
fill_mode="nearest",
cval=0.,
horizontal_flip=False,
vertical_flip=False,
data_format="channels_last",
)
return augmentation_config, no_augmentation_config
| [((45, 43, 46, 70), 'os.path.join', 'os.path.join', ({(46, 4, 46, 12): 'DATA_DIR', (46, 14, 46, 25): '"""downloads"""', (46, 27, 46, 35): '"""manual"""', (46, 37, 46, 69): '"""diabetic_retinopathy_diagnosis"""'}, {}), "(DATA_DIR, 'downloads', 'manual', 'diabetic_retinopathy_diagnosis')", False, 'import os\n'), ((115, 16, 115, 29), 'tqdm.tqdm', 'tqdm.tqdm', ({(115, 26, 115, 28): 'ds'}, {}), '(ds)', False, 'import tqdm\n'), ((127, 16, 127, 58), 'numpy.asarray', 'np.asarray', ({(127, 27, 127, 57): '[0.5, 0.6, 0.7, 0.8, 0.9, 1.0]'}, {}), '([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])', True, 'import numpy as np\n'), ((175, 22, 175, 47), 'numpy.argsort', 'np.argsort', ({(175, 33, 175, 46): 'y_uncertainty'}, {}), '(y_uncertainty)', True, 'import numpy as np\n'), ((178, 11, 178, 35), 'numpy.empty_like', 'np.empty_like', ({(178, 25, 178, 34): 'fractions'}, {}), '(fractions)', True, 'import numpy as np\n'), ((180, 10, 180, 34), 'numpy.zeros_like', 'np.zeros_like', ({(180, 24, 180, 33): 'fractions'}, {}), '(fractions)', True, 'import numpy as np\n'), ((324, 10, 325, 41), 'tensorflow_datasets.download.kaggle.KaggleCompetitionDownloader', 'tfds.download.kaggle.KaggleCompetitionDownloader', ({(325, 8, 325, 40): '"""diabetic-retinopathy-detection"""'}, {}), "(\n 'diabetic-retinopathy-detection')", True, 'import tensorflow_datasets as tfds\n'), ((398, 11, 398, 59), 'numpy.asarray', 'np.asarray', ({(398, 22, 398, 58): '[0.42606387, 0.29752496, 0.21309826]'}, {}), '([0.42606387, 0.29752496, 0.21309826])', True, 'import numpy as np\n'), ((399, 13, 399, 60), 'numpy.asarray', 'np.asarray', ({(399, 24, 399, 59): '[0.27662534, 0.20280295, 0.1687619]'}, {}), '([0.27662534, 0.20280295, 0.1687619])', True, 'import numpy as np\n'), ((113, 20, 113, 48), 'inspect.isgenerator', 'inspect.isgenerator', ({(113, 40, 113, 47): 'dataset'}, {}), '(dataset)', False, 'import inspect\n'), ((113, 54, 113, 76), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', ({(113, 68, 113, 75): 'dataset'}, {}), '(dataset)', True, 'import tensorflow_datasets as tfds\n'), ((184, 10, 184, 33), 'numpy.zeros', 'np.zeros', (), '', True, 'import numpy as np\n'), ((284, 17, 284, 40), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', ({(284, 31, 284, 39): 'ds_train'}, {}), '(ds_train)', True, 'import tensorflow_datasets as tfds\n'), ((285, 22, 285, 50), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', ({(285, 36, 285, 49): 'ds_validation'}, {}), '(ds_validation)', True, 'import tensorflow_datasets as tfds\n'), ((286, 16, 286, 38), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', ({(286, 30, 286, 37): 'ds_test'}, {}), '(ds_test)', True, 'import tensorflow_datasets as tfds\n'), ((367, 22, 367, 74), 'os.listdir', 'os.listdir', ({(367, 33, 367, 73): '_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR'}, {}), '(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR)', False, 'import os\n'), ((378, 6, 378, 23), 'os.remove', 'os.remove', ({(378, 16, 378, 22): 'zfname'}, {}), '(zfname)', False, 'import os\n'), ((124, 13, 124, 35), 'numpy.concatenate', 'np.concatenate', ({(124, 28, 124, 34): 'y_true'}, {}), '(y_true)', True, 'import numpy as np\n'), ((125, 13, 125, 35), 'numpy.concatenate', 'np.concatenate', ({(125, 28, 125, 34): 'y_pred'}, {}), '(y_pred)', True, 'import numpy as np\n'), ((126, 20, 126, 49), 'numpy.concatenate', 'np.concatenate', ({(126, 35, 126, 48): 'y_uncertainty'}, {}), '(y_uncertainty)', True, 'import numpy as np\n'), ((351, 11, 351, 40), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ({}, {}), '()', False, 'import tempfile\n'), ((376, 11, 376, 34), 
'zipfile.ZipFile', 'zipfile.ZipFile', ({(376, 27, 376, 33): 'zfname'}, {}), '(zfname)', False, 'import zipfile\n'), ((78, 8, 80, 33), 'absl.logging.info', 'logging.info', ({(79, 12, 80, 32): '"""Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()` is now running..."""'}, {}), "(\n 'Data not found, `DiabeticRetinopathyDiagnosisBecnhmark.download_and_prepare()` is now running...'\n )", False, 'from absl import logging\n'), ((361, 13, 361, 33), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(361, 29, 361, 32): 'tmp'}, {}), '(tmp)', False, 'import zipfile\n'), ((418, 15, 418, 37), 'tensorflow.cast', 'tf.cast', ({(418, 23, 418, 24): 'x', (418, 26, 418, 36): 'self.dtype'}, {}), '(x, self.dtype)', True, 'import tensorflow as tf\n'), ((370, 14, 370, 78), 'os.path.join', 'os.path.join', ({(370, 27, 370, 67): '_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR', (370, 69, 370, 77): 'splitzip'}, {}), '(_DIABETIC_RETINOPATHY_DIAGNOSIS_DATA_DIR, splitzip)', False, 'import os\n')] |
agustinhenze/mibs.snmplabs.com | pysnmp-with-texts/CXConsoleDriver-MIB.py | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | #
# PySNMP MIB module CXConsoleDriver-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CXConsoleDriver-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:32:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion")
cxConsoleDriver, = mibBuilder.importSymbols("CXProduct-SMI", "cxConsoleDriver")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, Gauge32, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, Unsigned32, Integer32, ModuleIdentity, NotificationType, ObjectIdentity, MibIdentifier, Counter32, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "Gauge32", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "Unsigned32", "Integer32", "ModuleIdentity", "NotificationType", "ObjectIdentity", "MibIdentifier", "Counter32", "iso", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
cxCdBaudRate = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 6, 1), Integer32().clone(9600)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxCdBaudRate.setStatus('mandatory')
if mibBuilder.loadTexts: cxCdBaudRate.setDescription('Determines the baud rate of the console port. The setting of this object is dynamic. The console port immediately implements the option you enter. Options: 9600 19200 38400 115200 Default Value: 9600 Configuration Changed: operative')
cxCdCharSize = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 6, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(7, 8)).clone(8)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxCdCharSize.setStatus('mandatory')
if mibBuilder.loadTexts: cxCdCharSize.setDescription('Determines how many bits constitute a character for the console port. Options: none - the value is fixed at 8 Default Value: 8 Configuration Changed: none ')
cxCdParity = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 6, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noParity", 1), ("evenParity", 2), ("oddParity", 3))).clone('noParity')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxCdParity.setStatus('mandatory')
if mibBuilder.loadTexts: cxCdParity.setDescription('Determines the parity scheme the CPU uses to validate the characters it receives through the console port. Options: none - the value is fixed at noParity Default Value: noParity Configuration Changed: none ')
cxCdStopBit = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 6, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2)).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxCdStopBit.setStatus('mandatory')
if mibBuilder.loadTexts: cxCdStopBit.setDescription('Determines how many stop bits are at the end of each character the console port receives. Options: none - the value is fixed at 1 Default Value: 1 Configuration Changed: none ')
cxCdProtocol = MibScalar((1, 3, 6, 1, 4, 1, 495, 2, 1, 5, 6, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("localConsole", 1), ("ppp", 2))).clone('localConsole')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cxCdProtocol.setStatus('mandatory')
if mibBuilder.loadTexts: cxCdProtocol.setDescription('Determines the protocol (configuration method) for the console port. The setting of this object is dynamic. The console port immediately implements the option you enter. However, if you change the protocol you are currently using to configure the port your connection will be lost. Options: localConsole (1): you use this protocol when you attach a TTY terminal directly to the console port. This protocol requires you to use command line configuration. You also must enter a password to gain access to the configuration tables. You can define the password using the object uiPassword of the CXUserInterface Table. ppp (2): you use this protocol when you are configuring via a windows-based application such as HP/OV (Hewlett Packard-OpenView). Default Value: ppp (2) Configuration Changed: operative')
mibBuilder.exportSymbols("CXConsoleDriver-MIB", cxCdParity=cxCdParity, cxCdProtocol=cxCdProtocol, cxCdBaudRate=cxCdBaudRate, cxCdStopBit=cxCdStopBit, cxCdCharSize=cxCdCharSize)
| [] |
Lifeistrange/WeiboSpider | db/redis_db.py | 8aa3465487ef64bb6e9bb4bd503f182a1b38c292 | # coding:utf-8
import datetime
import json
import re
import redis
from config.conf import get_redis_args
redis_args = get_redis_args()
class Cookies(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('cookies'))
rd_con_broker = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('broker'))
@classmethod
def store_cookies(cls, name, cookies):
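        # Serialize the cookies together with the login timestamp into the 'account'
        # hash, then queue the account name so it can be handed out in rotation.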
pickled_cookies = json.dumps(
{'cookies': cookies, 'loginTime': datetime.datetime.now().timestamp()})
cls.rd_con.hset('account', name, pickled_cookies)
cls.rd_con.lpush('account_queue', name)
@classmethod
def fetch_cookies(cls):
for i in range(cls.rd_con.llen('account_queue')):
name = cls.rd_con.rpop('account_queue').decode('utf-8')
if name:
j_account = cls.rd_con.hget('account', name).decode('utf-8')
if j_account:
                    cls.rd_con.lpush('account_queue', name)  # if the account no longer exists this name is cleaned out too, and the next name is taken
account = json.loads(j_account)
login_time = datetime.datetime.fromtimestamp(account['loginTime'])
if datetime.datetime.now() - login_time > datetime.timedelta(hours=20):
cls.rd_con.hdel('account', name)
                        continue  # discard this expired account; account_queue is cleaned up on the next access, not here, because of the distributed setup
return name, account['cookies']
else:
return None
@classmethod
def delete_cookies(cls, name):
cls.rd_con.hdel('account', name)
return True
@classmethod
def check_login_task(cls):
if cls.rd_con_broker.llen('login_queue') > 0:
cls.rd_con_broker.delete('login_queue')
class Urls(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('urls'))
@classmethod
def store_crawl_url(cls, url, result):
cls.rd_con.set(url, result)
class IdNames(object):
rd_con = redis.StrictRedis(host=redis_args.get('host'), port=redis_args.get('port'),
password=redis_args.get('password'), db=redis_args.get('id_name'))
@classmethod
def store_id_name(cls, user_name, user_id):
cls.rd_con.set(user_name, user_id)
@classmethod
def fetch_uid_by_name(cls, user_name):
user_id = cls.rd_con.get(user_name)
if user_id:
return user_id.decode('utf-8')
return ''
| [((9, 13, 9, 29), 'config.conf.get_redis_args', 'get_redis_args', ({}, {}), '()', False, 'from config.conf import get_redis_args\n'), ((34, 30, 34, 51), 'json.loads', 'json.loads', ({(34, 41, 34, 50): 'j_account'}, {}), '(j_account)', False, 'import json\n'), ((35, 33, 35, 86), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', ({(35, 65, 35, 85): "account['loginTime']"}, {}), "(account['loginTime'])", False, 'import datetime\n'), ((22, 46, 22, 69), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((36, 62, 36, 90), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((36, 23, 36, 46), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n')] |
vEpiphyte/vivisect | vivisect/storage/mpfile.py | 14947a53c6781175f0aa83d49cc16c524a2e23a3 | import base64
import logging
import msgpack
logger = logging.getLogger(__name__)
loadargs = {'use_list': False, 'raw': False}
if msgpack.version < (1, 0, 0):
loadargs['encoding'] = 'utf-8'
else:
loadargs['strict_map_key'] = False
VSIG = b'MSGVIV'.ljust(8, b'\x00')
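# Events of type 20 carry a raw bytes payload as the fourth element of their
# argument tuple; it is base64-encoded before packing and decoded again on load.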
def vivEventsAppendFile(filename, events):
with open(filename, 'ab') as f:
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspaceChanges(vw, filename):
events = vw.exportWorkspaceChanges()
vivEventsAppendFile(filename, events)
def vivEventsToFile(filename, events):
with open(filename, 'wb') as f:
msgpack.pack(VSIG, f, use_bin_type=False)
for event in events:
if event[0] == 20:
mape = base64.b64encode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
msgpack.pack(event, f, use_bin_type=False)
def saveWorkspace(vw, filename):
events = vw.exportWorkspace()
vivEventsToFile(filename, events)
def vivEventsFromFile(filename):
events = []
with open(filename, 'rb') as f:
unpacker = msgpack.Unpacker(f, **loadargs)
siggy = next(unpacker)
if siggy.encode('utf-8') != VSIG:
logger.warning('Invalid file signature of %s', str(siggy))
return
for event in unpacker:
if event[0] == 20:
mape = base64.b64decode(event[1][3])
event = (event[0], (event[1][0], event[1][1], event[1][2], mape))
events.append(event)
return events
def loadWorkspace(vw, filename):
events = vivEventsFromFile(filename)
vw.importWorkspace(events)
| [((6, 9, 6, 36), 'logging.getLogger', 'logging.getLogger', ({(6, 27, 6, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((33, 8, 33, 49), 'msgpack.pack', 'msgpack.pack', (), '', False, 'import msgpack\n'), ((49, 19, 49, 50), 'msgpack.Unpacker', 'msgpack.Unpacker', ({(49, 36, 49, 37): 'f'}, {}), '(f, **loadargs)', False, 'import msgpack\n'), ((23, 12, 23, 54), 'msgpack.pack', 'msgpack.pack', (), '', False, 'import msgpack\n'), ((38, 12, 38, 54), 'msgpack.pack', 'msgpack.pack', (), '', False, 'import msgpack\n'), ((21, 23, 21, 52), 'base64.b64encode', 'base64.b64encode', ({(21, 40, 21, 51): 'event[1][3]'}, {}), '(event[1][3])', False, 'import base64\n'), ((36, 23, 36, 52), 'base64.b64encode', 'base64.b64encode', ({(36, 40, 36, 51): 'event[1][3]'}, {}), '(event[1][3])', False, 'import base64\n'), ((56, 23, 56, 52), 'base64.b64decode', 'base64.b64decode', ({(56, 40, 56, 51): 'event[1][3]'}, {}), '(event[1][3])', False, 'import base64\n')] |
mathiasose/pytest-pgsql | pytest_pgsql/plugin.py | 5e076db146699c3b683b49e4a31323c4c23054de | """This forms the core of the pytest plugin."""
import pytest
import testing.postgresql
from pytest_pgsql import database
from pytest_pgsql import ext
def pytest_addoption(parser):
"""Add configuration options for pytest_pgsql."""
parser.addoption(
'--pg-extensions', action='store', default='',
help="A comma-separated list of PostgreSQL extensions to install at "
"the beginning of the session for use by all tests. Example: "
"--pg-extensions=uuid-ossp,pg_tgrm,pgcrypto")
parser.addoption(
'--pg-work-mem', type=int, default=32,
help='Set the value of the `work_mem` setting, in megabytes. '
'`pytest_pgsql` defaults to 32. Adjusting this up or down can '
'help performance; see the Postgres documentation for more details.')
parser.addoption(
'--pg-conf-opt', action='append',
help='Add a key=value line that will be appended to postgresql.conf')
@pytest.fixture(scope='session')
def database_uri(request):
"""A fixture giving the connection URI of the session-wide test database."""
# Note: due to the nature of the variable configs, the command line options
# must be tested manually.
work_mem = request.config.getoption('--pg-work-mem')
if work_mem < 0: # pragma: no cover
pytest.exit('ERROR: --pg-work-mem value must be >= 0. Got: %d' % work_mem)
return
elif work_mem == 0: # pragma: no cover
# Disable memory tweak and use the server default.
work_mem_setting = ''
else:
# User wants to change the working memory setting.
work_mem_setting = '-c work_mem=%dMB ' % work_mem
conf_opts = request.config.getoption('--pg-conf-opt')
if conf_opts:
conf_opts_string = ' -c ' + ' -c '.join(conf_opts)
else:
conf_opts_string = ''
# pylint: disable=bad-continuation,deprecated-method
with testing.postgresql.Postgresql(
postgres_args='-c TimeZone=UTC '
'-c fsync=off '
'-c synchronous_commit=off '
'-c full_page_writes=off '
+ work_mem_setting +
'-c checkpoint_timeout=30min '
'-c bgwriter_delay=10000ms'
+ conf_opts_string) as pgdb:
yield pgdb.url()
#: A SQLAlchemy engine shared by the transacted and non-transacted database fixtures.
#:
#: .. seealso:: `pytest_pgsql.ext.create_engine_fixture`
# pylint: disable=invalid-name
pg_engine = ext.create_engine_fixture('pg_engine', scope='session')
# pylint: enable=invalid-name
@pytest.fixture(scope='session')
def database_snapshot(pg_engine):
"""Create one database snapshot for the session.
The database will be restored to this state after each test.
.. note ::
This is an implementation detail and should not be used directly except
by derived fixtures.
"""
return database.create_database_snapshot(pg_engine)
# pylint: disable=invalid-name
#: Create a test database instance and cleans up after each test finishes.
#:
#: You should prefer the `transacted_postgresql_db` fixture unless your test
#: cannot be run in a single transaction. The `transacted_postgresql_db` fixture
#: leads to faster tests since it doesn't tear down the entire database between
#: each test.
postgresql_db = \
database.PostgreSQLTestDB.create_fixture('postgresql_db')
#: Create a test database instance that rolls back the current transaction after
#: each test finishes, verifying its integrity before returning.
#:
#: Read the warning in the main documentation page before using this fixture.
transacted_postgresql_db = \
database.TransactedPostgreSQLTestDB.create_fixture('transacted_postgresql_db')
# pylint: enable=invalid-name
| [((29, 1, 29, 32), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((69, 12, 69, 67), 'pytest_pgsql.ext.create_engine_fixture', 'ext.create_engine_fixture', (), '', False, 'from pytest_pgsql import ext\n'), ((73, 1, 73, 32), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((96, 4, 96, 61), 'pytest_pgsql.database.PostgreSQLTestDB.create_fixture', 'database.PostgreSQLTestDB.create_fixture', ({(96, 45, 96, 60): '"""postgresql_db"""'}, {}), "('postgresql_db')", False, 'from pytest_pgsql import database\n'), ((104, 4, 104, 82), 'pytest_pgsql.database.TransactedPostgreSQLTestDB.create_fixture', 'database.TransactedPostgreSQLTestDB.create_fixture', ({(104, 55, 104, 81): '"""transacted_postgresql_db"""'}, {}), "('transacted_postgresql_db')", False, 'from pytest_pgsql import database\n'), ((84, 11, 84, 55), 'pytest_pgsql.database.create_database_snapshot', 'database.create_database_snapshot', ({(84, 45, 84, 54): 'pg_engine'}, {}), '(pg_engine)', False, 'from pytest_pgsql import database\n'), ((37, 8, 37, 82), 'pytest.exit', 'pytest.exit', ({(37, 20, 37, 81): "('ERROR: --pg-work-mem value must be >= 0. Got: %d' % work_mem)"}, {}), "('ERROR: --pg-work-mem value must be >= 0. Got: %d' % work_mem)", False, 'import pytest\n')] |
abrahamneben/orbcomm_beam_mapping | power_data_to_sat_passes/date_utils.py | 71b3e7d6e4214db0a6f4e68ebeeb7d7f846f5004 | # written by abraham on aug 24
def dyear2date(dyear):
year = int(dyear)
month_lengths = [31,28,31,30,31,30,31,31,30,31,30,31]
days_before_months = [0,31,59,90,120,151,181,212,243,273,304,334]
days_into_year_f = (dyear-year)*365
days_into_year_i = int(days_into_year_f)
for i in range(12):
if days_before_months[i] < days_into_year_f < (days_before_months[i]+month_lengths[i]):
month = i+1
break
date = days_into_year_i - days_before_months[month-1]
hours_f = (days_into_year_f-days_into_year_i)*24
hours_i = int(hours_f)
minutes_f = (hours_f-hours_i)*60
minutes_i = int(minutes_f)
seconds_i = int((minutes_f-minutes_i)*60)
return "%02d/%02d/%d %02d:%02d:%02d" % (month,date,year,hours_i,minutes_i,seconds_i)
| [] |
sourcery-ai-bot/personal-expenses-accounting | app/base/count_lines.py | 55e76744a06fd502d119f57427cd7a0bfaf68fe1 | import glob
from os import walk
exclude_folders = [
'node_modules',
'ios',
'android',
'__pycache__'
]
exclude_files = [
'json',
'txt',
'traineddata',
'lstmf',
'yml',
    'md',
'log',
'env',
'gitignore',
'dockerignore'
]
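# NOTE: exclude_files is defined above but not yet applied in the loop below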
# recursively collect all folders under the project directory
dirr = '/home/viktor/Documents/personal-expenses-accounting/app/services/web_service/'
folders = glob.glob(dirr + '/**/', recursive=True)
# only app related directories
directories = []
for folder in folders:
current_folder = folder.split('/')[-2]
if current_folder not in exclude_folders:
files = glob.glob(folder + '*')
print(files)
directories.append(folder)
# num_lines = sum(1 for line in open('myfile.txt'))
| [((26, 10, 26, 50), 'glob.glob', 'glob.glob', (), '', False, 'import glob\n'), ((33, 16, 33, 39), 'glob.glob', 'glob.glob', ({(33, 26, 33, 38): "folder + '*'"}, {}), "(folder + '*')", False, 'import glob\n')] |
rgurevych/python_for_testers | data/contacts.py | 04023a5d6ea480f7828aa56e8a4094b744e05721 |
from models.contact import Contact
testdata = [Contact(first_name="Firstname", last_name="Lastname", mobile_phone="+12345678",
work_phone="12345", home_phone="67890", fax="55443322", email_1="[email protected]",
email_2="[email protected]", email_3="[email protected]",
address="Street, 15 \n 12345 New-York")]
| [((4, 12, 7, 71), 'models.contact.Contact', 'Contact', (), '', False, 'from models.contact import Contact\n')] |
nobuto-m/charm-helpers | charmhelpers/contrib/charmsupport/nrpe.py | 4cffc05ace43234d34b040cccdde3460f68cb673 | # Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compatibility with the nrpe-external-master charm"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Matthew Wedgwood <[email protected]>
import subprocess
import pwd
import grp
import os
import glob
import shutil
import re
import shlex
import yaml
from charmhelpers.core.hookenv import (
config,
hook_name,
local_unit,
log,
relation_get,
relation_ids,
relation_set,
relations_of_type,
)
from charmhelpers.core.host import service
from charmhelpers.core import host
# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
#
# 1. Update metadata.yaml
#
# provides:
# (...)
# nrpe-external-master:
# interface: nrpe-external-master
# scope: container
#
# and/or
#
# provides:
# (...)
# local-monitors:
# interface: local-monitors
# scope: container
#
# 2. Add the following to config.yaml
#
# nagios_context:
# default: "juju"
# type: string
# description: |
# Used by the nrpe subordinate charms.
# A string that will be prepended to instance name to set the host name
# in nagios. So for instance the hostname would be something like:
# juju-myservice-0
# If you're running multiple environments with the same services in them
# this allows you to differentiate between them.
# nagios_servicegroups:
# default: ""
# type: string
# description: |
# A comma-separated list of nagios servicegroups.
# If left empty, the nagios_context will be used as the servicegroup
#
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
#
# 4. Update your hooks.py with something like this:
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE()
# nrpe_compat.add_check(
# shortname = "myservice",
# description = "Check MyService",
# check_cmd = "check_http -w 2 -c 10 http://localhost"
# )
# nrpe_compat.add_check(
# "myservice_other",
# "Check for widget failures",
# check_cmd = "/srv/myapp/scripts/widget_check"
# )
# nrpe_compat.write()
#
# def config_changed():
# (...)
# update_nrpe_config()
#
# def nrpe_external_master_relation_changed():
# update_nrpe_config()
#
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 4.a If your charm is a subordinate charm set primary=False
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE(primary=False)
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed
class CheckException(Exception):
pass
class Check(object):
shortname_re = '[A-Za-z0-9-_.@]+$'
service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
use active-service
host_name {nagios_hostname}
service_description {nagios_hostname}[{shortname}] """
"""{description}
check_command check_nrpe!{command}
servicegroups {nagios_servicegroup}
}}
""")
def __init__(self, shortname, description, check_cmd):
super(Check, self).__init__()
# XXX: could be better to calculate this from the service name
if not re.match(self.shortname_re, shortname):
raise CheckException("shortname must match {}".format(
Check.shortname_re))
self.shortname = shortname
self.command = "check_{}".format(shortname)
# Note: a set of invalid characters is defined by the
# Nagios server config
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
self.description = description
self.check_cmd = self._locate_cmd(check_cmd)
def _get_check_filename(self):
return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
def _get_service_filename(self, hostname):
return os.path.join(NRPE.nagios_exportdir,
'service__{}_{}.cfg'.format(hostname, self.command))
def _locate_cmd(self, check_cmd):
search_path = (
'/usr/lib/nagios/plugins',
'/usr/local/lib/nagios/plugins',
)
parts = shlex.split(check_cmd)
for path in search_path:
if os.path.exists(os.path.join(path, parts[0])):
command = os.path.join(path, parts[0])
if len(parts) > 1:
command += " " + " ".join(parts[1:])
return command
log('Check command not found: {}'.format(parts[0]))
return ''
def _remove_service_files(self):
if not os.path.exists(NRPE.nagios_exportdir):
return
for f in os.listdir(NRPE.nagios_exportdir):
if f.endswith('_{}.cfg'.format(self.command)):
os.remove(os.path.join(NRPE.nagios_exportdir, f))
def remove(self, hostname):
nrpe_check_file = self._get_check_filename()
if os.path.exists(nrpe_check_file):
os.remove(nrpe_check_file)
self._remove_service_files()
def write(self, nagios_context, hostname, nagios_servicegroups):
nrpe_check_file = self._get_check_filename()
with open(nrpe_check_file, 'w') as nrpe_check_config:
nrpe_check_config.write("# check {}\n".format(self.shortname))
if nagios_servicegroups:
nrpe_check_config.write(
"# The following header was added automatically by juju\n")
nrpe_check_config.write(
"# Modifying it will affect nagios monitoring and alerting\n")
nrpe_check_config.write(
"# servicegroups: {}\n".format(nagios_servicegroups))
nrpe_check_config.write("command[{}]={}\n".format(
self.command, self.check_cmd))
if not os.path.exists(NRPE.nagios_exportdir):
log('Not writing service config as {} is not accessible'.format(
NRPE.nagios_exportdir))
else:
self.write_service_config(nagios_context, hostname,
nagios_servicegroups)
def write_service_config(self, nagios_context, hostname,
nagios_servicegroups):
self._remove_service_files()
templ_vars = {
'nagios_hostname': hostname,
'nagios_servicegroup': nagios_servicegroups,
'description': self.description,
'shortname': self.shortname,
'command': self.command,
}
nrpe_service_text = Check.service_template.format(**templ_vars)
nrpe_service_file = self._get_service_filename(hostname)
with open(nrpe_service_file, 'w') as nrpe_service_config:
nrpe_service_config.write(str(nrpe_service_text))
def run(self):
subprocess.call(self.check_cmd)
class NRPE(object):
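    """Collect Check objects and publish them to the nrpe subordinate.

    (Summary added for clarity, inferred from the code below.) Checks added
    via ``add_check()`` are written out by ``write()`` as nrpe.d command
    files and exported Nagios service definitions; ``write()`` also pushes
    the resulting monitors dict onto any local-monitors /
    nrpe-external-master relations and restarts nagios-nrpe-server (except
    during update-status hooks).
    """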
nagios_logdir = '/var/log/nagios'
nagios_exportdir = '/var/lib/nagios/export'
nrpe_confdir = '/etc/nagios/nrpe.d'
homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
def __init__(self, hostname=None, primary=True):
super(NRPE, self).__init__()
self.config = config()
self.primary = primary
self.nagios_context = self.config['nagios_context']
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
self.nagios_servicegroups = self.config['nagios_servicegroups']
else:
self.nagios_servicegroups = self.nagios_context
self.unit_name = local_unit().replace('/', '-')
if hostname:
self.hostname = hostname
else:
nagios_hostname = get_nagios_hostname()
if nagios_hostname:
self.hostname = nagios_hostname
else:
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
        # If in an nrpe-external-master relation hook, set primary status
relation = relation_ids('nrpe-external-master')
if relation:
log("Setting charm primary status {}".format(primary))
for rid in relation:
relation_set(relation_id=rid, relation_settings={'primary': self.primary})
self.remove_check_queue = set()
def add_check(self, *args, **kwargs):
shortname = None
if kwargs.get('shortname') is None:
if len(args) > 0:
shortname = args[0]
else:
shortname = kwargs['shortname']
self.checks.append(Check(*args, **kwargs))
try:
self.remove_check_queue.remove(shortname)
except KeyError:
pass
def remove_check(self, *args, **kwargs):
if kwargs.get('shortname') is None:
raise ValueError('shortname of check must be specified')
# Use sensible defaults if they're not specified - these are not
# actually used during removal, but they're required for constructing
# the Check object; check_disk is chosen because it's part of the
# nagios-plugins-basic package.
if kwargs.get('check_cmd') is None:
kwargs['check_cmd'] = 'check_disk'
if kwargs.get('description') is None:
kwargs['description'] = ''
check = Check(*args, **kwargs)
check.remove(self.hostname)
self.remove_check_queue.add(kwargs['shortname'])
def write(self):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
if not os.path.exists(NRPE.nagios_logdir):
os.mkdir(NRPE.nagios_logdir)
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
nrpe_monitors = {}
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
for nrpecheck in self.checks:
nrpecheck.write(self.nagios_context, self.hostname,
self.nagios_servicegroups)
nrpe_monitors[nrpecheck.shortname] = {
"command": nrpecheck.command,
}
        # update-status hooks are configured to fire every 5 minutes by
        # default. When nagios-nrpe-server is restarted, the nagios server
        # reports checks as failing, causing unnecessary alerts. Let's not
        # restart on update-status hooks.
if not hook_name() == 'update-status':
service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
for rid in monitor_ids:
reldata = relation_get(unit=local_unit(), rid=rid)
if 'monitors' in reldata:
# update the existing set of monitors with the new data
old_monitors = yaml.safe_load(reldata['monitors'])
old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
# remove keys that are in the remove_check_queue
old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
if k not in self.remove_check_queue}
# update/add nrpe_monitors
old_nrpe_monitors.update(nrpe_monitors)
old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
# write back to the relation
relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
else:
                # write a brand new set of monitors, as there are no existing ones.
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
self.remove_check_queue.clear()
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_host_context
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_host_context' in rel:
return rel['nagios_host_context']
def get_nagios_hostname(relation_name='nrpe-external-master'):
"""
Query relation with nrpe subordinate, return the nagios_hostname
:param str relation_name: Name of relation nrpe sub joined to
"""
for rel in relations_of_type(relation_name):
if 'nagios_hostname' in rel:
return rel['nagios_hostname']
def get_nagios_unit_name(relation_name='nrpe-external-master'):
"""
Return the nagios unit name prepended with host_context if needed
:param str relation_name: Name of relation nrpe sub joined to
"""
host_context = get_nagios_hostcontext(relation_name)
if host_context:
unit = "%s:%s" % (host_context, local_unit())
else:
unit = local_unit()
return unit
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param list services: List of services to check
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
            continue
upstart_init = '/etc/init/%s.conf' % svc
sysv_init = '/etc/init.d/%s' % svc
if host.init_is_systemd():
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_systemd.py %s' % svc
)
elif os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)
elif os.path.exists(sysv_init):
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
croncmd = (
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
'-e -s /etc/init.d/%s status' % svc
)
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
f = open(cronpath, 'w')
f.write(cron_file)
f.close()
nrpe.add_check(
shortname=svc,
description='service check {%s}' % unit_name,
check_cmd='check_status_file.py -f %s' % checkpath,
)
# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
# (LP: #1670223).
if immediate_check and os.path.isdir(nrpe.homedir):
f = open(checkpath, 'w')
subprocess.call(
croncmd.split(),
stdout=f,
stderr=subprocess.STDOUT
)
f.close()
os.chmod(checkpath, 0o644)
def copy_nrpe_checks(nrpe_files_dir=None):
"""
Copy the nrpe checks into place
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
if nrpe_files_dir is None:
# determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
for segment in ['.', 'hooks']:
nrpe_files_dir = os.path.abspath(os.path.join(
os.getenv('CHARM_DIR'),
segment,
'charmhelpers',
'contrib',
'openstack',
'files'))
if os.path.isdir(nrpe_files_dir):
break
else:
raise RuntimeError("Couldn't find charmhelpers directory")
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
if os.path.isfile(fname):
shutil.copy2(fname,
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
def add_haproxy_checks(nrpe, unit_name):
"""
Add checks for each service in list
:param NRPE nrpe: NRPE object to add check to
:param str unit_name: Unit name to use in check description
"""
nrpe.add_check(
shortname='haproxy_servers',
description='Check HAProxy {%s}' % unit_name,
check_cmd='check_haproxy.sh')
nrpe.add_check(
shortname='haproxy_queue',
description='Check HAProxy queue depth {%s}' % unit_name,
check_cmd='check_haproxy_queue_depth.sh')
| [((356, 15, 356, 47), 'charmhelpers.core.hookenv.relations_of_type', 'relations_of_type', ({(356, 33, 356, 46): 'relation_name'}, {}), '(relation_name)', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((367, 15, 367, 47), 'charmhelpers.core.hookenv.relations_of_type', 'relations_of_type', ({(367, 33, 367, 46): 'relation_name'}, {}), '(relation_name)', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((171, 16, 171, 38), 'shlex.split', 'shlex.split', ({(171, 28, 171, 37): 'check_cmd'}, {}), '(check_cmd)', False, 'import shlex\n'), ((184, 17, 184, 50), 'os.listdir', 'os.listdir', ({(184, 28, 184, 49): 'NRPE.nagios_exportdir'}, {}), '(NRPE.nagios_exportdir)', False, 'import os\n'), ((190, 11, 190, 42), 'os.path.exists', 'os.path.exists', ({(190, 26, 190, 41): 'nrpe_check_file'}, {}), '(nrpe_check_file)', False, 'import os\n'), ((232, 8, 232, 39), 'subprocess.call', 'subprocess.call', ({(232, 24, 232, 38): 'self.check_cmd'}, {}), '(self.check_cmd)', False, 'import subprocess\n'), ((243, 22, 243, 30), 'charmhelpers.core.hookenv.config', 'config', ({}, {}), '()', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((261, 19, 261, 55), 'charmhelpers.core.hookenv.relation_ids', 'relation_ids', ({(261, 32, 261, 54): '"""nrpe-external-master"""'}, {}), "('nrpe-external-master')", False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((382, 15, 382, 27), 'charmhelpers.core.hookenv.local_unit', 'local_unit', ({}, {}), '()', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((403, 11, 403, 33), 'charmhelpers.core.host.init_is_systemd', 'host.init_is_systemd', ({}, {}), '()', False, 'from charmhelpers.core import host\n'), ((464, 11, 464, 41), 'os.path.exists', 'os.path.exists', ({(464, 26, 464, 40): 'NAGIOS_PLUGINS'}, {}), '(NAGIOS_PLUGINS)', False, 'import os\n'), ((465, 8, 465, 35), 'os.makedirs', 'os.makedirs', ({(465, 20, 465, 34): 'NAGIOS_PLUGINS'}, {}), '(NAGIOS_PLUGINS)', False, 'import os\n'), ((466, 27, 466, 66), 'os.path.join', 'os.path.join', ({(466, 40, 466, 54): 'nrpe_files_dir', (466, 56, 466, 65): '"""check_*"""'}, {}), "(nrpe_files_dir, 'check_*')", False, 'import os\n'), ((467, 11, 467, 32), 'os.path.isfile', 'os.path.isfile', ({(467, 26, 467, 31): 'fname'}, {}), '(fname)', False, 'import os\n'), ((148, 15, 148, 53), 're.match', 're.match', ({(148, 24, 148, 41): 'self.shortname_re', (148, 43, 148, 52): 'shortname'}, {}), '(self.shortname_re, shortname)', False, 'import re\n'), ((182, 15, 182, 52), 'os.path.exists', 'os.path.exists', ({(182, 30, 182, 51): 'NRPE.nagios_exportdir'}, {}), '(NRPE.nagios_exportdir)', False, 'import os\n'), ((191, 12, 191, 38), 'os.remove', 'os.remove', ({(191, 22, 191, 37): 'nrpe_check_file'}, {}), '(nrpe_check_file)', False, 'import os\n'), ((208, 15, 208, 52), 'os.path.exists', 'os.path.exists', ({(208, 30, 208, 51): 'NRPE.nagios_exportdir'}, {}), '(NRPE.nagios_exportdir)', False, 'import os\n'), ((307, 15, 307, 49), 'os.path.exists', 'os.path.exists', ({(307, 30, 307, 48): 'NRPE.nagios_logdir'}, {}), '(NRPE.nagios_logdir)', False, 'import os\n'), ((308, 12, 308, 40), 'os.mkdir', 
'os.mkdir', ({(308, 21, 308, 39): 'NRPE.nagios_logdir'}, {}), '(NRPE.nagios_logdir)', False, 'import os\n'), ((309, 12, 309, 64), 'os.chown', 'os.chown', ({(309, 21, 309, 39): 'NRPE.nagios_logdir', (309, 41, 309, 51): 'nagios_uid', (309, 53, 309, 63): 'nagios_gid'}, {}), '(NRPE.nagios_logdir, nagios_uid, nagios_gid)', False, 'import os\n'), ((325, 12, 325, 52), 'charmhelpers.core.host.service', 'service', ({(325, 20, 325, 29): '"""restart"""', (325, 31, 325, 51): '"""nagios-nrpe-server"""'}, {}), "('restart', 'nagios-nrpe-server')", False, 'from charmhelpers.core.host import service\n'), ((327, 22, 327, 52), 'charmhelpers.core.hookenv.relation_ids', 'relation_ids', ({(327, 35, 327, 51): '"""local-monitors"""'}, {}), "('local-monitors')", False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((328, 12, 328, 48), 'charmhelpers.core.hookenv.relation_ids', 'relation_ids', ({(328, 25, 328, 47): '"""nrpe-external-master"""'}, {}), "('nrpe-external-master')", False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((409, 13, 409, 41), 'os.path.exists', 'os.path.exists', ({(409, 28, 409, 40): 'upstart_init'}, {}), '(upstart_init)', False, 'import os\n'), ((460, 15, 460, 44), 'os.path.isdir', 'os.path.isdir', ({(460, 29, 460, 43): 'nrpe_files_dir'}, {}), '(nrpe_files_dir)', False, 'import os\n'), ((173, 30, 173, 58), 'os.path.join', 'os.path.join', ({(173, 43, 173, 47): 'path', (173, 49, 173, 57): 'parts[0]'}, {}), '(path, parts[0])', False, 'import os\n'), ((174, 26, 174, 54), 'os.path.join', 'os.path.join', ({(174, 39, 174, 43): 'path', (174, 45, 174, 53): 'parts[0]'}, {}), '(path, parts[0])', False, 'import os\n'), ((250, 25, 250, 37), 'charmhelpers.core.hookenv.local_unit', 'local_unit', ({}, {}), '()', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((265, 16, 265, 90), 'charmhelpers.core.hookenv.relation_set', 'relation_set', (), '', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((301, 25, 301, 47), 'pwd.getpwnam', 'pwd.getpwnam', ({(301, 38, 301, 46): '"""nagios"""'}, {}), "('nagios')", False, 'import pwd\n'), ((302, 25, 302, 47), 'grp.getgrnam', 'grp.getgrnam', ({(302, 38, 302, 46): '"""nagios"""'}, {}), "('nagios')", False, 'import grp\n'), ((304, 12, 304, 66), 'charmhelpers.core.hookenv.log', 'log', ({(304, 16, 304, 65): '"""Nagios user not set up, nrpe checks not updated"""'}, {}), "('Nagios user not set up, nrpe checks not updated')", False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((324, 15, 324, 26), 'charmhelpers.core.hookenv.hook_name', 'hook_name', ({}, {}), '()', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((333, 31, 333, 66), 'yaml.safe_load', 'yaml.safe_load', ({(333, 46, 333, 65): "reldata['monitors']"}, {}), "(reldata['monitors'])", False, 'import yaml\n'), ((380, 40, 380, 52), 'charmhelpers.core.hookenv.local_unit', 'local_unit', ({}, {}), '()', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((415, 13, 
415, 38), 'os.path.exists', 'os.path.exists', ({(415, 28, 415, 37): 'sysv_init'}, {}), '(sysv_init)', False, 'import os\n'), ((186, 26, 186, 64), 'os.path.join', 'os.path.join', ({(186, 39, 186, 60): 'NRPE.nagios_exportdir', (186, 62, 186, 63): 'f'}, {}), '(NRPE.nagios_exportdir, f)', False, 'import os\n'), ((330, 40, 330, 52), 'charmhelpers.core.hookenv.local_unit', 'local_unit', ({}, {}), '()', False, 'from charmhelpers.core.hookenv import config, hook_name, local_unit, log, relation_get, relation_ids, relation_set, relations_of_type\n'), ((454, 16, 454, 38), 'os.getenv', 'os.getenv', ({(454, 26, 454, 37): '"""CHARM_DIR"""'}, {}), "('CHARM_DIR')", False, 'import os\n'), ((469, 54, 469, 77), 'os.path.basename', 'os.path.basename', ({(469, 71, 469, 76): 'fname'}, {}), '(fname)', False, 'import os\n'), ((342, 55, 342, 78), 'yaml.dump', 'yaml.dump', ({(342, 65, 342, 77): 'old_monitors'}, {}), '(old_monitors)', False, 'import yaml\n'), ((345, 55, 345, 74), 'yaml.dump', 'yaml.dump', ({(345, 65, 345, 73): 'monitors'}, {}), '(monitors)', False, 'import yaml\n'), ((433, 35, 433, 62), 'os.path.isdir', 'os.path.isdir', ({(433, 49, 433, 61): 'nrpe.homedir'}, {}), '(nrpe.homedir)', False, 'import os\n'), ((441, 16, 441, 42), 'os.chmod', 'os.chmod', ({(441, 25, 441, 34): 'checkpath', (441, 36, 441, 41): '(420)'}, {}), '(checkpath, 420)', False, 'import os\n')] |
mintzer/pupillometry-rf-back | venv/Lib/site-packages/proglog/proglog.py | cfa86fa984a49dce0123798f8de5b838c02e10d5 | """Implements the generic progress logger class, and the ProgressBar class.
"""
from tqdm import tqdm, tqdm_notebook
from collections import OrderedDict
import time
SETTINGS = {
'notebook': False
}
def notebook(turn='on'):
SETTINGS['notebook'] = True if (turn == 'on') else False
def troncate_string(s, max_length=25):
return s if (len(s) < max_length) else (s[:max_length] + "...")
class ProgressLogger:
"""Generic class for progress loggers.
    A progress logger contains a "state" dictionary.
Parameters
----------
init_state
        Dictionary representing the initial state.
"""
def __init__(self, init_state=None):
self.state = {}
self.stored = {}
self.logs = []
self.log_indent = 0
if init_state is not None:
self.state.update(init_state)
def log(self, message):
self.logs.append((' ' * self.log_indent) + message)
def dump_logs(self, filepath=None):
if filepath is not None:
with open(filepath, 'a') as f:
f.write("\n".join(self.logs))
else:
return "\n".join(self.logs)
def callback(self, **kw):
"""Execute something after the state has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def store(self, **kw):
"""Store objects in the logger and trigger ``self.store_callback``.
        This works exactly like ``logger()``, but the latter is meant for simple
        data objects (text, numbers) that will be sent over the network or
        written to a file. The ``store`` method expects rather large objects
        which are not necessarily serializable, and will be used e.g. to draw
plots on the fly.
"""
self.stored.update(kw)
self.store_callback(**kw)
def store_callback(self, **kw):
"""Execute something after the store has been updated by the given
state elements.
This default callback does nothing, overwrite it by subclassing
"""
pass
def iter(self, **kw):
"""Iterate through a list while updating the state.
Examples
--------
        >>> for username in logger.iter(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['user'] is updated
>>> print (username)
"""
for field, iterable in kw.items():
for it in iterable:
self(**{field: it})
yield it
def __call__(self, **kw):
self.state.update(kw)
self.callback(**kw)
class ProgressBarLogger(ProgressLogger):
"""Generic class for progress loggers.
    A progress logger contains a "state" dictionary
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
"""
bar_indent = 2
def __init__(self, init_state=None, bars=None, ignored_bars=None,
logged_bars='all', min_time_interval=0, ignore_bars_under=0):
ProgressLogger.__init__(self, init_state)
if bars is None:
bars = OrderedDict()
elif isinstance(bars, (list, tuple)):
bars = OrderedDict([
(b, dict(title=b, index=-1, total=None, message=None,
indent=0))
for b in bars
])
if isinstance(ignored_bars, (list, tuple)):
ignored_bars = set(ignored_bars)
self.ignored_bars = ignored_bars
self.logged_bars = logged_bars
self.state['bars'] = bars
self.min_time_interval = min_time_interval
self.ignore_bars_under = ignore_bars_under
@property
def bars(self):
"""Return ``self.state['bars'].``"""
return self.state['bars']
def bar_is_ignored(self, bar):
if self.ignored_bars is None:
return False
elif self.ignored_bars == 'all_others':
return (bar not in self.bars)
else:
return bar in self.ignored_bars
def bar_is_logged(self, bar):
if (not self.logged_bars):
return False
elif self.logged_bars == 'all':
return True
else:
return bar in self.logged_bars
def iterable_is_too_short(self, iterable):
length = len(iterable) if hasattr(iterable, '__len__') else None
return (length is not None) and (length < self.ignore_bars_under)
def iter_bar(self, bar_prefix='', **kw):
"""Iterate through a list while updating a state bar.
Examples
--------
>>> for username in logger.iter_bar(user=['tom', 'tim', 'lea']):
>>> # At every loop, logger.state['bars']['user'] is updated
>>> # to {index: i, total: 3, title:'user'}
>>> print (username)
"""
if 'bar_message' in kw:
bar_message = kw.pop('bar_message')
else:
bar_message = None
bar, iterable = kw.popitem()
if self.bar_is_ignored(bar) or self.iterable_is_too_short(iterable):
return iterable
bar = bar_prefix + bar
if hasattr(iterable, '__len__'):
self(**{bar + '__total': len(iterable)})
def new_iterable():
last_time = time.time()
i = 0 # necessary in case the iterator is empty
for i, it in enumerate(iterable):
now_time = time.time()
if (i == 0) or (now_time - last_time > self.min_time_interval):
if bar_message is not None:
self(**{bar + '__message': bar_message(it)})
self(**{bar + '__index': i})
last_time = now_time
yield it
if self.bars[bar]['index'] != i:
self(**{bar + '__index': i})
self(**{bar + '__index': i + 1})
return new_iterable()
def bars_callback(self, bar, attr, value, old_value=None):
"""Execute a custom action after the progress bars are updated.
Parameters
----------
bar
Name/ID of the bar to be modified.
attr
Attribute of the bar attribute to be modified
value
New value of the attribute
old_value
Previous value of this bar's attribute.
This default callback does nothing, overwrite it by subclassing.
"""
pass
def __call__(self, **kw):
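        # (Descriptive comment added for clarity.) Bar updates arrive as
        # '<bar>__<attr>' keyword keys (e.g. 'main__index', 'main__total');
        # the '__total' items are sorted first so a bar is created or resized
        # before its index advances, and any remaining plain keys fall
        # through to the regular state update below.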
items = sorted(kw.items(), key=lambda kv: not kv[0].endswith('total'))
for key, value in items:
if '__' in key:
bar, attr = key.split('__')
if self.bar_is_ignored(bar):
continue
kw.pop(key)
if bar not in self.bars:
self.bars[bar] = dict(title=bar, index=-1,
total=None, message=None)
old_value = self.bars[bar][attr]
if self.bar_is_logged(bar):
new_bar = (attr == 'index') and (value < old_value)
if (attr == 'total') or (new_bar):
self.bars[bar]['indent'] = self.log_indent
else:
self.log_indent = self.bars[bar]['indent']
self.log("[%s] %s: %s" % (bar, attr, value))
self.log_indent += self.bar_indent
self.bars[bar][attr] = value
self.bars_callback(bar, attr, value, old_value)
self.state.update(kw)
self.callback(**kw)
class TqdmProgressBarLogger(ProgressBarLogger):
"""Tqdm-powered progress bar for console or Notebooks.
Parameters
----------
init_state
Initial state of the logger
bars
Either None (will be initialized with no bar) or a list/tuple of bar
names (``['main', 'sub']``) which will be initialized with index -1 and
no total, or a dictionary (possibly ordered) of bars, of the form
``{bar_1: {title: 'bar1', index: 2, total:23}, bar_2: {...}}``
ignored_bars
Either None (newly met bars will be added) or a list of blacklisted bar
names, or ``'all_others'`` to signify that all bar names not already in
``self.bars`` will be ignored.
    leave_bars
        If True, finished tqdm bars are left displayed (passed to tqdm's
        ``leave`` argument).
notebook
True will make the bars look nice (HTML) in the jupyter notebook. It is
        advised to leave it to 'default' as the default can be globally set from
        inside a notebook with ``import proglog; proglog.notebook()``.
print_messages
If True, every ``logger(message='something')`` will print a message in
the console / notebook
"""
def __init__(self, init_state=None, bars=None, leave_bars=False,
ignored_bars=None, logged_bars='all', notebook='default',
print_messages=True, min_time_interval=0,
ignore_bars_under=0):
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
ignore_bars_under=ignore_bars_under,
min_time_interval=min_time_interval)
self.leave_bars = leave_bars
self.tqdm_bars = OrderedDict([
(bar, None)
for bar in self.bars
])
if notebook == 'default':
notebook = SETTINGS['notebook']
self.notebook = notebook
self.print_messages = print_messages
self.tqdm = (tqdm_notebook if self.notebook else tqdm)
def new_tqdm_bar(self, bar):
"""Create a new tqdm bar, possibly replacing an existing one."""
if (bar in self.tqdm_bars) and (self.tqdm_bars[bar] is not None):
self.close_tqdm_bar(bar)
infos = self.bars[bar]
self.tqdm_bars[bar] = self.tqdm(
total=infos['total'],
desc=infos['title'],
postfix=dict(now=troncate_string(str(infos['message']))),
leave=self.leave_bars
)
def close_tqdm_bar(self, bar):
"""Close and erase the tqdm bar"""
self.tqdm_bars[bar].close()
if not self.notebook:
self.tqdm_bars[bar] = None
def bars_callback(self, bar, attr, value, old_value):
if (bar not in self.tqdm_bars) or (self.tqdm_bars[bar] is None):
self.new_tqdm_bar(bar)
if attr == 'index':
if value >= old_value:
total = self.bars[bar]['total']
if total and (value >= total):
self.close_tqdm_bar(bar)
else:
self.tqdm_bars[bar].update(value - old_value)
else:
self.new_tqdm_bar(bar)
self.tqdm_bars[bar].update(value + 1)
elif attr == 'message':
self.tqdm_bars[bar].set_postfix(now=troncate_string(str(value)))
self.tqdm_bars[bar].update(0)
def callback(self, **kw):
if self.print_messages and ('message' in kw) and kw['message']:
if self.notebook:
print(kw['message'])
else:
self.tqdm.write(kw['message'])
class RqWorkerProgressLogger:
def __init__(self, job):
self.job = job
if 'progress_data' not in self.job.meta:
self.job.meta['progress_data'] = {}
self.job.save()
def callback(self, **kw):
self.job.meta['progress_data'] = self.state
self.job.save()
class RqWorkerBarLogger(RqWorkerProgressLogger, ProgressBarLogger):
def __init__(self, job, init_state=None, bars=None, ignored_bars=(),
logged_bars='all', min_time_interval=0):
RqWorkerProgressLogger.__init__(self, job)
ProgressBarLogger.__init__(self, init_state=init_state, bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval)
class MuteProgressBarLogger(ProgressBarLogger):
def bar_is_ignored(self, bar):
return True
def default_bar_logger(logger, bars=None, ignored_bars=None, logged_bars='all',
min_time_interval=0, ignore_bars_under=0):
if logger == 'bar':
return TqdmProgressBarLogger(
bars=bars,
ignored_bars=ignored_bars,
logged_bars=logged_bars,
min_time_interval=min_time_interval,
ignore_bars_under=ignore_bars_under
)
elif logger is None:
return MuteProgressBarLogger()
else:
return logger
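# Minimal usage sketch (illustration only, not part of the library):
#
#     logger = default_bar_logger('bar')        # tqdm bars in console/notebook
#     for chunk in logger.iter_bar(chunk=range(100)):
#         pass                                   # the 'chunk' bar advances here
#     logger(message='done')                     # printed when print_messages=True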
| [((301, 25, 304, 10), 'collections.OrderedDict', 'OrderedDict', ({(301, 37, 304, 9): '[(bar, None) for bar in self.bars]'}, {}), '([(bar, None) for bar in self.bars])', False, 'from collections import OrderedDict\n'), ((129, 19, 129, 32), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((193, 24, 193, 35), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((196, 27, 196, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
jorgepadilla19/gdsfactory | gdsfactory/tests/test_component_from_yaml_bezier.py | 68e1c18257a75d4418279851baea417c8899a165 | import gdsfactory as gf
from gdsfactory.component import Component
yaml = """
name:
test_component_yaml_without_cell
instances:
mmi:
component: mmi1x2
bend:
component: bend_s
connections:
bend,o1: mmi,o2
"""
def test_component_from_yaml_without_cell() -> Component:
"""bezier does not have cell"""
c = gf.read.from_yaml(yaml)
assert c.name == "test_component_yaml_without_cell", c.name
assert len(c.get_dependencies()) == 2, len(c.get_dependencies())
assert len(c.ports) == 0, len(c.ports)
return c
if __name__ == "__main__":
c = test_component_from_yaml_without_cell()
print(c.name)
c.show()
| [((22, 8, 22, 31), 'gdsfactory.read.from_yaml', 'gf.read.from_yaml', ({(22, 26, 22, 30): 'yaml'}, {}), '(yaml)', True, 'import gdsfactory as gf\n')] |
AdamBrianBright/cats-python | cats/types.py | 163cbde06c0d56520c217c0d66ddca34c7e0f63b | from pathlib import Path
from types import GeneratorType
from typing import AsyncIterable, Iterable, TypeAlias
import ujson
from cats.errors import MalformedHeadersError
try:
from django.db.models import QuerySet, Model
except ImportError:
QuerySet = type('QuerySet', (list,), {})
Model = type('Model', (list,), {})
__all__ = [
'Bytes',
'BytesGen',
'BytesAsyncGen',
'BytesAnyGen',
'Byte',
'Json',
'File',
'List',
'Missing',
'MISSING',
'QuerySet',
'Model',
'T_Headers',
'Headers',
]
Bytes: TypeAlias = bytes | bytearray | memoryview
BytesGen: TypeAlias = Iterable[Bytes]
BytesAsyncGen: TypeAlias = AsyncIterable[Bytes]
BytesAnyGen: TypeAlias = BytesGen | BytesAsyncGen
Byte: TypeAlias = Bytes
Json: TypeAlias = str | int | float | dict | list | bool | None
File: TypeAlias = Path | str
List = list | tuple | set | GeneratorType | QuerySet
class Missing(str):
"""
Custom Missing type is required for Pydantic to work properly. IDK
"""
__slots__ = ()
def __init__(self):
super().__init__()
def __eq__(self, other):
return isinstance(other, Missing)
def __bool__(self):
return False
MISSING = Missing()
class Headers(dict):
__slots__ = ()
def __init__(self, *args, **kwargs):
v = self._convert(*args, **kwargs)
        if (offset := v.get('Offset', None)) and (not isinstance(offset, int) or offset < 0):  # keys are Title-Cased by _convert
raise MalformedHeadersError('Invalid offset header', headers=v)
super().__init__(v)
@classmethod
def _key(cls, key: str) -> str:
return key.replace(' ', '-').title()
def __getitem__(self, item):
return super().__getitem__(self._key(item))
def __setitem__(self, key, value):
return super().__setitem__(self._key(key), value)
def __delitem__(self, key):
return super().__delitem__(self._key(key))
def __contains__(self, item):
return super().__contains__(self._key(item))
@classmethod
def _convert(cls, *args, **kwargs):
return {cls._key(k): v for k, v in dict(*args, **kwargs).items() if isinstance(k, str)}
def update(self, *args, **kwargs) -> None:
super().update(self._convert(*args, **kwargs))
def encode(self) -> bytes:
return ujson.dumps(self, ensure_ascii=False, escape_forward_slashes=False).encode('utf-8')
@classmethod
def decode(cls, headers: Bytes) -> 'Headers':
try:
headers = ujson.loads(headers)
except ValueError: # + UnicodeDecodeError
headers = None
return cls(headers or {})
T_Headers: TypeAlias = Headers | dict[str]
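# Usage sketch (illustration only): header keys are normalised to Title-Case
# with dashes by Headers._key, so lookups are tolerant of case and spacing.
#
#     h = Headers({'content type': 'json', 'offset': 10})
#     assert h['Content-Type'] == 'json' and 'offset' in h
#     assert Headers.decode(h.encode()) == h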
| [((68, 18, 68, 75), 'cats.errors.MalformedHeadersError', 'MalformedHeadersError', (), '', False, 'from cats.errors import MalformedHeadersError\n'), ((100, 22, 100, 42), 'ujson.loads', 'ujson.loads', ({(100, 34, 100, 41): 'headers'}, {}), '(headers)', False, 'import ujson\n'), ((95, 15, 95, 82), 'ujson.dumps', 'ujson.dumps', (), '', False, 'import ujson\n')] |
MyCollege/raven | raven/utils/urlparse.py | 9447f3a55ae7703afe84c3493625e3c3fb700700 | from __future__ import absolute_import
try:
import urlparse as _urlparse
except ImportError:
from urllib import parse as _urlparse
def register_scheme(scheme):
for method in filter(lambda s: s.startswith('uses_'), dir(_urlparse)):
uses = getattr(_urlparse, method)
if scheme not in uses:
uses.append(scheme)
urlparse = _urlparse.urlparse
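# Usage sketch (illustration only): register_scheme('postgres') appends the
# scheme to each of urlparse's uses_* lists so that URLs such as
# 'postgres://user:pass@host/db?sslmode=require' are split into netloc, path
# and query the same way http URLs are.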
| [] |
stjordanis/MONeT-1 | setup.py | 98a5c7d149ca19c8c64069dbd8f27ce7f97bf3af | import setuptools
setuptools.setup(
name="monet_memory_optimized_training",
version="0.0.1",
description="Memory Optimized Network Training Framework",
url="https://github.com/philkr/lowrank_conv",
packages=setuptools.find_packages(include = ['monet', 'monet.*', 'models', 'checkmate', 'gist']),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| [((8, 13, 8, 100), 'setuptools.find_packages', 'setuptools.find_packages', (), '', False, 'import setuptools\n')] |
Superomeg4/pyleecan | Tests/Methods/Machine/test_Magnet_Type_11_meth.py | 2b695b5f39e77475a07aa0ea89489fb0a9659337 | # -*- coding: utf-8 -*-
"""
@date Created on Thu Dec 18 13:56:33 2014
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
from unittest import TestCase
from ddt import ddt, data
from pyleecan.Classes.Arc1 import Arc1
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.MagnetType11 import MagnetType11
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotMPolar import SlotMPolar
from numpy import pi, exp, angle, array
from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface
Mag11_test = list()
# Internal Slot surface
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=0, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=1, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 0.78539616, "Ao": pi / 4, "H_exp": 1})
# Internal Slot inset
lam = LamSlotMag(is_internal=True, Rext=0.5)
lam.slot = SlotMPolar(H0=40e-3, W0=pi / 4, Zs=4)
lam.slot.magnet = [MagnetType11(Hmag=20e-3, Wmag=pi / 4)]
Mag11_test.append({"test_obj": lam, "S_exp": 7.3827e-3, "Ao": pi / 4, "H_exp": 20e-3})
# Outward Slot inset
lam = LamSlotMag(is_internal=False, Rext=0.1325)
lam.slot = SlotMPolar(H0=5e-3, W0=pi / 10, Zs=8)
lam.slot.magnet = [MagnetType11(Hmag=8e-3, Wmag=pi / 12)]
Mag11_test.append({"test_obj": lam, "S_exp": 2.09439e-6, "Ao": pi / 12, "H_exp": 8e-3})
# For AlmostEqual
DELTA = 1e-4
@ddt
class test_Magnet_Type_11_meth(TestCase):
"""unittest for MagnetType11 methods
"""
@data(*Mag11_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
# Compare numerical and analytical results
b = comp_surface(test_obj.slot.magnet[0])
msg = "Analytical: " + str(a) + " Numerical " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
@data(*Mag11_test)
def test_comp_angle_op(self, test_dict):
"""Check that the computation of the opening angle is correct
"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.magnet[0].comp_angle_opening()
a = result
b = test_dict["Ao"]
msg = "Return " + str(a) + " expected " + str(b)
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA, msg=msg)
def test_build_geometry_out(self):
"""check that curve_list is correct (outwards magnet)"""
lam = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=False,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (40e-3 + 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (40e-3 + 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z - 0.2) * exp(1j * angle(Z1))
Z4 = (Z - 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
def test_build_geometry_in(self):
"""check that curve_list is correct (inwards magnet)"""
lam = LamSlotMag(
Rint=40e-1,
Rext=90e-1,
is_internal=True,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType11(Wmag=pi / 10, Hmag=0.2)]
lam.slot = SlotMPolar(Zs=8, W0=pi / 10, H0=0.2, magnet=magnet)
test_obj = lam.slot.magnet[0]
Z1 = (90e-1 - 0.2) * exp(-1j * pi / 10 / 2)
Z2 = (90e-1 - 0.2) * exp(1j * pi / 10 / 2)
Z = abs(Z1)
Z3 = (Z + 0.2) * exp(1j * angle(Z1))
Z4 = (Z + 0.2) * exp(1j * angle(Z2))
# # Creation of curve
curve_list = list()
curve_list.append(Segment(Z1, Z3))
curve_list.append(Arc1(Z3, Z4, abs(Z3)))
curve_list.append(Segment(Z4, Z2))
curve_list.append(Arc1(Z2, Z1, -abs(Z2)))
surface = test_obj.build_geometry()
result = surface[0].get_lines()
for i in range(0, len(result)):
a = result[i].begin
b = curve_list[i].begin
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
a = result[i].end
b = curve_list[i].end
self.assertAlmostEqual((a - b) / a, 0, delta=DELTA)
| [((24, 6, 24, 44), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', (), '', False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((25, 11, 25, 44), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', (), '', False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((30, 6, 30, 44), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', (), '', False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((31, 11, 31, 48), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', (), '', False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((36, 6, 36, 48), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', (), '', False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((37, 11, 37, 48), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', (), '', False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((26, 19, 26, 52), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', (), '', False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((32, 19, 32, 56), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', (), '', False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((38, 19, 38, 56), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', (), '', False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((50, 5, 50, 22), 'ddt.data', 'data', ({(50, 10, 50, 21): '*Mag11_test'}, {}), '(*Mag11_test)', False, 'from ddt import ddt, data\n'), ((67, 5, 67, 22), 'ddt.data', 'data', ({(67, 10, 67, 21): '*Mag11_test'}, {}), '(*Mag11_test)', False, 'from ddt import ddt, data\n'), ((79, 5, 79, 22), 'ddt.data', 'data', ({(79, 10, 79, 21): '*Mag11_test'}, {}), '(*Mag11_test)', False, 'from ddt import ddt, data\n'), ((63, 12, 63, 49), 'pyleecan.Methods.Machine.Magnet.comp_surface.comp_surface', 'comp_surface', ({(63, 25, 63, 48): 'test_obj.slot.magnet[0]'}, {}), '(test_obj.slot.magnet[0])', False, 'from pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface\n'), ((93, 14, 101, 9), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', (), '', False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((103, 19, 103, 70), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', (), '', False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((133, 14, 141, 9), 'pyleecan.Classes.LamSlotMag.LamSlotMag', 'LamSlotMag', (), '', False, 'from pyleecan.Classes.LamSlotMag import LamSlotMag\n'), ((143, 19, 143, 70), 'pyleecan.Classes.SlotMPolar.SlotMPolar', 'SlotMPolar', (), '', False, 'from pyleecan.Classes.SlotMPolar import SlotMPolar\n'), ((102, 18, 102, 54), 'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', (), '', False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((105, 29, 105, 51), 'numpy.exp', 'exp', ({(105, 33, 105, 50): '(-1.0j * pi / 10 / 2)'}, {}), '(-1.0j * pi / 10 / 2)', False, 'from numpy import pi, exp, angle, array\n'), ((106, 29, 106, 50), 'numpy.exp', 'exp', ({(106, 33, 106, 49): '(1.0j * pi / 10 / 2)'}, {}), '(1.0j * pi / 10 / 2)', False, 'from numpy import pi, exp, angle, array\n'), ((115, 26, 115, 41), 'pyleecan.Classes.Segment.Segment', 'Segment', ({(115, 34, 115, 36): 'Z1', (115, 38, 115, 40): 'Z3'}, {}), '(Z1, Z3)', False, 'from pyleecan.Classes.Segment import Segment\n'), ((117, 26, 117, 41), 'pyleecan.Classes.Segment.Segment', 'Segment', ({(117, 34, 117, 36): 'Z4', (117, 38, 117, 40): 'Z2'}, {}), '(Z4, Z2)', False, 'from pyleecan.Classes.Segment import Segment\n'), ((142, 18, 142, 54), 
'pyleecan.Classes.MagnetType11.MagnetType11', 'MagnetType11', (), '', False, 'from pyleecan.Classes.MagnetType11 import MagnetType11\n'), ((145, 29, 145, 51), 'numpy.exp', 'exp', ({(145, 33, 145, 50): '(-1.0j * pi / 10 / 2)'}, {}), '(-1.0j * pi / 10 / 2)', False, 'from numpy import pi, exp, angle, array\n'), ((146, 29, 146, 50), 'numpy.exp', 'exp', ({(146, 33, 146, 49): '(1.0j * pi / 10 / 2)'}, {}), '(1.0j * pi / 10 / 2)', False, 'from numpy import pi, exp, angle, array\n'), ((155, 26, 155, 41), 'pyleecan.Classes.Segment.Segment', 'Segment', ({(155, 34, 155, 36): 'Z1', (155, 38, 155, 40): 'Z3'}, {}), '(Z1, Z3)', False, 'from pyleecan.Classes.Segment import Segment\n'), ((157, 26, 157, 41), 'pyleecan.Classes.Segment.Segment', 'Segment', ({(157, 34, 157, 36): 'Z4', (157, 38, 157, 40): 'Z2'}, {}), '(Z4, Z2)', False, 'from pyleecan.Classes.Segment import Segment\n'), ((110, 34, 110, 43), 'numpy.angle', 'angle', ({(110, 40, 110, 42): 'Z1'}, {}), '(Z1)', False, 'from numpy import pi, exp, angle, array\n'), ((111, 34, 111, 43), 'numpy.angle', 'angle', ({(111, 40, 111, 42): 'Z2'}, {}), '(Z2)', False, 'from numpy import pi, exp, angle, array\n'), ((150, 34, 150, 43), 'numpy.angle', 'angle', ({(150, 40, 150, 42): 'Z1'}, {}), '(Z1)', False, 'from numpy import pi, exp, angle, array\n'), ((151, 34, 151, 43), 'numpy.angle', 'angle', ({(151, 40, 151, 42): 'Z2'}, {}), '(Z2)', False, 'from numpy import pi, exp, angle, array\n')] |
arshadzahangirchowdhury/TomoEncoders | tomo_encoders/tasks/void_mapping.py | 9c2b15fd515d864079f198546821faee5d78df17 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
from operator import mod
from tomo_encoders.misc.voxel_processing import modified_autocontrast, TimerGPU
from tomo_encoders.reconstruction.recon import recon_patches_3d
import cupy as cp
import numpy as np
from skimage.filters import threshold_otsu
from tomo_encoders import Grid
def get_values_cyl_mask(vol, mask_fac):
vol_shape = vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
return vol[cyl > 0]
def cylindrical_mask(out_vol, mask_fac, mask_val = 0):
vol_shape = out_vol.shape
assert vol_shape[1] == vol_shape[2], "must be a tomographic volume where shape y = shape x"
shape_yx = vol_shape[1]
shape_z = vol_shape[0]
rad = int(mask_fac*shape_yx/2)
pts = cp.arange(-int(shape_yx//2), int(cp.ceil(shape_yx//2)))
yy, xx = cp.meshgrid(pts, pts, indexing = 'ij')
circ = (cp.sqrt(yy**2 + xx**2) < rad).astype(cp.uint8) # inside is positive
circ = circ[cp.newaxis, ...]
cyl = cp.repeat(circ, shape_z, axis = 0)
out_vol[cyl == 0] = mask_val
return
def segment_otsu(vol, s = 0.05):
'''segment volume with otsu'''
timer = TimerGPU()
timer.tic()
tmp_values = vol[::4,::4,::4].get()
# rec_min_max = modified_autocontrast(tmp_values, s = s, normalize_sampling_factor=1)
thresh = cp.float32(threshold_otsu(tmp_values.reshape(-1)))
vol = (vol < thresh).astype(cp.uint8)
timer.toc("otsu thresholding")
return vol
def edge_map(Y):
'''
this algorithm was inspired by: https://github.com/tomochallenge/tomochallenge_utils/blob/master/foam_phantom_utils.py
'''
msk = cp.zeros_like(Y)
tmp = Y[:-1]!=Y[1:]
msk[:-1][tmp] = 1
msk[1:][tmp] = 1
tmp = Y[:,:-1]!=Y[:,1:]
msk[:,:-1][tmp] = 1
msk[:,1:][tmp] = 1
tmp = Y[:,:,:-1]!=Y[:,:,1:]
msk[:,:,:-1][tmp] = 1
msk[:,:,1:][tmp] = 1
return msk > 0
def guess_surface(V_bin, b, wd):
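    # (Descriptive comment added for clarity.) Split the binned binary volume
    # into cubic patches of width wd (wd//b voxels in the binned volume) and
    # separate them into three Grids: patches whose values vary (candidate
    # surface patches), patches that are all ones and patches that are all
    # zeros; every grid is rescaled back to full resolution by the factor b.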
# find patches on surface
wdb = int(wd//b)
p3d = Grid(V_bin.shape, width = wdb)
x = p3d.extract(V_bin)
is_surf = (np.std(x, axis = (1,2,3)) > 0.0)
is_ones = (np.sum(x, axis = (1,2,3))/(wdb**3) == 1)
is_zeros = (np.sum(x, axis = (1,2,3))/(wdb**3) == 0)
p3d = p3d.rescale(b)
p3d_surf = p3d.filter_by_condition(is_surf)
p3d_ones = p3d.filter_by_condition(is_ones)
p3d_zeros = p3d.filter_by_condition(is_zeros)
eff = len(p3d_surf)*(wd**3)/np.prod(p3d_surf.vol_shape)
print(f"\tSTAT: r value: {eff*100.0:.2f}")
return p3d_surf, p3d_ones, p3d_zeros
def process_patches(projs, theta, center, fe, p_surf, min_max, TIMEIT = False):
# SCHEME 1: integrate reconstruction and segmention (segments data on gpu itself)
# st_proc = cp.cuda.Event(); end_proc = cp.cuda.Event(); st_proc.record()
# x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
# apply_fbp = True, segmenter = fe, \
# segmenter_batch_size = 256)
# end_proc.record(); end_proc.synchronize(); t_surf = cp.cuda.get_elapsed_time(st_proc,end_proc)
# SCHEME 2: reconstruct and segment separately (copies rec data from gpu to cpu)
st_rec = cp.cuda.Event(); end_rec = cp.cuda.Event(); st_rec.record()
x_surf, p_surf = recon_patches_3d(projs, theta, center, p_surf, \
apply_fbp =True)
end_rec.record(); end_rec.synchronize(); t_rec = cp.cuda.get_elapsed_time(st_rec,end_rec)
st_seg = cp.cuda.Event(); end_seg = cp.cuda.Event(); st_seg.record()
x_surf = np.clip(x_surf, *min_max)
x_surf = fe.predict_patches("segmenter", x_surf[...,np.newaxis], 256, None, min_max = min_max)[...,0]
end_seg.record(); end_seg.synchronize(); t_seg = cp.cuda.get_elapsed_time(st_seg,end_seg)
print(f'\tTIME: local reconstruction - {t_rec/1000.0:.2f} secs')
print(f'\tTIME: local segmentation - {t_seg/1000.0:.2f} secs')
print(f'\tSTAT: total patches in neighborhood: {len(p_surf)}')
if TIMEIT:
return x_surf, p_surf, t_rec, t_seg
else:
return x_surf, p_surf
| [((25, 13, 25, 51), 'cupy.meshgrid', 'cp.meshgrid', (), '', True, 'import cupy as cp\n'), ((28, 10, 28, 44), 'cupy.repeat', 'cp.repeat', (), '', True, 'import cupy as cp\n'), ((42, 13, 42, 51), 'cupy.meshgrid', 'cp.meshgrid', (), '', True, 'import cupy as cp\n'), ((45, 10, 45, 44), 'cupy.repeat', 'cp.repeat', (), '', True, 'import cupy as cp\n'), ((53, 12, 53, 22), 'tomo_encoders.misc.voxel_processing.TimerGPU', 'TimerGPU', ({}, {}), '()', False, 'from tomo_encoders.misc.voxel_processing import modified_autocontrast, TimerGPU\n'), ((67, 10, 67, 26), 'cupy.zeros_like', 'cp.zeros_like', ({(67, 24, 67, 25): 'Y'}, {}), '(Y)', True, 'import cupy as cp\n'), ((83, 10, 83, 40), 'tomo_encoders.Grid', 'Grid', (), '', False, 'from tomo_encoders import Grid\n'), ((109, 13, 109, 28), 'cupy.cuda.Event', 'cp.cuda.Event', ({}, {}), '()', True, 'import cupy as cp\n'), ((109, 40, 109, 55), 'cupy.cuda.Event', 'cp.cuda.Event', ({}, {}), '()', True, 'import cupy as cp\n'), ((110, 21, 111, 54), 'tomo_encoders.reconstruction.recon.recon_patches_3d', 'recon_patches_3d', (), '', False, 'from tomo_encoders.reconstruction.recon import recon_patches_3d\n'), ((112, 53, 112, 93), 'cupy.cuda.get_elapsed_time', 'cp.cuda.get_elapsed_time', ({(112, 78, 112, 84): 'st_rec', (112, 85, 112, 92): 'end_rec'}, {}), '(st_rec, end_rec)', True, 'import cupy as cp\n'), ((113, 13, 113, 28), 'cupy.cuda.Event', 'cp.cuda.Event', ({}, {}), '()', True, 'import cupy as cp\n'), ((113, 40, 113, 55), 'cupy.cuda.Event', 'cp.cuda.Event', ({}, {}), '()', True, 'import cupy as cp\n'), ((115, 13, 115, 38), 'numpy.clip', 'np.clip', ({(115, 21, 115, 27): 'x_surf', (115, 29, 115, 37): '*min_max'}, {}), '(x_surf, *min_max)', True, 'import numpy as np\n'), ((117, 53, 117, 93), 'cupy.cuda.get_elapsed_time', 'cp.cuda.get_elapsed_time', ({(117, 78, 117, 84): 'st_seg', (117, 85, 117, 92): 'end_seg'}, {}), '(st_seg, end_seg)', True, 'import cupy as cp\n'), ((86, 15, 86, 40), 'numpy.std', 'np.std', (), '', True, 'import numpy as np\n'), ((94, 32, 94, 59), 'numpy.prod', 'np.prod', ({(94, 40, 94, 58): 'p3d_surf.vol_shape'}, {}), '(p3d_surf.vol_shape)', True, 'import numpy as np\n'), ((24, 43, 24, 63), 'cupy.ceil', 'cp.ceil', ({(24, 51, 24, 62): 'shape_yx // 2'}, {}), '(shape_yx // 2)', True, 'import cupy as cp\n'), ((41, 43, 41, 63), 'cupy.ceil', 'cp.ceil', ({(41, 51, 41, 62): 'shape_yx // 2'}, {}), '(shape_yx // 2)', True, 'import cupy as cp\n'), ((87, 15, 87, 40), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((88, 16, 88, 41), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((26, 12, 26, 34), 'cupy.sqrt', 'cp.sqrt', ({(26, 20, 26, 33): 'yy ** 2 + xx ** 2'}, {}), '(yy ** 2 + xx ** 2)', True, 'import cupy as cp\n'), ((43, 12, 43, 34), 'cupy.sqrt', 'cp.sqrt', ({(43, 20, 43, 33): 'yy ** 2 + xx ** 2'}, {}), '(yy ** 2 + xx ** 2)', True, 'import cupy as cp\n')] |
roundium/handypackages | handypackages/subscribe/migrations/0001_initial.py | b8a0e4952644144b31168f9a4ac8e743933d87c7 | # Generated by Django 2.2.1 on 2019-06-22 11:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SubscribeModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(db_index=True, max_length=255, unique=True, verbose_name='Email')),
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='Subscribe Time')),
],
options={
'verbose_name': 'Subscribe Email',
'verbose_name_plural': 'Subscribe Emails',
'abstract': False,
},
),
]
| [((17, 23, 17, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((18, 26, 18, 109), 'django.db.models.EmailField', 'models.EmailField', (), '', False, 'from django.db import migrations, models\n'), ((19, 32, 19, 102), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n')] |
lwh2015/TuShare | TuShare/view/sh_margins.py | f244e05e5cf208e18e6237d3b81f71f0d3c1394a | # -*- coding: UTF-8 -*-
import json
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
import tushare as ts
from .publiceClass import DateEncoder
@csrf_exempt
def sh_margins(request):
try:
        start = request.POST.get('start','')  # optional
        end = request.POST.get('end','')  # optional
data = ts.sh_margins(start,end)
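        # The column labels below are TuShare's Chinese names for the
        # Shanghai margin-trading summary; roughly: margin trading date,
        # financing balance today (CNY), financing buy-in amount today (CNY),
        # securities-lending (short) balance today, securities-lending
        # balance amount today (CNY), securities-lending sell volume today,
        # and total margin balance today (CNY).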
res = {'columns':[
'信用交易日期',
'本日融资余额(元)',
'本日融资买入额(元)',
'本日融券余量',
'本日融券余量金额(元)',
'本日融券卖出量',
'本日融资融券余额(元)'
],'data':json.loads(json.dumps(data.values,cls=DateEncoder))}
    except BaseException as e:
        return HttpResponse(str(e))
else:
return HttpResponse(json.dumps(res),content_type="application/json")
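# Request sketch (illustration only): POST to this view with optional form
# fields 'start' and 'end' (YYYY-MM-DD); the JSON response contains 'columns'
# (the Chinese labels above) and 'data' (rows from tushare.sh_margins).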
| [((14, 15, 14, 39), 'tushare.sh_margins', 'ts.sh_margins', ({(14, 29, 14, 34): 'start', (14, 35, 14, 38): 'end'}, {}), '(start, end)', True, 'import tushare as ts\n'), ((25, 15, 25, 42), 'django.http.HttpResponse', 'HttpResponse', ({(25, 28, 25, 41): 'BaseException'}, {}), '(BaseException)', False, 'from django.http import HttpResponse\n'), ((28, 28, 28, 43), 'json.dumps', 'json.dumps', ({(28, 39, 28, 42): 'res'}, {}), '(res)', False, 'import json\n'), ((23, 28, 23, 67), 'json.dumps', 'json.dumps', (), '', False, 'import json\n')] |
robertob45/learning-python | intermediate/classes/camera.py | 7407f7d9e513792150eb2b65ebc644b5f8632c56 | class Camera:
"""docstring for ."""
def __init__(self, brand, sensor, lens, battery):
self.brand = brand
self.sensor = sensor
self.lens = lens
self.battery = battery
def __str__(self):
return self.brand + ' ' + self.sensor + ' ' + self.lens + ' ' + self.battery
def focus(self):
print('Focusing using', self.lens, '...')
print('')
def frame(self):
print('Move until your subject is in the desired position')
print('.')
print('.')
print('.')
def flash(self, flash_use):
if flash_use == 's':
print('Shooting with flash...')
else:
print('Shooting without flash...')
print('')
def format(self, save_format):
if save_format == 'jpg':
print('Saving in: ' + save_format)
elif save_format == 'raw':
print('Saving in: ' + save_format)
else:
print('No valid format to save')
def take_picture(self, save_format, flash_use):
print('Say cheese!')
self.focus()
self.frame()
self.flash(flash_use)
self.format(save_format)
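# Usage sketch (illustrative values):
#
#     cam = Camera('Nikon', 'APS-C sensor', '50mm lens', 'EN-EL14 battery')
#     cam.take_picture('raw', 's')   # focus, frame, shoot with flash, save as raw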
| [] |
didindinn/database-as-a-service | dbaas/tsuru/tests/test_service_add.py | 747de31ff8546f7874ddd654af860e130afd17a0 | from mock import patch, MagicMock
from django.contrib.auth.models import User
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.utils.datastructures import MultiValueDictKeyError
from account.models import Role, Team, Organization
from physical.tests.factory import EnvironmentFactory, PlanFactory
from physical.models import Plan
class ValidationTestCase(TestCase):
"""HTTP test cases for the tsuru Service Add. This class focuses on
validations of POST
"""
USERNAME = "fake_user"
PASSWORD = "123456"
def setUp(self):
self.role = Role.objects.get_or_create(name="fake_role")[0]
self.organization = Organization.objects.get_or_create(
name='fake_organization'
)[0]
self.team = Team.objects.get_or_create(
name="fake_team", role=self.role,
organization=self.organization)[0]
self.superuser = User.objects.create_superuser(
self.USERNAME,
email="{}@admin.com".format(self.USERNAME),
password=self.PASSWORD
)
self.team.users.add(self.superuser)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.env = 'dev'
self.environment = EnvironmentFactory.create(name=self.env)
self.url = reverse('tsuru:service-add', args=(self.env,))
self.name = 'fake_database'
self.user = '{}@admin.com'.format(self.USERNAME)
self.description = 'fake desc'
self.plan = PlanFactory(name='fake_plan', provider=Plan.CLOUDSTACK)
self.plan.environments.add(self.environment)
self.plan_name = 'fake-plan-dev'
def tearDown(self):
self.client.logout()
def _assert_resp(self, resp, msg):
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, msg)
def test_name_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(self.url, {})
def test_user_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name}
)
def test_team_not_in_payload(self):
with self.assertRaises(MultiValueDictKeyError):
self.client.post(
self.url,
{'name': self.name, 'user': self.user}
)
def test_description_fail(self):
resp = self.client.post(
self.url,
{'name': self.name, 'user': self.user, 'team': self.team}
)
self._assert_resp(resp, '"A description must be provided."')
def test_name_fail(self):
resp = self.client.post(
self.url,
{
'name': '99invalid-name',
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"Your database name must match /^[a-z][a-z0-9_]+$/ ."'
)
@patch('tsuru.views.Database.objects.get', new=MagicMock())
def test_database_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"There is already a database called fake_database in dev."'
)
@patch(
'tsuru.views.database_name_evironment_constraint',
new=MagicMock(return_value=True)
)
def test_already_exist_database_with_name(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"fake_database already exists in env dev!"'
)
def test_user_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': 'another_user@not_found.com',
'description': self.description,
'team': self.team
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_team_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': 'another_user@not_found.com',
'description': self.description,
'team': 'team_not_found'
}
)
self._assert_resp(
resp,
'"User does not exist."'
)
def test_env_not_found(self):
self.url = self.url.replace(
'/{}/'.format(self.env),
'/env_not_found/'
)
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Environment does not exist."'
)
@patch(
'tsuru.views.Team.count_databases_in_use',
new=MagicMock(return_value=99)
)
def test_allocation_limit(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
('"The database alocation limit of 2 has been exceeded for the '
'selected team: fake_team"')
)
def test_plan_not_on_payload(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
def test_plan_not_found(self):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': 'not found'
}
)
self._assert_resp(
resp,
'"Plan was not found"'
)
@patch('notification.tasks.TaskRegister.create_task', new=MagicMock())
@patch('notification.tasks.create_database_with_retry')
def test_call_database_create(self, create_database_mock):
resp = self.client.post(
self.url,
{
'name': self.name,
'user': self.user,
'description': self.description,
'team': self.team.name,
'plan': self.plan_name
}
)
self.assertTrue(create_database_mock.called)
self.assertEqual(resp.status_code, 201)
| [((228, 5, 228, 59), 'mock.patch', 'patch', ({(228, 11, 228, 58): '"""notification.tasks.create_database_with_retry"""'}, {}), "('notification.tasks.create_database_with_retry')", False, 'from mock import patch, MagicMock\n'), ((36, 27, 36, 67), 'physical.tests.factory.EnvironmentFactory.create', 'EnvironmentFactory.create', (), '', False, 'from physical.tests.factory import EnvironmentFactory, PlanFactory\n'), ((37, 19, 37, 65), 'django.core.urlresolvers.reverse', 'reverse', (), '', False, 'from django.core.urlresolvers import reverse\n'), ((41, 20, 41, 75), 'physical.tests.factory.PlanFactory', 'PlanFactory', (), '', False, 'from physical.tests.factory import EnvironmentFactory, PlanFactory\n'), ((21, 20, 21, 64), 'account.models.Role.objects.get_or_create', 'Role.objects.get_or_create', (), '', False, 'from account.models import Role, Team, Organization\n'), ((22, 28, 24, 9), 'account.models.Organization.objects.get_or_create', 'Organization.objects.get_or_create', (), '', False, 'from account.models import Role, Team, Organization\n'), ((25, 20, 27, 43), 'account.models.Team.objects.get_or_create', 'Team.objects.get_or_create', (), '', False, 'from account.models import Role, Team, Organization\n'), ((92, 51, 92, 62), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import patch, MagicMock\n'), ((110, 12, 110, 40), 'mock.MagicMock', 'MagicMock', (), '', False, 'from mock import patch, MagicMock\n'), ((178, 12, 178, 38), 'mock.MagicMock', 'MagicMock', (), '', False, 'from mock import patch, MagicMock\n'), ((227, 62, 227, 73), 'mock.MagicMock', 'MagicMock', ({}, {}), '()', False, 'from mock import patch, MagicMock\n')] |
Muhammet-Yildiz/Ecommerce_Website-HepsiOrada | Main/migrations/0072_auto_20210506_0016.py | 91935014ccc37e0ea57c8cbd2c4891941dcbb917 | # Generated by Django 3.1.4 on 2021-05-05 21:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Main', '0071_auto_20210506_0004'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='chooseColor',
),
migrations.RemoveField(
model_name='product',
name='chooseSize',
),
]
| [((13, 8, 16, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations\n'), ((17, 8, 20, 9), 'django.db.migrations.RemoveField', 'migrations.RemoveField', (), '', False, 'from django.db import migrations\n')] |
zweed4u/dailycodingproblem | 1.py | 6e40eaad347e283f86a11adeff01c6426211a0be | #!/usr/bin/python3
"""
Good morning! Here's your coding interview problem for today.
This problem was recently asked by Google.
Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
Bonus: Can you do this in one pass?
"""
def func(l, k):
sums = []
for index, element in enumerate(l):
print(f'Current element: {element}')
if index == 0:
# first element - need another
print()
continue
for num in range(index):
print(f'Appending {l[index]} + {l[num]}')
sums.append(l[num] + l[index])
print()
print(sums)
return k in sums
print(func([10, 15, 3, 7], 17))
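# Bonus: the check can also be done in one pass by remembering the values seen
# so far. This variant is an illustrative sketch; `func_one_pass` is a new name
# and is not used by the solution above.
def func_one_pass(l, k):
    seen = set()
    for element in l:
        # Two numbers sum to k exactly when this element's complement was seen earlier.
        if k - element in seen:
            return True
        seen.add(element)
    return False
print(func_one_pass([10, 15, 3, 7], 17))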
| [] |
ow-gryphon/gryphon | gryphon/data/template_scaffolding/template/setup.py | 0b34f2f61a50af46b9d1ec1d3c15d53cf4055dd5 | import json
import setuptools
with open("template/README.md", "r") as fh:
long_description = fh.read()
with open('requirements.txt') as fr:
requirements = fr.read().strip().split('\n')
with open('metadata.json') as fr:
metadata = json.load(fr)
setuptools.setup(
name="", # Name of the repository
version="0.0.1",
author=metadata.get("author", ""),
author_email=metadata.get("author_email", ""),
description=metadata.get("description", ""),
long_description=long_description,
long_description_content_type="text/markdown",
url="", # Repository URL or externally maintained page
packages=setuptools.find_packages(),
python_requires='>=3.6',
install_requires=requirements,
)
| [((11, 15, 11, 28), 'json.load', 'json.load', ({(11, 25, 11, 27): 'fr'}, {}), '(fr)', False, 'import json\n'), ((22, 13, 22, 39), 'setuptools.find_packages', 'setuptools.find_packages', ({}, {}), '()', False, 'import setuptools\n')] |
Mhaiyang/iccv | train_base3.py | 04a8ee52c2323d7ff5cdf03c0be1466e8180d2eb | """
@Time : 201/21/19 10:41
@Author : TaylorMei
@Email : [email protected]
@Project : iccv
@File : train_base3.py
@Function:
"""
import datetime
import os
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.backends import cudnn
from torch.utils.data import DataLoader
from torchvision import transforms
from tensorboardX import SummaryWriter
from tqdm import tqdm
import joint_transforms
from config import msd_training_root
from config import backbone_path
from dataset import ImageFolder
from misc import AvgMeter, check_mkdir
from model.base3 import BASE3
import loss as L
cudnn.benchmark = True
device_ids = [2]
ckpt_path = './ckpt'
exp_name = 'BASE3'
args = {
'epoch_num': 100,
'train_batch_size': 14,
'last_epoch': 0,
'lr': 5e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': '',
'scale': 384,
'save_point': [60, 80, 90],
'add_graph': True,
'poly_train': True,
'optimizer': 'SGD'
}
# Path.
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
vis_path = os.path.join(ckpt_path, exp_name, 'log')
check_mkdir(vis_path)
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
writer = SummaryWriter(log_dir=vis_path, comment=exp_name)
# Transform Data.
joint_transform = joint_transforms.Compose([
joint_transforms.RandomRotate(),
joint_transforms.Resize((args['scale'], args['scale']))
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) # maybe can optimized.
])
target_transform = transforms.ToTensor()
# Prepare Data Set.
train_set = ImageFolder(msd_training_root, joint_transform, img_transform, target_transform)
print("Train set: {}".format(train_set.__len__()))
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True)
def main():
print(args)
print(exp_name)
net = BASE3(backbone_path).cuda(device_ids[0]).train()
if args['add_graph']:
writer.add_graph(net, input_to_model=torch.rand(
args['train_batch_size'], 3, args['scale'], args['scale']).cuda(device_ids[0]))
if args['optimizer'] == 'Adam':
print("Adam")
optimizer = optim.Adam([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
])
else:
print("SGD")
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': 1 * args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('Training Resumes From \'%s\'' % args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net = nn.DataParallel(net, device_ids=device_ids)
print("Using {} GPU(s) to Train.".format(len(device_ids)))
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
writer.close()
def train(net, optimizer):
curr_iter = 1
for epoch in range(args['last_epoch'] + 1, args['last_epoch'] + 1 + args['epoch_num']):
loss_4_record, loss_3_record, loss_2_record, loss_1_record, \
loss_f_record, loss_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
train_iterator = tqdm(train_loader, total=len(train_loader))
for data in train_iterator:
if args['poly_train']:
base_lr = args['lr'] * (1 - float(curr_iter) / (args['epoch_num'] * len(train_loader))) ** args[
'lr_decay']
optimizer.param_groups[0]['lr'] = 2 * base_lr
optimizer.param_groups[1]['lr'] = 1 * base_lr
inputs, labels = data
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda(device_ids[0])
labels = Variable(labels).cuda(device_ids[0])
optimizer.zero_grad()
predict_4, predict_3, predict_2, predict_1, predict_f = net(inputs)
loss_4 = L.lovasz_hinge(predict_4, labels)
loss_3 = L.lovasz_hinge(predict_3, labels)
loss_2 = L.lovasz_hinge(predict_2, labels)
loss_1 = L.lovasz_hinge(predict_1, labels)
loss_f = L.lovasz_hinge(predict_f, labels)
loss = loss_4 + loss_3 + loss_2 + loss_1 + loss_f
loss.backward()
optimizer.step()
loss_record.update(loss.data, batch_size)
loss_4_record.update(loss_4.data, batch_size)
loss_3_record.update(loss_3.data, batch_size)
loss_2_record.update(loss_2.data, batch_size)
loss_1_record.update(loss_1.data, batch_size)
loss_f_record.update(loss_f.data, batch_size)
if curr_iter % 50 == 0:
writer.add_scalar('loss', loss, curr_iter)
writer.add_scalar('loss_4', loss_4, curr_iter)
writer.add_scalar('loss_3', loss_3, curr_iter)
writer.add_scalar('loss_2', loss_2, curr_iter)
writer.add_scalar('loss_1', loss_1, curr_iter)
writer.add_scalar('loss_f', loss_f, curr_iter)
log = '[%3d], [%6d], [%.6f], [%.5f], [L4: %.5f], [L3: %.5f], [L2: %.5f], [L1: %.5f], [Lf: %.5f]' % \
(epoch, curr_iter, base_lr, loss_record.avg, loss_4_record.avg, loss_3_record.avg, loss_2_record.avg,
loss_1_record.avg, loss_f_record.avg)
train_iterator.set_description(log)
open(log_path, 'a').write(log + '\n')
curr_iter += 1
if epoch in args['save_point']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
net.cuda(device_ids[0])
if epoch >= args['epoch_num']:
net.cpu()
torch.save(net.module.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % epoch))
print("Optimization Have Done!")
return
if __name__ == '__main__':
main()
| [((57, 0, 57, 22), 'misc.check_mkdir', 'check_mkdir', ({(57, 12, 57, 21): 'ckpt_path'}, {}), '(ckpt_path)', False, 'from misc import AvgMeter, check_mkdir\n'), ((59, 11, 59, 51), 'os.path.join', 'os.path.join', ({(59, 24, 59, 33): 'ckpt_path', (59, 35, 59, 43): 'exp_name', (59, 45, 59, 50): '"""log"""'}, {}), "(ckpt_path, exp_name, 'log')", False, 'import os\n'), ((60, 0, 60, 21), 'misc.check_mkdir', 'check_mkdir', ({(60, 12, 60, 20): 'vis_path'}, {}), '(vis_path)', False, 'from misc import AvgMeter, check_mkdir\n'), ((62, 9, 62, 58), 'tensorboardX.SummaryWriter', 'SummaryWriter', (), '', False, 'from tensorboardX import SummaryWriter\n'), ((73, 19, 73, 40), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((76, 12, 76, 92), 'dataset.ImageFolder', 'ImageFolder', ({(76, 24, 76, 41): 'msd_training_root', (76, 43, 76, 58): 'joint_transform', (76, 60, 76, 73): 'img_transform', (76, 75, 76, 91): 'target_transform'}, {}), '(msd_training_root, joint_transform, img_transform, target_transform\n )', False, 'from dataset import ImageFolder\n'), ((78, 15, 78, 102), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((58, 12, 58, 45), 'os.path.join', 'os.path.join', ({(58, 25, 58, 34): 'ckpt_path', (58, 36, 58, 44): 'exp_name'}, {}), '(ckpt_path, exp_name)', False, 'import os\n'), ((111, 10, 111, 53), 'torch.nn.DataParallel', 'nn.DataParallel', (), '', False, 'from torch import nn\n'), ((66, 4, 66, 35), 'joint_transforms.RandomRotate', 'joint_transforms.RandomRotate', ({}, {}), '()', False, 'import joint_transforms\n'), ((67, 4, 67, 59), 'joint_transforms.Resize', 'joint_transforms.Resize', ({(67, 28, 67, 58): "(args['scale'], args['scale'])"}, {}), "((args['scale'], args['scale']))", False, 'import joint_transforms\n'), ((70, 4, 70, 25), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ({}, {}), '()', False, 'from torchvision import transforms\n'), ((71, 4, 71, 70), 'torchvision.transforms.Normalize', 'transforms.Normalize', ({(71, 25, 71, 46): '[0.485, 0.456, 0.406]', (71, 48, 71, 69): '[0.229, 0.224, 0.225]'}, {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])', False, 'from torchvision import transforms\n'), ((61, 49, 61, 72), 'datetime.datetime.now', 'datetime.datetime.now', ({}, {}), '()', False, 'import datetime\n'), ((124, 37, 124, 47), 'misc.AvgMeter', 'AvgMeter', ({}, {}), '()', False, 'from misc import AvgMeter, check_mkdir\n'), ((124, 49, 124, 59), 'misc.AvgMeter', 'AvgMeter', ({}, {}), '()', False, 'from misc import AvgMeter, check_mkdir\n'), ((124, 61, 124, 71), 'misc.AvgMeter', 'AvgMeter', ({}, {}), '()', False, 'from misc import AvgMeter, check_mkdir\n'), ((124, 73, 124, 83), 'misc.AvgMeter', 'AvgMeter', ({}, {}), '()', False, 'from misc import AvgMeter, check_mkdir\n'), ((124, 85, 124, 95), 'misc.AvgMeter', 'AvgMeter', ({}, {}), '()', False, 'from misc import AvgMeter, check_mkdir\n'), ((124, 97, 124, 107), 'misc.AvgMeter', 'AvgMeter', ({}, {}), '()', False, 'from misc import AvgMeter, check_mkdir\n'), ((143, 21, 143, 54), 'loss.lovasz_hinge', 'L.lovasz_hinge', ({(143, 36, 143, 45): 'predict_4', (143, 47, 143, 53): 'labels'}, {}), '(predict_4, labels)', True, 'import loss as L\n'), ((144, 21, 144, 54), 'loss.lovasz_hinge', 'L.lovasz_hinge', ({(144, 36, 144, 45): 'predict_3', (144, 47, 144, 53): 'labels'}, {}), '(predict_3, labels)', True, 'import loss as L\n'), ((145, 21, 145, 54), 'loss.lovasz_hinge', 'L.lovasz_hinge', ({(145, 36, 145, 45): 
'predict_2', (145, 47, 145, 53): 'labels'}, {}), '(predict_2, labels)', True, 'import loss as L\n'), ((146, 21, 146, 54), 'loss.lovasz_hinge', 'L.lovasz_hinge', ({(146, 36, 146, 45): 'predict_1', (146, 47, 146, 53): 'labels'}, {}), '(predict_1, labels)', True, 'import loss as L\n'), ((147, 21, 147, 54), 'loss.lovasz_hinge', 'L.lovasz_hinge', ({(147, 36, 147, 45): 'predict_f', (147, 47, 147, 53): 'labels'}, {}), '(predict_f, labels)', True, 'import loss as L\n'), ((109, 39, 109, 99), 'os.path.join', 'os.path.join', ({(109, 52, 109, 61): 'ckpt_path', (109, 63, 109, 71): 'exp_name', (109, 73, 109, 98): "(args['snapshot'] + '.pth')"}, {}), "(ckpt_path, exp_name, args['snapshot'] + '.pth')", False, 'import os\n'), ((180, 48, 180, 99), 'os.path.join', 'os.path.join', ({(180, 61, 180, 70): 'ckpt_path', (180, 72, 180, 80): 'exp_name', (180, 82, 180, 98): "('%d.pth' % epoch)"}, {}), "(ckpt_path, exp_name, '%d.pth' % epoch)", False, 'import os\n'), ((185, 48, 185, 99), 'os.path.join', 'os.path.join', ({(185, 61, 185, 70): 'ckpt_path', (185, 72, 185, 80): 'exp_name', (185, 82, 185, 98): "('%d.pth' % epoch)"}, {}), "(ckpt_path, exp_name, '%d.pth' % epoch)", False, 'import os\n'), ((85, 10, 85, 30), 'model.base3.BASE3', 'BASE3', ({(85, 16, 85, 29): 'backbone_path'}, {}), '(backbone_path)', False, 'from model.base3 import BASE3\n'), ((136, 21, 136, 37), 'torch.autograd.Variable', 'Variable', ({(136, 30, 136, 36): 'inputs'}, {}), '(inputs)', False, 'from torch.autograd import Variable\n'), ((137, 21, 137, 37), 'torch.autograd.Variable', 'Variable', ({(137, 30, 137, 36): 'labels'}, {}), '(labels)', False, 'from torch.autograd import Variable\n'), ((87, 45, 88, 70), 'torch.rand', 'torch.rand', ({(88, 12, 88, 36): "args['train_batch_size']", (88, 38, 88, 39): '(3)', (88, 41, 88, 54): "args['scale']", (88, 56, 88, 69): "args['scale']"}, {}), "(args['train_batch_size'], 3, args['scale'], args['scale'])", False, 'import torch\n')] |
uwase-diane/min_pitch | tests/test_comment.py | 514ab5da150244e900fd51b6563173a905ef4f29 | import unittest
from app.models import Comment, Pitch
from app import db
class TestPitchComment(unittest.TestCase):
def setUp(self):
self.new_pitch = Pitch(post = "doit", category='Quotes')
self.new_comment = Comment(comment = "good comment", pitch=self.new_pitch)
def test_instance(self):
self.assertTrue(isinstance(self.new_comment,Comment))
def test_check_instance_variables(self):
self.assertEquals(self.new_comment.comment,"good comment")
self.assertEquals(self.new_comment.pitch,self.new_pitch, 'do it') | [((8, 25, 8, 64), 'app.models.Pitch', 'Pitch', (), '', False, 'from app.models import Comment, Pitch\n'), ((9, 27, 9, 82), 'app.models.Comment', 'Comment', (), '', False, 'from app.models import Comment, Pitch\n')] |
itteamforslp/safelife_project | teacher/views.py | 53af23dec0d19acf7227a43a16d7aedad443e90d | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.template import loader
from django.db import connection
from django.http import HttpResponseRedirect
import datetime
from django.http import JsonResponse
from administrator.models import Course, CourseTeacher, CourseStudent, Student
from django.core.exceptions import PermissionDenied
def teacher_only(function):
#"""Limit view to teacher only."""
def _inner(request, *args, **kwargs):
if not request.user.is_staff == False | request.user.is_superuser:
raise PermissionDenied
return function(request, *args, **kwargs)
return _inner
@login_required(login_url = '/users')
@teacher_only
def home(request):
current_user = request.user.id
teacher_current_courses = Course.objects.select_related().raw('SELECT * '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id AND C.is_complete = 0 ', [current_user])
currentdate = datetime.datetime.today().strftime('%Y-%m-%d')
with connection.cursor() as cursor:
cursor.execute('SELECT CL.course_id, CL.date '
'FROM classes as CL, course_teachers as CT '
'WHERE CT.teachers_id = %s AND CL.date >= %s '
'AND CT.course_id = CL.course_id '
'GROUP BY CL.course_id ', [current_user, currentdate])
next_class_date = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT CS.course_id, COUNT(CS.students_id) '
'FROM course_teachers as CT, course_students as CS '
'WHERE CT.teachers_id = %s AND CT.course_id = CS.course_id '
'GROUP BY CS.course_id ', [current_user])
teacher_student_count = cursor.fetchall()
with connection.cursor() as cursor:
cursor.execute('SELECT C.course_id, C.notes '
'FROM course_teachers as CT, courses as C '
'WHERE CT.teachers_id = %s AND C.course_id = CT.course_id '
'GROUP BY CT.course_id ', [current_user])
teacher_course_notes = cursor.fetchall()
template = loader.get_template('teacher/dashboard.html')
context = {
'teacher_current_courses': teacher_current_courses,
'teacher_student_count': teacher_student_count,
'next_class_date': next_class_date,
'teacher_course_notes': teacher_course_notes
}
# Render the template to the user
return HttpResponse(template.render(context, request))
@csrf_exempt
def update_course_notes(request):
# Get the student name that was passed from the web page
courseNotes = request.POST.get('courseNotes')
courseId = request.POST.get('courseId')
# Create a cursor to execute raw SQL queries.
with connection.cursor() as cursor:
cursor.execute('UPDATE courses '
'SET notes = %s '
'WHERE course_id = %s', [courseNotes, courseId])
    # Render the response to the user; a Django view must return an HttpResponse
    return HttpResponse('')
| [((22, 1, 22, 37), 'django.contrib.auth.decorators.login_required', 'login_required', (), '', False, 'from django.contrib.auth.decorators import login_required\n'), ((56, 19, 56, 64), 'django.template.loader.get_template', 'loader.get_template', ({(56, 39, 56, 63): '"""teacher/dashboard.html"""'}, {}), "('teacher/dashboard.html')", False, 'from django.template import loader\n'), ((32, 13, 32, 32), 'django.db.connection.cursor', 'connection.cursor', ({}, {}), '()', False, 'from django.db import connection\n'), ((41, 13, 41, 32), 'django.db.connection.cursor', 'connection.cursor', ({}, {}), '()', False, 'from django.db import connection\n'), ((48, 13, 48, 32), 'django.db.connection.cursor', 'connection.cursor', ({}, {}), '()', False, 'from django.db import connection\n'), ((73, 9, 73, 28), 'django.db.connection.cursor', 'connection.cursor', ({}, {}), '()', False, 'from django.db import connection\n'), ((26, 34, 26, 65), 'administrator.models.Course.objects.select_related', 'Course.objects.select_related', ({}, {}), '()', False, 'from administrator.models import Course, CourseTeacher, CourseStudent, Student\n'), ((30, 22, 30, 47), 'datetime.datetime.today', 'datetime.datetime.today', ({}, {}), '()', False, 'import datetime\n')] |
botstory/bot-story | botstory/middlewares/text/text_test.py | 9c5b2fc7f7a14dbd467d70f60d5ba855ef89dac3 | import logging
import pytest
import re
from . import text
from ... import matchers
from ...utils import answer, SimpleTrigger
logger = logging.getLogger(__name__)
@pytest.mark.asyncio
async def test_should_run_story_on_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_not_run_story_on_non_equal_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on('hi there!')
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('buy!')
assert not trigger.is_triggered
@pytest.mark.asyncio
async def test_should_catch_any_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.pure_text('hi there!')
assert trigger.is_triggered
@pytest.mark.asyncio
async def test_should_ignore_any_non_text_message():
trigger = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Any())
def one_story():
@story.part()
def then(ctx):
trigger.passed()
await talk.location('some where')
assert not trigger.is_triggered
def test_serialize_text_any():
m_old = text.Any()
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Any)
@pytest.mark.asyncio
async def test_should_catch_equal_text_message():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Equal('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.Equal('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('see you!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_equal_handle_should_create_right_type():
assert isinstance(text.Equal.handle(''), text.Equal)
def test_serialize_text_equal():
m_old = text.Equal('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Equal)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_equal_text_message_case_in_sensitive():
trigger_hi_there = SimpleTrigger()
trigger_see_you = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.EqualCaseIgnore('hi there!'))
def first_story():
@story.part()
def then(ctx):
trigger_hi_there.passed()
@story.on(text.EqualCaseIgnore('see you!'))
def second_story():
@story.part()
def then(ctx):
trigger_see_you.passed()
await talk.pure_text('See You!')
assert not trigger_hi_there.is_triggered
assert trigger_see_you.is_triggered
def test_serialize_text_equal_case_ignore():
m_old = text.EqualCaseIgnore('hats off')
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.EqualCaseIgnore)
assert m_new.test_string == 'hats off'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex():
trigger_buy = SimpleTrigger()
trigger_sell = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('buy (.*)btc'))
def one_story():
@story.part()
def then(ctx):
trigger_buy.receive(text.get_text(ctx)['matches'][0])
@story.on(text.Match('sell (.*)btc'))
def another_story():
@story.part()
def then(ctx):
trigger_sell.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('buy 700btc')
await talk.pure_text('sell 600btc')
assert trigger_buy.result() == '700'
assert trigger_sell.result() == '600'
@pytest.mark.asyncio
async def test_should_catch_text_message_that_match_regex_with_flags():
trigger_destination = SimpleTrigger()
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
logger.debug('ctx')
logger.debug(ctx)
trigger_destination.receive(text.get_text(ctx)['matches'][0])
await talk.pure_text('Going to Pripyat')
assert trigger_destination.result() == 'Pripyat'
@pytest.mark.asyncio
async def test_should_not_fail_on_empty_message():
with answer.Talk() as talk:
story = talk.story
@story.on(text.Match('going to (.*)', re.IGNORECASE))
def one_story():
@story.part()
def then(ctx):
pass
await talk.ask(None)
def test_serialize_text_match():
m_old = text.Match('hello (.*)', re.IGNORECASE)
m_new = matchers.deserialize(matchers.serialize(m_old))
assert isinstance(m_new, text.Match)
assert m_new.matcher.match('Hello Piter!')
def test_text_qual_should_handle_text():
assert isinstance(matchers.get_validator('just pure text'), text.Equal)
| [((8, 9, 8, 36), 'logging.getLogger', 'logging.getLogger', ({(8, 27, 8, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n')] |
xqt/pwb | pywikibot/site/_datasite.py | 9a4fe27138f32952e533256195849d05855df0b0 | """Objects representing API interface to Wikibase site."""
#
# (C) Pywikibot team, 2012-2022
#
# Distributed under the terms of the MIT license.
#
import datetime
import json
import uuid
from contextlib import suppress
from typing import Optional
from warnings import warn
import pywikibot
from pywikibot.data import api
from pywikibot.exceptions import (
APIError,
EntityTypeUnknownError,
IsRedirectPageError,
NoPageError,
NoWikibaseEntityError,
)
from pywikibot.site._apisite import APISite
from pywikibot.site._decorators import need_extension, need_right, need_version
from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args
__all__ = ('DataSite', )
class DataSite(APISite):
"""Wikibase data capable site."""
def __init__(self, *args, **kwargs) -> None:
"""Initializer."""
super().__init__(*args, **kwargs)
self._item_namespace = None
self._property_namespace = None
self._type_to_class = {
'item': pywikibot.ItemPage,
'property': pywikibot.PropertyPage,
'mediainfo': pywikibot.MediaInfo,
'lexeme': pywikibot.LexemePage,
'form': pywikibot.LexemeForm,
'sense': pywikibot.LexemeSense,
}
def _cache_entity_namespaces(self) -> None:
"""Find namespaces for each known wikibase entity type."""
self._entity_namespaces = {}
for entity_type in self._type_to_class:
for namespace in self.namespaces.values():
if not hasattr(namespace, 'defaultcontentmodel'):
continue
content_model = namespace.defaultcontentmodel
if content_model == ('wikibase-' + entity_type):
self._entity_namespaces[entity_type] = namespace
break
def get_namespace_for_entity_type(self, entity_type):
"""
Return namespace for given entity type.
:return: corresponding namespace
:rtype: Namespace
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
if entity_type in self._entity_namespaces:
return self._entity_namespaces[entity_type]
raise EntityTypeUnknownError(
'{!r} does not support entity type "{}" '
"or it doesn't have its own namespace"
.format(self, entity_type))
@property
def item_namespace(self):
"""
Return namespace for items.
:return: item namespace
:rtype: Namespace
"""
if self._item_namespace is None:
self._item_namespace = self.get_namespace_for_entity_type('item')
return self._item_namespace
@property
def property_namespace(self):
"""
Return namespace for properties.
:return: property namespace
:rtype: Namespace
"""
if self._property_namespace is None:
self._property_namespace = self.get_namespace_for_entity_type(
'property')
return self._property_namespace
def get_entity_for_entity_id(self, entity_id):
"""
Return a new instance for given entity id.
:raises pywikibot.exceptions.NoWikibaseEntityError: there is no entity
with the id
:return: a WikibaseEntity subclass
:rtype: WikibaseEntity
"""
for cls in self._type_to_class.values():
if cls.is_valid_id(entity_id):
return cls(self, entity_id)
entity = pywikibot.page.WikibaseEntity(self, entity_id)
raise NoWikibaseEntityError(entity)
@property
@need_version('1.28-wmf.3')
def sparql_endpoint(self):
"""
Return the sparql endpoint url, if any has been set.
:return: sparql endpoint url
:rtype: str|None
"""
return self.siteinfo['general'].get('wikibase-sparql')
@property
@need_version('1.28-wmf.23')
def concept_base_uri(self):
"""
Return the base uri for concepts/entities.
:return: concept base uri
:rtype: str
"""
return self.siteinfo['general']['wikibase-conceptbaseuri']
def geo_shape_repository(self):
"""Return Site object for the geo-shapes repository e.g. commons."""
url = self.siteinfo['general'].get('wikibase-geoshapestoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def tabular_data_repository(self):
"""Return Site object for the tabular-datas repository e.g. commons."""
url = self.siteinfo['general'].get(
'wikibase-tabulardatastoragebaseurl')
if url:
return pywikibot.Site(url=url, user=self.username())
return None
def loadcontent(self, identification, *props):
"""
Fetch the current content of a Wikibase item.
This is called loadcontent since
wbgetentities does not support fetching old
revisions. Eventually this will get replaced by
an actual loadrevisions.
:param identification: Parameters used to identify the page(s)
:type identification: dict
:param props: the optional properties to fetch.
"""
params = merge_unique_dicts(identification, action='wbgetentities',
# TODO: When props is empty it results in
# an empty string ('&props=') but it should
# result in a missing entry.
props=props if props else False)
req = self.simple_request(**params)
data = req.submit()
if 'success' not in data:
raise APIError(data['errors'], '')
return data['entities']
def preload_entities(self, pagelist, groupsize: int = 50):
"""
Yield subclasses of WikibaseEntity's with content prefilled.
Note that pages will be iterated in a different order
than in the underlying pagelist.
:param pagelist: an iterable that yields either WikibaseEntity objects,
or Page objects linked to an ItemPage.
:param groupsize: how many pages to query at a time
"""
if not hasattr(self, '_entity_namespaces'):
self._cache_entity_namespaces()
for sublist in itergroup(pagelist, groupsize):
req = {'ids': [], 'titles': [], 'sites': []}
for p in sublist:
if isinstance(p, pywikibot.page.WikibaseEntity):
ident = p._defined_by()
for key in ident:
req[key].append(ident[key])
else:
if p.site == self and p.namespace() in (
self._entity_namespaces.values()):
req['ids'].append(p.title(with_ns=False))
else:
assert p.site.has_data_repository, \
'Site must have a data repository'
req['sites'].append(p.site.dbName())
req['titles'].append(p._link._text)
req = self.simple_request(action='wbgetentities', **req)
data = req.submit()
for entity in data['entities']:
if 'missing' in data['entities'][entity]:
continue
cls = self._type_to_class[data['entities'][entity]['type']]
page = cls(self, entity)
# No api call is made because item._content is given
page._content = data['entities'][entity]
with suppress(IsRedirectPageError):
page.get() # cannot provide get_redirect=True (T145971)
yield page
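    # Illustrative use of preload_entities (a sketch: `repo` is assumed to be a
    # DataSite and `items` an iterable of ItemPage objects):
    #
    #     for entity in repo.preload_entities(items, groupsize=50):
    #         print(entity.getID())
    #
    # Entities are yielded in API response order, not in the order of `items`.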
def getPropertyType(self, prop):
"""
Obtain the type of a property.
This is used specifically because we can cache
the value for a much longer time (near infinite).
"""
params = {'action': 'wbgetentities', 'ids': prop.getID(),
'props': 'datatype'}
expiry = datetime.timedelta(days=365 * 100)
# Store it for 100 years
req = self._request(expiry=expiry, parameters=params)
data = req.submit()
# the IDs returned from the API can be upper or lowercase, depending
# on the version. See bug T55894 for more information.
try:
dtype = data['entities'][prop.getID()]['datatype']
except KeyError:
dtype = data['entities'][prop.getID().lower()]['datatype']
return dtype
@need_right('edit')
def editEntity(self, entity, data, bot: bool = True, **kwargs):
"""
Edit entity.
Note: This method is unable to create entities other than 'item'
if dict with API parameters was passed to 'entity' parameter.
:param entity: Page to edit, or dict with API parameters
to use for entity identification
:type entity: WikibaseEntity or dict
:param data: data updates
:type data: dict
:param bot: Whether to mark the edit as a bot edit
:return: New entity data
:rtype: dict
"""
# this changes the reference to a new object
data = dict(data)
if isinstance(entity, pywikibot.page.WikibaseEntity):
params = entity._defined_by(singular=True)
if 'id' in params and params['id'] == '-1':
del params['id']
if not params:
params['new'] = entity.entity_type
data_for_new_entity = entity.get_data_for_new_entity()
data.update(data_for_new_entity)
else:
if 'id' in entity and entity['id'] == '-1':
del entity['id']
params = dict(entity)
if not params: # If no identification was provided
params['new'] = 'item'
params['action'] = 'wbeditentity'
if bot:
params['bot'] = 1
if 'baserevid' in kwargs and kwargs['baserevid']:
params['baserevid'] = kwargs['baserevid']
params['token'] = self.tokens['edit']
for arg in kwargs:
if arg in ['clear', 'summary']:
params[arg] = kwargs[arg]
elif arg != 'baserevid':
warn('Unknown wbeditentity parameter {} ignored'.format(arg),
UserWarning, 2)
params['data'] = json.dumps(data)
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def addClaim(self, entity, claim, bot: bool = True, summary=None) -> None:
"""
Add a claim.
:param entity: Entity to modify
:type entity: WikibaseEntity
:param claim: Claim to be added
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
claim.snak = entity.getID() + '$' + str(uuid.uuid4())
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'baserevid': entity.latest_revision_id,
'summary': summary,
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
# Update the item
if claim.getID() in entity.claims:
entity.claims[claim.getID()].append(claim)
else:
entity.claims[claim.getID()] = [claim]
entity.latest_revision_id = data['pageinfo']['lastrevid']
@need_right('edit')
def changeClaimTarget(self, claim, snaktype: str = 'value',
bot: bool = True, summary=None):
"""
Set the claim target to the value of the provided claim target.
:param claim: The source of the claim target value
:type claim: pywikibot.Claim
:param snaktype: An optional snaktype ('value', 'novalue' or
'somevalue'). Default: 'value'
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaimvalue', 'claim': claim.snak,
'snaktype': snaktype, 'summary': summary, 'bot': bot,
'token': self.tokens['edit']}
if snaktype == 'value':
params['value'] = json.dumps(claim._formatValue())
params['baserevid'] = claim.on_item.latest_revision_id
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def save_claim(self, claim, summary=None, bot: bool = True):
"""
Save the whole claim to the wikibase site.
:param claim: The claim to save
:type claim: pywikibot.Claim
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
:type summary: str
"""
if claim.isReference or claim.isQualifier:
raise NotImplementedError
if not claim.snak:
# We need to already have the snak value
raise NoPageError(claim)
params = {'action': 'wbsetclaim',
'claim': json.dumps(claim.toJSON()),
'token': self.tokens['edit'],
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
}
req = self.simple_request(**params)
data = req.submit()
claim.on_item.latest_revision_id = data['pageinfo']['lastrevid']
return data
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editSource(self, claim, source,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a source.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the source to
:type claim: pywikibot.Claim
:param source: A Claim object to be used as a source
:type source: pywikibot.Claim
:param new: Whether to create a new one if the "source" already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a source.')
params = {'action': 'wbsetreference', 'statement': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot, 'token': self.tokens['edit']}
# build up the snak
if isinstance(source, list):
sources = source
else:
sources = [source]
snak = {}
for sourceclaim in sources:
datavalue = sourceclaim._formatDataValue()
valuesnaks = snak.get(sourceclaim.getID(), [])
valuesnaks.append({
'snaktype': 'value',
'property': sourceclaim.getID(),
'datavalue': datavalue,
})
snak[sourceclaim.getID()] = valuesnaks
# set the hash if the source should be changed.
# if present, all claims of one source have the same hash
if not new and hasattr(sourceclaim, 'hash'):
params['reference'] = sourceclaim.hash
params['snaks'] = json.dumps(snak)
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def editQualifier(self, claim, qualifier,
new: bool = False,
bot: bool = True,
summary: Optional[str] = None):
"""Create/Edit a qualifier.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to add the qualifier to
:type claim: pywikibot.Claim
:param qualifier: A Claim object to be used as a qualifier
:type qualifier: pywikibot.Claim
:param new: Whether to create a new one if the "qualifier"
already exists
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
if claim.isReference or claim.isQualifier:
raise ValueError('The claim cannot have a qualifier.')
params = {'action': 'wbsetqualifier', 'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot}
if (not new and hasattr(qualifier, 'hash')
and qualifier.hash is not None):
params['snakhash'] = qualifier.hash
params['token'] = self.tokens['edit']
# build up the snak
if qualifier.getSnakType() == 'value':
params['value'] = json.dumps(qualifier._formatValue())
params['snaktype'] = qualifier.getSnakType()
params['property'] = qualifier.getID()
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeClaims(self, claims,
bot: bool = True,
summary: Optional[str] = None):
"""Remove claims.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claims: Claims to be removed
:type claims: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:type bot: bool
:param summary: Edit summary
:type summary: str
"""
# Check on_item for all additional claims
items = {claim.on_item for claim in claims if claim.on_item}
assert len(items) == 1
baserevid = items.pop().latest_revision_id
params = {
'action': 'wbremoveclaims', 'baserevid': baserevid,
'summary': summary,
'bot': bot,
'claim': '|'.join(claim.snak for claim in claims),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def removeSources(self, claim, sources,
bot: bool = True,
summary: Optional[str] = None):
"""Remove sources.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the sources from
:type claim: pywikibot.Claim
:param sources: A list of Claim objects that are sources
:type sources: list
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovereferences',
'baserevid': claim.on_item.latest_revision_id,
'summary': summary, 'bot': bot,
'statement': claim.snak,
'references': '|'.join(source.hash for source in sources),
'token': self.tokens['edit'],
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
@remove_last_args(['baserevid']) # since 7.0.0
def remove_qualifiers(self, claim, qualifiers,
bot: bool = True,
summary: Optional[str] = None):
"""Remove qualifiers.
.. versionchanged:: 7.0
deprecated `baserevid` parameter was removed
:param claim: A Claim object to remove the qualifier from
:type claim: pywikibot.Claim
:param qualifiers: Claim objects currently used as a qualifiers
:type qualifiers: List[pywikibot.Claim]
:param bot: Whether to mark the edit as a bot edit
:param summary: Edit summary
"""
params = {
'action': 'wbremovequalifiers',
'claim': claim.snak,
'baserevid': claim.on_item.latest_revision_id,
'summary': summary,
'bot': bot,
'qualifiers': [qualifier.hash for qualifier in qualifiers],
'token': self.tokens['edit']
}
req = self.simple_request(**params)
return req.submit()
@need_right('edit')
def linkTitles(self, page1, page2, bot: bool = True):
"""
Link two pages together.
:param page1: First page to link
:type page1: pywikibot.Page
:param page2: Second page to link
:type page2: pywikibot.Page
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wblinktitles',
'tosite': page1.site.dbName(),
'totitle': page1.title(),
'fromsite': page2.site.dbName(),
'fromtitle': page2.title(),
'token': self.tokens['edit']
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
@need_right('item-merge')
def mergeItems(self, from_item, to_item, ignore_conflicts=None,
summary=None, bot: bool = True):
"""
Merge two items together.
:param from_item: Item to merge from
:type from_item: pywikibot.ItemPage
:param to_item: Item to merge into
:type to_item: pywikibot.ItemPage
:param ignore_conflicts: Which type of conflicts
('description', 'sitelink', and 'statement')
should be ignored
:type ignore_conflicts: list of str
:param summary: Edit summary
:type summary: str
:param bot: Whether to mark the edit as a bot edit
:return: dict API output
:rtype: dict
"""
params = {
'action': 'wbmergeitems',
'fromid': from_item.getID(),
'toid': to_item.getID(),
'ignoreconflicts': ignore_conflicts,
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
return req.submit()
@need_right('item-merge')
@need_extension('WikibaseLexeme')
def mergeLexemes(self, from_lexeme, to_lexeme, summary=None, *,
bot: bool = True) -> dict:
"""
Merge two lexemes together.
:param from_lexeme: Lexeme to merge from
:type from_lexeme: pywikibot.LexemePage
:param to_lexeme: Lexeme to merge into
:type to_lexeme: pywikibot.LexemePage
:param summary: Edit summary
:type summary: str
:keyword bot: Whether to mark the edit as a bot edit
:return: dict API output
"""
params = {
'action': 'wblmergelexemes',
'source': from_lexeme.getID(),
'target': to_lexeme.getID(),
'token': self.tokens['edit'],
'summary': summary,
}
if bot:
params['bot'] = 1
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('item-redirect')
def set_redirect_target(self, from_item, to_item, bot: bool = True):
"""
Make a redirect to another item.
:param to_item: title of target item.
:type to_item: pywikibot.ItemPage
:param from_item: Title of the item to be redirected.
:type from_item: pywikibot.ItemPage
:param bot: Whether to mark the edit as a bot edit
"""
params = {
'action': 'wbcreateredirect',
'from': from_item.getID(),
'to': to_item.getID(),
'token': self.tokens['edit'],
'bot': bot,
}
req = self.simple_request(**params)
return req.submit()
def search_entities(self, search: str, language: str,
total: Optional[int] = None, **kwargs):
"""
Search for pages or properties that contain the given text.
:param search: Text to find.
:param language: Language to search in.
:param total: Maximum number of pages to retrieve in total, or
None in case of no limit.
:return: 'search' list from API output.
:rtype: Generator
"""
lang_codes = self._paraminfo.parameter('wbsearchentities',
'language')['type']
if language not in lang_codes:
raise ValueError('Data site used does not support provided '
'language.')
if 'site' in kwargs:
if kwargs['site'].sitename != self.sitename:
raise ValueError('The site given in the kwargs is different.')
warn('search_entities should not get a site via kwargs.',
UserWarning, 2)
del kwargs['site']
parameters = dict(search=search, language=language, **kwargs)
gen = self._generator(api.APIGenerator,
type_arg='wbsearchentities',
data_name='search',
total=total, parameters=parameters)
return gen
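    # Illustrative call (a sketch: `repo` is assumed to be a Wikidata data repository):
    #
    #     for match in repo.search_entities('Douglas Adams', 'en', total=5):
    #         print(match['id'])
    #
    # Each yielded element is one entry of the API's 'search' list.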
@need_right('edit')
def _wbset_action(self, itemdef, action: str, action_data,
**kwargs) -> dict:
"""
Execute wbset{action} on a Wikibase entity.
Supported actions are:
wbsetaliases, wbsetdescription, wbsetlabel and wbsetsitelink
:param itemdef: Entity to modify or create
:type itemdef: str, WikibaseEntity or Page connected to such item
:param action: wbset{action} to perform:
'wbsetaliases', 'wbsetdescription', 'wbsetlabel', 'wbsetsitelink'
:param action_data: data to be used in API request, see API help
:type action_data: SiteLink or dict
wbsetaliases:
dict shall have the following structure:
{'language': value (str),
'add': list of language codes (str),
'remove': list of language codes (str),
'set' list of language codes (str)
}
'add' and 'remove' are alternative to 'set'
wbsetdescription and wbsetlabel:
dict shall have keys 'language', 'value'
wbsetsitelink:
dict shall have keys 'linksite', 'linktitle' and
optionally 'badges'
:keyword bot: Whether to mark the edit as a bot edit, default is True
:type bot: bool
:keyword tags: Change tags to apply with the edit
:type tags: list of str
:return: query result
:raises AssertionError, TypeError
"""
def format_sitelink(sitelink):
"""Convert SiteLink to a dict accepted by wbsetsitelink API."""
if isinstance(sitelink, pywikibot.page.SiteLink):
_dict = {
'linksite': sitelink._sitekey,
'linktitle': sitelink._rawtitle,
'badges': '|'.join([b.title() for b in sitelink.badges]),
}
else:
_dict = sitelink
return _dict
def prepare_data(action, data):
"""Prepare data as expected by API."""
if action == 'wbsetaliases':
res = data
keys = set(res)
assert keys < {'language', 'add', 'remove', 'set'}
assert 'language' in keys
assert ({'add', 'remove', 'set'} & keys)
assert ({'add', 'set'} >= keys)
assert ({'remove', 'set'} >= keys)
elif action in ('wbsetlabel', 'wbsetdescription'):
res = data
keys = set(res)
assert keys == {'language', 'value'}
elif action == 'wbsetsitelink':
res = format_sitelink(data)
keys = set(res)
assert keys >= {'linksite'}
assert keys <= {'linksite', 'linktitle', 'badges'}
else:
raise ValueError('Something has gone wrong ...')
return res
# Supported actions
assert action in ('wbsetaliases', 'wbsetdescription',
'wbsetlabel', 'wbsetsitelink'), \
'action {} not supported.'.format(action)
# prefer ID over (site, title)
if isinstance(itemdef, str):
itemdef = self.get_entity_for_entity_id(itemdef)
elif isinstance(itemdef, pywikibot.Page):
itemdef = pywikibot.ItemPage.fromPage(itemdef, lazy_load=True)
elif not isinstance(itemdef, pywikibot.page.WikibaseEntity):
raise TypeError('itemdef shall be str, WikibaseEntity or Page')
params = itemdef._defined_by(singular=True)
# TODO: support 'new'
baserevid = kwargs.pop(
'baserevid',
itemdef.latest_revision_id if 'id' in params else 0
)
params.update(
{'baserevid': baserevid,
'action': action,
'token': self.tokens['edit'],
'bot': kwargs.pop('bot', True),
})
params.update(prepare_data(action, action_data))
for arg in kwargs:
if arg in ['summary', 'tags']:
params[arg] = kwargs[arg]
else:
warn('Unknown parameter {} for action {}, ignored'
.format(arg, action), UserWarning, 2)
req = self.simple_request(**params)
data = req.submit()
return data
def wbsetaliases(self, itemdef, aliases, **kwargs):
"""
Set aliases for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetaliases', aliases, **kwargs)
def wbsetdescription(self, itemdef, description, **kwargs):
"""
Set description for a single Wikibase entity.
See self._wbset_action()
"""
return self._wbset_action(itemdef, 'wbsetdescription', description,
**kwargs)
def wbsetlabel(self, itemdef, label, **kwargs):
"""
Set label for a single Wikibase entity.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetlabel', label, **kwargs)
def wbsetsitelink(self, itemdef, sitelink, **kwargs):
"""
Set, remove or modify a sitelink on a Wikibase item.
See self._wbset_action() for parameters
"""
return self._wbset_action(itemdef, 'wbsetsitelink', sitelink, **kwargs)
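    # Illustrative use of the wbset* helpers (a sketch: the repository, the item
    # id and the values are assumptions; the dict shapes follow _wbset_action):
    #
    #     repo = pywikibot.Site('wikidata', 'wikidata').data_repository()
    #     repo.wbsetlabel('Q4115189', {'language': 'en', 'value': 'Sandbox'})
    #     repo.wbsetdescription('Q4115189', {'language': 'en', 'value': 'A test item'})
    #     repo.wbsetsitelink('Q4115189', {'linksite': 'enwiki',
    #                                     'linktitle': 'Wikipedia:Sandbox'})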
@need_right('edit')
@need_extension('WikibaseLexeme')
def add_form(self, lexeme, form, *, bot: bool = True,
baserevid=None) -> dict:
"""
Add a form.
:param lexeme: Lexeme to modify
:type lexeme: pywikibot.LexemePage
:param form: Form to be added
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wbladdform',
'lexemeId': lexeme.getID(),
'data': json.dumps(form.toJSON()),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def remove_form(self, form, *, bot: bool = True, baserevid=None) -> dict:
"""
Remove a form.
:param form: Form to be removed
:type form: pywikibot.LexemeForm
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
"""
params = {
'action': 'wblremoveform',
'id': form.getID(),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
@need_right('edit')
@need_extension('WikibaseLexeme')
def edit_form_elements(self, form, data, *, bot: bool = True,
baserevid=None) -> dict:
"""
Edit lexeme form elements.
:param form: Form
:type form: pywikibot.LexemeForm
:param data: data updates
:type data: dict
:keyword bot: Whether to mark the edit as a bot edit
:keyword baserevid: Base revision id override, used to detect
conflicts.
:type baserevid: long
:return: New form data
"""
params = {
'action': 'wbleditformelements',
'formId': form.getID(),
'data': json.dumps(data),
'bot': bot,
'token': self.tokens['edit'],
}
if baserevid:
params['baserevid'] = baserevid
req = self.simple_request(**params)
data = req.submit()
return data
| [((120, 5, 120, 31), 'pywikibot.site._decorators.need_version', 'need_version', ({(120, 18, 120, 30): '"""1.28-wmf.3"""'}, {}), "('1.28-wmf.3')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((131, 5, 131, 32), 'pywikibot.site._decorators.need_version', 'need_version', ({(131, 18, 131, 31): '"""1.28-wmf.23"""'}, {}), "('1.28-wmf.23')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((248, 5, 248, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(248, 16, 248, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((300, 5, 300, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(300, 16, 300, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((330, 5, 330, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(330, 16, 330, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((360, 5, 360, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(360, 16, 360, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((389, 5, 389, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(389, 16, 389, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((390, 5, 390, 36), 'pywikibot.tools.remove_last_args', 'remove_last_args', ({(390, 22, 390, 35): "['baserevid']"}, {}), "(['baserevid'])", False, 'from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args\n'), ((440, 5, 440, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(440, 16, 440, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((441, 5, 441, 36), 'pywikibot.tools.remove_last_args', 'remove_last_args', ({(441, 22, 441, 35): "['baserevid']"}, {}), "(['baserevid'])", False, 'from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args\n'), ((479, 5, 479, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(479, 16, 479, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((480, 5, 480, 36), 'pywikibot.tools.remove_last_args', 'remove_last_args', ({(480, 22, 480, 35): "['baserevid']"}, {}), "(['baserevid'])", False, 'from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args\n'), ((512, 5, 512, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(512, 16, 512, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((513, 5, 513, 36), 'pywikibot.tools.remove_last_args', 'remove_last_args', ({(513, 22, 513, 35): "['baserevid']"}, {}), "(['baserevid'])", False, 'from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args\n'), ((541, 5, 541, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(541, 16, 541, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((542, 5, 542, 36), 'pywikibot.tools.remove_last_args', 'remove_last_args', ({(542, 22, 542, 35): "['baserevid']"}, {}), "(['baserevid'])", False, 'from 
pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args\n'), ((571, 5, 571, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(571, 16, 571, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((597, 5, 597, 29), 'pywikibot.site._decorators.need_right', 'need_right', ({(597, 16, 597, 28): '"""item-merge"""'}, {}), "('item-merge')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((630, 5, 630, 29), 'pywikibot.site._decorators.need_right', 'need_right', ({(630, 16, 630, 28): '"""item-merge"""'}, {}), "('item-merge')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((631, 5, 631, 37), 'pywikibot.site._decorators.need_extension', 'need_extension', ({(631, 20, 631, 36): '"""WikibaseLexeme"""'}, {}), "('WikibaseLexeme')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((659, 5, 659, 32), 'pywikibot.site._decorators.need_right', 'need_right', ({(659, 16, 659, 31): '"""item-redirect"""'}, {}), "('item-redirect')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((713, 5, 713, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(713, 16, 713, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((856, 5, 856, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(856, 16, 856, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((857, 5, 857, 37), 'pywikibot.site._decorators.need_extension', 'need_extension', ({(857, 20, 857, 36): '"""WikibaseLexeme"""'}, {}), "('WikibaseLexeme')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((885, 5, 885, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(885, 16, 885, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((886, 5, 886, 37), 'pywikibot.site._decorators.need_extension', 'need_extension', ({(886, 20, 886, 36): '"""WikibaseLexeme"""'}, {}), "('WikibaseLexeme')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((910, 5, 910, 23), 'pywikibot.site._decorators.need_right', 'need_right', ({(910, 16, 910, 22): '"""edit"""'}, {}), "('edit')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((911, 5, 911, 37), 'pywikibot.site._decorators.need_extension', 'need_extension', ({(911, 20, 911, 36): '"""WikibaseLexeme"""'}, {}), "('WikibaseLexeme')", False, 'from pywikibot.site._decorators import need_extension, need_right, need_version\n'), ((116, 17, 116, 63), 'pywikibot.page.WikibaseEntity', 'pywikibot.page.WikibaseEntity', ({(116, 47, 116, 51): 'self', (116, 53, 116, 62): 'entity_id'}, {}), '(self, entity_id)', False, 'import pywikibot\n'), ((117, 14, 117, 43), 'pywikibot.exceptions.NoWikibaseEntityError', 'NoWikibaseEntityError', ({(117, 36, 117, 42): 'entity'}, {}), '(entity)', False, 'from pywikibot.exceptions import APIError, EntityTypeUnknownError, IsRedirectPageError, NoPageError, NoWikibaseEntityError\n'), ((171, 17, 175, 68), 'pywikibot.tools.merge_unique_dicts', 'merge_unique_dicts', (), '', False, 'from pywikibot.tools import itergroup, merge_unique_dicts, 
remove_last_args\n'), ((195, 23, 195, 53), 'pywikibot.tools.itergroup', 'itergroup', ({(195, 33, 195, 41): 'pagelist', (195, 43, 195, 52): 'groupsize'}, {}), '(pagelist, groupsize)', False, 'from pywikibot.tools import itergroup, merge_unique_dicts, remove_last_args\n'), ((234, 17, 234, 51), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((296, 25, 296, 41), 'json.dumps', 'json.dumps', ({(296, 36, 296, 40): 'data'}, {}), '(data)', False, 'import json\n'), ((435, 26, 435, 42), 'json.dumps', 'json.dumps', ({(435, 37, 435, 41): 'snak'}, {}), '(snak)', False, 'import json\n'), ((179, 18, 179, 46), 'pywikibot.exceptions.APIError', 'APIError', ({(179, 27, 179, 41): "data['errors']", (179, 43, 179, 45): '""""""'}, {}), "(data['errors'], '')", False, 'from pywikibot.exceptions import APIError, EntityTypeUnknownError, IsRedirectPageError, NoPageError, NoWikibaseEntityError\n'), ((348, 18, 348, 36), 'pywikibot.exceptions.NoPageError', 'NoPageError', ({(348, 30, 348, 35): 'claim'}, {}), '(claim)', False, 'from pywikibot.exceptions import APIError, EntityTypeUnknownError, IsRedirectPageError, NoPageError, NoWikibaseEntityError\n'), ((375, 18, 375, 36), 'pywikibot.exceptions.NoPageError', 'NoPageError', ({(375, 30, 375, 35): 'claim'}, {}), '(claim)', False, 'from pywikibot.exceptions import APIError, EntityTypeUnknownError, IsRedirectPageError, NoPageError, NoWikibaseEntityError\n'), ((702, 12, 703, 32), 'warnings.warn', 'warn', ({(702, 17, 702, 68): '"""search_entities should not get a site via kwargs."""', (703, 17, 703, 28): 'UserWarning', (703, 30, 703, 31): '(2)'}, {}), "('search_entities should not get a site via kwargs.', UserWarning, 2)", False, 'from warnings import warn\n'), ((930, 20, 930, 36), 'json.dumps', 'json.dumps', ({(930, 31, 930, 35): 'data'}, {}), '(data)', False, 'import json\n'), ((313, 48, 313, 60), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((794, 22, 794, 74), 'pywikibot.ItemPage.fromPage', 'pywikibot.ItemPage.fromPage', (), '', False, 'import pywikibot\n'), ((221, 21, 221, 50), 'contextlib.suppress', 'suppress', ({(221, 30, 221, 49): 'IsRedirectPageError'}, {}), '(IsRedirectPageError)', False, 'from contextlib import suppress\n')] |
MisaelVillaverde/fourier-calculator | app.py | fd50cd292e333c1a9d75e93962a0aaa0985ecef9 | from flask import Flask
from flask import render_template, request
from flask import jsonify
import requests
import json
app = Flask(__name__)
@app.route("/symbo",methods=['POST'])
def symbo():
#import pdb; pdb.set_trace()
session = requests.session()
token = session.get("https://es.symbolab.com/solver/step-by-step/x%5E%7B2%7D?or=input").cookies.get_dict()["sy2.pub.token"]
query = request.json["expression"]
#response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query=%5Cint+tcos%5Cleft(nt%5Cright)dt+&referer=https%3A%2F%2Fes.symbolab.com%2Fsolver%2Fstep-by-step%2F%255Cint_%257B%2520%257Dtcos%255Cleft(nt%255Cright)dt%2520%3For%3Dinput&plotRequest=PlotOptional&page=step-by-step",headers={
response = json.loads(session.get(f"https://es.symbolab.com/pub_api/steps?subscribed=true&origin=input&language=es&query={query}",headers={
"x-requested-with":"XMLHttpRequest",
"authorization":f"Bearer {token}"
}).content)
return {
"dym":response["dym"],
"solutions":response["solutions"]
}
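# Illustrative client call (not part of the original app; assumes the default
# Flask dev server on localhost:5000):
#   import requests
#   r = requests.post("http://localhost:5000/symbo", json={"expression": "x^{2}"})
#   print(r.json()["solutions"])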
@app.route('/')
def hello():
return render_template('index.html')
app.run(debug=True) | [((6, 6, 6, 21), 'flask.Flask', 'Flask', ({(6, 12, 6, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask\n'), ((12, 14, 12, 32), 'requests.session', 'requests.session', ({}, {}), '()', False, 'import requests\n'), ((27, 11, 27, 40), 'flask.render_template', 'render_template', ({(27, 27, 27, 39): '"""index.html"""'}, {}), "('index.html')", False, 'from flask import render_template, request\n')] |
kalona/Spark-The-Definitive-Guide | my_code/Chapter_2.py | 0b495c4710b2030aa59d5a7f4053ee0a8345d0d8 | from pyspark.sql import SparkSession
# spark = SparkSession.builder.master("local[*]").getOrCreate()
spark = SparkSession.builder.getOrCreate()
file_path = r"C:\home_work\local_github\Spark-The-Definitive-Guide\data\flight-data\csv\2015-summary.csv"
# COMMAND ----------
# COMMAND ----------
flightData2015 = spark\
.read\
.option("inferSchema", "true")\
.option("header", "true")\
.csv("./data/flight-data/csv/2015-summary.csv")
# COMMAND ----------
flightData2015.createOrReplaceTempView("flight_data_2015")
# COMMAND ----------
sqlWay = spark.sql("""
SELECT DEST_COUNTRY_NAME, count(1)
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
""")
dataFrameWay = flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.count()
sqlWay.explain()
dataFrameWay.explain()
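# Both explain() calls above should print essentially the same physical plan:
# Spark compiles the SQL query and the DataFrame transformation into the same
# underlying execution plan, so neither approach is inherently faster.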
# COMMAND ----------
from pyspark.sql.functions import max, col
#
flightData2015.select(max(col("count"))).show(1)
# COMMAND ----------
maxSql = spark.sql("""
SELECT DEST_COUNTRY_NAME, sum(count) as destination_total
FROM flight_data_2015
GROUP BY DEST_COUNTRY_NAME
ORDER BY sum(count) DESC
LIMIT 5
""")
maxSql.show()
# COMMAND ----------
from pyspark.sql.functions import desc
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.show()
# COMMAND ----------
flightData2015\
.groupBy("DEST_COUNTRY_NAME")\
.sum("count")\
.withColumnRenamed("sum(count)", "destination_total")\
.sort(desc("destination_total"))\
.limit(5)\
.explain()
# COMMAND ----------
| [((5, 8, 5, 42), 'pyspark.sql.SparkSession.builder.getOrCreate', 'SparkSession.builder.getOrCreate', ({}, {}), '()', False, 'from pyspark.sql import SparkSession\n'), ((45, 26, 45, 38), 'pyspark.sql.functions.col', 'col', ({(45, 30, 45, 37): '"""count"""'}, {}), "('count')", False, 'from pyspark.sql.functions import max, col\n'), ((69, 8, 69, 33), 'pyspark.sql.functions.desc', 'desc', ({(69, 13, 69, 32): '"""destination_total"""'}, {}), "('destination_total')", False, 'from pyspark.sql.functions import desc\n'), ((80, 8, 80, 33), 'pyspark.sql.functions.desc', 'desc', ({(80, 13, 80, 32): '"""destination_total"""'}, {}), "('destination_total')", False, 'from pyspark.sql.functions import desc\n')] |
ContinuumIO/intake-postgres | tests/test_intake_postgres.py | fda7f7b2b6255544ea7ffd365a4ac8b2655fd226 | import os
import pickle
import pytest
import pandas as pd
from shapely import wkt
from intake_postgres import PostgresSource
from intake import open_catalog
from .util import verify_datasource_interface
TEST_DATA_DIR = 'tests'
TEST_DATA = [
('sample1', 'sample1.csv'),
('sample2_1', 'sample2_1.csv'),
('sample2_2', 'sample2_2.csv'),
]
TEST_GIS_DATA = [
('points', 'sample_points.psql'),
('multipoints', 'sample_multipoints.psql'),
('lines', 'sample_lines.psql'),
('multilines', 'sample_multilines.psql'),
('polygons', 'sample_polygons.psql'),
('multipolygons', 'sample_multipolygons.psql'),
# ('triangles', 'sample_triangles.psql'),
]
TEST_TEMPLATE_DATA = [
'jinja2_params_with_env',
]
@pytest.fixture(scope='module')
def engine():
"""Start docker container for PostgreSQL database, yield a tuple (engine,
metadata), and cleanup connection afterward."""
from .util import start_postgres, stop_postgres
from sqlalchemy import create_engine
stop_postgres(let_fail=True)
local_port = start_postgres()
uri = 'postgresql://postgres@localhost:{}/postgres'.format(local_port)
engine = create_engine(uri)
for table_name, csv_fname in TEST_DATA:
csv_fpath = os.path.join(TEST_DATA_DIR, csv_fname)
df = pd.read_csv(csv_fpath)
df.to_sql(table_name, engine, index=False)
for table_name, psql_fname in TEST_GIS_DATA:
psql_fpath = os.path.join(TEST_DATA_DIR, psql_fname)
with engine.connect() as conn:
with open(psql_fpath, 'r') as fp:
cmds = fp.read().strip().split(';')
for cmd in cmds:
if cmd.strip():
conn.execute(' '.join(cmd.split()))
try:
yield engine
finally:
stop_postgres()
@pytest.mark.parametrize('table_name,_', TEST_DATA)
def test_open(engine, table_name, _):
d = PostgresSource(str(engine.url), 'select * from '+table_name)
assert d.container == 'dataframe'
assert d.description is None
verify_datasource_interface(d)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_read(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_discover_after_read(engine, table_name, csv_fpath):
"""Assert that after reading the dataframe, discover() shows more accurate
information.
"""
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
info = source.discover()
dt = {k: str(v) for k, v in expected_df.dtypes.to_dict().items()}
assert info['dtype'] == dt
assert info['shape'] == (None, 3)
assert info['npartitions'] == 1
df = source.read()
assert expected_df.equals(df)
info = source.discover()
assert info['dtype'] == dt
assert info['shape'] == (4, 3)
assert info['npartitions'] == 1
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_close(engine, table_name, csv_fpath):
expected_df = pd.read_csv(os.path.join(TEST_DATA_DIR, csv_fpath))
source = PostgresSource(str(engine.url), 'select * from '+table_name)
source.close()
# Can reopen after close
df = source.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,csv_fpath', TEST_DATA)
def test_pickle(engine, table_name, csv_fpath):
source = PostgresSource(str(engine.url), 'select * from '+table_name)
pickled_source = pickle.dumps(source)
source_clone = pickle.loads(pickled_source)
expected_df = source.read()
df = source_clone.read()
assert expected_df.equals(df)
@pytest.mark.parametrize('table_name,_1', TEST_DATA)
def test_catalog(engine, table_name, _1):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name.rsplit('_idx', 1)[0]
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
def test_catalog_join(engine):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = 'sample2'
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('table_name,_1', TEST_GIS_DATA)
def test_postgis_data(engine, table_name, _1):
from sqlalchemy import MetaData
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
ds_name = table_name
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
meta = MetaData()
meta.reflect(bind=engine)
col_exprs = ['ST_AsText({0}) as {0}'.format(col.name)
for col in meta.tables[table_name].columns]
_query = pgsrc._sql_expr.replace('*', ', '.join(col_exprs))
expected_df = pd.read_sql_query(_query, engine).applymap(
lambda geom: str(wkt.loads(geom))
)
df = pgsrc.read().applymap(lambda geom: str(wkt.loads(geom)))
assert expected_df.equals(df)
pgsrc.close()
@pytest.mark.parametrize('ds_name', TEST_TEMPLATE_DATA)
def test_jinja2(engine, ds_name):
catalog_fpath = os.path.join(TEST_DATA_DIR, 'catalog1.yml')
catalog = open_catalog(catalog_fpath)
src = catalog[ds_name]
pgsrc = src.get()
pgsrc._uri = str(engine.url)
assert src.describe()['container'] == 'dataframe'
assert src.describe_open()['plugin'] == 'postgres'
assert src.describe_open()['args']['sql_expr'][:6] in ('select', 'SELECT')
metadata = pgsrc.discover()
assert metadata['npartitions'] == 1
expected_df = pd.read_sql_query(pgsrc._sql_expr, engine)
df = pgsrc.read()
assert expected_df.equals(df)
pgsrc.close()
| [((32, 1, 32, 31), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((62, 1, 62, 51), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(62, 25, 62, 39): '"""table_name,_"""', (62, 41, 62, 50): 'TEST_DATA'}, {}), "('table_name,_', TEST_DATA)", False, 'import pytest\n'), ((70, 1, 70, 59), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(70, 25, 70, 47): '"""table_name,csv_fpath"""', (70, 49, 70, 58): 'TEST_DATA'}, {}), "('table_name,csv_fpath', TEST_DATA)", False, 'import pytest\n'), ((81, 1, 81, 59), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(81, 25, 81, 47): '"""table_name,csv_fpath"""', (81, 49, 81, 58): 'TEST_DATA'}, {}), "('table_name,csv_fpath', TEST_DATA)", False, 'import pytest\n'), ((89, 1, 89, 59), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(89, 25, 89, 47): '"""table_name,csv_fpath"""', (89, 49, 89, 58): 'TEST_DATA'}, {}), "('table_name,csv_fpath', TEST_DATA)", False, 'import pytest\n'), ((113, 1, 113, 59), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(113, 25, 113, 47): '"""table_name,csv_fpath"""', (113, 49, 113, 58): 'TEST_DATA'}, {}), "('table_name,csv_fpath', TEST_DATA)", False, 'import pytest\n'), ((125, 1, 125, 59), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(125, 25, 125, 47): '"""table_name,csv_fpath"""', (125, 49, 125, 58): 'TEST_DATA'}, {}), "('table_name,csv_fpath', TEST_DATA)", False, 'import pytest\n'), ((138, 1, 138, 52), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(138, 25, 138, 40): '"""table_name,_1"""', (138, 42, 138, 51): 'TEST_DATA'}, {}), "('table_name,_1', TEST_DATA)", False, 'import pytest\n'), ((185, 1, 185, 56), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(185, 25, 185, 40): '"""table_name,_1"""', (185, 42, 185, 55): 'TEST_GIS_DATA'}, {}), "('table_name,_1', TEST_GIS_DATA)", False, 'import pytest\n'), ((217, 1, 217, 55), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(217, 25, 217, 34): '"""ds_name"""', (217, 36, 217, 54): 'TEST_TEMPLATE_DATA'}, {}), "('ds_name', TEST_TEMPLATE_DATA)", False, 'import pytest\n'), ((42, 13, 42, 31), 'sqlalchemy.create_engine', 'create_engine', ({(42, 27, 42, 30): 'uri'}, {}), '(uri)', False, 'from sqlalchemy import create_engine\n'), ((129, 21, 129, 41), 'pickle.dumps', 'pickle.dumps', ({(129, 34, 129, 40): 'source'}, {}), '(source)', False, 'import pickle\n'), ((130, 19, 130, 47), 'pickle.loads', 'pickle.loads', ({(130, 32, 130, 46): 'pickled_source'}, {}), '(pickled_source)', False, 'import pickle\n'), ((140, 20, 140, 63), 'os.path.join', 'os.path.join', ({(140, 33, 140, 46): 'TEST_DATA_DIR', (140, 48, 140, 62): '"""catalog1.yml"""'}, {}), "(TEST_DATA_DIR, 'catalog1.yml')", False, 'import os\n'), ((142, 14, 142, 41), 'intake.open_catalog', 'open_catalog', ({(142, 27, 142, 40): 'catalog_fpath'}, {}), '(catalog_fpath)', False, 'from intake import open_catalog\n'), ((155, 18, 155, 60), 'pandas.read_sql_query', 'pd.read_sql_query', ({(155, 36, 155, 51): 'pgsrc._sql_expr', (155, 53, 155, 59): 'engine'}, {}), '(pgsrc._sql_expr, engine)', True, 'import pandas as pd\n'), ((163, 20, 163, 63), 'os.path.join', 'os.path.join', ({(163, 33, 163, 46): 'TEST_DATA_DIR', (163, 48, 163, 62): '"""catalog1.yml"""'}, {}), "(TEST_DATA_DIR, 'catalog1.yml')", False, 'import os\n'), ((165, 14, 165, 41), 'intake.open_catalog', 'open_catalog', ({(165, 27, 165, 40): 'catalog_fpath'}, {}), '(catalog_fpath)', False, 'from intake import open_catalog\n'), ((178, 18, 178, 60), 'pandas.read_sql_query', 'pd.read_sql_query', ({(178, 36, 
178, 51): 'pgsrc._sql_expr', (178, 53, 178, 59): 'engine'}, {}), '(pgsrc._sql_expr, engine)', True, 'import pandas as pd\n'), ((188, 20, 188, 63), 'os.path.join', 'os.path.join', ({(188, 33, 188, 46): 'TEST_DATA_DIR', (188, 48, 188, 62): '"""catalog1.yml"""'}, {}), "(TEST_DATA_DIR, 'catalog1.yml')", False, 'import os\n'), ((190, 14, 190, 41), 'intake.open_catalog', 'open_catalog', ({(190, 27, 190, 40): 'catalog_fpath'}, {}), '(catalog_fpath)', False, 'from intake import open_catalog\n'), ((203, 11, 203, 21), 'sqlalchemy.MetaData', 'MetaData', ({}, {}), '()', False, 'from sqlalchemy import MetaData\n'), ((219, 20, 219, 63), 'os.path.join', 'os.path.join', ({(219, 33, 219, 46): 'TEST_DATA_DIR', (219, 48, 219, 62): '"""catalog1.yml"""'}, {}), "(TEST_DATA_DIR, 'catalog1.yml')", False, 'import os\n'), ((221, 14, 221, 41), 'intake.open_catalog', 'open_catalog', ({(221, 27, 221, 40): 'catalog_fpath'}, {}), '(catalog_fpath)', False, 'from intake import open_catalog\n'), ((233, 18, 233, 60), 'pandas.read_sql_query', 'pd.read_sql_query', ({(233, 36, 233, 51): 'pgsrc._sql_expr', (233, 53, 233, 59): 'engine'}, {}), '(pgsrc._sql_expr, engine)', True, 'import pandas as pd\n'), ((44, 20, 44, 58), 'os.path.join', 'os.path.join', ({(44, 33, 44, 46): 'TEST_DATA_DIR', (44, 48, 44, 57): 'csv_fname'}, {}), '(TEST_DATA_DIR, csv_fname)', False, 'import os\n'), ((45, 13, 45, 35), 'pandas.read_csv', 'pd.read_csv', ({(45, 25, 45, 34): 'csv_fpath'}, {}), '(csv_fpath)', True, 'import pandas as pd\n'), ((48, 21, 48, 60), 'os.path.join', 'os.path.join', ({(48, 34, 48, 47): 'TEST_DATA_DIR', (48, 49, 48, 59): 'psql_fname'}, {}), '(TEST_DATA_DIR, psql_fname)', False, 'import os\n'), ((72, 30, 72, 68), 'os.path.join', 'os.path.join', ({(72, 43, 72, 56): 'TEST_DATA_DIR', (72, 58, 72, 67): 'csv_fpath'}, {}), '(TEST_DATA_DIR, csv_fpath)', False, 'import os\n'), ((83, 30, 83, 68), 'os.path.join', 'os.path.join', ({(83, 43, 83, 56): 'TEST_DATA_DIR', (83, 58, 83, 67): 'csv_fpath'}, {}), '(TEST_DATA_DIR, csv_fpath)', False, 'import os\n'), ((94, 30, 94, 68), 'os.path.join', 'os.path.join', ({(94, 43, 94, 56): 'TEST_DATA_DIR', (94, 58, 94, 67): 'csv_fpath'}, {}), '(TEST_DATA_DIR, csv_fpath)', False, 'import os\n'), ((115, 30, 115, 68), 'os.path.join', 'os.path.join', ({(115, 43, 115, 56): 'TEST_DATA_DIR', (115, 58, 115, 67): 'csv_fpath'}, {}), '(TEST_DATA_DIR, csv_fpath)', False, 'import os\n'), ((208, 18, 208, 51), 'pandas.read_sql_query', 'pd.read_sql_query', ({(208, 36, 208, 42): '_query', (208, 44, 208, 50): 'engine'}, {}), '(_query, engine)', True, 'import pandas as pd\n'), ((209, 25, 209, 40), 'shapely.wkt.loads', 'wkt.loads', ({(209, 35, 209, 39): 'geom'}, {}), '(geom)', False, 'from shapely import wkt\n'), ((211, 48, 211, 63), 'shapely.wkt.loads', 'wkt.loads', ({(211, 58, 211, 62): 'geom'}, {}), '(geom)', False, 'from shapely import wkt\n')] |
dks1018/CoffeeShopCoding | Module_3/testImage.py | 13ac1700673c86c601eb2758570920620a956e4c | # file = open('C:\\Users\\dks10\\OneDrive\\Desktop\\Projects\\Code\\Python\\PythonCrypto\\Module_3\\eye.png', 'rb')
file = open('encrypt_eye.png', 'rb')
image = file.read()
file.close()
image = bytearray(image)
key = 48
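# XOR every byte with the key; applying the same XOR a second time restores
# the original bytes, so this one loop both encrypts and decrypts the image.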
for index, value in enumerate(image):
image[index] = value^key
file = open('2eye.png','wb')
file.write(image)
file.close() | [] |
Aircoookie/LedFx | ledfxcontroller/effects/temporal.py | 95628fc237497dd89aaf30fdbf88f780f3330166 | import time
import logging
from ledfxcontroller.effects import Effect
from threading import Thread
import voluptuous as vol
_LOGGER = logging.getLogger(__name__)
DEFAULT_RATE = 1.0 / 60.0
@Effect.no_registration
class TemporalEffect(Effect):
_thread_active = False
_thread = None
CONFIG_SCHEMA = vol.Schema({
vol.Required('speed', default = 1.0): float
})
def thread_function(self):
while self._thread_active:
startTime = time.time()
# Treat the return value of the effect loop as a speed modifier
            # such that effects that are naturally faster or slower can have
# a consistent feel.
sleepInterval = self.effect_loop()
if sleepInterval is None:
sleepInterval = 1.0
sleepInterval = sleepInterval * DEFAULT_RATE
# Calculate the time to sleep accounting for potential heavy
# frame assembly operations
timeToSleep = (sleepInterval / self._config['speed']) - (time.time() - startTime)
if timeToSleep > 0:
time.sleep(timeToSleep)
def effect_loop(self):
"""
Triggered periodically based on the effect speed and
any additional effect modifiers
"""
pass
def activate(self, pixel_count):
super().activate(pixel_count)
self._thread_active = True
self._thread = Thread(target = self.thread_function)
self._thread.start()
def deactivate(self):
if self._thread_active:
self._thread_active = False
self._thread.join()
self._thread = None
super().deactivate()
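# Illustrative sketch (not part of the original module): a minimal subclass
# showing how effect_loop() is driven by thread_function() and how its return
# value scales the sleep interval.
#
# class ExampleBlink(TemporalEffect):
#     def effect_loop(self):
#         # ... update the pixel buffer here ...
#         return 2.0  # sleep twice the base interval between iterations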
| [((7, 10, 7, 37), 'logging.getLogger', 'logging.getLogger', ({(7, 28, 7, 36): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((49, 23, 49, 60), 'threading.Thread', 'Thread', (), '', False, 'from threading import Thread\n'), ((16, 8, 16, 44), 'voluptuous.Required', 'vol.Required', (), '', True, 'import voluptuous as vol\n'), ((22, 24, 22, 35), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((36, 16, 36, 39), 'time.sleep', 'time.sleep', ({(36, 27, 36, 38): 'timeToSleep'}, {}), '(timeToSleep)', False, 'import time\n'), ((34, 69, 34, 80), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')] |
Surferlul/csc-python-solutions | 07/c/3 - Square Census.py | bea99e5e1e344d17fb2cb29d8bcbc6b108e24cee | n=int(input())
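# Print every perfect square strictly smaller than n.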
c = 1
while c**2 < n:
print(c**2)
c += 1
| [] |
LuChang-CS/sherbet | utils.py | d1061aca108eab8e0ccbd2202460e25261fdf1d5 | import numpy as np
class DataGenerator:
def __init__(self, inputs, shuffle=True, batch_size=32):
assert len(inputs) > 0
self.inputs = inputs
self.idx = np.arange(len(inputs[0]))
self.shuffle = shuffle
self.batch_size = batch_size
self.on_epoch_end()
def data_length(self):
return len(self.idx)
def __len__(self):
n = self.data_length()
len_ = n // self.batch_size
return len_ if n % self.batch_size == 0 else len_ + 1
def __getitem__(self, index):
start = index * self.batch_size
end = start + self.batch_size
index = self.idx[start:end]
data = []
for x in self.inputs:
data.append(x[index])
return data
def on_epoch_end(self):
if self.shuffle:
np.random.shuffle(self.idx)
def set_batch_size(self, batch_size):
self.batch_size = batch_size
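# Illustrative usage (assumed toy shapes): wrap two aligned arrays and read
# shuffled mini-batches of 32 rows each.
#   x, y = np.zeros((100, 5)), np.ones(100)
#   gen = DataGenerator([x, y], shuffle=True, batch_size=32)
#   len(gen)                  # -> 4 (three full batches plus one partial batch)
#   x_batch, y_batch = gen[0]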
def lr_decay(total_epoch, init_lr, split_val):
lr_map = [init_lr] * total_epoch
if len(split_val) > 0:
assert split_val[0][0] > 1
assert split_val[-1][0] <= total_epoch
current_split_index = 0
current_lr = init_lr
    # Guard against an empty split_val: fall back to a constant init_lr schedule.
    next_epoch, next_lr = split_val[current_split_index] if split_val else (total_epoch + 1, init_lr)
for i in range(total_epoch):
if i < next_epoch - 1:
lr_map[i] = current_lr
else:
current_lr = next_lr
lr_map[i] = current_lr
current_split_index += 1
if current_split_index >= len(split_val):
next_epoch = total_epoch + 1
else:
next_epoch, next_lr = split_val[current_split_index]
def lr_schedule_fn(epoch, lr):
return lr_map[epoch]
return lr_schedule_fn
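# Illustrative usage (assumed example values): with total_epoch=10, init_lr=0.1
# and split_val=[(6, 0.01)], epochs 0-4 keep lr=0.1 and epochs 5-9 use lr=0.01.
#   schedule = lr_decay(10, 0.1, [(6, 0.01)])
#   schedule(0, None)  # -> 0.1
#   schedule(5, None)  # -> 0.01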
| [((32, 12, 32, 39), 'numpy.random.shuffle', 'np.random.shuffle', ({(32, 30, 32, 38): 'self.idx'}, {}), '(self.idx)', True, 'import numpy as np\n')] |
sudhanshu55/Speech_to_Image | Version1_STI.py | 7a047725b3167cfcb7a68004b3c35b2ece75fde4 | from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords
import speech_recognition as sr
import nltk
from google_images_download import google_images_download
response = google_images_download.googleimagesdownload()
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
data = r.recognize_google(audio)  # keep the transcript as str so word_tokenize() accepts it
print (data)
stopWords = set(stopwords.words('english'))
words = word_tokenize(data)
wordsFiltered = []
for w in words:
if w not in stopWords:
wordsFiltered.append(w)
into_string = str(wordsFiltered)
print(into_string)
arguments = {"keywords":into_string,"limit":2,"print_urls":True} #creating list of arguments
response.download(arguments) #passing the arguments to the function | [((6, 11, 6, 56), 'google_images_download.google_images_download.googleimagesdownload', 'google_images_download.googleimagesdownload', ({}, {}), '()', False, 'from google_images_download import google_images_download\n'), ((7, 4, 7, 19), 'speech_recognition.Recognizer', 'sr.Recognizer', ({}, {}), '()', True, 'import speech_recognition as sr\n'), ((15, 8, 15, 27), 'nltk.tokenize.word_tokenize', 'word_tokenize', ({(15, 22, 15, 26): 'data'}, {}), '(data)', False, 'from nltk.tokenize import sent_tokenize, word_tokenize\n'), ((8, 5, 8, 20), 'speech_recognition.Microphone', 'sr.Microphone', ({}, {}), '()', True, 'import speech_recognition as sr\n'), ((14, 16, 14, 42), 'nltk.corpus.stopwords.words', 'stopwords.words', ({(14, 32, 14, 41): '"""english"""'}, {}), "('english')", False, 'from nltk.corpus import stopwords\n')] |
jonathanlloyd/scratchstack-httpserver | src/models.py | 72f9bb5b1673b132786d94c017dbf2d370886b79 | from dataclasses import dataclass
@dataclass
class Request:
method: str
path: str
headers: dict
body: bytes
@dataclass
class Response:
status_code: int
reason_phrase: str
headers: dict
body: bytes
| [] |
EdwardZX/hoomd-blue | hoomd/communicator.py | c87ac3f136534e8a80359a2faceeb730f445da21 | # Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""MPI communicator."""
from hoomd import _hoomd
import hoomd
import contextlib
class Communicator(object):
"""MPI communicator.
Args:
mpi_comm: Accepts an mpi4py communicator. Use this argument to perform
many independent hoomd simulations where you communicate between those
simulations using mpi4py.
ranks_per_partition (int): (MPI) Number of ranks to include in a
partition.
    `Communicator` initializes MPI communications for a `hoomd.Simulation`. To
use MPI, launch your Python script with an MPI launcher (e.g. ``mpirun`` or
``mpiexec``). By default, `Communicator` uses all ranks provided by the
launcher ``num_launch_ranks`` for a single `hoomd.Simulation` object which
decomposes the state onto that many domains.
Set ``ranks_per_partition`` to an integer to partition launched ranks into
``num_launch_ranks / ranks_per_partition`` communicators, each with their
own `partition` index. Use this to perform many simulations in parallel, for
example by using `partition` as an index into an array of state points to
execute.
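    Example (illustrative sketch, assuming a launch with 4 MPI ranks)::

        communicator = hoomd.communicator.Communicator(ranks_per_partition=2)
        # Two partitions are created; use the partition index to select a
        # state point for each independent simulation.
        state_points = [1.0, 2.0]
        my_state_point = state_points[communicator.partition]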
"""
def __init__(self, mpi_comm=None, ranks_per_partition=None):
# check ranks_per_partition
if ranks_per_partition is not None:
if not hoomd.version.mpi_enabled:
raise RuntimeError(
"The ranks_per_partition option is only available in MPI.\n"
)
mpi_available = hoomd.version.mpi_enabled
self.cpp_mpi_conf = None
# create the specified configuration
if mpi_comm is None:
self.cpp_mpi_conf = _hoomd.MPIConfiguration()
else:
if not mpi_available:
raise RuntimeError("mpi_comm is not supported in serial builds")
handled = False
# pass in pointer to MPI_Comm object provided by mpi4py
try:
import mpi4py
if isinstance(mpi_comm, mpi4py.MPI.Comm):
addr = mpi4py.MPI._addressof(mpi_comm)
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)
handled = True
except ImportError:
# silently ignore when mpi4py is missing
pass
# undocumented case: handle plain integers as pointers to MPI_Comm
# objects
if not handled and isinstance(mpi_comm, int):
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)
handled = True
if not handled:
raise RuntimeError(
"Invalid mpi_comm object: {}".format(mpi_comm))
if ranks_per_partition is not None:
# check validity
if (self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition):
raise RuntimeError('Total number of ranks is not a multiple of '
'ranks_per_partition.')
# split the communicator into partitions
self.cpp_mpi_conf.splitPartitions(ranks_per_partition)
@property
def num_ranks(self):
"""int: The number of ranks in this partition.
When initialized with ``ranks_per_partition=None``, `num_ranks` is equal
to the ``num_launch_ranks`` set by the MPI launcher. When using
partitions, `num_ranks` is equal to ``ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNRanks()
else:
return 1
@property
def rank(self):
"""int: The current rank within the partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getRank()
else:
return 0
@property
def num_partitions(self):
"""int: The number of partitions in this execution.
Create partitions with the ``ranks_per_partition`` argument on
initialization. Then, the number of partitions is
``num_launch_ranks / ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNPartitions()
else:
return 1
@property
def partition(self):
"""int: The current partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getPartition()
else:
return 0
def barrier_all(self):
"""Perform a MPI barrier synchronization across all ranks.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
_hoomd.mpi_barrier_world()
def barrier(self):
"""Perform a barrier synchronization across all ranks in the partition.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
self.cpp_mpi_conf.barrier()
@contextlib.contextmanager
def localize_abort(self):
"""Localize MPI_Abort to this partition.
HOOMD calls ``MPI_Abort`` to tear down all running MPI processes
whenever there is an uncaught exception. By default, this will abort the
entire MPI execution. When using partitions, an uncaught exception on
one partition will therefore abort all of them.
Use the return value of :py:meth:`localize_abort()` as a context manager
to tell HOOMD that all operations within the context will use only
that MPI communicator so that an uncaught exception in one partition
will only abort that partition and leave the others running.
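        Example (illustrative; ``sim`` is assumed to be a `hoomd.Simulation`
        created with this communicator)::

            with communicator.localize_abort():
                sim.run(1000)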
"""
global _current_communicator
prev = _current_communicator
_current_communicator = self
yield None
_current_communicator = prev
# store the "current" communicator to be used for MPI_Abort calls. This defaults
# to the world communicator, but users can opt in to a more specific
# communicator using the Device.localize_abort context manager
_current_communicator = Communicator()
| [((51, 32, 51, 57), 'hoomd._hoomd.MPIConfiguration', '_hoomd.MPIConfiguration', ({}, {}), '()', False, 'from hoomd import _hoomd\n'), ((153, 12, 153, 38), 'hoomd._hoomd.mpi_barrier_world', '_hoomd.mpi_barrier_world', ({}, {}), '()', False, 'from hoomd import _hoomd\n'), ((74, 20, 74, 77), 'hoomd._hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', '_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', ({(74, 68, 74, 76): 'mpi_comm'}, {}), '(mpi_comm)', False, 'from hoomd import _hoomd\n'), ((62, 27, 62, 58), 'mpi4py.MPI._addressof', 'mpi4py.MPI._addressof', ({(62, 49, 62, 57): 'mpi_comm'}, {}), '(mpi_comm)', False, 'import mpi4py\n'), ((64, 24, 64, 77), 'hoomd._hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', '_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm', ({(64, 72, 64, 76): 'addr'}, {}), '(addr)', False, 'from hoomd import _hoomd\n')] |
dominc8/affinity-propagation | src/affinity-propagation/generate_data.py | b91b18b52eb68a7eafaadf0ceac39fe10955dcf2 | from config import DataGeneratorCfg
from sklearn.datasets.samples_generator import make_blobs
import numpy as np
def generate():
data, true_labels = make_blobs(n_samples=DataGeneratorCfg.n_samples, centers=DataGeneratorCfg.centers, cluster_std=DataGeneratorCfg.cluster_std, random_state=DataGeneratorCfg.random_state)
print("Generating new data!")
np.savetxt("data/data.txt", data)
np.savetxt("data/true_labels.txt", true_labels)
return data
| [((6, 24, 6, 192), 'sklearn.datasets.samples_generator.make_blobs', 'make_blobs', (), '', False, 'from sklearn.datasets.samples_generator import make_blobs\n'), ((8, 4, 8, 37), 'numpy.savetxt', 'np.savetxt', ({(8, 15, 8, 30): '"""data/data.txt"""', (8, 32, 8, 36): 'data'}, {}), "('data/data.txt', data)", True, 'import numpy as np\n'), ((9, 4, 9, 51), 'numpy.savetxt', 'np.savetxt', ({(9, 15, 9, 37): '"""data/true_labels.txt"""', (9, 39, 9, 50): 'true_labels'}, {}), "('data/true_labels.txt', true_labels)", True, 'import numpy as np\n')] |
roch1990/peon | peon/tests/test_project/test_file/test_function_def/test_functions/test_reflection_at_line.py | 0e9e40956c05138c0820fe380b354fdd1fe95e01 | import _ast
from peon.src.project.file.function_def.function import FunctionLint
class ReflectionAtLineFixture:
empty_node = _ast.Pass
is_instance_at_first_lvl = _ast.FunctionDef(id='isinstance', lineno=1)
type_at_first_lvl = _ast.FunctionDef(id='type', lineno=1)
is_instance_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='isinstance', lineno=2)], lineno=1)
type_at_second_lvl = _ast.FunctionDef(body=[_ast.Expr(id='type', lineno=2)], lineno=1)
def test_empty_node():
assert FunctionLint(
definition=ReflectionAtLineFixture.empty_node,
).reflection_at_line() == tuple()
def test_is_instance_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_first_lvl,
).reflection_at_line() == (1,)
def test_type_at_first_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_first_lvl,
).reflection_at_line() == (1,)
def test_is_instance_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.is_instance_at_second_lvl,
).reflection_at_line() == (2,)
def test_type_at_second_lvl():
assert FunctionLint(
definition=ReflectionAtLineFixture.type_at_second_lvl,
).reflection_at_line() == (2,)
| [((8, 31, 8, 74), '_ast.FunctionDef', '_ast.FunctionDef', (), '', False, 'import _ast\n'), ((9, 24, 9, 61), '_ast.FunctionDef', '_ast.FunctionDef', (), '', False, 'import _ast\n'), ((10, 55, 10, 91), '_ast.Expr', '_ast.Expr', (), '', False, 'import _ast\n'), ((11, 48, 11, 78), '_ast.Expr', '_ast.Expr', (), '', False, 'import _ast\n'), ((15, 11, 17, 5), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', (), '', False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((21, 11, 23, 5), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', (), '', False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((27, 11, 29, 5), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', (), '', False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((33, 11, 35, 5), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', (), '', False, 'from peon.src.project.file.function_def.function import FunctionLint\n'), ((39, 11, 41, 5), 'peon.src.project.file.function_def.function.FunctionLint', 'FunctionLint', (), '', False, 'from peon.src.project.file.function_def.function import FunctionLint\n')] |
Nama/A.T.S.P.-Website | db2_funcs.py | 658db78da1b12c01ef9ead2dc44d1ecd97b178d8 | ###############################################################################
# #
'''Website Database-connection-related features''' #
# #
###############################################################################
import cymysql
from conf import website_db
from time import gmtime
from time import strftime
db_host = website_db.ip
db_port = website_db.port
db = website_db.db
db_user = website_db.user
db_pw = website_db.pw
###############################################################################
# #
'''Database-connect and close'''                                              #
# #
###############################################################################
def db_con():
conn = cymysql.connect(host=db_host, port=db_port, user=db_user, passwd=db_pw, db=db)
cur = conn.cursor()
return conn, cur
def db_close(conn, cur):
cur.close()
conn.close()
###############################################################################
# #
'''Donation-Page data''' #
# #
###############################################################################
def donate_save(nick):
conn, cur = db_con()
time = strftime('%Y.%m.%d - %H:%M:%S', gmtime())
cur.execute('INSERT INTO `donate` (`time`, `user`) VALUES (%s, %s)', (time, nick))
conn.commit()
db_close(conn, cur)
def donate_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `donate` ORDER BY `time` DESC LIMIT 20')
nicks = list()
for r in cur.fetchall():
nicks.append([r[0], r[1]])
db_close(conn, cur)
return nicks
###############################################################################
# #
'''Short-URL data''' #
# #
###############################################################################
def shorturl_save(surl, url):
conn, cur = db_con()
cur.execute('INSERT INTO `shorturls` (`surl`, `url`) VALUES (%s, %s)', (surl, url))
conn.commit()
db_close(conn, cur)
def shorturl_read():
conn, cur = db_con()
cur.execute('SELECT * FROM `shorturls`')
urls = list()
for r in cur.fetchall():
urls.append([r[0], r[0], r[1]])
db_close(conn, cur)
return urls
###############################################################################
# #
'''Old Worlds''' #
# #
###############################################################################
def get_old_worlds(item):
conn, cur = db_con()
    # Cast to int so the interpolated LIMIT offset cannot inject SQL.
    sql = 'SELECT * FROM `oldworlds` ORDER BY `date` DESC LIMIT {0}, {1}'.format(int(item), 20)
cur.execute(sql)
worlds = cur.fetchall()
db_close(conn, cur)
return worlds
###############################################################################
# #
'''Server Backup-Size in Dash''' #
# #
###############################################################################
def backup_size():
conn, cur = db_con()
dbtshock = []
tserver = []
htdocs = []
cur.execute('SELECT * FROM `backups`')
for r in cur.fetchall():
if r[1] == 'db':
dbtshock.append([r[0] * 1000, r[2]])
elif r[1] == 'tserver':
tserver.append([r[0] * 1000, r[2]])
elif r[1] == 'htdocs':
htdocs.append([r[0] * 1000, r[2]])
db_close(conn, cur)
return (dbtshock, tserver, htdocs)
| [((29, 11, 29, 89), 'cymysql.connect', 'cymysql.connect', (), '', False, 'import cymysql\n'), ((48, 43, 48, 51), 'time.gmtime', 'gmtime', ({}, {}), '()', False, 'from time import gmtime\n')] |
rgschmitz1/tcss702 | nlp/handler.py | b0fdd7b6107401dc297b467c9e63773dfb8fd487 | from minio import Minio
import json
import os
from .Inspector import Inspector
from .topic_model import topic_model
#def handle(event):
def handle(event, context):
with open("/var/openfaas/secrets/minio-access-key") as f:
access_key = f.read()
with open("/var/openfaas/secrets/minio-secret-key") as f:
secret_key = f.read()
mc = Minio(os.environ['minio_hostname'],
access_key=access_key,
secret_key=secret_key,
secure=False)
tm = topic_model(mc)
# Collect data
inspector = Inspector()
inspector.inspectAll()
# Add custom message and finish the function
# if "startWallClock" in event:
# inspector.addAttribute("startWallClock", event['startWallClock'])
body = json.loads(event.body)
print(body['fn'], flush=True)
fn = {"p": tm.preprocess,
"t": tm.train,
"q": tm.query}
fn[body['fn']]()
inspector.inspectAllDeltas()
# Include functionName
inspector.addAttribute("functionName", fn[body['fn']].__name__)
iret = inspector.finish()
ret = {
"status": 200,
"body": iret
}
return ret
| [((14, 9, 17, 28), 'minio.Minio', 'Minio', (), '', False, 'from minio import Minio\n'), ((28, 11, 28, 33), 'json.loads', 'json.loads', ({(28, 22, 28, 32): 'event.body'}, {}), '(event.body)', False, 'import json\n')] |
jmangs/prometheus-pve-exporter | src/pve_exporter/cli.py | 2947a1247d854791114eb5ed348a250739540708 | """
Proxmox VE exporter for the Prometheus monitoring system.
"""
import sys
from argparse import ArgumentParser
from pve_exporter.http import start_http_server
def main(args=None):
"""
Main entry point.
"""
parser = ArgumentParser()
parser.add_argument('config', nargs='?', default='pve.yml',
help='Path to configuration file (pve.yml)')
parser.add_argument('port', nargs='?', type=int, default='9221',
help='Port on which the exporter is listening (9221)')
parser.add_argument('address', nargs='?', default='',
help='Address to which the exporter will bind')
    params = parser.parse_args(args if args is not None else sys.argv[1:])
start_http_server(params.config, params.port, params.address)
| [((14, 13, 14, 29), 'argparse.ArgumentParser', 'ArgumentParser', ({}, {}), '()', False, 'from argparse import ArgumentParser\n'), ((24, 4, 24, 65), 'pve_exporter.http.start_http_server', 'start_http_server', ({(24, 22, 24, 35): 'params.config', (24, 37, 24, 48): 'params.port', (24, 50, 24, 64): 'params.address'}, {}), '(params.config, params.port, params.address)', False, 'from pve_exporter.http import start_http_server\n')] |
vinodkahuja/augur | workers/repo_info_worker/repo_info_worker.py | a7688af262c2f971767962d4a20110daf4b1179a | #SPDX-License-Identifier: MIT
import logging, os, sys, time, requests, json
from datetime import datetime
from multiprocessing import Process, Queue
import pandas as pd
import sqlalchemy as s
from workers.worker_base import Worker
# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of
# 1. Displaying discrete metadata like "number of forks" and how they change over time
# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table.
# This table also updates the REPO table in 2 cases:
# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and
# 2. Recognizing when a repository is archived, and recording the date we observed the change in status.
class RepoInfoWorker(Worker):
def __init__(self, config={}):
worker_type = "repo_info_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
models = ['repo_info']
# Define the tables needed to insert, update, or delete on
data_tables = ['repo_info', 'repo']
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = 'Repo Info Worker'
self.tool_version = '1.0.0'
self.data_source = 'GitHub API'
def repo_info_model(self, task, repo_id):
github_url = task['given']['github_url']
self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n")
owner, repo = self.get_owner_repo(github_url)
url = 'https://api.github.com/graphql'
query = """
{
repository(owner:"%s", name:"%s"){
updatedAt
hasIssuesEnabled
issues(states:OPEN) {
totalCount
}
hasWikiEnabled
forkCount
defaultBranchRef {
name
}
watchers {
totalCount
}
id
licenseInfo {
name
url
}
stargazers {
totalCount
}
codeOfConduct {
name
url
}
issue_count: issues {
totalCount
}
issues_closed: issues(states:CLOSED) {
totalCount
}
pr_count: pullRequests {
totalCount
}
pr_open: pullRequests(states: OPEN) {
totalCount
}
pr_closed: pullRequests(states: CLOSED) {
totalCount
}
pr_merged: pullRequests(states: MERGED) {
totalCount
}
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 0){
totalCount
}
}
}
}
}
}
""" % (owner, repo)
# Hit the graphql endpoint and retry 3 times in case of failure
num_attempts = 0
success = False
data = None
while num_attempts < 3:
self.logger.info("Hitting endpoint: {} ...\n".format(url))
r = requests.post(url, json={'query': query}, headers=self.headers)
self.update_gh_rate_limit(r)
try:
data = r.json()
except:
data = json.loads(json.dumps(r.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(r)
continue
if 'data' in data:
success = True
data = data['data']['repository']
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(r, temporarily_disable=True)
continue
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(r, bad_credentials=True)
continue
num_attempts += 1
if not success:
self.logger.error('Cannot hit endpoint after 3 attempts. \"Completing\" task.\n')
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Just checking that the data is accessible (would not be if repo no longer exists)
try:
data['updatedAt']
except Exception as e:
self.logger.error('Cannot access repo_info data: {}\nError: {}. \"Completing\" task.'.format(data, e))
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Get committers count info that requires seperate endpoint
committers_count = self.query_committers_count(owner, repo)
# Put all data together in format of the table
self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n')
rep_inf = {
'repo_id': repo_id,
'last_updated': data['updatedAt'] if 'updatedAt' in data else None,
'issues_enabled': data['hasIssuesEnabled'] if 'hasIssuesEnabled' in data else None,
'open_issues': data['issues']['totalCount'] if data['issues'] else None,
'pull_requests_enabled': None,
'wiki_enabled': data['hasWikiEnabled'] if 'hasWikiEnabled' in data else None,
'pages_enabled': None,
'fork_count': data['forkCount'] if 'forkCount' in data else None,
'default_branch': data['defaultBranchRef']['name'] if data['defaultBranchRef'] else None,
'watchers_count': data['watchers']['totalCount'] if data['watchers'] else None,
'UUID': None,
'license': data['licenseInfo']['name'] if data['licenseInfo'] else None,
'stars_count': data['stargazers']['totalCount'] if data['stargazers'] else None,
'committers_count': committers_count,
'issue_contributors_count': None,
'changelog_file': None,
'contributing_file': None,
'license_file': data['licenseInfo']['url'] if data['licenseInfo'] else None,
'code_of_conduct_file': data['codeOfConduct']['url'] if data['codeOfConduct'] else None,
'security_issue_file': None,
'security_audit_file': None,
'status': None,
'keywords': None,
'commit_count': data['ref']['target']['history']['totalCount'] if data['ref'] else None,
'issues_count': data['issue_count']['totalCount'] if data['issue_count'] else None,
'issues_closed': data['issues_closed']['totalCount'] if data['issues_closed'] else None,
'pull_request_count': data['pr_count']['totalCount'] if data['pr_count'] else None,
'pull_requests_open': data['pr_open']['totalCount'] if data['pr_open'] else None,
'pull_requests_closed': data['pr_closed']['totalCount'] if data['pr_closed'] else None,
'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.repo_info_table.insert().values(rep_inf))
self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n")
self.results_counter += 1
# Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table.
forked = self.is_forked(owner, repo)
archived = self.is_archived(owner, repo)
archived_date_collected = None
if archived is not False:
archived_date_collected = archived
archived = 1
else:
archived = 0
rep_additional_data = {
'forked_from': forked,
'repo_archived': archived,
'repo_archived_date_collected': archived_date_collected
}
result = self.db.execute(self.repo_table.update().where(
self.repo_table.c.repo_id==repo_id).values(rep_additional_data))
self.logger.info(f"Inserted info for {owner}/{repo}\n")
# Register this task as completed
self.register_task_completion(self.task, repo_id, "repo_info")
def query_committers_count(self, owner, repo):
self.logger.info('Querying committers count\n')
url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100'
committers = 0
try:
while True:
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
committers += len(r.json())
if 'next' not in r.links:
break
else:
url = r.links['next']['url']
except Exception:
self.logger.exception('An error occured while querying contributor count\n')
return committers
def is_forked(self, owner, repo): #/repos/:owner/:repo parent
self.logger.info('Querying parent info to verify if the repo is forked\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'fork' in data:
if 'parent' in data:
return data['parent']['full_name']
return 'Parent not available'
return False
def is_archived(self, owner, repo):
        self.logger.info('Querying repository archive status\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'archived' in data:
if data['archived']:
if 'updated_at' in data:
return data['updated_at']
return 'Date not available'
return False
return False
def get_repo_data(self, url, response):
success = False
try:
data = response.json()
except:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(response)
if 'id' in data:
success = True
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
                self.update_gh_rate_limit(response, temporarily_disable=True)
if data['message'] == 'Bad credentials':
                self.update_gh_rate_limit(response, bad_credentials=True)
if not success:
self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url))
return data
| [((248, 12, 248, 51), 'requests.get', 'requests.get', (), '', False, 'import logging, os, sys, time, requests, json\n'), ((264, 12, 264, 51), 'requests.get', 'requests.get', (), '', False, 'import logging, os, sys, time, requests, json\n'), ((113, 16, 113, 79), 'requests.post', 'requests.post', (), '', False, 'import logging, os, sys, time, requests, json\n'), ((231, 20, 231, 59), 'requests.get', 'requests.get', (), '', False, 'import logging, os, sys, time, requests, json\n'), ((283, 30, 283, 55), 'json.dumps', 'json.dumps', ({(283, 41, 283, 54): 'response.text'}, {}), '(response.text)', False, 'import logging, os, sys, time, requests, json\n'), ((119, 34, 119, 52), 'json.dumps', 'json.dumps', ({(119, 45, 119, 51): 'r.text'}, {}), '(r.text)', False, 'import logging, os, sys, time, requests, json\n')] |
victor-estrade/SystGradDescent | benchmark/my_argparser.py | 822e7094290301ec47a99433381a8d6406798aff | # coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
def parse_args_tolerance():
parser = argparse.ArgumentParser(description='just for tolerance')
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
args, _ = parser.parse_known_args()
return args.tolerance
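# Illustrative call (assumed script name): `python main.py --tolerance 0.05`
# makes parse_args_tolerance() return 0.05; unrelated flags are ignored because
# parse_known_args() is used instead of parse_args().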
def GB_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--n-estimators', help='number of estimators',
default=100, type=int)
parser.add_argument('--max-depth', help='maximum depth of trees',
default=3, type=int)
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-1, type=float)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
parser.add_argument('--skip-minuit', help='flag to skip minuit NLL minization',
action='store_true')
args = parser.parse_args()
return args
def REG_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-4, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
parser.add_argument('--no-cuda', '--no-gpu', help='flag to use or not the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def INFERNO_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--temperature', help='control initial softmax steepness',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.5, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.9, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--n-bins', help='number of output bins',
default=10, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=20, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def NET_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def TP_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def PIVOT_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--learning-rate', '--lr', help='learning rate',
default=1e-3, type=float)
parser.add_argument('--trade-off', help='trade-off between classic loss and adversarial loss',
default=1.0, type=float)
parser.add_argument('--beta1', help='beta 1 for Adam',
default=0.9, type=float)
parser.add_argument('--beta2', help='beta 2 for Adam',
default=0.999, type=float)
parser.add_argument('--weight-decay', help='weight decay for SGD',
default=0.0, type=float)
parser.add_argument('--optimizer', help='optimizer name', dest='optimizer_name',
default='Adam', type=str, choices=('Adam', 'SGD', 'ADAM', 'sgd', 'adam'))
parser.add_argument('--n-unit', help='Number of units in layers. Controls NN width.',
default=200, type=int)
parser.add_argument('--sample-size', help='data sample size',
default=1000, type=int)
parser.add_argument('--batch-size', help='mini-batch size',
default=1000, type=int)
parser.add_argument('--n-steps', help='number of update steps',
default=1000, type=int)
parser.add_argument('--n-net-pre-training-steps', help='number of update steps for pretraining the classifier',
default=1000, type=int)
parser.add_argument('--n-adv-pre-training-steps', help='number of update steps for pretraining the adversarial',
default=1000, type=int)
parser.add_argument('--n-recovery-steps', help='number of update steps for adversarial recovery',
default=1, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
args = parser.parse_args()
return args
def FF_parse_args(main_description="Training launcher"):
parser = argparse.ArgumentParser(description=main_description)
parser.add_argument("--verbose", "-v", type=int, choices=[0, 1, 2],
default=0, help="increase output verbosity")
parser.add_argument("--start-cv", type=int,
default=0, help="start of i_cv for range(start, end)")
parser.add_argument("--end-cv", type=int,
default=30, help="end of i_cv for range(start, end)")
parser.add_argument("--tolerance", type=float,
default=0.1, help="tolerance value for Minuit migrad and simplex minimization")
parser.add_argument('--load-run', help='load saved runs. Do not run the models',
action='store_true')
parser.add_argument('--estimate-only', help='Turns off conditional estimation for V_stat and V_syst',
action='store_true')
parser.add_argument('--conditional-only', help='Turns off common estimation',
action='store_true')
# MODEL HYPER PARAMETERS
parser.add_argument('--feature-id', help='feature index to filter on',
default=0, type=int)
# OTHER
    parser.add_argument('--no-cuda', '--no-gpu', help='flag to disable the gpu',
action='store_false', dest='cuda')
parser.add_argument('--retrain', help='flag to force retraining',
action='store_true')
    parser.add_argument('--skip-minuit', help='flag to skip minuit NLL minimization',
action='store_true')
args = parser.parse_args()
return args
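# --- Illustrative usage sketch (not part of the original launchers) ----------
# Each *_parse_args helper builds an independent parser, so a training script
# would pick the one matching its model family and read the shared options
# back from the returned namespace, e.g.:
#
#     args = INFERNO_parse_args()
#     for i_cv in range(args.start_cv, args.end_cv):
#         run_one_fold(i_cv, args)   # run_one_fold is a hypothetical helper
#
# Note that --no-cuda stores False into args.cuda, so args.cuda defaults to
# True and flips to False only when the flag is passed.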
| [((11, 13, 11, 70), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((18, 13, 18, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((59, 13, 59, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((115, 13, 115, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((176, 13, 176, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((230, 13, 230, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((288, 13, 288, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((351, 13, 351, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n')] |
SarthakJariwala/Shockley-Queisser-Calculator | src/main/python/main.py | 5f9cfd4c97b8141e8b4ee8d15fa5f3cccfe25b7e | from fbs_runtime.application_context.PyQt5 import ApplicationContext, cached_property
from fbs_runtime.platform import is_windows, is_mac
# system imports
import sys
# module imports
from PyQt5 import uic, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.constants as constants
from scipy.integrate import simps, quad
from scipy.interpolate import splrep, splint
from scipy.optimize import fmin
class AppContext(ApplicationContext):
def run(self):
self.main_window.show()
return self.app.exec_()
def get_design(self):
qtCreatorFile = self.get_resource("SQ_GUI.ui")
return qtCreatorFile
def get_file(self):
astmg_file = self.get_resource("ASTMG173.csv")
return astmg_file
@cached_property
def main_window(self):
return MainWindow(self.get_design(), self.get_file())
if is_windows():
matplotlib.use('Qt5Agg')
elif is_mac():
matplotlib.use('macosx')
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, uiFile, astmg173_file):
super(MainWindow, self).__init__()
#Create Main Window
self.ui = uic.loadUi(uiFile, self)
#self.ui = WindowTemplate()
#self.ui.setupUi(self)
#Connect PushButtons to Functions etc
self.ui.CalcualteSQ_pushButton.clicked.connect(self.calculate_SQ)
self.ui.load_pushButton.clicked.connect(self.load_SMARTS_spectrum)
self.ui.save_pushButton.clicked.connect(self.save_bandgap_array)
#start app with checked "plot j-v curve"
self.ui.plot_checkBox.setChecked(True)
self.astmg173_file = astmg173_file
self.out_array = None
self.show()
def load_SMARTS_spectrum(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self)
try:
self.SMARTS = np.genfromtxt(filename[0], skip_header=1)
self.ui.load_checkBox.setChecked(False)
except Exception as e:
QMessageBox.information(
self, None,
str(e), QMessageBox.Ok
)
def calculate_SQ(self):
h = constants.physical_constants['Planck constant'][0] # units of J*s
h_ev = constants.physical_constants['Planck constant in eV s'][0]
c_nm = (constants.physical_constants['speed of light in vacuum'][0]) * 1e9
c = (constants.physical_constants['speed of light in vacuum'][0])
e_charge = constants.physical_constants['elementary charge'][0]
kb_ev = constants.physical_constants['Boltzmann constant in eV/K'][0]
"""User settings"""
Tcell = self.ui.temp_spinBox.value() #temperature of solar cell in degrees K
bandgap = self.ui.bandgap_doubleSpinBox.value() #enter bandgap in eV
#self.ui.textBrowser.append(str('Tcell = %.3f' %(Tcell)))
plot_jv = self.ui.plot_checkBox.isChecked() #'True' if you want to plot the SQ JV curve for "bandgap"
plot_bandgap_array = self.ui.calc_SQ_array_checkBox.isChecked() #'True' if you want to plot SQ parameters for an array of bandgaps
# starting from "mbandgap_array_min" to "bandgap_array_max"
# with number of points "num_points_bandgap_array"
# (see below)
#'False' if you just want SQ data for one bandgap (faster)
bandgap_array_min = self.ui.bandgap_min_doubleSpinBox.value() #in eV
bandgap_array_max = self.ui.bandgap_max_doubleSpinBox.value() # in eV
num_points_bandgap_array = self.ui.no_points_spinBox.value()
"""Programming below"""
bandgap_array = np.linspace(bandgap_array_min, bandgap_array_max, num_points_bandgap_array)
#First convert AM1.5 spectrum from W/m^2/nm to W/m^2/ev
if self.ui.load_checkBox.isChecked():
astmg173 = np.loadtxt(self.astmg173_file, delimiter = ',', skiprows = 2)
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,2]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
else:
try:
astmg173 = self.SMARTS
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,1]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
except:
QMessageBox.information(
self, None,
"No valid spectrum file found!\n\n"+
"Load a valid file or check the 'Use ASTMG173'box"
)
return
total_power_nm = simps(am15, x = am15_wav) #Integrate over nm to check that total power density = 1000 W/m^2
am15_ev = h_ev * (c_nm) / (am15_wav )
am15_wats_ev = am15 * (h_ev * c_nm/ ((am15_ev) ** 2.0))
am15_ev_flip = am15_ev[::-1]
am15_wats_ev_flip = am15_wats_ev[::-1]
total_power_ev = simps(am15_wats_ev_flip, x = am15_ev_flip) #Integrate over eV to check that total power density = 1000 W/m^2
am15_photons_ev = am15_wats_ev_flip / (am15_ev_flip * e_charge)
am15_photons_nm = am15 / (am15_ev * e_charge)
total_photonflux_ev = simps(am15_photons_ev, x = am15_ev_flip)
total_photonflux_nm = simps(am15_photons_nm , x = am15_wav)
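        # Unit check: am15 is in W/m^2/nm and am15_wats_ev in W/m^2/eV, so
        # dividing by the photon energy in joules (eV value * e_charge) gives a
        # photon flux; both integrals above count the same total photon flux on
        # the nm and eV axes and should therefore agree.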
total_photonflux_ev_splrep = splrep(am15_ev_flip, am15_photons_ev)
emin = am15_ev_flip[0]
emax = am15_ev_flip[len(am15_ev_flip) - 1]
def solar_photons_above_gap(Egap): #units of photons / sec *m^2
return splint(Egap, emax,total_photonflux_ev_splrep)
def RR0(Egap):
integrand = lambda eV : eV ** 2.0 / (np.exp(eV / (kb_ev * Tcell)) - 1)
integral = quad(integrand, Egap, emax, full_output=1)[0]
return ((2.0 * np.pi / ((c ** 2.0) * (h_ev ** 3.0)))) * integral
def current_density(V, Egap): #to get from units of amps / m^2 to mA/ cm^2 ---multiply by 1000 to convert to mA ---- multiply by (0.01 ^2) to convert to cm^2
cur_dens = e_charge * (solar_photons_above_gap(Egap) - RR0(Egap) * np.exp( V / (kb_ev * Tcell)))
return cur_dens * 1000 * (0.01 ** 2.0)
def JSC(Egap):
return current_density(0, Egap)
def VOC(Egap):
return (kb_ev * Tcell) * np.log(solar_photons_above_gap(Egap) / RR0(Egap))
def fmax(func_to_maximize, initial_guess=0):
"""return the x that maximizes func_to_maximize(x)"""
func_to_minimize = lambda x : -func_to_maximize(x)
return fmin(func_to_minimize, initial_guess, disp=False)[0]
def V_mpp_Jmpp_maxpower_maxeff_ff(Egap):
vmpp = fmax(lambda V : V * current_density(V, Egap))
jmpp = current_density(vmpp, Egap)
maxpower = vmpp * jmpp
max_eff = maxpower / (total_power_ev * 1000 * (0.01 ** 2.0))
jsc_return = JSC(Egap)
voc_return = VOC(Egap)
ff = maxpower / (jsc_return * voc_return)
return [vmpp, jmpp, maxpower, max_eff, ff, jsc_return, voc_return]
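        # Detailed-balance summary: JSC integrates the AM1.5 photon flux above
        # the gap, RR0 gives the radiative recombination (dark) current term,
        # and the maximum power point is found by maximising V * J(V), from
        # which the fill factor and limiting efficiency follow.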
maxpcemeta = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap)
self.ui.textBrowser.append(str('For Bandgap = %.3f eV, TCell = %.3f K:\nJSC = %.3f mA/cm^2\nVOC = %.3f V\nFF = %.3f\nPCE = %.3f' % (bandgap, Tcell, maxpcemeta[5], maxpcemeta[6],maxpcemeta[4], maxpcemeta[3] * 100)))
if plot_bandgap_array == True:
pce_array = np.empty_like(bandgap_array)
ff_array = np.empty_like(bandgap_array)
voc_array = np.empty_like(bandgap_array)
jsc_array = np.empty_like(bandgap_array)
for i in range(len(bandgap_array)):
metadata = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap_array[i])
pce_array[i] = metadata[3]
ff_array[i] = metadata[4]
voc_array[i] = metadata[6]
jsc_array[i] = metadata[5]
self.out_array = np.array((bandgap_array,pce_array,ff_array, voc_array,jsc_array)).T
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('PCE (%)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, pce_array * 100)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.ylim(0, 1)
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Fill Factor')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, ff_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Jsc (mA/cm$^2$)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, jsc_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Voc (V)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, voc_array, label = 'S-Q Voc')
plt.plot(bandgap_array, bandgap_array, '--', label = 'Bandgap')
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def JV_curve(Egap):
volt_array = np.linspace(0, VOC(Egap), 200)
j_array = np.empty_like(volt_array)
for i in range(len(volt_array)):
j_array[i] = current_density(volt_array[i], Egap)
return [volt_array, j_array]
if plot_jv == True:
jv_meta = JV_curve(bandgap)
v_array = jv_meta[0]
jv_array = jv_meta[1]
plt.figure(figsize=(5,4))
plt.ylabel('Current Density (mA/cm$^2$)')
plt.xlabel('Voltage (V)')
plt.plot(v_array, -jv_array)
plt.title('J-V Curve for '+str(self.ui.bandgap_doubleSpinBox.value())+'eV')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def save_bandgap_array(self):
if self.out_array is None:
self.ui.textBrowser.append("Calculate SQ limit before saving file!")
else:
filename = QtWidgets.QFileDialog.getSaveFileName(self)
np.savetxt(filename[0]+".txt", self.out_array, delimiter='\t', header="Bandgap, PCE, FillFactor, Voc, Jsc")
#def run():
# win = MainWindow()
# QtGui.QApplication.instance().exec_()
# return win
#run()
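# Rough sanity check (illustrative, not enforced by the code): for a single
# junction under the AM1.5G spectrum at ~300 K, the Shockley-Queisser optimum
# sits near a 1.3-1.4 eV bandgap with a limiting efficiency of roughly 33%,
# so PCE values reported for bandgaps in that range should land close to that.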
if __name__ == '__main__':
appctxt = AppContext() # 1. Instantiate ApplicationContext
exit_code = appctxt.run()
sys.exit(exit_code) # 2. Invoke appctxt.app.exec_() | [((33, 3, 33, 15), 'fbs_runtime.platform.is_windows', 'is_windows', ({}, {}), '()', False, 'from fbs_runtime.platform import is_windows, is_mac\n'), ((34, 4, 34, 28), 'matplotlib.use', 'matplotlib.use', ({(34, 19, 34, 27): '"""Qt5Agg"""'}, {}), "('Qt5Agg')", False, 'import matplotlib\n'), ((35, 5, 35, 13), 'fbs_runtime.platform.is_mac', 'is_mac', ({}, {}), '()', False, 'from fbs_runtime.platform import is_windows, is_mac\n'), ((297, 4, 297, 23), 'sys.exit', 'sys.exit', ({(297, 13, 297, 22): 'exit_code'}, {}), '(exit_code)', False, 'import sys\n'), ((36, 4, 36, 28), 'matplotlib.use', 'matplotlib.use', ({(36, 19, 36, 27): '"""macosx"""'}, {}), "('macosx')", False, 'import matplotlib\n'), ((44, 18, 44, 42), 'PyQt5.uic.loadUi', 'uic.loadUi', ({(44, 29, 44, 35): 'uiFile', (44, 37, 44, 41): 'self'}, {}), '(uiFile, self)', False, 'from PyQt5 import uic, QtWidgets\n'), ((62, 19, 62, 62), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', ({(62, 57, 62, 61): 'self'}, {}), '(self)', False, 'from PyQt5 import uic, QtWidgets\n'), ((105, 24, 105, 99), 'numpy.linspace', 'np.linspace', ({(105, 36, 105, 53): 'bandgap_array_min', (105, 55, 105, 72): 'bandgap_array_max', (105, 74, 105, 98): 'num_points_bandgap_array'}, {}), '(bandgap_array_min, bandgap_array_max, num_points_bandgap_array)', True, 'import numpy as np\n'), ((127, 25, 127, 50), 'scipy.integrate.simps', 'simps', (), '', False, 'from scipy.integrate import simps, quad\n'), ((137, 25, 137, 67), 'scipy.integrate.simps', 'simps', (), '', False, 'from scipy.integrate import simps, quad\n'), ((144, 30, 144, 70), 'scipy.integrate.simps', 'simps', (), '', False, 'from scipy.integrate import simps, quad\n'), ((147, 30, 147, 67), 'scipy.integrate.simps', 'simps', (), '', False, 'from scipy.integrate import simps, quad\n'), ((150, 37, 150, 74), 'scipy.interpolate.splrep', 'splrep', ({(150, 44, 150, 56): 'am15_ev_flip', (150, 58, 150, 73): 'am15_photons_ev'}, {}), '(am15_ev_flip, am15_photons_ev)', False, 'from scipy.interpolate import splrep, splint\n'), ((64, 26, 64, 67), 'numpy.genfromtxt', 'np.genfromtxt', (), '', True, 'import numpy as np\n'), ((109, 23, 109, 84), 'numpy.loadtxt', 'np.loadtxt', (), '', True, 'import numpy as np\n'), ((110, 23, 110, 45), 'numpy.copy', 'np.copy', ({(110, 31, 110, 44): 'astmg173[:, (0)]'}, {}), '(astmg173[:, (0)])', True, 'import numpy as np\n'), ((111, 19, 111, 41), 'numpy.copy', 'np.copy', ({(111, 27, 111, 40): 'astmg173[:, (2)]'}, {}), '(astmg173[:, (2)])', True, 'import numpy as np\n'), ((156, 19, 156, 64), 'scipy.interpolate.splint', 'splint', ({(156, 26, 156, 30): 'Egap', (156, 32, 156, 36): 'emax', (156, 37, 156, 63): 'total_photonflux_ev_splrep'}, {}), '(Egap, emax, total_photonflux_ev_splrep)', False, 'from scipy.interpolate import splrep, splint\n'), ((198, 24, 198, 52), 'numpy.empty_like', 'np.empty_like', ({(198, 38, 198, 51): 'bandgap_array'}, {}), '(bandgap_array)', True, 'import numpy as np\n'), ((199, 23, 199, 51), 'numpy.empty_like', 'np.empty_like', ({(199, 37, 199, 50): 'bandgap_array'}, {}), '(bandgap_array)', True, 'import numpy as np\n'), ((200, 24, 200, 52), 'numpy.empty_like', 'np.empty_like', ({(200, 38, 200, 51): 'bandgap_array'}, {}), '(bandgap_array)', True, 'import numpy as np\n'), ((201, 24, 201, 52), 'numpy.empty_like', 'np.empty_like', ({(201, 38, 201, 51): 'bandgap_array'}, {}), '(bandgap_array)', True, 'import numpy as np\n'), ((211, 12, 211, 37), 'matplotlib.pyplot.figure', 'plt.figure', (), '', 
True, 'import matplotlib.pyplot as plt\n'), ((212, 12, 212, 59), 'matplotlib.pyplot.title', 'plt.title', ({(212, 22, 212, 58): "('Cell Temperature = %.2f K' % Tcell)"}, {}), "('Cell Temperature = %.2f K' % Tcell)", True, 'import matplotlib.pyplot as plt\n'), ((214, 12, 214, 33), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(214, 23, 214, 32): '"""PCE (%)"""'}, {}), "('PCE (%)')", True, 'import matplotlib.pyplot as plt\n'), ((215, 12, 215, 38), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(215, 23, 215, 37): '"""Bandgap (eV)"""'}, {}), "('Bandgap (eV)')", True, 'import matplotlib.pyplot as plt\n'), ((216, 12, 216, 52), 'matplotlib.pyplot.plot', 'plt.plot', ({(216, 21, 216, 34): 'bandgap_array', (216, 36, 216, 51): '(pce_array * 100)'}, {}), '(bandgap_array, pce_array * 100)', True, 'import matplotlib.pyplot as plt\n'), ((217, 12, 217, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((218, 12, 218, 22), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((220, 12, 220, 37), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((221, 12, 221, 59), 'matplotlib.pyplot.title', 'plt.title', ({(221, 22, 221, 58): "('Cell Temperature = %.2f K' % Tcell)"}, {}), "('Cell Temperature = %.2f K' % Tcell)", True, 'import matplotlib.pyplot as plt\n'), ((222, 12, 222, 26), 'matplotlib.pyplot.ylim', 'plt.ylim', ({(222, 21, 222, 22): '(0)', (222, 24, 222, 25): '(1)'}, {}), '(0, 1)', True, 'import matplotlib.pyplot as plt\n'), ((224, 12, 224, 37), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(224, 23, 224, 36): '"""Fill Factor"""'}, {}), "('Fill Factor')", True, 'import matplotlib.pyplot as plt\n'), ((225, 12, 225, 38), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(225, 23, 225, 37): '"""Bandgap (eV)"""'}, {}), "('Bandgap (eV)')", True, 'import matplotlib.pyplot as plt\n'), ((226, 12, 226, 45), 'matplotlib.pyplot.plot', 'plt.plot', ({(226, 21, 226, 34): 'bandgap_array', (226, 36, 226, 44): 'ff_array'}, {}), '(bandgap_array, ff_array)', True, 'import matplotlib.pyplot as plt\n'), ((227, 12, 227, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((228, 12, 228, 22), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((230, 12, 230, 37), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((231, 12, 231, 59), 'matplotlib.pyplot.title', 'plt.title', ({(231, 22, 231, 58): "('Cell Temperature = %.2f K' % Tcell)"}, {}), "('Cell Temperature = %.2f K' % Tcell)", True, 'import matplotlib.pyplot as plt\n'), ((233, 12, 233, 41), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(233, 23, 233, 40): '"""Jsc (mA/cm$^2$)"""'}, {}), "('Jsc (mA/cm$^2$)')", True, 'import matplotlib.pyplot as plt\n'), ((234, 12, 234, 38), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(234, 23, 234, 37): '"""Bandgap (eV)"""'}, {}), "('Bandgap (eV)')", True, 'import matplotlib.pyplot as plt\n'), ((235, 12, 235, 46), 'matplotlib.pyplot.plot', 'plt.plot', ({(235, 21, 235, 34): 'bandgap_array', (235, 36, 235, 45): 'jsc_array'}, {}), '(bandgap_array, jsc_array)', True, 'import matplotlib.pyplot as plt\n'), ((236, 12, 236, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((237, 12, 237, 22), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as 
plt\n'), ((239, 12, 239, 37), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((240, 12, 240, 59), 'matplotlib.pyplot.title', 'plt.title', ({(240, 22, 240, 58): "('Cell Temperature = %.2f K' % Tcell)"}, {}), "('Cell Temperature = %.2f K' % Tcell)", True, 'import matplotlib.pyplot as plt\n'), ((242, 12, 242, 33), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(242, 23, 242, 32): '"""Voc (V)"""'}, {}), "('Voc (V)')", True, 'import matplotlib.pyplot as plt\n'), ((243, 12, 243, 38), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(243, 23, 243, 37): '"""Bandgap (eV)"""'}, {}), "('Bandgap (eV)')", True, 'import matplotlib.pyplot as plt\n'), ((244, 12, 244, 65), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((245, 12, 245, 75), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((246, 12, 246, 36), 'matplotlib.pyplot.legend', 'plt.legend', (), '', True, 'import matplotlib.pyplot as plt\n'), ((247, 12, 247, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((248, 12, 248, 22), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((258, 22, 258, 47), 'numpy.empty_like', 'np.empty_like', ({(258, 36, 258, 46): 'volt_array'}, {}), '(volt_array)', True, 'import numpy as np\n'), ((269, 12, 269, 37), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((270, 12, 270, 53), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(270, 23, 270, 52): '"""Current Density (mA/cm$^2$)"""'}, {}), "('Current Density (mA/cm$^2$)')", True, 'import matplotlib.pyplot as plt\n'), ((271, 12, 271, 37), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(271, 23, 271, 36): '"""Voltage (V)"""'}, {}), "('Voltage (V)')", True, 'import matplotlib.pyplot as plt\n'), ((272, 12, 272, 40), 'matplotlib.pyplot.plot', 'plt.plot', ({(272, 21, 272, 28): 'v_array', (272, 30, 272, 39): '(-jv_array)'}, {}), '(v_array, -jv_array)', True, 'import matplotlib.pyplot as plt\n'), ((274, 12, 274, 30), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((275, 12, 275, 22), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((284, 23, 284, 66), 'PyQt5.QtWidgets.QFileDialog.getSaveFileName', 'QtWidgets.QFileDialog.getSaveFileName', ({(284, 61, 284, 65): 'self'}, {}), '(self)', False, 'from PyQt5 import uic, QtWidgets\n'), ((285, 12, 285, 119), 'numpy.savetxt', 'np.savetxt', (), '', True, 'import numpy as np\n'), ((116, 27, 116, 49), 'numpy.copy', 'np.copy', ({(116, 35, 116, 48): 'astmg173[:, (0)]'}, {}), '(astmg173[:, (0)])', True, 'import numpy as np\n'), ((118, 23, 118, 45), 'numpy.copy', 'np.copy', ({(118, 31, 118, 44): 'astmg173[:, (1)]'}, {}), '(astmg173[:, (1)])', True, 'import numpy as np\n'), ((161, 23, 161, 65), 'scipy.integrate.quad', 'quad', (), '', False, 'from scipy.integrate import simps, quad\n'), ((177, 19, 177, 68), 'scipy.optimize.fmin', 'fmin', (), '', False, 'from scipy.optimize import fmin\n'), ((209, 29, 209, 94), 'numpy.array', 'np.array', ({(209, 38, 209, 93): '(bandgap_array, pce_array, ff_array, voc_array, jsc_array)'}, {}), '((bandgap_array, pce_array, ff_array, voc_array, jsc_array))', True, 'import numpy as np\n'), ((120, 16, 124, 17), 'PyQt5.QtWidgets.QMessageBox.information', 'QMessageBox.information', ({(121, 20, 121, 24): 'self', (121, 26, 121, 30): 
'None', (122, 20, 123, 70): '(\'No valid spectrum file found!\\n\\n\' +\n "Load a valid file or check the \'Use ASTMG173\'box")'}, {}), '(self, None, \'No valid spectrum file found!\\n\\n\' +\n "Load a valid file or check the \'Use ASTMG173\'box")', False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((160, 49, 160, 77), 'numpy.exp', 'np.exp', ({(160, 56, 160, 76): '(eV / (kb_ev * Tcell))'}, {}), '(eV / (kb_ev * Tcell))', True, 'import numpy as np\n'), ((165, 80, 165, 108), 'numpy.exp', 'np.exp', ({(165, 88, 165, 107): '(V / (kb_ev * Tcell))'}, {}), '(V / (kb_ev * Tcell))', True, 'import numpy as np\n')] |
tov101/HelpUs | helpus/core.py | 6b53d9651cf45c191774be2f70b70b130251d2a6 | import io
import logging
import os
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from helpus import icon_file_path
from helpus import __version__
LOGGER = logging.getLogger('HelpUs')
LOGGER.setLevel(logging.DEBUG)
class XStream(QtCore.QObject):
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
@staticmethod
def flush():
pass
@staticmethod
def fileno():
return -1
def write(self, msg):
if not self.signalsBlocked():
self.messageWritten.emit(msg)
@staticmethod
def stdout():
if not XStream._stdout:
XStream._stdout = XStream()
sys.stdout = XStream._stdout
return XStream._stdout
@staticmethod
def stderr():
if not XStream._stderr:
XStream._stderr = XStream()
sys.stderr = XStream._stderr
return XStream._stderr
class MyBreakPoint(QtWidgets.QDialog):
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
HOOK_HEADER = '(Pdb) '
HOOK_INTERACT = '>>> '
HOOK_LINE_BREAK = '... '
HOOKS = [HOOK_HEADER, HOOK_INTERACT]
BUTTONS = [
'Continue',
'Next',
'Step',
'Where',
'Up',
'Down'
]
def __init__(self, parent=None):
super().__init__()
if not parent:
self.parentWidget = QtWidgets.QMainWindow()
else:
self.parentWidget = parent
# Change Window Modality, otherwise parentWidget won't let you use this widget
if self.parentWidget.windowModality() == QtCore.Qt.WindowModality.ApplicationModal:
self.parentWidget.hide()
self.parentWidget.setWindowModality(QtCore.Qt.WindowModality.NonModal)
self.parentWidget.showNormal()
# Set Icon
if icon_file_path and os.path.exists(icon_file_path):
self.setWindowIcon(QtGui.QIcon(icon_file_path))
# Set Flags
self.setWindowFlags(
QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.WindowCloseButtonHint
)
# Resize
self.resize(513, 300)
# Create Layout
self.main_layout = QtWidgets.QHBoxLayout()
self.setLayout(self.main_layout)
self.setWindowTitle("HelpUs {}".format(__version__))
# Create Content Layouts
self.ConsoleLayout = QtWidgets.QVBoxLayout()
self.ButtonsLayout = QtWidgets.QVBoxLayout()
self.main_layout.addLayout(self.ButtonsLayout)
self.main_layout.addLayout(self.ConsoleLayout)
# Create OutputConsole
self.console = QtWidgets.QTextEdit(parent)
self.console.insertPlainText = self.__insert_plain_text
self.console.keyPressEvent = self.__key_press_event
self.ConsoleLayout.addWidget(self.console)
# Create buttons
for button_text in self.BUTTONS:
# Create Button Name
button_name = 'button_%s' % button_text.lower()
setattr(self, button_name, QtWidgets.QPushButton(button_text))
getattr(self, button_name).clicked.connect(self.__push_button)
# Add Button to Widget
self.ButtonsLayout.addWidget(getattr(self, button_name))
# Init Buffer
self.buffer = io.StringIO()
self.__set_enable_gui(False)
self.showNormal()
def __set_enable_gui(self, state=True):
"""
:param state:
:return:
"""
self.console.setEnabled(state)
for button_text in self.BUTTONS:
# Get Button Name
button_name = 'button_%s' % button_text.lower()
getattr(self, button_name).setEnabled(state)
if state:
self.console.setFocus()
def redirect_outerr_stream(self):
"""
:return:
"""
# Link Stream Output
XStream.stdout().messageWritten.connect(self.console.insertPlainText)
XStream.stderr().messageWritten.connect(self.console.insertPlainText)
def readline(self):
"""
:return:
"""
if not self.console.isEnabled():
self.__set_enable_gui(True)
# Reset Buffer
self.__reset_buffer()
# Check Position
while self.buffer.tell() == 0:
QtCore.QCoreApplication.processEvents()
value = self.buffer.getvalue()
return value
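    # Note: pdb talks to this dialog through the file-like protocol above --
    # once sys.stdin is replaced by a MyBreakPoint instance, pdb's readline()
    # call blocks on the Qt event loop until the console or one of the buttons
    # fills self.buffer.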
def __key_press_event(self, event):
"""
:param event:
:return:
"""
# Get Last Line
document = self.console.document()
line_index = document.lineCount()
raw_last_line = document.findBlockByLineNumber(line_index - 1).text()
text = ''
current_hook = ''
# Exclude first 6 chars: (Pdb)\s
if raw_last_line:
for hook in self.HOOKS:
if raw_last_line.startswith(hook):
current_hook = hook
text = raw_last_line[len(hook):]
break
else:
text = raw_last_line
# Get Cursor position
line_from_zero = line_index - 1
current_cursor_line = self.console.textCursor().blockNumber()
current_cursor_column = self.console.textCursor().columnNumber()
# If Enter was pressed -> Process Expression
if event.key() == QtCore.Qt.Key.Key_Return and text:
# Consider Custom Clear Screen Command
if text == 'cls':
self.__clear_screen(raw_last_line)
return
# Replace Line Break with Enter
if self.HOOK_LINE_BREAK == text:
text = '\r\n'
elif self.HOOK_LINE_BREAK in text:
# Replace Line Break with tab
text = text.replace(self.HOOK_LINE_BREAK, '\t')
current_hook = self.HOOK_LINE_BREAK
self.__reset_buffer()
self.buffer.write(text)
self.__set_enable_gui(False)
        # If the user wants to delete something and there is no value in the buffer -> reject
if event.key() == QtCore.Qt.Key.Key_Backspace or event.key() == QtCore.Qt.Key.Key_Delete:
if current_cursor_line != line_from_zero or current_cursor_column <= len(current_hook):
return
if event.key() == QtCore.Qt.Key.Key_Home and current_cursor_line == line_from_zero:
if text:
temp_cursor = self.console.textCursor()
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.StartOfLine,
QtGui.QTextCursor.MoveMode.MoveAnchor
)
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.Right,
QtGui.QTextCursor.MoveMode.MoveAnchor,
len(current_hook)
)
self.console.setTextCursor(temp_cursor)
return
# Set Console Text to Black
self.console.setTextColor(QtCore.Qt.GlobalColor.black)
# Execute default method
QtWidgets.QTextEdit.keyPressEvent(self.console, event)
def __push_button(self):
# Read text from Button and use it as pdb keyword
button_scope = self.sender().text().lower()
self.__reset_buffer()
self.buffer.write(button_scope)
self.__set_enable_gui(False)
def __reset_buffer(self):
if isinstance(self.buffer, io.StringIO):
# Clear Buffer
self.buffer.truncate(0)
self.buffer.seek(0)
else:
self.buffer = io.StringIO()
def __insert_plain_text(self, message):
# Do some stylistics
if message.startswith(self.HOOK_HEADER):
self.console.setTextColor(QtCore.Qt.GlobalColor.magenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
elif message.startswith(self.HOOK_INTERACT):
self.console.setTextColor(QtCore.Qt.GlobalColor.darkMagenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
if message.startswith('***'):
self.console.setTextColor(QtCore.Qt.GlobalColor.red)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
# AutoScroll
self.console.verticalScrollBar().setValue(self.console.verticalScrollBar().maximum())
def __clear_screen(self, text):
current_hook = text
for hook in self.HOOKS:
if hook in current_hook:
current_hook = hook
break
self.console.clear()
self.console.insertPlainText(current_hook)
def get_qtconsole_object():
if isinstance(sys.stdin, MyBreakPoint):
return sys.stdin.console
else:
return MyBreakPoint.console
def setup_breakpoint_hook(parent, method, redirect_streams=False):
def __method(*args, **kwargs):
breakpoint()
return method(*args, **kwargs)
if not isinstance(sys.stdin, MyBreakPoint):
sys.stdin = MyBreakPoint(parent)
else:
# Restore Streams
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
raise Exception(
"Multiple Instances are not allowed. Can be possible, but I'm to lazy to go deep with development."
)
if redirect_streams:
sys.stdin.redirect_outerr_stream()
return __method
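# Illustrative wiring sketch (log is a hypothetical logging.Logger, mirroring
# the __main__ block below):
#
#     log.warning = setup_breakpoint_hook(parent=None, method=log.warning)
#
# Every subsequent log.warning(...) call then drops into pdb inside the Qt
# console dialog before forwarding to the original method.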
if __name__ == '__main__':
p = QtWidgets.QApplication(sys.argv)
LOGGER.error('Ceva')
LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
# LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
x = 90
LOGGER.error('Altceva')
print(x)
| [((10, 9, 10, 36), 'logging.getLogger', 'logging.getLogger', ({(10, 27, 10, 35): '"""HelpUs"""'}, {}), "('HelpUs')", False, 'import logging\n'), ((17, 21, 17, 43), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', ({(17, 39, 17, 42): 'str'}, {}), '(str)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((49, 21, 49, 43), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', ({(49, 39, 49, 42): 'str'}, {}), '(str)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((307, 8, 307, 40), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', ({(307, 31, 307, 39): 'sys.argv'}, {}), '(sys.argv)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((94, 27, 94, 50), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ({}, {}), '()', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((99, 29, 99, 52), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ({}, {}), '()', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((100, 29, 100, 52), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ({}, {}), '()', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((105, 23, 105, 50), 'PyQt5.QtWidgets.QTextEdit', 'QtWidgets.QTextEdit', ({(105, 43, 105, 49): 'parent'}, {}), '(parent)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((121, 22, 121, 35), 'io.StringIO', 'io.StringIO', ({}, {}), '()', False, 'import io\n'), ((233, 8, 233, 62), 'PyQt5.QtWidgets.QTextEdit.keyPressEvent', 'QtWidgets.QTextEdit.keyPressEvent', ({(233, 42, 233, 54): 'self.console', (233, 56, 233, 61): 'event'}, {}), '(self.console, event)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((264, 8, 264, 66), 'PyQt5.QtWidgets.QTextEdit.insertPlainText', 'QtWidgets.QTextEdit.insertPlainText', ({(264, 44, 264, 56): 'self.console', (264, 58, 264, 65): 'message'}, {}), '(self.console, message)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((302, 8, 302, 42), 'sys.stdin.redirect_outerr_stream', 'sys.stdin.redirect_outerr_stream', ({}, {}), '()', False, 'import sys\n'), ((69, 32, 69, 55), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ({}, {}), '()', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((80, 30, 80, 60), 'os.path.exists', 'os.path.exists', ({(80, 45, 80, 59): 'icon_file_path'}, {}), '(icon_file_path)', False, 'import os\n'), ((159, 12, 159, 51), 'PyQt5.QtCore.QCoreApplication.processEvents', 'QtCore.QCoreApplication.processEvents', ({}, {}), '()', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((248, 26, 248, 39), 'io.StringIO', 'io.StringIO', ({}, {}), '()', False, 'import io\n'), ((254, 12, 254, 70), 'PyQt5.QtWidgets.QTextEdit.insertPlainText', 'QtWidgets.QTextEdit.insertPlainText', ({(254, 48, 254, 60): 'self.console', (254, 62, 254, 69): 'message'}, {}), '(self.console, message)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((81, 31, 81, 58), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', ({(81, 43, 81, 57): 'icon_file_path'}, {}), '(icon_file_path)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((114, 39, 114, 73), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ({(114, 61, 114, 72): 'button_text'}, {}), '(button_text)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n'), ((258, 12, 258, 70), 'PyQt5.QtWidgets.QTextEdit.insertPlainText', 'QtWidgets.QTextEdit.insertPlainText', ({(258, 48, 258, 60): 'self.console', (258, 62, 258, 69): 'message'}, {}), '(self.console, message)', False, 'from PyQt5 import QtGui, QtCore, QtWidgets\n')] |
newgene/biothings.api | biothings/hub/dataindex/indexer_schedule.py | e3278695ac15a55fe420aa49c464946f81ec019d | import math
class Schedule():
def __init__(self, total, batch_size):
self._batch_size = batch_size
self._state = ""
self.total = total
self.scheduled = 0
self.finished = 0
@property
def _batch(self):
return math.ceil(self.scheduled / self._batch_size)
@property
def _batches(self):
return math.ceil(self.total / self._batch_size)
@property
def _percentage(self):
_percentage = self.scheduled / self.total * 100
return "%.1f%%" % _percentage
def suffix(self, string):
return " ".join((
string,
"#%d/%d %s" %
(
self._batch,
self._batches,
self._percentage
)
))
def completed(self):
if self.finished != self.total:
raise ValueError(self.finished, self.total)
def __iter__(self):
return self
def __next__(self):
if self.scheduled >= self.total:
self._state = "pending, waiting for completion,"
raise StopIteration()
self.scheduled += self._batch_size
if self.scheduled > self.total:
self.scheduled = self.total
self._state = self.suffix("running, on batch") + ","
return self._batch
def __str__(self):
return " ".join(f"""
<Schedule {"done" if self.finished >= self.total else self._state}
total={self.total} scheduled={self.scheduled} finished={self.finished}>
""".split())
def test_01():
schedule = Schedule(100, 10)
for batch in schedule:
print(batch)
print(schedule)
def test_02():
schedule = Schedule(25, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_03():
schedule = Schedule(0, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_04():
schedule = Schedule(1, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
if __name__ == "__main__":
test_02()
| [((16, 15, 16, 59), 'math.ceil', 'math.ceil', ({(16, 25, 16, 58): '(self.scheduled / self._batch_size)'}, {}), '(self.scheduled / self._batch_size)', False, 'import math\n'), ((20, 15, 20, 55), 'math.ceil', 'math.ceil', ({(20, 25, 20, 54): '(self.total / self._batch_size)'}, {}), '(self.total / self._batch_size)', False, 'import math\n')] |
voBits/ccxt | examples/py/async-basic.py | edd2dd92053bd06232769a63465a43912b21eda0 | # -*- coding: utf-8 -*-
import asyncio
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt.async as ccxt # noqa: E402
async def test_gdax():
gdax = ccxt.gdax()
markets = await gdax.load_markets()
await gdax.close()
return markets
if __name__ == '__main__':
print(asyncio.get_event_loop().run_until_complete(test_gdax()))
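    # On Python 3.7+ this could simply be asyncio.run(test_gdax()); the
    # get_event_loop() form keeps the example compatible with older asyncio.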
| [] |
bennettdc/MCEdit-Unified | pymclevel/test/__init__.py | 90abfb170c65b877ac67193e717fa3a3ded635dd | __author__ = 'Rio'
| [] |
ethz-asl/modular_semantic_segmentation | xview/datasets/wrapper.py | 7c950f24df11540a7ddae4ff806d5b31934a3210 | from abc import ABCMeta, abstractmethod
class DataWrapper:
"""Interface for access to datasets."""
__metaclass__ = ABCMeta
@abstractmethod
def next(self):
"""Returns next minibatch for training."""
        raise NotImplementedError
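# Minimal subclass sketch (illustrative, not part of the package):
#
#     class ListWrapper(DataWrapper):
#         def __init__(self, batches):
#             self._batches = iter(batches)
#
#         def next(self):
#             return next(self._batches)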
| [] |
jrbourbeau/partd | partd/core.py | 74016a296a760de9c7a0e0d4b012a3478c9a0831 | from __future__ import absolute_import
import os
import shutil
import locket
import string
from toolz import memoize
from contextlib import contextmanager
from .utils import nested_get, flatten
# http://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename-in-python
valid_chars = "-_.() " + string.ascii_letters + string.digits + os.path.sep
def escape_filename(fn):
""" Escape text so that it is a valid filename
>>> escape_filename('Foo!bar?')
'Foobar'
"""
return ''.join(filter(valid_chars.__contains__, fn))
def filename(path, key):
return os.path.join(path, escape_filename(token(key)))
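# Example (illustrative): filename('/data', ('x', 1)) -> '/data/x/1' on POSIX,
# since tuple keys are joined with os.path.sep by token() and then escaped.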
def token(key):
"""
>>> token('hello')
'hello'
>>> token(('hello', 'world')) # doctest: +SKIP
'hello/world'
"""
if isinstance(key, str):
return key
elif isinstance(key, tuple):
return os.path.join(*map(token, key))
else:
return str(key)
class Interface(object):
def __init__(self):
self._iset_seen = set()
def __setstate__(self, state):
self.__dict__.update(state)
self._iset_seen = set()
def iset(self, key, value, **kwargs):
if key in self._iset_seen:
return
else:
self._iset(key, value, **kwargs)
self._iset_seen.add(key)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.drop()
def iget(self, key):
return self._get([key], lock=False)[0]
def get(self, keys, **kwargs):
if not isinstance(keys, list):
return self.get([keys], **kwargs)[0]
elif any(isinstance(key, list) for key in keys): # nested case
flatkeys = list(flatten(keys))
result = self.get(flatkeys, **kwargs)
return nested_get(keys, dict(zip(flatkeys, result)))
else:
return self._get(keys, **kwargs)
def delete(self, keys, **kwargs):
if not isinstance(keys, list):
return self._delete([keys], **kwargs)
else:
return self._delete(keys, **kwargs)
def pop(self, keys, **kwargs):
with self.partd.lock:
result = self.partd.get(keys, lock=False)
self.partd.delete(keys, lock=False)
return result
| [] |
VITA-Group/Adv-SS-Pretraining | pretraining/model_ensemble.py | 4ffbebea582f858ec6165f082f52ded1fc9b817d | '''
model ensemble for cifar10 // input size(32,32)
'''
import torch
import torchvision
import copy
import torch.nn as nn
from resnetv2 import ResNet50 as resnet50v2
def split_resnet50(model):
return nn.Sequential(
model.conv1,
model.layer1,
model.layer2,
model.layer3
)
class PretrainEnsembleModel(nn.Module):
def __init__(self):
super(PretrainEnsembleModel, self).__init__()
self.blocks = split_resnet50(resnet50v2())
self.layer4_rotation = resnet50v2().layer4
self.layer4_jigsaw = resnet50v2().layer4
self.fc_rotation = nn.Linear(2048, 4)
self.fc_jigsaw = nn.Linear(2048, 31)
self.avgpool1 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool3 = nn.AdaptiveAvgPool2d((1,1))
def _Normal(self,x):
mean=torch.Tensor([0.485, 0.456, 0.406])
mean=mean[None,:,None,None].cuda()
std = torch.Tensor([0.229, 0.224, 0.225])
std = std[None,:,None,None].cuda()
return x.sub(mean).div(std)
def forward(self, x):
feature_map = self.blocks(self._Normal(x))
return feature_map
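# Illustrative shape check (assumes a CUDA device, since _Normal moves the
# normalisation constants onto the GPU):
#
#     model = PretrainEnsembleModel().cuda()
#     x = torch.rand(2, 3, 32, 32).cuda()
#     features = model(x)          # shared trunk output used by both heads
#     print(features.shape)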
| [((15, 11, 20, 5), 'torch.nn.Sequential', 'nn.Sequential', ({(16, 8, 16, 19): 'model.conv1', (17, 8, 17, 20): 'model.layer1', (18, 8, 18, 20): 'model.layer2', (19, 8, 19, 20): 'model.layer3'}, {}), '(model.conv1, model.layer1, model.layer2, model.layer3)', True, 'import torch.nn as nn\n'), ((33, 27, 33, 45), 'torch.nn.Linear', 'nn.Linear', ({(33, 37, 33, 41): '2048', (33, 43, 33, 44): '4'}, {}), '(2048, 4)', True, 'import torch.nn as nn\n'), ((34, 25, 34, 44), 'torch.nn.Linear', 'nn.Linear', ({(34, 35, 34, 39): '2048', (34, 41, 34, 43): '31'}, {}), '(2048, 31)', True, 'import torch.nn as nn\n'), ((36, 24, 36, 51), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', ({(36, 45, 36, 50): '(1, 1)'}, {}), '((1, 1))', True, 'import torch.nn as nn\n'), ((37, 24, 37, 51), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', ({(37, 45, 37, 50): '(1, 1)'}, {}), '((1, 1))', True, 'import torch.nn as nn\n'), ((38, 24, 38, 51), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', ({(38, 45, 38, 50): '(1, 1)'}, {}), '((1, 1))', True, 'import torch.nn as nn\n'), ((42, 13, 42, 48), 'torch.Tensor', 'torch.Tensor', ({(42, 26, 42, 47): '[0.485, 0.456, 0.406]'}, {}), '([0.485, 0.456, 0.406])', False, 'import torch\n'), ((44, 14, 44, 49), 'torch.Tensor', 'torch.Tensor', ({(44, 27, 44, 48): '[0.229, 0.224, 0.225]'}, {}), '([0.229, 0.224, 0.225])', False, 'import torch\n'), ((29, 37, 29, 49), 'resnetv2.ResNet50', 'resnet50v2', ({}, {}), '()', True, 'from resnetv2 import ResNet50 as resnet50v2\n'), ((30, 31, 30, 43), 'resnetv2.ResNet50', 'resnet50v2', ({}, {}), '()', True, 'from resnetv2 import ResNet50 as resnet50v2\n'), ((31, 29, 31, 41), 'resnetv2.ResNet50', 'resnet50v2', ({}, {}), '()', True, 'from resnetv2 import ResNet50 as resnet50v2\n')] |
glciampaglia/HoaxyBots | scripts/ccdf.py | db8d2b7d9927d5d4d94ded125f9785590dace906 | # -*- coding: utf-8 -*-
""" Function that implement Complement the Complementary Cumulative
Distribution Function (CCDF).
"""
#
# written by Chengcheng Shao <[email protected]>
import numpy as np
import pandas as pd
def ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
a new series `s`, index of s will be X axis (number), value of s
will be Y axis (probability)
"""
s = s.copy()
s = s.sort_values(ascending=True, inplace=False)
s.reset_index(drop=True, inplace=True)
n = len(s)
s.drop_duplicates(keep='first', inplace=True)
X = s.values
Y = [n - i for i in s.index]
return pd.Series(data=Y, index=X) / n
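# Worked example (illustrative): for s = pd.Series([1, 1, 2, 3]) the result is
# indexed by the distinct values 1, 2, 3 with P(X >= x) = 1.0, 0.5 and 0.25.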
def sum_cdf(s):
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s0 = s.iloc[:i + 1]
cumulative.append(np.inner(s0.index, s0.values))
s = pd.Series(cumulative, index=s.index)
return s / s.max()
def sum_ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
        a new series `s`, index of s will be X axis (number), values
will be Y axis (sum(X>=x))
"""
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s1 = s.iloc[i:]
cumulative.append(np.inner(s1.index, s1.values))
return pd.Series(cumulative, index=s.index)
| [((39, 8, 39, 44), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((58, 11, 58, 47), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((28, 11, 28, 37), 'pandas.Series', 'pd.Series', (), '', True, 'import pandas as pd\n'), ((38, 26, 38, 55), 'numpy.inner', 'np.inner', ({(38, 35, 38, 43): 's0.index', (38, 45, 38, 54): 's0.values'}, {}), '(s0.index, s0.values)', True, 'import numpy as np\n'), ((57, 26, 57, 55), 'numpy.inner', 'np.inner', ({(57, 35, 57, 43): 's1.index', (57, 45, 57, 54): 's1.values'}, {}), '(s1.index, s1.values)', True, 'import numpy as np\n')] |
eliracho37/lifelines | lifelines/fitters/kaplan_meier_fitter.py | b1c6c2732d1ccfc2ae08f7178371d0f95ae3027b | # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
from lifelines.fitters import UnivariateFitter
from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf,\
median_survival_times
from lifelines.plotting import plot_loglogs
class KaplanMeierFitter(UnivariateFitter):
"""
Class for fitting the Kaplan-Meier estimate for the survival function.
KaplanMeierFitter( alpha=0.95)
alpha: The alpha value associated with the confidence intervals.
"""
def fit(self, durations, event_observed=None, timeline=None, entry=None, label='KM_estimate',
alpha=None, left_censorship=False, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length n -- duration subject was observed for
          timeline: return the best estimate at the values in timelines (positively increasing)
          event_observed: an array, or pd.Series, of length n -- True if the death was observed, False if the event
was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated (not left-censored) observations. If None, all members of the population
were born at time 0.
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
left_censorship: True if durations and event_observed refer to left censorship events. Default False
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns:
self, with new properties like 'survival_function_'.
"""
# if the user is interested in left-censorship, we return the cumulative_density_, no survival_function_,
estimate_name = 'survival_function_' if not left_censorship else 'cumulative_density_'
v = _preprocess_inputs(durations, event_observed, timeline, entry)
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
self._label = label
alpha = alpha if alpha else self.alpha
log_survival_function, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
self._additive_f, self._additive_var,
left_censorship)
if entry is not None:
# a serious problem with KM is that when the sample size is small and there are too few early
# truncation times, it may happen that is the number of patients at risk and the number of deaths is the same.
# we adjust for this using the Breslow-Fleming-Harrington estimator
n = self.event_table.shape[0]
net_population = (self.event_table['entrance'] - self.event_table['removed']).cumsum()
if net_population.iloc[:int(n / 2)].min() == 0:
ix = net_population.iloc[:int(n / 2)].argmin()
raise StatError("""There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.""" % ix)
# estimation
setattr(self, estimate_name, pd.DataFrame(np.exp(log_survival_function), columns=[self._label]))
self.__estimate = getattr(self, estimate_name)
self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)
self.median_ = median_survival_times(self.__estimate, left_censorship=left_censorship)
# estimation methods
self.predict = self._predict(estimate_name, label)
self.subtract = self._subtract(estimate_name)
self.divide = self._divide(estimate_name)
# plotting functions
self.plot = self._plot_estimate(estimate_name)
setattr(self, "plot_" + estimate_name, self.plot)
self.plot_loglogs = plot_loglogs(self)
return self
def _bounds(self, cumulative_sq_, alpha, ci_labels):
# See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf
alpha2 = inv_normal_cdf((1. + alpha) / 2.)
df = pd.DataFrame(index=self.timeline)
v = np.log(self.__estimate.values)
if ci_labels is None:
ci_labels = ["%s_upper_%.2f" % (self._label, alpha), "%s_lower_%.2f" % (self._label, alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + alpha2 * np.sqrt(cumulative_sq_) / v))
df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - alpha2 * np.sqrt(cumulative_sq_) / v))
return df
def _additive_f(self, population, deaths):
np.seterr(invalid='ignore', divide='ignore')
return (np.log(population - deaths) - np.log(population))
def _additive_var(self, population, deaths):
np.seterr(divide='ignore')
return (1. * deaths / (population * (population - deaths))).replace([np.inf], 0)
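# Illustrative usage sketch (toy data, not from the original module):
#
#     kmf = KaplanMeierFitter()
#     kmf.fit(durations=[5, 6, 6, 2, 4], event_observed=[1, 0, 0, 1, 1])
#     kmf.survival_function_   # DataFrame indexed by the event timeline
#     kmf.median_              # median survival time estimate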
| [((48, 12, 48, 74), 'lifelines.utils._preprocess_inputs', '_preprocess_inputs', ({(48, 31, 48, 40): 'durations', (48, 42, 48, 56): 'event_observed', (48, 58, 48, 66): 'timeline', (48, 68, 48, 73): 'entry'}, {}), '(durations, event_observed, timeline, entry)', False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((52, 48, 54, 83), 'lifelines.utils._additive_estimate', '_additive_estimate', ({(52, 67, 52, 83): 'self.event_table', (52, 85, 52, 98): 'self.timeline', (53, 67, 53, 83): 'self._additive_f', (53, 85, 53, 103): 'self._additive_var', (54, 67, 54, 82): 'left_censorship'}, {}), '(self.event_table, self.timeline, self._additive_f, self.\n _additive_var, left_censorship)', False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((70, 23, 70, 94), 'lifelines.utils.median_survival_times', 'median_survival_times', (), '', False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((80, 28, 80, 46), 'lifelines.plotting.plot_loglogs', 'plot_loglogs', ({(80, 41, 80, 45): 'self'}, {}), '(self)', False, 'from lifelines.plotting import plot_loglogs\n'), ((85, 17, 85, 50), 'lifelines.utils.inv_normal_cdf', 'inv_normal_cdf', ({(85, 32, 85, 49): '(1.0 + alpha) / 2.0'}, {}), '((1.0 + alpha) / 2.0)', False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((86, 13, 86, 46), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((87, 12, 87, 42), 'numpy.log', 'np.log', ({(87, 19, 87, 41): 'self.__estimate.values'}, {}), '(self.__estimate.values)', True, 'import numpy as np\n'), ((98, 8, 98, 52), 'numpy.seterr', 'np.seterr', (), '', True, 'import numpy as np\n'), ((102, 8, 102, 34), 'numpy.seterr', 'np.seterr', (), '', True, 'import numpy as np\n'), ((99, 16, 99, 43), 'numpy.log', 'np.log', ({(99, 23, 99, 42): '(population - deaths)'}, {}), '(population - deaths)', True, 'import numpy as np\n'), ((99, 46, 99, 64), 'numpy.log', 'np.log', ({(99, 53, 99, 63): 'population'}, {}), '(population)', True, 'import numpy as np\n'), ((64, 22, 64, 171), 'lifelines.utils.StatError', 'StatError', ({(64, 32, 64, 170): "('There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.'\n % ix)"}, {}), "(\n 'There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.'\n % ix)", False, 'from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf, median_survival_times\n'), ((67, 50, 67, 79), 'numpy.exp', 'np.exp', ({(67, 57, 67, 78): 'log_survival_function'}, {}), '(log_survival_function)', True, 'import numpy as np\n'), ((93, 42, 93, 52), 'numpy.log', 'np.log', ({(93, 49, 93, 51): '-v'}, {}), '(-v)', True, 'import numpy as np\n'), ((94, 42, 94, 52), 'numpy.log', 'np.log', ({(94, 49, 94, 51): '-v'}, {}), '(-v)', True, 'import numpy as np\n'), ((93, 64, 93, 87), 'numpy.sqrt', 'np.sqrt', ({(93, 72, 93, 86): 'cumulative_sq_'}, {}), '(cumulative_sq_)', True, 'import numpy as np\n'), ((94, 64, 94, 87), 'numpy.sqrt', 'np.sqrt', ({(94, 72, 94, 86): 'cumulative_sq_'}, {}), '(cumulative_sq_)', True, 'import numpy as np\n')] |
fishjojo/pydmfe | pydmfet/qcwrap/pyscf_rhf.py | 93cfc655314933d3531b5733521a1f95a044f6cb | import numpy as np
from pydmfet import tools
from .fermi import find_efermi, entropy_corr
from pyscf import ao2mo, gto, scf, dft, lib
from pydmfet.qcwrap import fermi
import time
from functools import reduce
def scf_oei( OEI, Norb, Nelec, smear_sigma = 0.0):
OEI = 0.5*(OEI.T + OEI)
eigenvals, eigenvecs = np.linalg.eigh( OEI )
idx = np.argmax(abs(eigenvecs), axis=0)
eigenvecs[:,eigenvecs[ idx, np.arange(len(eigenvals)) ]<0] *= -1
Nocc = Nelec//2 #closed shell
e_homo = eigenvals[Nocc-1]
e_lumo = eigenvals[Nocc]
print ('HOMO: ', e_homo, 'LUMO: ', e_lumo)
print ("mo_energy:")
print (eigenvals[:Nocc+5])
e_fermi = e_homo
mo_occ = np.zeros((Norb))
if(smear_sigma < 1e-8): #T=0
mo_occ[:Nocc] = 1.0
else: #finite T
e_fermi, mo_occ = find_efermi(eigenvals, smear_sigma, Nocc, Norb)
mo_occ*=2.0 #closed shell
Ne_error = np.sum(mo_occ) - Nelec
if(Ne_error > 1e-8):
print ('Ne error = ', Ne_error)
print ("fermi energy: ", e_fermi)
np.set_printoptions(precision=4)
flag = mo_occ > 1e-4
print (mo_occ[flag])
np.set_printoptions()
RDM1 = reduce(np.dot, (eigenvecs, np.diag(mo_occ), eigenvecs.T))
RDM1 = (RDM1.T + RDM1)/2.0
energy = np.trace(np.dot(RDM1,OEI))
es = entropy_corr(mo_occ, smear_sigma)
print ('entropy correction: ', es)
energy += es
print ('e_tot = ', energy)
return ( energy, RDM1, eigenvecs, eigenvals, mo_occ )
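# --- illustrative usage sketch (not part of the original module) ---
# A minimal example of how scf_oei() above might be called; the 4x4 one-electron
# matrix is made up purely for illustration, and running it assumes pydmfet's
# fermi helpers are importable. The function is defined but never called here.
def _scf_oei_example():
    norb, nelec = 4, 2
    rng = np.random.RandomState(0)
    h = rng.rand(norb, norb)
    oei = 0.5 * (h + h.T)  # symmetric one-electron matrix (scf_oei symmetrizes again)
    energy, rdm1, mo_coeff, mo_energy, mo_occ = scf_oei(oei, norb, nelec, smear_sigma=0.0)
    return energy, rdm1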
# The following is deprecated!
class scf_pyscf():
'''
subspace scf
wrapper for scf module of pyscf
'''
def __init__(self, Ne, Norb, mol=None, oei=None, tei=None, ovlp=1, dm0=None, coredm=0, ao2sub=None, mf_method='HF'):
self.mol = mol
self.Ne = Ne
self.Norb = Norb
self.method = mf_method
self.oei = oei
self.tei = tei
self.ovlp = ovlp
self.dm0 = dm0
self.coredm = coredm
self.ao2sub = ao2sub
self.method = mf_method.lower()
self.mf = None
if(self.mol is None):
            # the specific molecule does not matter here
self.mol = gto.Mole()
self.mol.build( verbose=0 )
self.mol.atom.append(('C', (0, 0, 0)))
#adjust number of electrons
self.mol.nelectron = Ne
if(self.tei is not None):
self.mol.incore_anyway = True
if(self.method == 'hf'):
self.mf = scf.RHF(self.mol)
self.prep_rhf()
else:
self.mf = scf.RKS(self.mol)
self.mf.xc = self.method
self.prep_rhf()
self.prep_rks()
self.elec_energy = 0.0
self.rdm1 = None
self.mo_coeff = None
self.mo_energy = None
self.mo_occ = None
def prep_rhf(self):
if(self.ovlp == 1):
self.mf.get_ovlp = lambda *args: np.eye( self.Norb )
if(self.oei is not None):
self.mf.get_hcore = lambda *args: self.oei
if(self.tei is not None):
self.mf._eri = ao2mo.restore(8, self.tei, self.Norb)
def prep_rks(self):
if(self.ao2sub is None):
return
#overload dft.rks.get_veff if necessary
self.mf.get_veff = get_veff_rks_decorator(self.ao2sub, self.coredm)
def kernel(self):
self.mf.kernel(self.dm0)
if ( self.mf.converged == False ):
raise Exception("scf not converged!")
rdm1 = self.mf.make_rdm1()
self.rdm1 = 0.5*(rdm1.T + rdm1)
self.elec_energy = self.mf.energy_elec(self.rdm1)[0]
self.mo_coeff = self.mf.mo_coeff
self.mo_energy = self.mf.mo_energy
self.mo_occ = self.mf.mo_occ
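# Illustrative sketch only (not part of the original module): one way the deprecated
# scf_pyscf wrapper above could be driven. 'oei' and 'tei' are placeholders for
# subspace one- and two-electron integrals prepared elsewhere; nothing here is called.
def _scf_pyscf_example(oei, tei, norb, nelec):
    wrapper = scf_pyscf(nelec, norb, oei=oei, tei=tei, mf_method='hf')
    wrapper.kernel()
    return wrapper.elec_energy, wrapper.rdm1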
def get_veff_rks_decorator(ao2sub, coredm):
def get_veff(ks, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
if mol is None: mol = ks.mol
if dm is None: dm = ks.make_rdm1()
dm_sub = np.asarray(dm) + coredm
dm_ao = tools.dm_sub2ao(dm_sub, ao2sub)
if hasattr(dm, 'mo_coeff'):
mo_coeff_sub = dm.mo_coeff
mo_occ_sub = dm.mo_occ
mo_coeff_ao = tools.mo_sub2ao(mo_coeff_sub, ao2sub)
mo_occ_ao = mo_occ_sub
dm_ao = lib.tag_array(dm_ao, mo_coeff=mo_coeff_ao, mo_occ=mo_occ_ao)
n, exc, vxc_ao, hyb = get_vxc(ks, mol, dm_ao)
vxc = tools.op_ao2sub(vxc_ao, ao2sub)
vj = None
vk = None
if abs(hyb) < 1e-10:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vj', None) is not None):
                ddm = np.asarray(dm) - np.asarray(dm_last)
vj = ks.get_jk(mol, ddm, hermi)[0]
vj += vhf_last.vj
else:
vj = ks.get_jk(mol, dm, hermi)[0]
vxc += vj
else:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vk', None) is not None):
                ddm = np.asarray(dm) - np.asarray(dm_last)
vj, vk = ks.get_jk(mol, ddm, hermi)
vj += vhf_last.vj
vk += vhf_last.vk
else:
vj, vk = ks.get_jk(mol, dm, hermi)
vxc += vj - vk * (hyb * .5)
exc -= np.einsum('ij,ji', dm, vk) * .5 * hyb*.5
ecoul = np.einsum('ij,ji', dm, vj) * .5
vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)
return vxc
return get_veff
def get_vxc(ks, mol, dm, hermi=1):
    ground_state = (isinstance(dm, np.ndarray) and dm.ndim == 2)
if(not ground_state):
raise Exception("fatal error")
if ks.grids.coords is None:
ks.grids.build(with_non0tab=True)
if ks.small_rho_cutoff > 1e-20 and ground_state:
# Filter grids the first time setup grids
t0 = (time.clock(), time.time())
ks.grids = dft.rks.prune_small_rho_grids_(ks, mol, dm, ks.grids)
t1 = tools.timer("prune grid",t0)
if hermi == 2: # because rho = 0
n, exc, vxc = 0, 0, 0
else:
n, exc, vxc = ks._numint.nr_rks(mol, ks.grids, ks.xc, dm)
hyb = ks._numint.hybrid_coeff(ks.xc, spin=mol.spin)
return n, exc, vxc, hyb
'''
def rhf(mol, OEI, TEI, Norb, Nelec, OneDM0=None ):
# Get the RHF solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.max_memory = 8000
#mol.build( verbose=0 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
mol.incore_anyway = True
mf = pyscf_scf.RHF( mol )
mf.get_hcore = lambda *args: OEI
mf.get_ovlp = lambda *args: np.eye( Norb )
mf._eri = ao2mo.restore(8, TEI, Norb)
mf.max_cycle = 100
#mf.conv_tol = 1e-8
#adiis = pyscf_scf.diis.ADIIS()
#mf.diis = adiis
#mf.verbose = 5
mf.kernel(OneDM0)
if ( mf.converged == False ):
#RDM1 = mf.make_rdm1()
#cdiis = pyscf_scf.diis.SCF_DIIS()
#mf.diis = cdiis
#mf.max_cycle = 200
#mf.kernel(RDM1)
if ( mf.converged == False ):
raise Exception(" rhf not converged!")
return mf
def rks(mol, OEI, TEI, Norb, Nelec, xcfunc, OneDM0=None ):
# Get the RKS solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.build( verbose=5 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
# mol.incore_anyway = True
mf = pyscf_scf.RKS( mol )
mf.xc = xcfunc.lower()
# mf.get_hcore = lambda *args: OEI
# mf.get_ovlp = lambda *args: np.eye( Norb )
# mf._eri = ao2mo.restore(8, TEI, Norb)
OneDM0 = None
mf.kernel( OneDM0 )
if ( mf.converged == False ):
raise Exception(" rks not converged!")
return mf
def scf(mol, OEI, TEI, Norb, Nelec, OneDM0=None, mf_method = 'HF' ):
# Get the mean-field solution
if(mf_method.lower() == 'hf'):
mf = rhf(mol, OEI, TEI, Norb, Nelec, OneDM0 )
else:
mf = rks(mol, OEI, TEI, Norb, Nelec, mf_method ,OneDM0 )
RDM1 = mf.make_rdm1()
RDM1 = 0.5*(RDM1.T + RDM1)
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
energy = mf.energy_elec(RDM1)[0]
mo = np.zeros([Norb,Norb+1],dtype=float)
mo[:,:-1] = mo_coeff
mo[:,-1] = mo_energy
#print "mo energy"
#print mf.mo_energy
#tools.MatPrint(mf.get_fock(),"fock")
#JK = mf.get_veff(None, dm=RDM1)
#tools.MatPrint(JK,"JK")
#tools.MatPrint(np.dot(mf.get_fock(), mf.mo_coeff),"test")
#tools.MatPrint(mf.mo_coeff,"mo_coeff")
return (energy, RDM1, mo)
'''
| [((12, 27, 12, 48), 'numpy.linalg.eigh', 'np.linalg.eigh', ({(12, 43, 12, 46): 'OEI'}, {}), '(OEI)', True, 'import numpy as np\n'), ((25, 13, 25, 29), 'numpy.zeros', 'np.zeros', ({(25, 23, 25, 27): 'Norb'}, {}), '(Norb)', True, 'import numpy as np\n'), ((38, 4, 38, 36), 'numpy.set_printoptions', 'np.set_printoptions', (), '', True, 'import numpy as np\n'), ((41, 4, 41, 25), 'numpy.set_printoptions', 'np.set_printoptions', ({}, {}), '()', True, 'import numpy as np\n'), ((34, 15, 34, 29), 'numpy.sum', 'np.sum', ({(34, 22, 34, 28): 'mo_occ'}, {}), '(mo_occ)', True, 'import numpy as np\n'), ((47, 22, 47, 38), 'numpy.dot', 'np.dot', ({(47, 29, 47, 33): 'RDM1', (47, 34, 47, 37): 'OEI'}, {}), '(RDM1, OEI)', True, 'import numpy as np\n'), ((155, 16, 155, 47), 'pydmfet.tools.dm_sub2ao', 'tools.dm_sub2ao', ({(155, 32, 155, 38): 'dm_sub', (155, 40, 155, 46): 'ao2sub'}, {}), '(dm_sub, ao2sub)', False, 'from pydmfet import tools\n'), ((166, 14, 166, 45), 'pydmfet.tools.op_ao2sub', 'tools.op_ao2sub', ({(166, 30, 166, 36): 'vxc_ao', (166, 38, 166, 44): 'ao2sub'}, {}), '(vxc_ao, ao2sub)', False, 'from pydmfet import tools\n'), ((193, 14, 193, 68), 'pyscf.lib.tag_array', 'lib.tag_array', (), '', False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((44, 38, 44, 53), 'numpy.diag', 'np.diag', ({(44, 46, 44, 52): 'mo_occ'}, {}), '(mo_occ)', True, 'import numpy as np\n'), ((85, 23, 85, 33), 'pyscf.gto.Mole', 'gto.Mole', ({}, {}), '()', False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((96, 22, 96, 39), 'pyscf.scf.RHF', 'scf.RHF', ({(96, 30, 96, 38): 'self.mol'}, {}), '(self.mol)', False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((99, 22, 99, 39), 'pyscf.scf.RKS', 'scf.RKS', ({(99, 30, 99, 38): 'self.mol'}, {}), '(self.mol)', False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((117, 27, 117, 64), 'pyscf.ao2mo.restore', 'ao2mo.restore', ({(117, 41, 117, 42): '8', (117, 44, 117, 52): 'self.tei', (117, 54, 117, 63): 'self.Norb'}, {}), '(8, self.tei, self.Norb)', False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((154, 17, 154, 31), 'numpy.asarray', 'np.asarray', ({(154, 28, 154, 30): 'dm'}, {}), '(dm)', True, 'import numpy as np\n'), ((161, 26, 161, 63), 'pydmfet.tools.mo_sub2ao', 'tools.mo_sub2ao', ({(161, 42, 161, 54): 'mo_coeff_sub', (161, 56, 161, 62): 'ao2sub'}, {}), '(mo_coeff_sub, ao2sub)', False, 'from pydmfet import tools\n'), ((163, 20, 163, 80), 'pyscf.lib.tag_array', 'lib.tag_array', (), '', False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((191, 16, 191, 42), 'numpy.einsum', 'np.einsum', ({(191, 26, 191, 33): '"""ij,ji"""', (191, 35, 191, 37): 'dm', (191, 39, 191, 41): 'vj'}, {}), "('ij,ji', dm, vj)", True, 'import numpy as np\n'), ((211, 23, 211, 76), 'pyscf.dft.rks.prune_small_rho_grids_', 'dft.rks.prune_small_rho_grids_', ({(211, 54, 211, 56): 'ks', (211, 58, 211, 61): 'mol', (211, 63, 211, 65): 'dm', (211, 67, 211, 75): 'ks.grids'}, {}), '(ks, mol, dm, ks.grids)', False, 'from pyscf import ao2mo, gto, scf, dft, lib\n'), ((212, 17, 212, 45), 'pydmfet.tools.timer', 'tools.timer', ({(212, 29, 212, 41): '"""prune grid"""', (212, 42, 212, 44): 't0'}, {}), "('prune grid', t0)", False, 'from pydmfet import tools\n'), ((113, 45, 113, 64), 'numpy.eye', 'np.eye', ({(113, 53, 113, 62): 'self.Norb'}, {}), '(self.Norb)', True, 'import numpy as np\n'), ((210, 18, 210, 30), 'time.clock', 'time.clock', ({}, {}), '()', False, 'import time\n'), ((210, 32, 210, 43), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((189, 19, 189, 45), 'numpy.einsum', 
'np.einsum', ({(189, 29, 189, 36): '"""ij,ji"""', (189, 38, 189, 40): 'dm', (189, 42, 189, 44): 'vk'}, {}), "('ij,ji', dm, vk)", True, 'import numpy as np\n')] |
tautschnig/one-line-scan | backends/fortify/summarize-fortify.py | 24e1deedd595e3406eb8d5c69ff9629c5a87d0aa | #!/usr/bin/env python
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Parse the report.html of Fortify and create an ASCII summary
import os
import sys
from subprocess import call
from xml.etree import ElementTree
# print usage
if len(sys.argv) != 2:
    print "usage: summarize-fortify.py LOGDIR"
sys.exit(1)
# get directory where the logs are placed
logdir=sys.argv[1]
# strip this part of the directory information of
workdirectory = os.getcwd() + '/'
# get the fortify report; first make it valid XML
filename=logdir+'/log/report.html'
call(['perl', '-p', '-i', '-e', 's#<((img|meta) [^>]+)>#<$1/>#', filename])
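# e.g. a void HTML tag such as <img src="logo.png"> becomes <img src="logo.png"/>,
# so ElementTree can later parse the report as well-formed XML (illustrative example)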
# make sure we can run this script multiple times on the same html file
call(['perl', '-p', '-i', '-e', 's#//>#/>#', filename])
# parse the html file and jump to the last table
data=ElementTree.parse(filename).getroot()
table=data.find('.//table')[-1]
# iterate over all rows and print their content in a more useable format
for data in table.iter('tr'):
# handle only the rows that contain results
if len(data) != 4:
continue
# extract file information, convert absolute path into relative one
location=data[2].find('a')
# header does not have <a ...>
if location is None:
continue
filename=location.get('href')
filename=filename.replace('file://','')
filename=filename.replace(workdirectory,'')
severity=data[3].text
if severity is None:
severity=data[3].find('span').text
# strip newline and space sequences
problem=data[0].text.replace('\n','').replace('\r','')
    short=problem.replace('  ',' ')
while len(short) < len(problem):
problem=short
        short=problem.replace('  ',' ')
column=ElementTree.tostring(data[2].findall("*")[0]).split(':')[2]
printstring = filename + ':' + column.strip() + ', ' + \
severity.strip() + ', ' + \
problem
if data[1].text is not None:
printstring = printstring + ', ' + data[1].text
print printstring
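# For reference, each printed line has the shape (values below are made up):
#   source/foo.c:42, High, Buffer Overflow, optional detail from the report's second column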
| [] |
r4b3rt/angr | angr/procedures/definitions/win32_wsmsvc.py | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("wsmsvc.dll")
prototypes = \
{
#
'WSManInitialize': SimTypeFunction([SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["flags", "apiHandle"]),
#
'WSManDeinitialize': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["apiHandle", "flags"]),
#
'WSManGetErrorMessage': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["apiHandle", "flags", "languageCode", "errorCode", "messageLength", "message", "messageLengthUsed"]),
#
'WSManCreateSession': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_API", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"authenticationMechanism": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"userAccount": SimStruct({"username": SimTypePointer(SimTypeChar(label="Char"), offset=0), "password": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_USERNAME_PASSWORD_CREDS", pack=False, align=None), "certificateThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="<anon>", label="None")}, name="WSMAN_AUTHENTICATION_CREDENTIALS", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"accessType": SimTypeInt(signed=False, label="UInt32"), "authenticationCredentials": SimStruct({"authenticationMechanism": SimTypeInt(signed=False, label="UInt32"), "Anonymous": SimUnion({"userAccount": SimStruct({"username": SimTypePointer(SimTypeChar(label="Char"), offset=0), "password": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_USERNAME_PASSWORD_CREDS", pack=False, align=None), "certificateThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="<anon>", label="None")}, name="WSMAN_AUTHENTICATION_CREDENTIALS", pack=False, align=None)}, name="WSMAN_PROXY_INFO", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["apiHandle", "connection", "flags", "serverAuthenticationCredentials", "proxyInfo", "session"]),
#
'WSManCloseSession': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "flags"]),
#
'WSManSetSessionOption': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="WSManSessionOption"), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "option", "data"]),
#
'WSManGetSessionOptionAsDword': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="WSManSessionOption"), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "option", "value"]),
#
'WSManGetSessionOptionAsString': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="WSManSessionOption"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), label="LPArray", offset=0), SimTypePointer(SimTypeInt(signed=False, label="UInt32"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["session", "option", "stringLength", "string", "stringLengthUsed"]),
#
'WSManCloseOperation': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["operationHandle", "flags"]),
#
'WSManCreateShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"__AnonymousBase_wsman_L665_C48": SimStruct({"inputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "outputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "idleTimeoutMs": SimTypeInt(signed=False, label="UInt32"), "workingDirectory": SimTypePointer(SimTypeChar(label="Char"), offset=0), "variableSet": SimTypePointer(SimStruct({"varsCount": SimTypeInt(signed=False, label="UInt32"), "vars": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE", pack=False, align=None), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE_SET", pack=False, align=None), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V10", pack=False, align=None), "name": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V11", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, 
align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["session", "flags", "resourceUri", "startupInfo", "options", "createXml", "async", "shell"]),
#
'WSManRunShellCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"argsCount": SimTypeInt(signed=False, label="UInt32"), "args": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_COMMAND_ARG_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, 
label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "commandLine", "args", "options", "async", "command"]),
#
'WSManSignalShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), 
offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "command", "flags", "code", "async", "signalOperation"]),
#
'WSManReceiveShellOutput': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": 
SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "command", "flags", "desiredStreamSet", "async", "receiveOperation"]),
#
'WSManSendShellInput': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypeInt(signed=True, label="Int32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": 
SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "command", "flags", "streamId", "streamData", "endOfStream", "async", "sendOperation"]),
#
'WSManCloseCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), 
offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["commandHandle", "flags", "async"]),
#
'WSManCloseShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), 
offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["shellHandle", "flags", "async"]),
#
'WSManCreateShellEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"__AnonymousBase_wsman_L665_C48": SimStruct({"inputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "outputStreamSet": SimTypePointer(SimStruct({"streamIDsCount": SimTypeInt(signed=False, label="UInt32"), "streamIDs": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_STREAM_ID_SET", pack=False, align=None), offset=0), "idleTimeoutMs": SimTypeInt(signed=False, label="UInt32"), "workingDirectory": SimTypePointer(SimTypeChar(label="Char"), offset=0), "variableSet": SimTypePointer(SimStruct({"varsCount": SimTypeInt(signed=False, label="UInt32"), "vars": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE", pack=False, align=None), offset=0)}, name="WSMAN_ENVIRONMENT_VARIABLE_SET", pack=False, align=None), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V10", pack=False, align=None), "name": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SHELL_STARTUP_INFO_V11", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), 
SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["session", "flags", "resourceUri", "shellId", "startupInfo", "options", "createXml", "async", "shell"]),
#
'WSManRunShellCommandEx': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"argsCount": SimTypeInt(signed=False, label="UInt32"), "args": SimTypePointer(SimTypePointer(SimTypeChar(label="Char"), offset=0), offset=0)}, name="WSMAN_COMMAND_ARG_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": 
SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "commandId", "commandLine", "args", "options", "async", "command"]),
#
'WSManDisconnectShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"idleTimeoutMs": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_SHELL_DISCONNECT_INFO", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, 
name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "disconnectInfo", "async"]),
#
'WSManReconnectShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), 
offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "async"]),
#
'WSManReconnectShellCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", 
"data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0)], SimTypeBottom(label="Void"), arg_names=["commandHandle", "flags", "async"]),
#
'WSManConnectShell': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SESSION", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": 
SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["session", "flags", "resourceUri", "shellID", "options", "connectXml", "async", "shell"]),
#
'WSManConnectShellCommand': SimTypeFunction([SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimStruct({"operationContext": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "completionFunction": SimTypePointer(SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"code": SimTypeInt(signed=False, label="UInt32"), "errorDetail": SimTypePointer(SimTypeChar(label="Char"), offset=0), "language": SimTypePointer(SimTypeChar(label="Char"), offset=0), "machineName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "pluginName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_ERROR", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_SHELL", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), SimTypePointer(SimStruct({}, name="WSMAN_OPERATION", pack=False, align=None), offset=0), SimTypePointer(SimUnion({"receiveData": SimStruct({"streamId": SimTypePointer(SimTypeChar(label="Char"), offset=0), "streamData": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), "commandState": SimTypePointer(SimTypeChar(label="Char"), offset=0), "exitCode": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_RECEIVE_DATA_RESULT", pack=False, align=None), "connectData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, 
label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CONNECT_DATA", pack=False, align=None), "createData": SimStruct({"data": SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None)}, name="WSMAN_CREATE_SHELL_DATA", pack=False, align=None)}, name="<anon>", label="None"), offset=0)], SimTypeBottom(label="Void"), arg_names=["operationContext", "flags", "error", "shell", "command", "operationHandle", "data"]), offset=0)}, name="WSMAN_SHELL_ASYNC", pack=False, align=None), offset=0), SimTypePointer(SimTypePointer(SimStruct({}, name="WSMAN_COMMAND", pack=False, align=None), offset=0), offset=0)], SimTypeBottom(label="Void"), arg_names=["shell", "flags", "commandID", "options", "connectXml", "async", "command"]),
#
'WSManPluginReportContext': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "context"]),
#
'WSManPluginReceiveResult': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0), SimTypePointer(SimTypeChar(label="Char"), offset=0), SimTypeInt(signed=False, 
label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "stream", "streamResult", "commandState", "exitCode"]),
#
'WSManPluginOperationComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "errorCode", "extendedInformation"]),
#
'WSManPluginGetOperationParameters': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails", "flags", "data"]),
#
'WSManPluginGetConfiguration': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"type": SimTypeInt(signed=False, label="WSManDataType"), "Anonymous": SimUnion({"text": SimStruct({"bufferLength": SimTypeInt(signed=False, label="UInt32"), "buffer": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_DATA_TEXT", pack=False, align=None), "binaryData": SimStruct({"dataLength": SimTypeInt(signed=False, label="UInt32"), "data": SimTypePointer(SimTypeChar(label="Byte"), offset=0)}, name="WSMAN_DATA_BINARY", pack=False, align=None), "number": SimTypeInt(signed=False, label="UInt32")}, name="<anon>", label="None")}, name="WSMAN_DATA", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pluginContext", "flags", "data"]),
#
'WSManPluginReportCompletion': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32")], SimTypeInt(signed=False, label="UInt32"), arg_names=["pluginContext", "flags"]),
#
'WSManPluginFreeRequestDetails': SimTypeFunction([SimTypePointer(SimStruct({"senderDetails": SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), "locale": SimTypePointer(SimTypeChar(label="Char"), offset=0), "resourceUri": SimTypePointer(SimTypeChar(label="Char"), offset=0), "operationInfo": SimTypePointer(SimStruct({"fragment": SimStruct({"path": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FRAGMENT", pack=False, align=None), "filter": SimStruct({"filter": SimTypePointer(SimTypeChar(label="Char"), offset=0), "dialect": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_FILTER", pack=False, align=None), "selectorSet": SimStruct({"numberKeys": SimTypeInt(signed=False, label="UInt32"), "keys": SimTypePointer(SimStruct({"key": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_KEY", pack=False, align=None), offset=0)}, name="WSMAN_SELECTOR_SET", pack=False, align=None), "optionSet": SimStruct({"optionsCount": SimTypeInt(signed=False, label="UInt32"), "options": SimTypePointer(SimStruct({"name": SimTypePointer(SimTypeChar(label="Char"), offset=0), "value": SimTypePointer(SimTypeChar(label="Char"), offset=0), "mustComply": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION", pack=False, align=None), offset=0), "optionsMustUnderstand": SimTypeInt(signed=True, label="Int32")}, name="WSMAN_OPTION_SET", pack=False, align=None), "reserved": SimTypePointer(SimTypeBottom(label="Void"), offset=0), "version": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_OPERATION_INFO", pack=False, align=None), offset=0), "shutdownNotification": SimTypeInt(signed=True, label="Int32"), "shutdownNotificationHandle": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "dataLocale": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_PLUGIN_REQUEST", pack=False, align=None), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["requestDetails"]),
#
'WSManPluginAuthzUserComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeInt(signed=True, label="Int32"), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["senderDetails", "flags", "userAuthorizationContext", "impersonationToken", "userIsAdministrator", "errorCode", "extendedErrorInformation"]),
#
'WSManPluginAuthzOperationComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeBottom(label="Void"), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["senderDetails", "flags", "userAuthorizationContext", "errorCode", "extendedErrorInformation"]),
#
'WSManPluginAuthzQueryQuotaComplete': SimTypeFunction([SimTypePointer(SimStruct({"senderName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "authenticationMechanism": SimTypePointer(SimTypeChar(label="Char"), offset=0), "certificateDetails": SimTypePointer(SimStruct({"subject": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerName": SimTypePointer(SimTypeChar(label="Char"), offset=0), "issuerThumbprint": SimTypePointer(SimTypeChar(label="Char"), offset=0), "subjectName": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_CERTIFICATE_DETAILS", pack=False, align=None), offset=0), "clientToken": SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), "httpURL": SimTypePointer(SimTypeChar(label="Char"), offset=0)}, name="WSMAN_SENDER_DETAILS", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimStruct({"maxAllowedConcurrentShells": SimTypeInt(signed=False, label="UInt32"), "maxAllowedConcurrentOperations": SimTypeInt(signed=False, label="UInt32"), "timeslotSize": SimTypeInt(signed=False, label="UInt32"), "maxAllowedOperationsPerTimeslot": SimTypeInt(signed=False, label="UInt32")}, name="WSMAN_AUTHZ_QUOTA", pack=False, align=None), offset=0), SimTypeInt(signed=False, label="UInt32"), SimTypePointer(SimTypeChar(label="Char"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["senderDetails", "flags", "quota", "errorCode", "extendedErrorInformation"]),
}
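# Register the WSMan prototype map with the library object ("lib") defined
# earlier in this module (definition not shown here).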
lib.set_prototypes(prototypes)
| [((10, 5, 10, 37), 'logging.getLogger', 'logging.getLogger', (), '', False, 'import logging\n')] |
bachya/aiolookin | aiolookin/__init__.py | 553731047b6910b1cb74667fbb343faf9b8656ac | """Define the aiolookin package."""
from .device import async_get_device # noqa
| [] |
yuemingl/ode-python-1 | odepy/collision_space.py | a9a12d9d3b7e611874a8d30f6a5c0b83b6087f86 | # -*- coding: utf-8 -*-
from .common import loadOde
from .common import dGeomID
from .common import dSpaceID
from .common import dVector3
from ctypes import POINTER
from ctypes import CFUNCTYPE
from ctypes import c_void_p
from ctypes import c_int32
dNearCallback = CFUNCTYPE(None, c_void_p, dGeomID, dGeomID)
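# dNearCallback mirrors ODE's near-callback signature: a callable taking
# (void *data, dGeomID o1, dGeomID o2) and returning nothing. Python functions
# must be wrapped in this CFUNCTYPE before being handed to the collision code.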
def dSimpleSpaceCreate(space):
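    # A plain integer (typically 0, meaning "no parent space") is passed as
    # c_int32; anything else is assumed to be an existing dSpaceID parent.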
if isinstance(space, int):
return loadOde('dSimpleSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dSimpleSpaceCreate', dSpaceID, dSpaceID)(space)
def dHashSpaceCreate(space):
if isinstance(space, int):
return loadOde('dHashSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dHashSpaceCreate', dSpaceID, dSpaceID)(space)
dQuadTreeSpaceCreate = loadOde('dQuadTreeSpaceCreate', dSpaceID, dSpaceID, dVector3, dVector3, c_int32)
dSweepAndPruneSpaceCreate = loadOde('dSweepAndPruneSpaceCreate', dSpaceID, dSpaceID, c_int32)
dSpaceDestroy = loadOde('dSpaceDestroy', None, dSpaceID)
dHashSpaceSetLevels = loadOde('dHashSpaceSetLevels', None, dSpaceID, c_int32, c_int32)
dHashSpaceGetLevels = loadOde('dHashSpaceGetLevels', None, dSpaceID, POINTER(c_int32), POINTER(c_int32))
dSpaceSetCleanup = loadOde('dSpaceSetCleanup', None, dSpaceID, c_int32)
dSpaceGetCleanup = loadOde('dSpaceGetCleanup', c_int32, dSpaceID)
dSpaceSetSublevel = loadOde('dSpaceSetSublevel', None, dSpaceID, c_int32)
dSpaceGetSublevel = loadOde('dSpaceGetSublevel', c_int32, dSpaceID)
dSpaceSetManualCleanup = loadOde('dSpaceSetManualCleanup', None, dSpaceID, c_int32)
dSpaceGetManualCleanup = loadOde('dSpaceGetManualCleanup', c_int32, dSpaceID)
dSpaceAdd = loadOde('dSpaceAdd', None, dSpaceID, dGeomID)
dSpaceRemove = loadOde('dSpaceRemove', None, dSpaceID, dGeomID)
dSpaceQuery = loadOde('dSpaceQuery', c_int32, dSpaceID, dGeomID)
dSpaceClean = loadOde('dSpaceClean', None, dSpaceID)
dSpaceGetNumGeoms = loadOde('dSpaceGetNumGeoms', c_int32, dSpaceID)
dSpaceGetGeom = loadOde('dSpaceGetGeom', dGeomID, dSpaceID, c_int32)
dSpaceGetClass = loadOde('dSpaceGetClass', c_int32, dSpaceID)
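# Illustrative usage sketch (comments only, assuming the ODE shared library was
# located by loadOde); "some_geom" below is a hypothetical dGeomID handle:
#
#   space = dHashSpaceCreate(0)          # top-level hash space, no parent
#   dSpaceAdd(space, some_geom)          # place an existing geom in the space
#   count = dSpaceGetNumGeoms(space)
#   dSpaceDestroy(space)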
| [((13, 16, 13, 59), 'ctypes.CFUNCTYPE', 'CFUNCTYPE', ({(13, 26, 13, 30): 'None', (13, 32, 13, 40): 'c_void_p', (13, 42, 13, 49): 'dGeomID', (13, 51, 13, 58): 'dGeomID'}, {}), '(None, c_void_p, dGeomID, dGeomID)', False, 'from ctypes import CFUNCTYPE\n'), ((31, 69, 31, 85), 'ctypes.POINTER', 'POINTER', ({(31, 77, 31, 84): 'c_int32'}, {}), '(c_int32)', False, 'from ctypes import POINTER\n'), ((31, 87, 31, 103), 'ctypes.POINTER', 'POINTER', ({(31, 95, 31, 102): 'c_int32'}, {}), '(c_int32)', False, 'from ctypes import POINTER\n')] |
phildavis17/DS_A | bst.py | 2d950a62b1ea36dff5dcd2c17266ddf088719472 | class BSTNode:
def __init__(self, data = None) -> None:
self.data = data
self.left = None
self.right = None
def __repr__(self) -> str:
return(f"BSTNode({self.data})")
def __str__(self) -> str:
return str(self.data)
def __eq__(self, o: object) -> bool:
pass
def __hash__(self) -> int:
pass
class BST:
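    # Skeleton of a binary search tree; the methods below are unimplemented
    # placeholders.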
def __init__(self) -> None:
pass
def insert(self, item: int) -> None:
pass
def remove(self, item: int) -> int:
pass
def swap_nodes(self, item_a: int, item_b: int) -> None:
pass
def rebalance(self) -> None:
pass
def get_min_value(self) -> int:
pass
def get_max_value(self) -> int:
pass
def clear(self) -> None:
pass
    def get_depth(self) -> int:
"""Returns the current depth of the tree."""
pass
def is_bst(self) -> bool:
"""Returns True if the tree is properly configured bst."""
pass
def is_balanced(self) -> bool:
"""
Returns True if the tree is balanced
"""
pass
def is_perfect(self) -> bool:
"""
Returns True if the tree is perfect
"""
pass
def in_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
def pre_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
def post_order(self):
"""Returns an iterable of the nodes in the tree."""
pass
| [] |
DaveWK/pyth-client | pctest/test_publish.py | 4332ef3287f584be46ec38ddd800cae8d4e7b792 | #!/usr/bin/python3
# pip3 install websockets
import asyncio
import websockets
import json
import datetime
import sys
class test_publish:
idnum = 1
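  # Class-level counter used to hand out unique JSON-RPC request ids
  # (one for the price subscription, one for the price_sched subscription).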
def __init__( self, sym, price, spread ):
self.symbol = sym
self.pidnum = test_publish.idnum
test_publish.idnum += 1
self.sidnum = test_publish.idnum
test_publish.idnum += 1
self.psubid = -1
self.ssubid = -1
self.price = price
self.spread = spread
def gen_subscribe_price(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.sidnum
}
return json.dumps( req )
def gen_subscribe_price_sched(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price_sched',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.pidnum
}
return json.dumps( req )
def gen_update_price(self):
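    # Build a JSON-RPC update_price request; self.price is bumped by one spread
    # after each call so successive submissions are visibly different.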
req = {
'jsonrpc': '2.0',
'method': 'update_price',
'params':{
'account': self.account,
'price_type': 'price',
'status': 'trading',
'price': self.price,
'conf': self.spread
},
'id': None
}
self.price += self.spread
return json.dumps( req )
def parse_reply( self, msg, allsub ):
# parse subscription replies
subid = msg['result']['subscription']
allsub[subid] = self
if msg['id'] == self.pidnum:
      self.psubid = subid
else:
self.ssubid = subid
async def parse_notify( self, ws, msg ):
# parse subscription notification messages
subid = msg['params']['subscription']
ts = datetime.datetime.utcnow().isoformat()
if subid == self.ssubid:
# aggregate price update
res = msg['params']['result']
price = res['price']
spread = res['conf']
status = res['status']
print( f'{ts} received aggregate price update symbol=' + self.symbol +
f',price={price}, spread={spread}, status={status}' )
else:
# request to submit price
print( f'{ts} submit price to block-chain symbol=' + self.symbol +
f',price={self.price}, spread={self.spread}, subscription={subid}')
await ws.send( self.gen_update_price() )
async def subscribe( self, acct, ws, allids ):
    # submit the initial subscriptions
self.account = acct
allids[self.pidnum] = self
allids[self.sidnum] = self
await ws.send( self.gen_subscribe_price() )
await ws.send( self.gen_subscribe_price_sched() )
# websocket event loop
async def poll( uri ):
# connect to pythd
ws = await websockets.connect(uri)
# submit subscriptions to pythd
allids = {}
allsub = {}
allsym = {}
sym1 = test_publish( 'SYMBOL1/USD', 10000, 100 )
sym2 = test_publish( 'SYMBOL2/USD', 2000000, 20000 )
allsym[sym1.symbol] = sym1
allsym[sym2.symbol] = sym2
# lookup accounts by symbol and subscribe
req = { 'jsonrpc': '2.0', 'method': 'get_product_list', 'id': None }
await ws.send( json.dumps( req ) )
msg = json.loads( await ws.recv() )
for prod in msg['result']:
sym = prod['attr_dict']['symbol']
for px in prod['price']:
if sym in allsym and px['price_type'] == 'price':
        await allsym[sym].subscribe( px['account'], ws, allids )
# poll for updates from pythd
while True:
msg = json.loads( await ws.recv() )
# print(msg)
if 'error' in msg:
ts = datetime.datetime.utcnow().isoformat()
code = msg['error']['code']
emsg = msg['error']['message']
print( f'{ts} error code: {code} msg: {emsg}' )
sys.exit(1)
elif 'result' in msg:
msgid = msg['id']
if msgid in allids:
allids[msgid].parse_reply( msg, allsub )
else:
subid = msg['params']['subscription']
if subid in allsub:
await allsub[subid].parse_notify( ws, msg )
# connect to pythd, subscribe to and start publishing on two symbols
if __name__ == '__main__':
uri='ws://localhost:8910'
eloop = asyncio.get_event_loop()
try:
eloop.run_until_complete( poll( uri ) )
except ConnectionRefusedError:
print( f'connection refused uri={uri}' )
sys.exit(1)
| [((146, 10, 146, 34), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ({}, {}), '()', False, 'import asyncio\n'), ((34, 11, 34, 28), 'json.dumps', 'json.dumps', ({(34, 23, 34, 26): 'req'}, {}), '(req)', False, 'import json\n'), ((46, 11, 46, 28), 'json.dumps', 'json.dumps', ({(46, 23, 46, 26): 'req'}, {}), '(req)', False, 'import json\n'), ((62, 11, 62, 28), 'json.dumps', 'json.dumps', ({(62, 23, 62, 26): 'req'}, {}), '(req)', False, 'import json\n'), ((103, 13, 103, 36), 'websockets.connect', 'websockets.connect', ({(103, 32, 103, 35): 'uri'}, {}), '(uri)', False, 'import websockets\n'), ((116, 17, 116, 34), 'json.dumps', 'json.dumps', ({(116, 29, 116, 32): 'req'}, {}), '(req)', False, 'import json\n'), ((133, 6, 133, 17), 'sys.exit', 'sys.exit', ({(133, 15, 133, 16): '(1)'}, {}), '(1)', False, 'import sys\n'), ((151, 4, 151, 15), 'sys.exit', 'sys.exit', ({(151, 13, 151, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((76, 9, 76, 35), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n'), ((129, 11, 129, 37), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n')] |
TechSpiritSS/NeoAlgo | Python/other/merge_interval.py | 08f559b56081a191db6c6b1339ef37311da9e986 | '''
Given an array of intervals, merge all overlapping intervals,
and return an array of the non-overlapping intervals that cover all the intervals in the input.
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].
'''
def merge(intervals):
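    # Approach: sort by start value, then sweep left to right keeping a stack of
    # merged intervals; each interval either extends the last merged one or
    # starts a new group.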
    # sort intervals by their start value so overlapping ones become adjacent
intervals.sort()
    # stack of merged, non-overlapping intervals built so far
intervals_stack = []
for pair in intervals:
if len(intervals_stack) == 0:
            intervals_stack.append(pair)  # the first interval goes straight onto the stack
        # otherwise, compare the new interval with the last merged interval on the stack
else:
current_pair = intervals_stack[-1]
if current_pair[1]>=pair[0]:
intervals_stack.pop()
if current_pair[1]<pair[1]:
new_pair = [current_pair[0],pair[1]]
intervals_stack.append(new_pair)
else:
new_pair = [current_pair[0],current_pair[1]]
intervals_stack.append(new_pair)
else:
intervals_stack.append(pair)
# result
return intervals_stack
if __name__ == '__main__':
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
interval = [[int(input("Enter the elements: ")) for x in range (C)] for y in range(R)]
print("Overlapping interval: ",interval)
print("Non-overlapping intervals: ",merge(interval))
"""
Time complexity : O(n log n), dominated by the initial sort
Space complexity : O(n), for the stack of merged intervals
INPUT:-
Enter the number of rows:4
Enter the number of columns:2
Enter the elements: 1
Enter the elements: 3
Enter the elements: 2
Enter the elements: 6
Enter the elements: 8
Enter the elements: 10
Enter the elements: 15
Enter the elements: 18
OUTPUT:-
Overlapping interval: [[1, 3], [2, 6], [8, 10], [15, 18]]
Non-overlapping intervals: [[1, 6], [8, 10], [15, 18]]
"""
| [] |
InnovativeTravel/humilis-lambdautils | tests/test_all.py | 344e13efb68d19f61f0be8178eb6cc2219913fb0 | """Unit tests."""
import inspect
import json
from mock import Mock
import os
import sys
import uuid
import pytest
# Add the lambda directory to the python library search path
lambda_dir = os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
sys.path.append(lambda_dir)
import lambdautils.utils
@pytest.mark.parametrize(
"key,environment,stage,namespace,table,nkey", [
("k", "e", "s", None, "e-s-secrets", "k"),
("k", "e", None, None, "e-dummystage-secrets", "k"),
("k", "e", None, "n", "e-dummystage-secrets", "n:k"),
("k", "e", "s", "n", "e-s-secrets", "n:k")])
def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,
boto3_resource, boto3_client, monkeypatch):
"""Gets a secret from the DynamoDB secrets vault."""
# Call to the DynamoDB client to retrieve the encrypted secret
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.setattr("boto3.client", boto3_client)
secret = lambdautils.utils.get_secret(key,
namespace=namespace,
environment=environment,
stage=stage)
assert secret == "dummy"
boto3_client("dynamodb").get_item.assert_called_with(
TableName=table,
Key={"id": {"S": nkey}})
# Call to the KMS client to decrypt the secret
boto3_client('kms').decrypt.assert_called_with(CiphertextBlob="encrypted")
def test_get_secret_from_env(monkeypatch):
"""Get a secret from an (encrypted) environment variable."""
key = str(uuid.uuid4()).replace('-', '.')
value = str(uuid.uuid4())
monkeypatch.setenv(key.replace('.', '_').upper(), value)
secret = lambdautils.utils.get_secret(key)
assert secret == value
def test_get_setting(monkeypatch):
"""Should be an alias for get_secret."""
resp = str(uuid.uuid4())
arg = str(uuid.uuid4())
kwarg = str(uuid.uuid4())
get_secret = Mock(return_value=resp)
monkeypatch.setattr("lambdautils.state.get_secret", get_secret)
resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)
assert resp2 == resp
get_secret.assert_called_with(arg, kwarg=kwarg)
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,consistent,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", False, "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", False, "n:k"),
("k", "e", "l", "s", "s-012", "n", "e-l-s-state", True, "s-012:n:k"),
("k", "e", "l", "s", "s-0001", None, "e-l-s-state", True, "s-0001:k")])
def test_get_state(boto3_resource, monkeypatch, key, environment, layer,
stage, shard_id, namespace, table, consistent, nkey):
"""Get a state value from DynamoDB."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.get_state(key, environment=environment, layer=layer,
stage=stage, shard_id=shard_id,
namespace=namespace,
consistent=consistent)
boto3_resource("dynamodb").Table.assert_called_with(table)
if consistent is None:
# The default setting: use consistent reads
consistent = True
boto3_resource("dynamodb").Table().get_item.assert_called_with(
Key={"id": nkey}, ConsistentRead=consistent)
def test_no_state_table(boto3_resource, monkeypatch):
"""Test accessing state variable without having a state table."""
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.delenv("HUMILIS_ENVIRONMENT")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.set_state("sample_state_key", "sample_state_value")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.delete_state("sample_state_key")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.get_state("sample_state_key")
@pytest.mark.parametrize(
"key,value,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "v", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "v", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "v", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "v", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_set_state(boto3_resource, monkeypatch, key, value, environment, layer,
stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.set_state(key, value, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().put_item.assert_called_with(
Item={"id": nkey, "value": json.dumps(value)})
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_delete_state(boto3_resource, monkeypatch, key, environment,
layer, stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.delete_state(key, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().delete_item.assert_called_with(
Key={"id": nkey})
def test_sentry_monitor_bad_client(boto3_client, raven_client, context,
monkeypatch):
"""Test that sentry_monitor handles raven client errors gracefully."""
class ClientError(Exception):
pass
def raise_error(dsn):
raise ClientError
monkeypatch.setattr("raven.Client", Mock(side_effect=raise_error))
monkeypatch.setattr("boto3.client", boto3_client)
@lambdautils.utils.sentry_monitor(environment="dummyenv",
stage="dummystage")
def lambda_handler(event, context):
pass
lambda_handler(None, context)
raven_client.captureException.assert_not_called()
@pytest.mark.parametrize(
"kstream, fstream, rcalls, kcalls, fcalls, ev", [
("a", "b", 1, 0, 0, {"Records": [{}]}),
(None, "b", 1, 0, 0, {"Records": [{}]}),
(None, None, 1, 0, 0, None),
(None, None, 1, 0, 0, None),
("a", "b", 1, 0, 0, None),
("a", None, 1, 0, 0, None)])
def test_sentry_monitor_exception(
kstream, fstream, rcalls, kcalls, fcalls, ev,
boto3_client, raven_client, context, kinesis_event, monkeypatch):
"""Tests the sentry_monitor decorator when throwing an exception and
    lacking an error stream to dump the errors to."""
if ev is None:
# Default to a Kinesis event
ev = kinesis_event
monkeypatch.setattr("boto3.client", boto3_client)
monkeypatch.setattr("raven.Client", Mock(return_value=raven_client))
monkeypatch.setattr("lambdautils.monitor.SentryHandler", Mock())
monkeypatch.setattr("lambdautils.utils.get_secret",
Mock(return_value="dummydsn"))
error_stream = {
"kinesis_stream": kstream,
"firehose_delivery_stream": fstream}
@lambdautils.utils.sentry_monitor(error_stream=error_stream)
def lambda_handler(event, context):
"""Raise an error."""
raise KeyError
with pytest.raises(KeyError):
lambda_handler(ev, context)
# Should have captured only 1 error:
# * The original KeyError
assert raven_client.captureException.call_count == rcalls
    # And should have sent the events to the Kinesis and FH error streams
assert boto3_client("kinesis").put_records.call_count == kcalls
assert boto3_client("firehose").put_record_batch.call_count == fcalls
def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Kinesis stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_kinesis_stream(search_events, "dummy_stream")
boto3_client("kinesis").put_records.call_count == 1
def test_send_to_delivery_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Firehose delivery stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_delivery_stream(search_events, "dummy_stream")
boto3_client("firehose").put_record_batch.call_count == 1
@pytest.mark.parametrize("deserializer, embed_ts", [
[json.loads, False],
[json.loads, "kinesis_timestamp"],
[None, False]])
def test_unpack_kinesis_event(kinesis_event, deserializer, embed_ts):
"""Extracts json-serialized events from a Kinesis events."""
events, shard_id = lambdautils.utils.unpack_kinesis_event(
kinesis_event, deserializer=deserializer, embed_timestamp=embed_ts)
# There should be one event per kinesis record
assert len(events) == len(kinesis_event["Records"])
assert shard_id == kinesis_event["Records"][0]["eventID"].split(":")[0]
if embed_ts:
assert all(embed_ts in ev for ev in events)
| [((15, 0, 15, 27), 'sys.path.append', 'sys.path.append', ({(15, 16, 15, 26): 'lambda_dir'}, {}), '(lambda_dir)', False, 'import sys\n'), ((20, 1, 25, 52), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(21, 4, 21, 48): '"""key,environment,stage,namespace,table,nkey"""', (21, 50, 25, 51): "[('k', 'e', 's', None, 'e-s-secrets', 'k'), ('k', 'e', None, None,\n 'e-dummystage-secrets', 'k'), ('k', 'e', None, 'n',\n 'e-dummystage-secrets', 'n:k'), ('k', 'e', 's', 'n', 'e-s-secrets', 'n:k')]"}, {}), "('key,environment,stage,namespace,table,nkey', [('k',\n 'e', 's', None, 'e-s-secrets', 'k'), ('k', 'e', None, None,\n 'e-dummystage-secrets', 'k'), ('k', 'e', None, 'n',\n 'e-dummystage-secrets', 'n:k'), ('k', 'e', 's', 'n', 'e-s-secrets', 'n:k')]\n )", False, 'import pytest\n'), ((66, 1, 71, 79), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(67, 4, 67, 74): '"""key,environment,layer,stage,shard_id,namespace,table,consistent,nkey"""', (67, 76, 71, 78): "[('k', 'e', 'l', 's', None, None, 'e-l-s-state', False, 'k'), ('k', 'e',\n 'l', 's', None, 'n', 'e-l-s-state', False, 'n:k'), ('k', 'e', 'l', 's',\n 's-012', 'n', 'e-l-s-state', True, 's-012:n:k'), ('k', 'e', 'l', 's',\n 's-0001', None, 'e-l-s-state', True, 's-0001:k')]"}, {}), "(\n 'key,environment,layer,stage,shard_id,namespace,table,consistent,nkey',\n [('k', 'e', 'l', 's', None, None, 'e-l-s-state', False, 'k'), ('k', 'e',\n 'l', 's', None, 'n', 'e-l-s-state', False, 'n:k'), ('k', 'e', 'l', 's',\n 's-012', 'n', 'e-l-s-state', True, 's-012:n:k'), ('k', 'e', 'l', 's',\n 's-0001', None, 'e-l-s-state', True, 's-0001:k')])", False, 'import pytest\n'), ((102, 1, 107, 70), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(103, 4, 103, 69): '"""key,value,environment,layer,stage,shard_id,namespace,table,nkey"""', (103, 71, 107, 69): "[('k', 'v', 'e', 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'v', 'e',\n 'l', 's', None, 'n', 'e-l-s-state', 'n:k'), ('k', 'v', 'e', 'l', 's',\n 's1', 'n', 'e-l-s-state', 's1:n:k'), ('k', 'v', 'e', 'l', 's', 's2',\n None, 'e-l-s-state', 's2:k')]"}, {}), "(\n 'key,value,environment,layer,stage,shard_id,namespace,table,nkey', [(\n 'k', 'v', 'e', 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'v',\n 'e', 'l', 's', None, 'n', 'e-l-s-state', 'n:k'), ('k', 'v', 'e', 'l',\n 's', 's1', 'n', 'e-l-s-state', 's1:n:k'), ('k', 'v', 'e', 'l', 's',\n 's2', None, 'e-l-s-state', 's2:k')])", False, 'import pytest\n'), ((120, 1, 125, 65), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(121, 4, 121, 63): '"""key,environment,layer,stage,shard_id,namespace,table,nkey"""', (121, 65, 125, 64): "[('k', 'e', 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'e', 'l', 's',\n None, 'n', 'e-l-s-state', 'n:k'), ('k', 'e', 'l', 's', 's1', 'n',\n 'e-l-s-state', 's1:n:k'), ('k', 'e', 'l', 's', 's2', None,\n 'e-l-s-state', 's2:k')]"}, {}), "(\n 'key,environment,layer,stage,shard_id,namespace,table,nkey', [('k', 'e',\n 'l', 's', None, None, 'e-l-s-state', 'k'), ('k', 'e', 'l', 's', None,\n 'n', 'e-l-s-state', 'n:k'), ('k', 'e', 'l', 's', 's1', 'n',\n 'e-l-s-state', 's1:n:k'), ('k', 'e', 'l', 's', 's2', None,\n 'e-l-s-state', 's2:k')])", False, 'import pytest\n'), ((160, 1, 167, 36), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(161, 4, 161, 50): '"""kstream, fstream, rcalls, kcalls, fcalls, ev"""', (161, 52, 167, 35): "[('a', 'b', 1, 0, 0, {'Records': [{}]}), (None, 'b', 1, 0, 0, {'Records': [\n {}]}), (None, None, 1, 0, 0, None), (None, None, 1, 0, 0, None), ('a',\n 'b', 1, 0, 0, None), ('a', None, 1, 0, 0, 
None)]"}, {}), "('kstream, fstream, rcalls, kcalls, fcalls, ev', [(\n 'a', 'b', 1, 0, 0, {'Records': [{}]}), (None, 'b', 1, 0, 0, {'Records':\n [{}]}), (None, None, 1, 0, 0, None), (None, None, 1, 0, 0, None), ('a',\n 'b', 1, 0, 0, None), ('a', None, 1, 0, 0, None)])", False, 'import pytest\n'), ((219, 1, 222, 19), 'pytest.mark.parametrize', 'pytest.mark.parametrize', ({(219, 25, 219, 49): '"""deserializer, embed_ts"""', (219, 51, 222, 18): "[[json.loads, False], [json.loads, 'kinesis_timestamp'], [None, False]]"}, {}), "('deserializer, embed_ts', [[json.loads, False], [\n json.loads, 'kinesis_timestamp'], [None, False]])", False, 'import pytest\n'), ((59, 17, 59, 40), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((48, 16, 48, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((56, 15, 56, 27), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((57, 14, 57, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((58, 16, 58, 28), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((92, 9, 92, 57), 'pytest.raises', 'pytest.raises', ({(92, 23, 92, 56): 'lambdautils.state.StateTableError'}, {}), '(lambdautils.state.StateTableError)', False, 'import pytest\n'), ((95, 9, 95, 57), 'pytest.raises', 'pytest.raises', ({(95, 23, 95, 56): 'lambdautils.state.StateTableError'}, {}), '(lambdautils.state.StateTableError)', False, 'import pytest\n'), ((98, 9, 98, 57), 'pytest.raises', 'pytest.raises', ({(98, 23, 98, 56): 'lambdautils.state.StateTableError'}, {}), '(lambdautils.state.StateTableError)', False, 'import pytest\n'), ((148, 40, 148, 69), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((179, 40, 179, 71), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((180, 61, 180, 67), 'mock.Mock', 'Mock', ({}, {}), '()', False, 'from mock import Mock\n'), ((182, 24, 182, 53), 'mock.Mock', 'Mock', (), '', False, 'from mock import Mock\n'), ((193, 9, 193, 32), 'pytest.raises', 'pytest.raises', ({(193, 23, 193, 31): 'KeyError'}, {}), '(KeyError)', False, 'import pytest\n'), ((14, 36, 14, 58), 'inspect.currentframe', 'inspect.currentframe', ({}, {}), '()', False, 'import inspect\n'), ((47, 14, 47, 26), 'uuid.uuid4', 'uuid.uuid4', ({}, {}), '()', False, 'import uuid\n'), ((117, 35, 117, 52), 'json.dumps', 'json.dumps', ({(117, 46, 117, 51): 'value'}, {}), '(value)', False, 'import json\n')] |
sot/ska_testr | packages/starcheck/post_regress.py | dd84b89d0b5ebf6158c6cda4c1df432138044e20 | import os
from testr.packages import make_regress_files
regress_files = ['starcheck.txt',
'starcheck/pcad_att_check.txt']
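# Substitutions applied to each regression file: drop the "Run on ..." timestamp line
# and the local $SKA path prefix so the outputs compare cleanly across hosts.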
clean = {'starcheck.txt': [(r'\s*Run on.*[\n\r]*', ''),
(os.environ['SKA'], '')],
'starcheck/pcad_att_check.txt': [(os.environ['SKA'], '')]}
make_regress_files(regress_files, clean=clean)
| [((11, 0, 11, 46), 'testr.packages.make_regress_files', 'make_regress_files', (), '', False, 'from testr.packages import make_regress_files\n')] |
stungkit/djaodjin-saas | testsite/wsgi.py | 93c8631509ffd5b0fb91283cd4a4aeaf9826e97e | """
WSGI config for testsite project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os, signal
#pylint: disable=invalid-name
def save_coverage(*args, **kwargs):
#pylint:disable=unused-argument
sys.stderr.write("saving coverage\n")
cov.stop()
cov.save()
if os.getenv('DJANGO_COVERAGE'):
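    # Collect coverage for this WSGI process and save it on normal exit (atexit)
    # or on SIGTERM; data files are written per-PID under $DJANGO_COVERAGE.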
import atexit, sys
import coverage
cov = coverage.coverage(data_file=os.path.join(os.getenv('DJANGO_COVERAGE'),
".coverage.%d" % os.getpid()))
cov.start()
atexit.register(save_coverage)
try:
signal.signal(signal.SIGTERM, save_coverage)
except ValueError as e:
# trapping signals does not work with manage
# trying to do so fails with
# ValueError: signal only works in main thread
pass
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "testsite.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
#pylint: disable=invalid-name
application = get_wsgi_application()
| [((26, 3, 26, 31), 'os.getenv', 'os.getenv', ({(26, 13, 26, 30): '"""DJANGO_COVERAGE"""'}, {}), "('DJANGO_COVERAGE')", False, 'import os, signal\n'), ((41, 0, 41, 68), 'os.environ.setdefault', 'os.environ.setdefault', ({(41, 22, 41, 46): '"""DJANGO_SETTINGS_MODULE"""', (41, 48, 41, 67): '"""testsite.settings"""'}, {}), "('DJANGO_SETTINGS_MODULE', 'testsite.settings')", False, 'import os, signal\n'), ((48, 14, 48, 36), 'django.core.wsgi.get_wsgi_application', 'get_wsgi_application', ({}, {}), '()', False, 'from django.core.wsgi import get_wsgi_application\n'), ((22, 4, 22, 41), 'sys.stderr.write', 'sys.stderr.write', ({(22, 21, 22, 40): '"""saving coverage\n"""'}, {}), "('saving coverage\\n')", False, 'import atexit, sys\n'), ((32, 4, 32, 34), 'atexit.register', 'atexit.register', ({(32, 20, 32, 33): 'save_coverage'}, {}), '(save_coverage)', False, 'import atexit, sys\n'), ((34, 8, 34, 52), 'signal.signal', 'signal.signal', ({(34, 22, 34, 36): 'signal.SIGTERM', (34, 38, 34, 51): 'save_coverage'}, {}), '(signal.SIGTERM, save_coverage)', False, 'import os, signal\n'), ((29, 51, 29, 79), 'os.getenv', 'os.getenv', ({(29, 61, 29, 78): '"""DJANGO_COVERAGE"""'}, {}), "('DJANGO_COVERAGE')", False, 'import os, signal\n'), ((30, 25, 30, 36), 'os.getpid', 'os.getpid', ({}, {}), '()', False, 'import os, signal\n')] |
jdelic/authserver | authserver/maildaemons/forwarder/server.py | e800664436b252fcdf224a9af46a1122c87be3ca | #!/usr/bin/env python3 -u
# -*- encoding: utf-8 -*-
import argparse
import asyncore
import json
import logging
import signal
import sys
import os
from types import FrameType
from typing import Tuple, Sequence, Any, Union, Optional, List, Dict
from concurrent.futures import ThreadPoolExecutor as Pool
import daemon
from django.db.utils import OperationalError
import authserver
from maildaemons.utils import SMTPWrapper, PatchedSMTPChannel, SaneSMTPServer
_log = logging.getLogger(__name__)
pool = Pool()
class ForwarderServer(SaneSMTPServer):
def __init__(self, remote_relay_ip: str, remote_relay_port: int, local_delivery_ip: str,
local_delivery_port: int, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.smtp = SMTPWrapper(
external_ip=remote_relay_ip, external_port=remote_relay_port,
error_relay_ip=local_delivery_ip, error_relay_port=local_delivery_port
)
# ** must be thread-safe, don't modify shared state,
# _log should be thread-safe as stated by the docs. Django ORM should be as well.
def _process_message(self, peer: Tuple[str, int], mailfrom: str, rcpttos: Sequence[str], data: bytes, *,
channel: PatchedSMTPChannel,
**kwargs: Any) -> Optional[str]:
# we can't import the Domain model before Django has been initialized
from mailauth.models import EmailAlias, Domain
data = self.add_received_header(peer, data, channel)
remaining_rcpttos = list(rcpttos) # ensure that new_rcpttos is a mutable list
combined_rcptto = {} # type: Dict[str, List[str]] # { new_mailfrom: [recipients] }
def add_rcptto(mfrom: str, rcpt: Union[str, List]) -> None:
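            # group recipients by their (possibly rewritten) envelope sender so each
            # sender is re-injected with a single sendmail call further below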
if mfrom in combined_rcptto:
if isinstance(rcpt, list):
combined_rcptto[mfrom] += rcpt
else:
combined_rcptto[mfrom].append(rcpt)
else:
if isinstance(rcpt, list):
combined_rcptto[mfrom] = rcpt
else:
combined_rcptto[mfrom] = [rcpt]
# we're going to modify remaining_rcpttos so we start from its end
for ix in range(len(remaining_rcpttos) - 1, -1, -1):
rcptto = rcpttos[ix].lower()
rcptuser, rcptdomain = rcptto.split("@", 1)
# implement domain catch-all redirect
domain = None # type: Optional[Domain]
try:
domain = Domain.objects.get(name=rcptdomain)
except Domain.DoesNotExist:
pass
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if domain:
if domain.redirect_to:
_log.debug("ix: %s - rcptto: %s - remaining rcpttos: %s", ix, rcptto, remaining_rcpttos)
del remaining_rcpttos[ix]
new_rcptto = "%s@%s" % (rcptuser, domain.redirect_to)
_log.info("%sForwarding email from <%s> to <%s> to domain @%s",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, rcptto, domain.redirect_to)
add_rcptto(mailfrom, new_rcptto)
continue
# follow the same path like the stored procedure authserver_resolve_alias(...)
if "-" in rcptuser:
# convert the first - to a +
user_mailprefix = "%s+%s" % tuple(rcptuser.split("-", 1)) # type: ignore
else:
user_mailprefix = rcptuser
if "+" in user_mailprefix:
# if we had a dashext, or a plusext, we're left with just the prefix after this
user_mailprefix = user_mailprefix.split("+", 1)[0]
try:
alias = EmailAlias.objects.get(mailprefix__iexact=user_mailprefix,
domain__name__iexact=rcptdomain) # type: EmailAlias
except EmailAlias.DoesNotExist:
# OpenSMTPD shouldn't even call us for invalid addresses if we're configured correctly
_log.error("%sUnknown mail address: %s (from: %s, prefix: %s)",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
rcptto, mailfrom, user_mailprefix)
continue
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if alias.forward_to is not None:
# it's a mailing list, forward the email to all connected addresses
del remaining_rcpttos[ix] # remove this recipient from the list
_newmf = mailfrom
if alias.forward_to.new_mailfrom != "":
_newmf = alias.forward_to.new_mailfrom
_log.info("%sForwarding email from <%s> with new sender <%s> to <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, _newmf, alias.forward_to.addresses)
add_rcptto(_newmf, alias.forward_to.addresses)
# if there are any remaining non-list/non-forward recipients, we inject them back to OpenSMTPD here
if len(remaining_rcpttos) > 0:
_log.info("%sDelivering email from <%s> to remaining recipients <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, remaining_rcpttos)
add_rcptto(mailfrom, remaining_rcpttos)
if len(combined_rcptto.keys()) == 1:
_log.debug("Only one mail envelope sender, forwarding is atomic")
results = {k: "unsent" for k in combined_rcptto.keys()} # type: Dict[str, str]
for new_mailfrom in combined_rcptto.keys():
_log.debug("Injecting email from <%s> to <%s>", new_mailfrom, combined_rcptto[new_mailfrom])
ret = self.smtp.sendmail(new_mailfrom, combined_rcptto[new_mailfrom], data)
if ret is not None:
results[new_mailfrom] = "failure"
if len(combined_rcptto.keys()) > 1:
_log.error("Non-atomic mail sending failed from <%s> in dict(%s)", combined_rcptto.keys(),
json.dumps(results))
return ret
results[new_mailfrom] = "success"
# TODO: log results
_log.debug("Done processing.")
return None
def process_message(self, *args: Any, **kwargs: Any) -> Optional[str]:
future = pool.submit(ForwarderServer._process_message, self, *args, **kwargs)
return future.result()
def run(_args: argparse.Namespace) -> None:
server = ForwarderServer(_args.remote_relay_ip, _args.remote_relay_port,
_args.local_delivery_ip, _args.local_delivery_port,
(_args.input_ip, _args.input_port), None, decode_data=False,
daemon_name="mailforwarder")
asyncore.loop()
def _sigint_handler(sig: int, frame: FrameType) -> None:
print("CTRL+C exiting")
pool.shutdown(wait=False)
sys.exit(1)
def _main() -> None:
signal.signal(signal.SIGINT, _sigint_handler)
parser = argparse.ArgumentParser(
description="This is a SMTP daemon that is used through OpenSMTPD configuration "
"to check whether incoming emails are addressed to a forwarding email alias "
"and if they are, inject emails to all list delivery addresses / expand the alias."
)
grp_daemon = parser.add_argument_group("Daemon options")
grp_daemon.add_argument("-p", "--pidfile", dest="pidfile", default="./mailforwarder-server.pid",
help="Path to a pidfile")
grp_daemon.add_argument("-u", "--user", dest="user", default=None, help="Drop privileges and switch to this user")
grp_daemon.add_argument("-g", "--group", dest="group", default=None,
help="Drop privileges and switch to this group")
grp_daemon.add_argument("-d", "--daemonize", dest="daemonize", default=False, action="store_true",
help="If set, fork into background")
grp_daemon.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true",
help="Output extra logging (not implemented right now)")
grp_daemon.add_argument("-C", "--chdir", dest="chdir", default=".",
help="Change working directory to the provided value")
grp_network = parser.add_argument_group("Network options")
grp_network.add_argument("--input-ip", dest="input_ip", default="127.0.0.1", help="The network address to bind to")
grp_network.add_argument("--input-port", dest="input_port", metavar="PORT", type=int, default=10046,
help="The port to bind to")
grp_network.add_argument("--local-delivery-ip", dest="local_delivery_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP for local email to be delivered.")
grp_network.add_argument("--local-delivery-port", dest="local_delivery_port", metavar="PORT", type=int,
default=10045, help="The port where OpenSMTPD listens for local email to be delivered")
grp_network.add_argument("--remote-relay-ip", dest="remote_relay_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP that accepts mail for relay to external domains.")
grp_network.add_argument("--remote-relay-port", dest="remote_relay_port", default=10045,
help="The port where OpenSMTPD listens for mail to relay.")
grp_django = parser.add_argument_group("Django options")
grp_django.add_argument("--settings", dest="django_settings", default="authserver.settings",
help="The Django settings module to use for authserver database access (default: "
"authserver.settings)")
_args = parser.parse_args()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", _args.django_settings)
# noinspection PyUnresolvedReferences
from django.conf import settings # initialize Django
import django
django.setup()
_log.info("mailforwarder v%s: Forwarding Alias Service starting" % authserver.version)
_log.info("Django ORM initialized")
pidfile = open(_args.pidfile, "w")
ctx = daemon.DaemonContext(
working_directory=_args.chdir,
pidfile=pidfile,
uid=_args.user,
gid=_args.group,
detach_process=_args.daemonize,
files_preserve=[1, 2, 3, pidfile],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
)
with ctx:
run(_args)
def main() -> None:
try:
_main()
except Exception as e:
_log.critical("Unhandled exception", exc_info=True)
sys.exit(1)
if __name__ == "__main__":
main()
| [((23, 7, 23, 34), 'logging.getLogger', 'logging.getLogger', ({(23, 25, 23, 33): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((24, 7, 24, 13), 'concurrent.futures.ThreadPoolExecutor', 'Pool', ({}, {}), '()', True, 'from concurrent.futures import ThreadPoolExecutor as Pool\n'), ((158, 4, 158, 19), 'asyncore.loop', 'asyncore.loop', ({}, {}), '()', False, 'import asyncore\n'), ((164, 4, 164, 15), 'sys.exit', 'sys.exit', ({(164, 13, 164, 14): '(1)'}, {}), '(1)', False, 'import sys\n'), ((168, 4, 168, 49), 'signal.signal', 'signal.signal', ({(168, 18, 168, 31): 'signal.SIGINT', (168, 33, 168, 48): '_sigint_handler'}, {}), '(signal.SIGINT, _sigint_handler)', False, 'import signal\n'), ((170, 13, 174, 5), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((209, 4, 209, 74), 'os.environ.setdefault', 'os.environ.setdefault', ({(209, 26, 209, 50): '"""DJANGO_SETTINGS_MODULE"""', (209, 52, 209, 73): '_args.django_settings'}, {}), "('DJANGO_SETTINGS_MODULE', _args.django_settings)", False, 'import os\n'), ((214, 4, 214, 18), 'django.setup', 'django.setup', ({}, {}), '()', False, 'import django\n'), ((221, 10, 231, 5), 'daemon.DaemonContext', 'daemon.DaemonContext', (), '', False, 'import daemon\n'), ((31, 20, 34, 9), 'maildaemons.utils.SMTPWrapper', 'SMTPWrapper', (), '', False, 'from maildaemons.utils import SMTPWrapper, PatchedSMTPChannel, SaneSMTPServer\n'), ((242, 8, 242, 19), 'sys.exit', 'sys.exit', ({(242, 17, 242, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((69, 25, 69, 60), 'mailauth.models.Domain.objects.get', 'Domain.objects.get', (), '', False, 'from mailauth.models import EmailAlias, Domain\n'), ((99, 24, 100, 79), 'mailauth.models.EmailAlias.objects.get', 'EmailAlias.objects.get', (), '', False, 'from mailauth.models import EmailAlias, Domain\n'), ((140, 31, 140, 50), 'json.dumps', 'json.dumps', ({(140, 42, 140, 49): 'results'}, {}), '(results)', False, 'import json\n')] |
kzkaneoka/custom-job-search | services/backend/project/api/sites.py | ca6054aee979cb1eff701dc5ba0cf56fb92baf44 | import requests
from bs4 import BeautifulSoup, element
class Indeed:
def __init__(self, words, location, offset):
self.url = "https://www.indeed.com/jobs?as_and={}&l={}&sort=date&start={}".format(
"+".join(set(d.strip().lower() for d in words.split(",") if d)),
"+".join(list(d.lower() for d in location.split(" ") if d)),
int(offset),
)
def extract(self, soup):
if not soup:
return []
jobs = []
for tag in soup.find_all(name="div", attrs={"class": "jobsearch-SerpJobCard"}):
job = {}
for child in tag.children:
if child and type(child) == element.Tag and child.attrs:
if child.attrs["class"][0] == "title":
job["title"] = child.get_text().strip()
for grandchild in child.find_all(name="a"):
if grandchild.has_attr("href"):
job["link"] = (
"https://www.indeed.com" + grandchild["href"]
)
elif child.attrs["class"][0] == "sjcl":
lines = child.get_text().strip().split("\n")
job["company"] = lines[0]
job["location"] = lines[-1]
elif child.attrs["class"][0] == "jobsearch-SerpJobCard-footer":
job["date"] = "n/a"
for grandchild in child.find_all(
name="span", attrs={"class": "date"}
):
job["date"] = grandchild.get_text()
jobs.append(job)
return jobs
def fetch(self):
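        # network/HTTP errors are swallowed: the bare try/finally returns None,
        # which extract() treats as "no results"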
soup = None
try:
r = requests.get(self.url)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
finally:
return soup
def search(self):
soup = self.fetch()
jobs = self.extract(soup)
return jobs
| [((44, 16, 44, 38), 'requests.get', 'requests.get', ({(44, 29, 44, 37): 'self.url'}, {}), '(self.url)', False, 'import requests\n'), ((46, 19, 46, 55), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(46, 33, 46, 39): 'r.text', (46, 41, 46, 54): '"""html.parser"""'}, {}), "(r.text, 'html.parser')", False, 'from bs4 import BeautifulSoup, element\n')] |
gene1wood/django-product-details | product_details/utils.py | 53f245d76fa11d073ba686e0ece7b0293ec21942 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from product_details import settings_defaults
def settings_fallback(key):
"""Grab user-defined settings, or fall back to default."""
try:
return getattr(settings, key)
except (AttributeError, ImportError, ImproperlyConfigured):
return getattr(settings_defaults, key)
def get_django_cache(cache_name):
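    # Prefer the Django >= 1.7 caches registry and fall back to get_cache on older
    # versions; an empty dict is returned when Django settings are not configured yet.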
try:
from django.core.cache import caches # django 1.7+
return caches[cache_name]
except ImportError:
from django.core.cache import get_cache
return get_cache(cache_name)
except ImproperlyConfigured:
# dance to get around not-setup-django at import time
return {}
| [((21, 15, 21, 36), 'django.core.cache.get_cache', 'get_cache', ({(21, 25, 21, 35): 'cache_name'}, {}), '(cache_name)', False, 'from django.core.cache import get_cache\n')] |
jaredliw/python-question-bank | kattis/Soda Slurper.py | 9c8c246623d8d171f875700b57772df0afcbdcdf | # CPU: 0.06 s
possessed, found, condition = map(int, input().split())
possessed += found
count = 0
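# Repeatedly trade `condition` empty bottles for one full bottle; each bottle drunk
# becomes an empty again, so keep exchanging until fewer than `condition` empties remain.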
while possessed >= condition:
div, mod = divmod(possessed, condition)
count += div
possessed = div + mod
print(count)
| [] |
HyunjiEllenPak/automl | efficientdet/dataset/csv_.py | fedf04adf12c5fd11045ea06e2f5c11a5a5490c4 | """
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
import os
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
    Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
"""
Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))
# check if the current class name is correctly present
if detect_text:
if class_name == '###':
continue
else:
class_name = 'text'
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
None)
return result
def _read_annotations(csv_reader, classes, base_dir):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2 = row[:5]
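            # the class label is not taken from the row; it is inferred from the
            # leading directory component of the image path below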
class_name = img_file.split("/")[0]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name,
'filename':img_file})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
None)
return result
def _open_for_csv(path):
"""
Open a file with flags suitable for csv.reader.
    This differs between Python versions: for Python 2 it means mode 'rb', for Python 3 it means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_image(path):
"""
    Load an image from the given path as an RGB array.
"""
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image | [((47, 13, 47, 26), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((75, 13, 75, 26), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((126, 13, 126, 26), 'collections.OrderedDict', 'OrderedDict', ({}, {}), '()', False, 'from collections import OrderedDict\n'), ((171, 13, 171, 29), 'cv2.imread', 'cv2.imread', ({(171, 24, 171, 28): 'path'}, {}), '(path)', False, 'import cv2\n'), ((172, 13, 172, 51), 'cv2.cvtColor', 'cv2.cvtColor', ({(172, 26, 172, 31): 'image', (172, 33, 172, 50): 'cv2.COLOR_BGR2RGB'}, {}), '(image, cv2.COLOR_BGR2RGB)', False, 'import cv2\n')] |