import datetime
import json
import os
import random
from time import sleep
import requests
def main():
location_ids = list(map(int, os.getenv('LOCATION_IDS').split(',')))
with open('global-entry.json') as stream:
locations = {location['id']: location for location in json.load(stream)}
print(f'Searching for appointments at {", ".join([locations[location_id]["shortName"] for location_id in location_ids])}')
while True:
for location_id in location_ids:
appointment = requests.get(f'https://ttp.cbp.dhs.gov/schedulerapi/slots?orderBy=soonest&limit=1&locationId={location_id}&minimum=1').json()
if appointment:
appointment = appointment[0]
print(f'Appointment found: {appointment}')
appointment_datetime = datetime.datetime.strptime(appointment['startTimestamp'], r'%Y-%m-%dT%H:%M')
if os.getenv('IFTTT_WEBHOOK_URL'):
requests.post(os.getenv('IFTTT_WEBHOOK_URL'), json={'value1': locations[location_id]['shortName'], 'value2': appointment_datetime.strftime(r'%A, %B %d, %Y at %H:%M')})
else:
print(f'No appointments found at {locations[location_id]["shortName"]}')
rand = random.randint(30, 90)
print(f'Sleeping for {rand} seconds')
sleep(rand)
if __name__ == "__main__":
main()
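# A minimal usage sketch (the script file name and location IDs below are
# placeholders; LOCATION_IDS and IFTTT_WEBHOOK_URL are the environment
# variables read above, and global-entry.json must list the chosen locations):
#
#   LOCATION_IDS=1234,5678 IFTTT_WEBHOOK_URL=<your webhook url> python check_appointments.py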
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from arch.api import eggroll
from arch.api import federation
from sklearn.utils import resample
from federatedml.util import consts
from federatedml.util.transfer_variable import SampleTransferVariable
from federatedml.util.param_checker import SampleParamChecker
class RandomSampler(object):
"""
Random Sampling Method
Parameters
----------
fraction : None or float, sampling ratio, default: 0.1
random_state: int, RandomState instance or None, optional, default: None
method: str, supported "upsample", "downsample" only in this version, default: "downsample"
"""
def __init__(self, fraction=0.1, random_state=None, method="downsample"):
self.fraction = fraction
self.random_state = random_state
self.method = method
def sample(self, data_inst, sample_ids=None):
"""
Interface to call random sample method
Parameters
----------
data_inst : DTable
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be a sample transform process, which means using the given sample_ids to generate data
Returns
-------
new_data_inst: DTable
            the output sample data, same format as the input
sample_ids: list, return only if sample_ids is None
"""
if sample_ids is None:
new_data_inst, sample_ids = self.__sample(data_inst)
return new_data_inst, sample_ids
else:
new_data_inst = self.__sample(data_inst, sample_ids)
return new_data_inst
def __sample(self, data_inst, sample_ids=None):
"""
        Random sample method: each line's probability of being kept is decided by `fraction`.
        Supports both downsampling and upsampling:
        if downsampling, `fraction` should be a float in [0, 1];
        otherwise, `fraction` should be a float larger than 1.0
Parameters
----------
data_inst : DTable
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be a sample transform process, which means using the given sample_ids to generate data
Returns
-------
new_data_inst: DTable
            the output sample data, same format as the input
sample_ids: list, return only if sample_ids is None
"""
return_sample_ids = False
if self.method == "downsample":
if sample_ids is None:
return_sample_ids = True
idset = [key for key, value in data_inst.mapValues(lambda val: None).collect()]
if self.fraction < 0 or self.fraction > 1:
raise ValueError("sapmle fractions should be a numeric number between 0 and 1inclusive")
sample_num = max(1, int(self.fraction * len(idset)))
sample_ids = resample(idset,
replace=False,
n_samples=sample_num,
random_state=self.random_state)
sample_dtable = eggroll.parallelize(zip(sample_ids, range(len(sample_ids))),
include_key=True,
partition=data_inst._partitions)
new_data_inst = data_inst.join(sample_dtable, lambda v1, v2: v1)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
elif self.method == "upsample":
data_set = list(data_inst.collect())
idset = [key for (key, value) in data_set]
id_maps = dict(zip(idset, range(len(idset))))
if sample_ids is None:
return_sample_ids = True
if self.fraction <= 0:
raise ValueError("sapmle fractions should be a numeric number large than 0")
sample_num = int(self.fraction * len(idset))
sample_ids = resample(idset,
replace=True,
n_samples=sample_num,
random_state=self.random_state)
new_data = []
for i in range(len(sample_ids)):
index = id_maps[sample_ids[i]]
new_data.append((i, data_set[index][1]))
new_data_inst = eggroll.parallelize(new_data,
include_key=True,
partition=data_inst._partitions)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
else:
raise ValueError("random sampler not support method {} yet".format(self.method))
class StratifiedSampler(object):
"""
Stratified Sampling Method
Parameters
----------
fractions : None or list of (category, sample ratio) tuple,
sampling ratios of each category, default: None
random_state: int, RandomState instance or None, optional, default: None
method: str, supported "upsample", "downsample" only in this version, default: "downsample"
"""
def __init__(self, fractions=None, random_state=None, method="downsample"):
self.fractions = fractions
self.label_mapping = None
        if fractions:
            # map each label to its position in the fractions list
            self.label_mapping = {label: i for i, (label, frac) in enumerate(fractions)}
self.random_state = random_state
self.method = method
def sample(self, data_inst, sample_ids=None):
"""
Interface to call stratified sample method
Parameters
----------
data_inst : DTable
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be a sample transform process, which means using the given sample_ids to generate data
Returns
-------
new_data_inst: DTable
            the output sample data, same format as the input
sample_ids: list, return only if sample_ids is None
"""
if sample_ids is None:
new_data_inst, sample_ids = self.__sample(data_inst)
return new_data_inst, sample_ids
else:
new_data_inst = self.__sample(data_inst, sample_ids)
return new_data_inst
def __sample(self, data_inst, sample_ids=None):
"""
        Stratified sample method: each line's probability of being kept is decided by `fractions`.
        Input should be a DTable where every line is an instance object with a label.
        To use this method, a list of ratios should be given, and the list length
        should equal the number of distinct labels.
        Supports both downsampling and upsampling:
        if downsampling, each ratio should be a float in [0, 1];
        otherwise, each ratio should be a float larger than 1.0
Parameters
----------
data_inst : DTable
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be a sample transform process, which means using the given sample_ids to generate data
Returns
-------
new_data_inst: DTable
            the output sample data, same format as the input
sample_ids: list, return only if sample_ids is None
"""
return_sample_ids = False
if self.method == "downsample":
if sample_ids is None:
idset = [[] for i in range(len(self.fractions))]
for label, fraction in self.fractions:
if fraction < 0 or fraction > 1:
raise ValueError("sapmle fractions should be a numeric number between 0 and 1inclusive")
return_sample_ids = True
for key, inst in data_inst.collect():
label = inst.label
if label not in self.label_mapping:
raise ValueError("label not specify sample rate! check it please")
idset[self.label_mapping[label]].append(key)
sample_ids = []
for i in range(len(idset)):
if idset[i]:
sample_num = max(1, int(self.fractions[i][1] * len(idset[i])))
_sample_ids = resample(idset[i],
replace=False,
n_samples=sample_num,
random_state=self.random_state)
sample_ids.extend(_sample_ids)
sample_dtable = eggroll.parallelize(zip(sample_ids, range(len(sample_ids))),
include_key=True,
partition=data_inst._partitions)
new_data_inst = data_inst.join(sample_dtable, lambda v1, v2: v1)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
elif self.method == "upsample":
data_set = list(data_inst.collect())
ids = [key for (key, inst) in data_set]
id_maps = dict(zip(ids, range(len(ids))))
return_sample_ids = False
if sample_ids is None:
idset = [[] for i in range(len(self.fractions))]
for label, fraction in self.fractions:
if fraction <= 0:
raise ValueError("sapmle fractions should be a numeric number greater than 0")
for key, inst in data_set:
label = inst.label
if label not in self.label_mapping:
raise ValueError("label not specify sample rate! check it please")
idset[self.label_mapping[label]].append(key)
return_sample_ids = True
sample_ids = []
for i in range(len(idset)):
if idset[i]:
sample_num = max(1, int(self.fractions[i][1] * len(idset[i])))
_sample_ids = resample(idset[i],
replace=True,
n_samples=sample_num,
random_state=self.random_state)
sample_ids.extend(_sample_ids)
new_data = []
for i in range(len(sample_ids)):
index = id_maps[sample_ids[i]]
new_data.append((i, data_set[index][1]))
new_data_inst = eggroll.parallelize(new_data,
include_key=True,
partition=data_inst._partitions)
if return_sample_ids:
return new_data_inst, sample_ids
else:
return new_data_inst
else:
raise ValueError("Stratified sampler not support method {} yet".format(self.method))
class Sampler(object):
"""
Sampling Object
Parameters
----------
    sample_param : object, self-defined sample parameters,
        defined in federatedml.param.param
"""
def __init__(self, sample_param):
SampleParamChecker.check_param(sample_param)
if sample_param.mode == "random":
self.sampler = RandomSampler(sample_param.fractions,
sample_param.random_state,
sample_param.method)
elif sample_param.mode == "stratified":
self.sampler = StratifiedSampler(sample_param.fractions,
sample_param.random_state,
sample_param.method)
else:
raise ValueError("{} sampler not support yet".format(sample_param.mde))
self.flowid = None
def sample(self, data_inst, sample_ids=None):
"""
Entry to use sample method
Parameters
----------
data_inst : DTable
The input data
sample_ids : None or list
if None, will sample data from the class instance's parameters,
            otherwise, it will be a sample transform process, which means using the given sample_ids to generate data
Returns
-------
sample_data: DTable
            the output sample data, same format as the input
"""
ori_schema = data_inst.schema
sample_data = self.sampler.sample(data_inst, sample_ids)
try:
if len(sample_data) == 2:
sample_data[0].schema = ori_schema
        except Exception:  # sample_data is a single DTable (sample-transform path)
sample_data.schema = ori_schema
return sample_data
def set_flowid(self, flowid="samole"):
self.flowid = flowid
def sync_sample_ids(self, sample_ids):
transfer_inst = SampleTransferVariable()
federation.remote(obj=sample_ids,
name=transfer_inst.sample_ids.name,
tag=transfer_inst.generate_transferid(transfer_inst.sample_ids, self.flowid),
role="host")
def recv_sample_ids(self):
transfer_inst = SampleTransferVariable()
sample_ids = federation.get(name=transfer_inst.sample_ids.name,
tag=transfer_inst.generate_transferid(transfer_inst.sample_ids, self.flowid),
idx=0)
return sample_ids
def run(self, data_inst, task_type, task_role):
"""
Sample running entry
Parameters
----------
data_inst : DTable
The input data
task_type : "homo" or "hetero"
if task_type is "homo", it will sample standalone
if task_type is "heterl": then sampling will be done in one side, after that
the side sync the sample ids to another side to generated the same sample result
task_role: "guest" or "host":
only consider this parameter when task_type is "hetero"
if task_role is "guest", it will firstly sample ids, and sync it to "host"
to generate data instances with sample ids
if task_role is "host": it will firstly get the sample ids result of "guest",
then generate sample data by the receiving ids
Returns
-------
sample_data_inst: DTable
            the output sample data, same format as the input
"""
if task_type not in [consts.HOMO, consts.HETERO]:
raise ValueError("{} task type not support yet".format(task_type))
if task_type == consts.HOMO:
return self.sample(data_inst)[0]
elif task_type == consts.HETERO:
if task_role == consts.GUEST:
sample_data_inst, sample_ids = self.sample(data_inst)
self.sync_sample_ids(sample_ids)
elif task_role == consts.HOST:
sample_ids = self.recv_sample_ids()
sample_data_inst = self.sample(data_inst, sample_ids)
else:
raise ValueError("{} role not support yet".format(task_role))
return sample_data_inst
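# A minimal, self-contained sketch of the core sampling step used above (assumed
# toy ids and fractions; it exercises sklearn's resample directly, without the
# eggroll/DTable runtime that the samplers above require):
if __name__ == "__main__":
    toy_idset = ["id0", "id1", "id2", "id3", "id4"]
    # downsample: keep a fraction of the ids, drawn without replacement
    down_ids = resample(toy_idset, replace=False,
                        n_samples=max(1, int(0.4 * len(toy_idset))), random_state=42)
    # upsample: draw more ids than available, with replacement
    up_ids = resample(toy_idset, replace=True,
                      n_samples=int(2.0 * len(toy_idset)), random_state=42)
    print(down_ids)  # 2 ids
    print(up_ids)    # 10 ids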
|
import torch
from zerovl.core.hooks import Hook
from zerovl.utils import ENV, logger, all_gather
from zerovl.utils.collections import AttrDict
from zerovl.tasks.clip.hooks.utils import RetrievalMetric, IndexedEmbInfo
class RetrievalEvalHook(Hook):
def __init__(self, runner):
self.retrieval = RetrievalMetric()
self.collection_keys = ['image_embeddings', 'text_embeddings', 'image_id', 'caption_id']
self.wandb_enable = runner.cfg.wandb.enable
def before_val_epoch(self, runner, epoch_state):
epoch_state.eval = AttrDict()
for key in self.collection_keys:
epoch_state.eval[key] = []
def after_val_step(self, runner, epoch_state, step_state):
for key in self.collection_keys:
epoch_state.eval[key].append(step_state.batch_output[key])
def after_val_epoch(self, runner, epoch_state):
collection_dict = {}
for key in self.collection_keys:
value = torch.cat(epoch_state.eval[key], 0)
value = torch.cat(all_gather(value), 0)
collection_dict[key] = value #.cuda(ENV.device)
valid_index = collection_dict['image_id'] > -1
collection_dict = {k: v[valid_index] for k,v in collection_dict.items()}
collection_dict['dataset_name'] = epoch_state.get('dataset_name')
        self.calculate_retrieval_metrics_and_log(runner, collection_dict)
@ENV.root_only
    def calculate_retrieval_metrics_and_log(self, runner, collection_dict):
logger.info('---- Calculating retrieval metrics ... ----')
index = collection_dict['image_id']
image_embedding = collection_dict['image_embeddings']
text_embedding = collection_dict['text_embeddings']
if not runner.cfg.data.cuda_eval:
logger.info('---- Using cpu evaluation ----')
index = index.cpu()
image_embedding = image_embedding.cpu()
text_embedding = text_embedding.cpu()
else:
logger.info('---- Using cuda evaluation ----')
img_emb = IndexedEmbInfo(emb_name='image',group_idx=index,emb_mat=image_embedding).unique()
text_emb = IndexedEmbInfo(emb_name='text',group_idx=index,emb_mat=text_embedding)
logger.info('{} validation: image emb shape: {}, text emb shape: {}'.format(collection_dict['dataset_name'], img_emb.emb_mat.shape, text_emb.emb_mat.shape))
i2t = self.retrieval(img_emb, text_emb)
t2i = self.retrieval(text_emb, img_emb)
i2t.update(t2i)
summary_dict = {}
for k, v in i2t.items():
k = k.replace('[image] to [text]', 'I2T')
k = k.replace('[text] to [image]', 'T2I')
k = k.replace(': ', '-')
summary_dict[k] = v * 100.0
summary_dict['RSUM'] = sum(list(summary_dict.values()))
summary_dict = {'{}_{}'.format(collection_dict['dataset_name'], k): v for k, v in summary_dict.items()}
temperature = runner.model.module.loss.temperature.detach().cpu().numpy()
summary_dict['temperature'] = temperature
logger.emph('-----{} summary -----'.format(collection_dict['dataset_name']))
logger.info(summary_dict)
if self.wandb_enable:
runner.state.wandb_record.val_record = summary_dict
logger.emph('-----{} summary -----'.format(collection_dict['dataset_name']))
class RetrievalLocalEvalHook(RetrievalEvalHook):
def __init__(self, runner):
super(RetrievalLocalEvalHook, self).__init__(runner)
@ENV.root_only
def after_val_epoch(self, runner, epoch_state):
collection_dict = {}
for key in self.collection_keys:
value = torch.cat(epoch_state.eval[key], 0)
collection_dict[key] = value
valid_index = collection_dict['image_id'] > 0
collection_dict = {k: v[valid_index] for k,v in collection_dict.items()}
collection_dict['dataset_name'] = epoch_state.get('dataset_name')
        self.calculate_retrieval_metrics_and_log(runner, collection_dict)
if __name__ == "__main__":
a = torch.rand((512, 1000)).cuda()
b = torch.rand((512, 1000)).cuda()
c = torch.arange(0, 512).cuda()
retrieval = RetrievalMetric()
img_emb = IndexedEmbInfo(emb_name='image',group_idx=c,emb_mat=a).unique()
text_emb = IndexedEmbInfo(emb_name='text',group_idx=c,emb_mat=b)
i2t = retrieval(img_emb, text_emb)
t2i = retrieval(text_emb, img_emb)
i2t.update(t2i)
summary_dict = {}
for k, v in i2t.items():
k = k.replace('[image] to [text]', 'I2T')
k = k.replace('[text] to [image]', 'T2I')
k = k.replace(': ', '-')
summary_dict[k] = v * 100
summary_dict['RSUM'] = sum(list(summary_dict.values()))
print(summary_dict)
|
import argparse
import cv2 as cv
import os
import pickle
import sys
import time
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.svm import SVC
from sklearn.preprocessing import LabelEncoder
from sklearn.lda import LDA
from sklearn.mixture import GMM
from sklearn.pipeline import Pipeline
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
networkModel = os.path.join(openfaceModelDir, 'nn4.small2.v1.t7')
imgDim = 96 # TODO make this a global parameter
# Check that we have generated embeddings: labels.csv and reps.csv
def getRep(imgPath, multiple=False, verbose=False):
"""
Detects faces in the given image path. If faces are found, then they are
aligned and passed through the network to get a vector representation.
    Returns a list of (x-center, numpy.ndarray representation) tuples, sorted left to right.
"""
start = time.time()
bgrImg = cv.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv.cvtColor(bgrImg, cv.COLOR_BGR2RGB)
if verbose:
print(" + Original size: {}".format(rgbImg.shape))
if verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
if multiple:
bbs = align.getAllFaceBoundingBoxes(rgbImg)
else:
bb1 = align.getLargestFaceBoundingBox(rgbImg)
bbs = [bb1]
if len(bbs) == 0 or (not multiple and bb1 is None):
raise Exception("Unable to find a face: {}".format(imgPath))
if verbose:
print("Face detection took {} seconds.".format(time.time() - start))
reps = []
for bb in bbs:
start = time.time()
alignedFace = align.align(
imgDim,
rgbImg,
bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if verbose:
print("Alignment took {} seconds.".format(time.time() - start))
print("This bbox is centered at {}, {}".format(bb.center().x, bb.center().y))
start = time.time()
rep = net.forward(alignedFace)
if verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
reps.append((bb.center().x, rep))
sreps = sorted(reps, key=lambda x: x[0])
return sreps
def train(workDir,ldaDim=1):
"""
Trains a linear classifier on the recognized faces.
"""
print("Loading embeddings.")
fname = "{}/labels.csv".format(workDir)
labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
labels = map(itemgetter(1),
map(os.path.split,
map(os.path.dirname, labels))) # Get the directory.
fname = "{}/reps.csv".format(workDir)
embeddings = pd.read_csv(fname, header=None).as_matrix()
le = LabelEncoder().fit(labels)
labelsNum = le.transform(labels)
nClasses = len(le.classes_)
print("Training for {} classes.".format(nClasses))
# Linear SVM classifier
clf = SVC(C=1, kernel='linear', probability=True)
if ldaDim > 0:
clf_final = clf
clf = Pipeline([('lda', LDA(n_components=ldaDim)),
('clf', clf_final)])
clf.fit(embeddings, labelsNum)
    # save the trained classifier to disk
fName = "{}/classifier.pkl".format(workDir)
print("Saving classifier to '{}'".format(fName))
    with open(fName, 'wb') as f:
pickle.dump((le, clf), f)
def infer(classifierModel, imgs,multiple=False,verbose=False):
"""
Calls a function to extract and process faces in an image, then predicts with
some confidence what face exists in the image.
@Return: none
"""
with open(classifierModel, 'rb') as f:
if sys.version_info[0] < 3:
(le, clf) = pickle.load(f)
else:
(le, clf) = pickle.load(f, encoding='latin1')
for img in imgs:
print("\n=== {} ===".format(img))
reps = getRep(img, multiple)
if len(reps) > 1:
print("List of faces in image from left to right")
for r in reps:
rep = r[1].reshape(1, -1)
bbx = r[0]
start = time.time()
predictions = clf.predict_proba(rep).ravel()
maxI = np.argmax(predictions)
person = le.inverse_transform(maxI)
confidence = predictions[maxI]
if verbose:
print("Prediction took {} seconds.".format(time.time() - start))
if multiple:
print("Predict {} @ x={} with {:.2f} confidence.".format(person.decode('utf-8'), bbx,
confidence))
else:
print("Predict {} with {:.2f} confidence.".format(person.decode('utf-8'), confidence))
if isinstance(clf, GMM):
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
if __name__ == '__main__':
start = time.time()
dlibFacePredictor = os.path.join(dlibModelDir,"shape_predictor_68_face_landmarks.dat")
align = openface.AlignDlib("models/dlib/shape_predictor_68_face_landmarks.dat")
net = openface.TorchNeuralNet(networkModel, imgDim, cuda=False) # model used for formatting
print("Loading the dlib and OpenFace models took {} seconds.".format(
time.time() - start))
start = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('imgs', type=str, nargs='+',
help="Input image.")
args = parser.parse_args()
classifierModel = "./generated-embeddings/classifier.pkl"
workDir = "./generated-embeddings/"
# train(workDir)
    infer(classifierModel, args.imgs, multiple=False, verbose=True)
|
#code without list comprehension
myl1 = []
for a in range(4,6):
for b in range(3,5):
myl1.append(a*b)
print(myl1)
#code with list comprehension
lic1 = [a*b for a in range(4,6) for b in range(3,5)]
print(lic1)
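#code with list comprehension and a filter clause (an extra example with the
#same assumed ranges: keep only the even products)
lic2 = [a*b for a in range(4,6) for b in range(3,5) if (a*b) % 2 == 0]
print(lic2)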
|
from trezor.messages.NEMProvisionNamespace import NEMProvisionNamespace
from trezor.messages.NEMTransactionCommon import NEMTransactionCommon
from ..helpers import NEM_TRANSACTION_TYPE_PROVISION_NAMESPACE
from ..writers import (
serialize_tx_common,
write_bytes_with_len,
write_uint32_le,
write_uint64_le,
)
def serialize_provision_namespace(
common: NEMTransactionCommon, namespace: NEMProvisionNamespace, public_key: bytes
) -> bytearray:
tx = serialize_tx_common(
common, public_key, NEM_TRANSACTION_TYPE_PROVISION_NAMESPACE
)
write_bytes_with_len(tx, namespace.sink.encode())
write_uint64_le(tx, namespace.fee)
write_bytes_with_len(tx, namespace.namespace.encode())
if namespace.parent:
write_bytes_with_len(tx, namespace.parent.encode())
else:
write_uint32_le(tx, 0xFFFFFFFF)
return tx
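# For reference, the byte layout produced above (as implemented by the writer
# calls, not an independent spec): the common transaction fields from
# serialize_tx_common, then the length-prefixed sink address, the 64-bit
# little-endian fee, the length-prefixed namespace string, and finally either
# the length-prefixed parent namespace or the 0xFFFFFFFF sentinel when there
# is no parent.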
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#graphml_headers.py
# Created by Disa Mhembere on 2015-09-10.
# Email: [email protected]
import argparse
import re
class Container(object):
""" This is a shell for an igraph cotainer object """
def __init__(self, attrs=[]):
self.attrs = attrs
def attribute_names(self,):
return self.attrs
def __repr__(self):
return "{0}".format(self.attrs)
class VertexContainer(Container):
pass
class EdgeContainer(Container):
pass
class GraphContainer(object):
""" This is a shell for an igraph graph object """
def __init__(self, g_attrs={}, v_attrs=VertexContainer(), e_attrs=EdgeContainer()):
self.attrs = g_attrs
self.vs = v_attrs
self.es = e_attrs
def __getitem__(self, var):
return self.attrs.__getitem__(var)
def vcount(self,):
return self.attrs["vcount"]
def ecount(self,):
return self.attrs["ecount"]
def attributes(self,):
return self.attrs.keys()
def __repr__(self,):
return "\nGraph Container:\nGraph: {0}\nVertex: {1}\nEdges: {2}".\
format(self.attrs, self.vs, self.es)
def read_graphml_headers(fn):
f = open(fn, "rb")
g_attrs = {}
e_attrs = []
v_attrs = []
g_key_patt = re.compile("g_\w*")
while True:
line = f.readline().strip()
if line.startswith("<node"):
break # No more metadata
elif line.startswith("<key"):
attr = line.split("\"")[1]
if attr.startswith("v_"):
v_attrs.append(attr[2:])
elif attr.startswith("e_"):
e_attrs.append(attr[2:])
elif line.startswith("<data"): # These are graph attributes
lsplit = line.split(">")
m = re.search(g_key_patt, lsplit[0])
key = m.string[m.span()[0]:m.span()[1]][2:] # skip the `g_`
g_attrs[key] = lsplit[1].split("<")[0]
# Fail on graphs without these attrs
if not g_attrs.has_key("vcount") and not g_attrs.has_key("ecount"):
raise AttributeError("Expected graph attribures vcount & ecount")
return GraphContainer(g_attrs, VertexContainer(v_attrs), EdgeContainer(e_attrs))
def test():
parser = argparse.ArgumentParser(description="Partial read of a graphml graph for attrs")
parser.add_argument("graphfn", action="store", help="The graph filename")
result = parser.parse_args()
g = read_graphml_headers(result.graphfn)
print g
if __name__ == "__main__":
test()
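# A minimal sketch of the kind of GraphML header this parser expects (assumed
# content; key ids are prefixed with g_/v_/e_ and the graph-level <data>
# entries appear before the first <node>):
#
#   <key id="g_vcount" for="graph" attr.name="vcount" attr.type="long"/>
#   <key id="g_ecount" for="graph" attr.name="ecount" attr.type="long"/>
#   <key id="v_name" for="node" attr.name="name" attr.type="string"/>
#   <key id="e_weight" for="edge" attr.name="weight" attr.type="double"/>
#   <graph id="G" edgedefault="undirected">
#     <data key="g_vcount">100</data>
#     <data key="g_ecount">500</data>
#     <node id="n0"> ...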
|
import sys
sys.path.append("..")
from driver_risk_utils import argument_utils, general_utils
import speed_estimator
import cv2
args = argument_utils.parse_args()
args.use_gps = False
args.lane_based_speed = True
def test_with_display():
print("Testing on images with display!")
time = 0.0
fps = 30.0
est = speed_estimator.SpeedEstimator(
args,
default_speed=25,
verbose=True,
display_speed_lane=True
)
for im_num in range(2700,2801):
image = cv2.imread("GH_frames/"+str(im_num)+".jpg")
#image = image[10:15,11:15]
H, W, C = image.shape
print(H,W,C)
print(image[0,0,:])
est.update_estimates(image, time)
time += 1.0/fps
assert round(est.get_reading()) == 17 # mps
assert round(general_utils.mps_to_mph(est.get_reading())) == 37 #mph
cv2.destroyAllWindows()
def test_with_no_display():
# test without display
print("Testing no display!")
time = 0.0
fps = 30.0
est = speed_estimator.SpeedEstimator(
args,
default_speed=25,
verbose=True,
display_speed_lane=False
)
for im_num in range(2700,2801):
image = cv2.imread("GH_frames/"+str(im_num)+".jpg")
#image = image[10:15,11:15]
H, W, C = image.shape
print(H,W,C)
print(image[0,0,:])
est.update_estimates(image, time)
time += 1.0/fps
assert round(est.get_reading()) == 17 # mps
assert round(general_utils.mps_to_mph(est.get_reading())) == 37 #mph
def test_720p_video():
print("Testing 720p video!")
time = 0.0
fps = 30.0
est = speed_estimator.SpeedEstimator(
args,
default_speed=25,
verbose=True,
display_speed_lane=False
)
camera = cv2.VideoCapture('/scratch/derek/video_captures/FullFOVandHD/video11a.mp4')
resolution = (1280, 720)
camera.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
camera.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
assert camera.isOpened(), \
'Cannot capture source'
done = False
while not done:
_, image = camera.read()
if image is None:
print('\nEnd of Video')
done = True
break
#image = image[10:15,11:15]
H, W, C = image.shape
print(H,W,C)
print(image[0,0,:])
est.update_estimates(image, time)
time += 1.0/fps
assert round(est.get_reading()) == 17 # mps
assert round(general_utils.mps_to_mph(est.get_reading())) == 37 #mph
test_with_display()
#test_with_no_display()
#test_720p_video()
|
from .frame import AudioFrame
|
import numpy as np
import numpy.testing as npt
from nitime import utils as ut
import nitime.timeseries as ts
import nitime.analysis as nta
import nose.tools as nt
import decotest
def test_SpectralAnalyzer():
Fs = np.pi
t = np.arange(1024)
x = np.sin(10*t) + np.random.rand(t.shape[-1])
y = np.sin(10*t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x,y]),sampling_rate=Fs)
C = nta.SpectralAnalyzer(T)
f,c = C.psd
npt.assert_equal(f.shape,(33,)) #This is the setting for this analyzer
#(window-length of 64)
npt.assert_equal(c.shape,(2,33))
f,c = C.cpsd
npt.assert_equal(f.shape,(33,)) #This is the setting for this analyzer
#(window-length of 64)
npt.assert_equal(c.shape,(2,2,33))
f,c = C.cpsd
npt.assert_equal(f.shape,(33,)) #This is the setting for this analyzer
#(window-length of 64)
npt.assert_equal(c.shape,(2,2,33))
f,c = C.spectrum_fourier
npt.assert_equal(f.shape,(t.shape[0]/2+1,))
npt.assert_equal(c.shape,(2,t.shape[0]/2+1))
f,c = C.spectrum_multi_taper
npt.assert_equal(f.shape,(t.shape[0]/2+1,))
npt.assert_equal(c.shape,(2,t.shape[0]/2+1))
f,c = C.periodogram
npt.assert_equal(f.shape,(t.shape[0]/2+1,))
npt.assert_equal(c.shape,(2,t.shape[0]/2+1))
# Test for data with only one channel
T = ts.TimeSeries(x,sampling_rate=Fs)
C = nta.SpectralAnalyzer(T)
f,c = C.psd
npt.assert_equal(f.shape,(33,)) #Same length for the frequencies
npt.assert_equal(c.shape,(33,)) #1-d spectrum for the single channels
def test_CoherenceAnalyzer():
Fs = np.pi
t = np.arange(10)
x = np.sin(10*t) + np.random.rand(t.shape[-1])
y = np.sin(10*t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x,y]),sampling_rate=Fs)
C = nta.CoherenceAnalyzer(T)
npt.assert_equal(C.coherence.shape,(2,2,33)) #Default mlab_method
#Coherence symmetry:
npt.assert_equal(C.coherence[0,1],C.coherence[1,0])
#Phase/delay asymmetry:
npt.assert_equal(C.phase[0,1],-1*C.phase[1,0])
npt.assert_equal(C.delay[0,1][1:],-1*C.delay[1,0][1:]) #The very first one
#is a nan
#Calculation of the spectrum is the same as in the default spectral
#analyzer:
S = nta.SpectralAnalyzer(T)
npt.assert_equal(S.cpsd,(C.frequencies,C.spectrum))
def test_SparseCoherenceAnalyzer():
Fs = np.pi
t = np.arange(256)
x = np.sin(10*t) + np.random.rand(t.shape[-1])
y = np.sin(10*t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x,y]),sampling_rate=Fs)
C1 = nta.SparseCoherenceAnalyzer(T,ij=((0,1),(1,0)))
#Coherence symmetry:
npt.assert_equal(np.abs(C1.coherence[0,1]),np.abs(C1.coherence[1,0]))
#Make sure you get the same answers as you would from the standard
#CoherenceAnalyzer:
C2 = nta.CoherenceAnalyzer(T)
yield npt.assert_almost_equal, C2.coherence[0,1],C1.coherence[0,1]
yield npt.assert_almost_equal, C2.coherence[0,1],C1.coherence[0,1]
def test_CorrelationAnalyzer():
Fs = np.pi
t = np.arange(1024)
x = np.sin(10*t) + np.random.rand(t.shape[-1])
y = np.sin(10*t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x,y]),sampling_rate=Fs)
C = nta.CorrelationAnalyzer(T)
#Test the symmetry: correlation(x,y)==correlation(y,x)
npt.assert_equal(C.corrcoef[0,1],C.corrcoef[1,0])
#Test the self-sameness: correlation(x,x)==1
npt.assert_equal(C.corrcoef[0,0],1)
npt.assert_equal(C.corrcoef[1,1],1)
#Test the cross-correlation:
#First the symmetry:
npt.assert_array_almost_equal(C.xcorr.data[0,1],C.xcorr.data[1,0])
#Test the normalized cross-correlation
#The cross-correlation should be equal to the correlation at time-lag 0
npt.assert_equal(C.xcorr_norm.data[0,1,C.xcorr_norm.time==0]
,C.corrcoef[0,1])
#And the auto-correlation should be equal to 1 at 0 time-lag:
npt.assert_equal(C.xcorr_norm.data[0,0,C.xcorr_norm.time==0],1)
#Does it depend on having an even number of time-points?
#make another time-series with an odd number of items:
t = np.arange(1023)
x = np.sin(10*t) + np.random.rand(t.shape[-1])
y = np.sin(10*t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x,y]),sampling_rate=Fs)
C = nta.CorrelationAnalyzer(T)
npt.assert_equal(C.xcorr_norm.data[0,1,C.xcorr_norm.time==0]
,C.corrcoef[0,1])
def test_EventRelatedAnalyzer():
cycles = 10
l = 1024
unit = 2*np.pi/l
t = np.arange(0,2*np.pi+unit,unit)
signal = np.sin(cycles*t)
events = np.zeros(t.shape)
#Zero crossings:
idx = np.where(np.abs(signal)<0.03)[0]
#An event occurs at the beginning of every cycle:
events[idx[:-2:2]]=1
#and another kind of event at the end of each cycle:
events[idx[1:-1:2]]=2
T_signal = ts.TimeSeries(signal,sampling_rate=1)
T_events = ts.TimeSeries(events,sampling_rate=1)
ETA = nta.EventRelatedAnalyzer(T_signal,T_events,l/(cycles*2)).eta
#This looks good, but doesn't pass unless you consider 3 digits:
npt.assert_almost_equal(ETA.data[0],signal[:ETA.data.shape[-1]],3)
npt.assert_almost_equal(ETA.data[1],-1*signal[:ETA.data.shape[-1]],3)
#Same should be true for the FIR analysis:
FIR = nta.EventRelatedAnalyzer(T_signal,T_events,l/(cycles*2)).FIR
npt.assert_almost_equal(FIR.data[0],signal[:FIR.data.shape[-1]],3)
npt.assert_almost_equal(FIR.data[1],-1*signal[:FIR.data.shape[-1]],3)
#Same should be true for
XCORR = nta.EventRelatedAnalyzer(T_signal,T_events,l/(cycles*2)).xcorr_eta
npt.assert_almost_equal(XCORR.data[0],signal[:XCORR.data.shape[-1]],3)
npt.assert_almost_equal(XCORR.data[1],-1*signal[:XCORR.data.shape[-1]],3)
#More dimensions:
T_signal = ts.TimeSeries(np.vstack([signal,signal]),sampling_rate=1)
T_events = ts.TimeSeries(np.vstack([events,events]),sampling_rate=1)
ETA = nta.EventRelatedAnalyzer(T_signal,T_events,l/(cycles*2)).eta
#The events input and the time-series input have different dimensions:
T_events = ts.TimeSeries(events,sampling_rate=1)
ETA = nta.EventRelatedAnalyzer(T_signal,T_events,l/(cycles*2)).eta
npt.assert_almost_equal(ETA.data[0][0],signal[:ETA.data.shape[-1]],3)
npt.assert_almost_equal(ETA.data[1][1],-1*signal[:ETA.data.shape[-1]],3)
#Input is an Events object, instead of a time-series:
ts1 = ts.TimeSeries(np.arange(100),sampling_rate=1)
ev = ts.Events([10,20,30])
et = nta.EventRelatedAnalyzer(ts1,ev,5)
#The five points comprising the average of the three sequences:
npt.assert_equal(et.eta.data,[20.,21.,22.,23.,24.])
ts2 = ts.TimeSeries(np.arange(200).reshape(2,100),sampling_rate=1)
ev = ts.Events([10,20,30])
et = nta.EventRelatedAnalyzer(ts2,ev,5)
npt.assert_equal(et.eta.data,[[ 20., 21., 22., 23., 24.],
[ 120., 121., 122., 123., 124.]])
#Test that providing the analyzer with an array, instead of an Events or a
#TimeSeries object throws an error:
npt.assert_raises(ValueError,nta.EventRelatedAnalyzer,ts2,events,10)
def test_HilbertAnalyzer():
"""Testing the HilbertAnalyzer (analytic signal)"""
pi = np.pi
Fs = np.pi
t = np.arange(0,2*pi,pi/256)
a0 = np.sin(t)
a1 = np.cos(t)
a2 = np.sin(2*t)
a3 = np.cos(2*t)
T = ts.TimeSeries(data=np.vstack([a0,a1,a2,a3]),
sampling_rate=Fs)
H = nta.HilbertAnalyzer(T)
h_abs = H.amplitude.data
h_angle = H.phase.data
h_real = H.real.data
#The real part should be equal to the original signals:
npt.assert_almost_equal(h_real,T.data)
#The absolute value should be one everywhere, for this input:
npt.assert_almost_equal(h_abs,np.ones(T.data.shape))
#For the 'slow' sine - the phase should go from -pi/2 to pi/2 in the first
#256 bins:
npt.assert_almost_equal(h_angle[0,:256],np.arange(-pi/2,pi/2,pi/256))
#For the 'slow' cosine - the phase should go from 0 to pi in the same
#interval:
npt.assert_almost_equal(h_angle[1,:256],np.arange(0,pi,pi/256))
#The 'fast' sine should make this phase transition in half the time:
npt.assert_almost_equal(h_angle[2,:128],np.arange(-pi/2,pi/2,pi/128))
#Ditto for the 'fast' cosine:
npt.assert_almost_equal(h_angle[3,:128],np.arange(0,pi,pi/128))
def test_FilterAnalyzer():
"""Testing the FilterAnalyzer """
t = np.arange(np.pi/100,10*np.pi,np.pi/100)
fast = np.sin(50*t)+10
slow = np.sin(10*t)-20
fast_mean = np.mean(fast)
slow_mean = np.mean(slow)
fast_ts = ts.TimeSeries(data=fast,sampling_rate=np.pi)
slow_ts = ts.TimeSeries(data=slow,sampling_rate=np.pi)
#Make sure that the DC is preserved
f_slow = nta.FilterAnalyzer(slow_ts,ub=0.6)
f_fast = nta.FilterAnalyzer(fast_ts,lb=0.6)
npt.assert_almost_equal(f_slow.filtered_fourier.data.mean(),slow_mean,
decimal=2)
npt.assert_almost_equal(f_slow.filtered_boxcar.data.mean(),slow_mean,
decimal=2)
npt.assert_almost_equal(f_slow.fir.data.mean(),slow_mean)
npt.assert_almost_equal(f_fast.filtered_fourier.data.mean(),10)
npt.assert_almost_equal(f_fast.filtered_boxcar.data.mean(),10,decimal=2)
npt.assert_almost_equal(f_fast.fir.data.mean(),10)
#Check that things work with a two-channel time-series:
T2 = ts.TimeSeries(np.vstack([fast,slow]),sampling_rate=np.pi)
f_both = nta.FilterAnalyzer(T2,ub=1.0,lb=0.1)
#These are rather basic tests:
npt.assert_equal(f_both.fir.shape,T2.shape)
npt.assert_equal(f_both.iir.shape,T2.shape)
npt.assert_equal(f_both.filtered_boxcar.shape,T2.shape)
npt.assert_equal(f_both.filtered_fourier.shape,T2.shape)
def test_NormalizationAnalyzer():
"""Testing the NormalizationAnalyzer """
t1 = ts.TimeSeries(data=[[99,100,101],[99,100,101]],sampling_interval=1.)
t2 = ts.TimeSeries(data=[[-1,0,1],[-1,0,1]],sampling_interval=1.)
N1 = nta.NormalizationAnalyzer(t1)
npt.assert_almost_equal(N1.percent_change[0],t2[0])
t3 = ts.TimeSeries(data=[[100,102],[1,3]],sampling_interval=1.)
t4 = ts.TimeSeries(data=[[-1,1],[-1,1]],sampling_interval=1.)
N2 = nta.NormalizationAnalyzer(t3)
npt.assert_almost_equal(N2.z_score[0],t4[0])
def test_MorletWaveletAnalyzer():
"""Testing the MorletWaveletAnalyzer """
time_series = ts.TimeSeries(data=np.random.rand(100),sampling_rate=100)
W = nta.MorletWaveletAnalyzer(time_series,freqs=20)
WL = nta.MorletWaveletAnalyzer(time_series,freqs=20,log_morlet=True)
H = nta.HilbertAnalyzer(W.real)
HL = nta.HilbertAnalyzer(WL.real)
npt.assert_almost_equal(np.sin(H.phase.data[10:-10]),np.sin(W.phase.data[10:-10]),decimal=0)
npt.assert_almost_equal(np.sin(HL.phase.data[10:-10]),np.sin(WL.phase.data[10:-10]),decimal=0)
def test_CoherenceMTAnalyzer():
""" Testing the multi-taper coherence analysis. See also comparison in doc/examples/multi_taper_coh.py"""
Fs = np.pi
t = np.arange(100)
x = np.sin(10*t) + np.random.rand(t.shape[-1])
y = np.sin(10*t) + np.random.rand(t.shape[-1])
T = ts.TimeSeries(np.vstack([x,y]),sampling_rate=Fs)
C1 = nta.MTCoherenceAnalyzer(T)
#Coherence symmetry:
npt.assert_equal(C1.coherence[0,1],C1.coherence[1,0])
#Test that it runs through (it will trivially be equal to itself):
npt.assert_equal(C1.confidence_interval,C1.confidence_interval)
#Test that it works with adaptive set to False:
C2 = nta.MTCoherenceAnalyzer(T,adaptive=False)
#Coherence symmetry:
npt.assert_equal(C2.coherence[0,1],C2.coherence[1,0])
#Test that it runs through (it will trivially be equal to itself):
npt.assert_equal(C2.confidence_interval,C2.confidence_interval)
#print 'woot'
def test_SeedCoherenceAnalyzer():
Fs = np.pi
t = np.arange(256)
x = np.sin(10*t) + np.random.rand(t.shape[-1])
y = np.sin(10*t) + np.random.rand(t.shape[-1])
T1 = ts.TimeSeries(np.vstack([x,y]),sampling_rate=Fs)
z = y = np.sin(10*t) + np.random.rand(t.shape[-1])
T2 = ts.TimeSeries(z,sampling_rate=Fs)
C1 = nta.SeedCoherenceAnalyzer(T2, T1)
T3 = ts.TimeSeries(np.vstack([x,y,z]),sampling_rate=Fs)
#Make sure you get the same answers as you would from the standard
#CoherenceAnalyzer:
C2 = nta.CoherenceAnalyzer(T3)
npt.assert_almost_equal(C2.coherency[2,0],C1.coherency[0])
|
# from django.test import Client, client
from projetodj.django_assertions import assert_contains
import pytest
from django.urls import reverse
@pytest.fixture
def resp(client):
resp = client.get(reverse('base:home'))
return resp
def test_status_code(resp):
assert resp.status_code == 200
def test_title(resp):
assert_contains(resp, 'title>projetodj-home</title>')
def test_home_link(resp):
assert_contains(resp, f'href="{reverse("base:home")}">Projetodj')
|
import importlib.util
import os
from pathlib import Path
from .environment import EnvironmentConf
class PathsConf(EnvironmentConf):
"""
Configure the default paths for a Django project
"""
def get_repo_dir(self):
"""
Return the repository directory.
"""
# Try to guess the base repository directory. We first assume the
# project uses git and look for a parent folder with a .git tree
path = git_folder(self)
if path is not None:
return path
# The final guess is the current directory
return os.getcwd()
def get_settings_file_path(self):
django_settings = os.environ.get("DJANGO_SETTINGS_MODULE", None)
if django_settings:
spec = importlib.util.find_spec(django_settings)
return Path(spec.origin)
def get_config_dir(self):
        settings = self.SETTINGS_FILE_PATH
        if settings is None:
            return None
if settings.name == "__init__.py":
settings = settings.parent
return settings.parent
def get_base_dir(self):
return get_dir(self)
def get_log_file_path(self):
value = self.env("DJANGO_LOG_FILE_PATH", default=None)
if value is None:
value = self.BASE_DIR / "logfile.log"
return value
def get_django_project_path(self):
name, _, _ = os.environ["DJANGO_SETTINGS_MODULE"].rpartition(".")
return name
#
# Auxiliary functions
#
def git_folder(conf):
paths = []
if conf.SETTINGS_FILE_PATH:
paths.append(conf.SETTINGS_FILE_PATH.parent)
if not conf.__module__.startswith("boogie"):
spec = importlib.util.find_spec(conf.__module__)
paths.append(Path(spec.origin).parent)
paths.append(Path(os.getcwd()))
for path in paths:
for subpath in [path, *path.parents]:
if (subpath / ".git").exists():
return subpath
def get_dir(conf):
spec = importlib.util.find_spec(conf.__module__)
return Path(spec.origin).parent
|
"""
httplib2test_appengine
A set of unit tests for httplib2.py on Google App Engine
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2011, Joe Gregorio"
import os
import sys
import unittest
# The test resources base uri
base = 'http://bitworking.org/projects/httplib2/test/'
#base = 'http://localhost/projects/httplib2/test/'
cacheDirName = ".cache"
APP_ENGINE_PATH='../../google_appengine'
sys.path.insert(0, APP_ENGINE_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
from google.appengine.ext import testbed
testbed = testbed.Testbed()
testbed.activate()
testbed.init_urlfetch_stub()
import httplib2
class AppEngineHttpTest(unittest.TestCase):
def setUp(self):
if os.path.exists(cacheDirName):
[os.remove(os.path.join(cacheDirName, file)) for file in os.listdir(cacheDirName)]
if sys.version_info < (2, 6):
disable_cert_validation = True
else:
disable_cert_validation = False
def test(self):
h = httplib2.Http()
response, content = h.request("http://bitworking.org")
self.assertEqual(httplib2.SCHEME_TO_CONNECTION['https'],
httplib2.AppEngineHttpsConnection)
print h.connections
self.assertEquals(1, len(h.connections))
self.assertEquals(type(h.connections['http:bitworking.org']),
httplib2.AppEngineHttpConnection)
self.assertEquals(response.status, 200)
self.assertEquals(response['status'], '200')
def test_no_key_or_cert_file(self):
h = httplib2.Http(proxy_info='foo.txt')
try:
response, content = h.request("http://bitworking.org")
self.fail('Should raise exception.')
except httplib2.NotSupportedOnThisPlatform:
pass
if __name__ == '__main__':
unittest.main()
|
# Program 34 : Python program to count Even and Odd numbers in a List
# list of numbers
list1 = [19, 222, 43, 4, 86, 3, 1]
even_count, odd_count = 0, 0
# iterating each number in list
for num in list1:
# checking condition
if num % 2 == 0:
even_count += 1
else:
odd_count += 1
print("Even numbers in the list: ", even_count)
print("Odd numbers in the list: ", odd_count)
|
from mongoengine import Document, StringField, ObjectIdField, ReferenceField
from . import Language
class Author(Document):
_id = ObjectIdField()
name = StringField(required=True)
url_name = StringField(required=True)
language_id = ReferenceField(Language)
    def __init__(self, name, language_id, url_name, *args, **kwargs):
        # mongoengine Documents need Document.__init__ to run; pass extra args through
        super(Author, self).__init__(*args, **kwargs)
        self.name = name
        self.language_id = language_id
        self.url_name = url_name
    def __str__(self):
        return "Name: " + self.name + " Language: " + str(self.language_id) + " URL Name: " + self.url_name
def set_author(self, name, language_id, url_name):
self.name = name
self.language_id = language_id
self.url_name = url_name
def set_name(self, name):
self.name = name
def set_language_id(self, language_id):
self.language_id = language_id
def set_url_name(self, url_name):
self.url_name = url_name
def get_author(self):
return {
"name": self.name,
"language_id": self.language_id,
"url_name": self.url_name
}
def get_name(self):
return self.name
def get_language_id(self):
return self.language_id
def get_url_name(self):
return self.url_name
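# A minimal usage sketch (assumed database name and field values; requires a
# configured mongoengine connection and an existing Language document):
#
#   from mongoengine import connect
#   connect("library_db")  # hypothetical database name
#   language = Language.objects.first()
#   author = Author(name="Some Author", language_id=language, url_name="some-author")
#   author.save()
#   print(author.get_author())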
|
import urllib.request
import os
import numpy as np
from meld_classifier.paths import BASE_PATH, DEFAULT_HDF5_FILE_ROOT, EXPERIMENT_PATH, MODEL_NAME, MODEL_PATH, MELD_DATA_PATH
import sys
import shutil
import tempfile
# --- download data from figshare ---
def _fetch_url(url, fname):
def dlProgress(count, blockSize, totalSize):
percent = int(count*blockSize*100/totalSize)
sys.stdout.write("\r" + url + "...%d%%" % percent)
sys.stdout.flush()
return urllib.request.urlretrieve(url, fname, reporthook=dlProgress)
def download_test_data():
"""
Download test data from figshare
"""
url = "https://figshare.com/ndownloader/files/33164459?private_link=0f64f446f0fc0f3e917bd715f8b95a21"
test_data_dir = MELD_DATA_PATH
os.makedirs(test_data_dir, exist_ok=True)
print('downloading test data to '+ test_data_dir)
with tempfile.TemporaryDirectory() as tmpdirname:
# download to tmpdir
_fetch_url(url, os.path.join(tmpdirname, "test_data.tar.gz"))
# unpack
shutil.unpack_archive(os.path.join(tmpdirname, "test_data.tar.gz"), test_data_dir)
print(f"\nunpacked data to {test_data_dir}")
return test_data_dir
def download_test_input():
"""
Download test input from figshare
"""
url = "https://figshare.com/ndownloader/files/31618445?private_link=c01b5bb003ad062c8fc2"
#print()
test_data_dir = MELD_DATA_PATH
os.makedirs(test_data_dir, exist_ok=True)
print('downloading test input data to '+ test_data_dir)
with tempfile.TemporaryDirectory() as tmpdirname:
# download to tmpdir
_fetch_url(url, os.path.join(tmpdirname, "input_test.tar.gz"))
# unpack
shutil.unpack_archive(os.path.join(tmpdirname, "input_test.tar.gz"), test_data_dir)
print(f"\nunpacked data to {test_data_dir}")
return test_data_dir
def download_models():
"""
download pretrained ensemble models and return experiment_name and experiment_dir
"""
url = "https://figshare.com/ndownloader/files/31618988?private_link=2ed2d8cddbfdda5f00ae"
with tempfile.TemporaryDirectory() as tmpdirname:
# download to tmpdir
_fetch_url(url, os.path.join(tmpdirname, "models.tar.gz"))
# unpack
shutil.unpack_archive(os.path.join(tmpdirname, "models.tar.gz"), os.path.dirname(EXPERIMENT_PATH))
print(f"\ndownloaded models to {EXPERIMENT_PATH}")
# --- return path to data (and optionally download) ---
def get_test_input(force_download=False):
test_data_dir = os.path.join(MELD_DATA_PATH, "input")
exists_test_patient = os.path.exists(os.path.join(test_data_dir,'MELD_TEST_3T_FCD_0011'))
if exists_test_patient:
if force_download:
print("Overwriting existing test data.")
return download_test_input()
else:
print("Test data exists. Specify --force-download to overwrite.")
return test_data_dir
else:
return download_test_input()
def get_test_data(force_download=False):
test_data_dir = os.path.join(BASE_PATH, "MELD_TEST")
exists_patient = os.path.exists(os.path.join(test_data_dir, DEFAULT_HDF5_FILE_ROOT.format(site_code='TEST', group='patient')))
exists_control = os.path.exists(os.path.join(test_data_dir, DEFAULT_HDF5_FILE_ROOT.format(site_code='TEST', group='control')))
if exists_patient and exists_control:
if force_download:
print("Overwriting existing test data.")
return download_test_data()
else:
print("Test data exists. Specify --force-download to overwrite.")
return test_data_dir
else:
return download_test_data()
def get_model(force_download=False):
experiment_path = {
'default': "ensemble_21-09-15/fold_all",
'reverse': "ensemble_21-09-20/fold_all",
}
# test if exists and do not download then
if not os.path.exists(os.path.join(EXPERIMENT_PATH, MODEL_PATH)):
download_models()
else:
if force_download:
print("Overwriting existing model.")
download_models()
else:
print("Model exists. Specify --force-download to overwrite.")
return MODEL_PATH, MODEL_NAME
|
class Scrub(object):
health = 0
def __init__(self,health):
self.health = health
def printHealth(self):
        print(self.health)
|
default_app_config = 'izi.apps.voucher.config.VoucherConfig'
|
import sys
import os
import yaml
import glob
import shutil
try:
from conda_build.config import config
except ImportError:
# For older versions of conda-build
from conda_build import config
with open(os.path.join(sys.argv[1], 'meta.yaml')) as f:
    name = yaml.safe_load(f)['package']['name']
binary_package_glob = os.path.join(config.bldpkgs_dir, '{0}*.tar.bz2'.format(name))
binary_package = glob.glob(binary_package_glob)[0]
shutil.move(binary_package, '.')
|
from django.shortcuts import render, HttpResponse,get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Message
from users.models import User
from django.views.generic import DetailView, ListView , CreateView
class MessageInboxView(LoginRequiredMixin, ListView):
model = Message
template_name = "messaging/message_inbox.html"
context_object_name = "inbox_messages"
ordering = ["created_at"]
paginate_by = 10
def get_queryset(self):
user = self.request.user
messages = Message.objects.filter(reciever=user).order_by("-created_at")
return messages
class MessageSentView(LoginRequiredMixin, ListView):
model = Message
template_name = "messaging/message_sent.html"
context_object_name = "sent_messages"
ordering = ["created_at"]
paginate_by = 10
def get_queryset(self):
user = self.request.user
messages = Message.objects.filter(sender=user).order_by("-created_at")
return messages
class MessageCreateView(LoginRequiredMixin,CreateView):
model = Message
fields = ["reciever", "msg_content"]
template_name = "messaging/message_create.html"
def form_valid(self,form):
form.instance.sender = self.request.user
        return super().form_valid(form)
|
# ------------------------------------------------------------------------------------
# BaSSL
# Copyright (c) 2021 KakaoBrain. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------
import random
import numpy as np
class InstanceShotSampler:
""" This is for instance at pre-training stage """
def __call__(self, center_sid: int, *args, **kwargs):
return center_sid
class TemporalShotSampler:
""" This is for temporal at pre-training stage """
def __init__(self, neighbor_size: int):
self.N = neighbor_size
def __call__(self, center_sid: int, total_num_shot: int):
""" we randomly sample one shot from neighbor shots within local temporal window
"""
shot_idx = center_sid + np.arange(
-self.N, self.N + 1
) # total number of neighbor shots = 2N+1 (query (1) + neighbors (2*N))
shot_idx = np.clip(
shot_idx, 0, total_num_shot
) # deal with out-of-boundary indices
shot_idx = random.choice(
np.unique(np.delete(shot_idx, np.where(shot_idx == center_sid)))
)
return shot_idx
class SequenceShotSampler:
""" This is for bassl or shotcol at pre-training stage """
def __init__(self, neighbor_size: int, neighbor_interval: int):
self.interval = neighbor_interval
self.window_size = neighbor_size * self.interval # temporal coverage
def __call__(
self, center_sid: int, total_num_shot: int, sparse_method: str = "edge"
):
"""
Args:
center_sid: index of center shot
total_num_shot: last index of shot for given video
            sparse_method: how the sparse subsequence is chosen from the dense
                sequence ("edge" or "edge+center"), used for curriculum learning
"""
dense_shot_idx = center_sid + np.arange(
-self.window_size, self.window_size + 1, self.interval
) # total number of shots = 2*neighbor_size+1
if dense_shot_idx[0] < 0:
# if center_sid is near left-side of video, we shift window rightward
# so that the leftmost index is 0
dense_shot_idx -= dense_shot_idx[0]
elif dense_shot_idx[-1] > (total_num_shot - 1):
# if center_sid is near right-side of video, we shift window leftward
# so that the rightmost index is total_num_shot - 1
dense_shot_idx -= dense_shot_idx[-1] - (total_num_shot - 1)
# to deal with videos that have smaller number of shots than window size
dense_shot_idx = np.clip(dense_shot_idx, 0, total_num_shot)
if sparse_method == "edge":
# in this case, we use two edge shots as sparse sequence
sparse_stride = len(dense_shot_idx) - 1
sparse_idx_to_dense = np.arange(0, len(dense_shot_idx), sparse_stride)
elif sparse_method == "edge+center":
# in this case, we use two edge shots + center shot as sparse sequence
sparse_idx_to_dense = np.array(
[0, len(dense_shot_idx) - 1, len(dense_shot_idx) // 2]
)
# sparse_shot_idx = dense_shot_idx[sparse_idx_to_dense]
# shot_idx = [sparse_shot_idx, dense_shot_idx]
shot_idx = [sparse_idx_to_dense, dense_shot_idx]
return shot_idx
class NeighborShotSampler:
""" This is for scene boundary detection (sbd), i.e., fine-tuning stage """
def __init__(self, neighbor_size: int = 8):
self.neighbor_size = neighbor_size
def __call__(self, center_sid: int, total_num_shot: int):
# total number of shots = 2 * neighbor_size + 1
shot_idx = center_sid + np.arange(-self.neighbor_size, self.neighbor_size + 1)
shot_idx = np.clip(shot_idx, 0, total_num_shot) # for out-of-boundary indices
return shot_idx
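# A small worked example of SequenceShotSampler (assumed parameter values):
if __name__ == "__main__":
    sampler = SequenceShotSampler(neighbor_size=2, neighbor_interval=1)
    sparse_pos, dense_ids = sampler(center_sid=5, total_num_shot=100, sparse_method="edge")
    print(sparse_pos)  # [0 4] -> positions of the two edge shots within the dense window
    print(dense_ids)   # [3 4 5 6 7] -> the dense neighborhood around center shot 5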
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for validating configuration information supplied by the user.
"""
import re
import socket
from fabric.context_managers import settings
from fabric.operations import run, sudo
from trinoadmin.util.exception import ConfigurationError
def validate_username(username):
if not isinstance(username, basestring):
raise ConfigurationError('Username must be of type string.')
return username
def validate_port(port):
try:
port_int = int(port)
except TypeError:
raise ConfigurationError('Port must be of type string, but '
'found ' + str(type(port)) + '.')
except ValueError:
raise ConfigurationError('Invalid port number ' + port +
': port must be a number between 1 and 65535')
    if port_int < 1 or port_int > 65535:
        raise ConfigurationError('Invalid port number ' + str(port) +
                                 ': port must be a number between 1 and 65535')
return port_int
def validate_host(host):
try:
socket.inet_pton(socket.AF_INET, host)
return host
except TypeError:
raise ConfigurationError('Host must be of type string. Found ' +
str(type(host)) + '.')
except socket.error:
pass
try:
socket.inet_pton(socket.AF_INET6, host)
return host
except socket.error:
pass
if not is_valid_hostname(host):
raise ConfigurationError(repr(host) + ' is not a valid '
'ip address or host name.')
return host
def is_valid_hostname(hostname):
    valid_name = r'^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*' \
                 r'([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$'
return re.match(valid_name, hostname)
def validate_can_connect(user, host, port):
with settings(host_string='%s@%s:%d' % (user, host, port), user=user):
return run('exit 0').succeeded
def validate_can_sudo(sudo_user, conn_user, host, port):
with settings(host_string='%s@%s:%d' % (conn_user, host, port),
warn_only=True):
return sudo('exit 0', user=sudo_user).succeeded
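# A minimal usage sketch (assumed values; validate_can_connect/validate_can_sudo
# are left out because they need reachable hosts and fabric credentials):
if __name__ == "__main__":
    print(validate_username("presto"))    # -> 'presto'
    print(validate_port("8080"))          # -> 8080
    print(validate_host("192.168.1.10"))  # -> '192.168.1.10'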
|
import pytest
from bddrest import status, response, when
from yhttp import statuses
def test_httpstatus(app, Given):
@app.route()
def get(req):
raise statuses.badrequest()
@app.route('/foo')
def get(req):
return statuses.badrequest()
with Given():
assert status == '400 Bad Request'
assert response.text.startswith('400 Bad Request\r\n')
assert response.headers['content-type'] == 'text/plain; charset=utf-8'
app.settings.debug = False
when()
assert status == '400 Bad Request'
assert response.text == '400 Bad Request'
assert response.headers['content-type'] == 'text/plain; charset=utf-8'
when('/foo')
assert status == 400
def test_unhandledexception(app, Given):
class MyException(Exception):
pass
@app.route()
def get(req):
raise MyException()
with pytest.raises(MyException), Given():
pass
def test_redirect(app, Given):
@app.route()
def get(req):
raise statuses.found('http://example.com')
with Given():
assert status == 302
assert response.headers['location'] == 'http://example.com'
assert response.text == ''
def test_modified(app, Given):
@app.route()
def get(req):
raise statuses.notmodified()
with Given():
assert status == 304
assert response.text == ''
def test_nocontent(app, Given):
@app.route()
def remove(req):
raise statuses.nocontent()
with Given(verb='REMOVE'):
assert status == 204
assert response == ''
|
media = 0
nomehomem = ""
idadehomem = 0
idademulher = 0
for c in range(1, 5):
    nome = str(input("Enter the name: "))
    sexo = str(input("Sex [M/F]: "))
    idade = int(input("Age: "))
    if sexo.upper() == "M":
        media += idade
        # Keep track of the oldest man seen so far
        if idade > idadehomem:
            nomehomem = nome
            idadehomem = idade
    elif sexo.upper() == "F":
        media += idade
        if idade >= 20:
            idademulher += 1
    else:
        print("Error: invalid sex")
print("The average age of the group is: {}".format(media / 4))
print("The oldest man is {} and he is {} years old".format(nomehomem, idadehomem))
print("The number of women aged 20 or over is {}".format(idademulher))
|
from asposepdf import Settings
from com.aspose.pdf import Document
from com.aspose.pdf import SvgSaveOptions
class PdfToSvg:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithDocumentConversion/PdfToSvg/'
        # Open the source PDF document
        pdf = Document(dataDir + 'input1.pdf')
        # Instantiate an object of SvgSaveOptions
        save_options = SvgSaveOptions()
        # Do not compress the SVG image into a Zip archive
        save_options.CompressOutputToZipArchive = False
        # Save the output in SVG format
        pdf.save(dataDir + "Output.svg", save_options)
print "Document has been converted successfully"
if __name__ == '__main__':
PdfToSvg() |
SG_PATH = '/work/awilf/Standard-Grid'
import sys
sys.path.append(SG_PATH)
import standard_grid
import pickle
if __name__=="__main__":
hash_in = sys.argv[1]
grid=pickle.load(open(f'.{hash_in}.pkl',"rb"))
csv_path=f"results/{hash_in}/csv_results.csv"
grid.json_interpret("output/results.txt",csv_path)
|
# note that the entire code here is my own
import numpy as np
# Util Functions
def random_action():
""" This function returns a random integer corresponding in range [0,5)"""
moves = np.random.random(size=2)*2 - 1
return moves
def one_hot(moves):
""" This function converts a 1d array to a one-hot encoding"""
return [1 if i == maximum(moves) else 0 for i in range(len(moves))]
def maximum(moves):
""" This function gets the arg max of an array"""
return np.argmax(moves)
def process_output(moves):
""" This function converts a neural network output to range [0,1] and then gives the corresponding move"""
return moves
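# Illustrative example (added for clarity): one_hot marks the arg-max position
# of an action vector.
#
#   >>> one_hot(np.array([0.2, 0.9]))
#   [0, 1]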
|
# For mutable objects, += extends the object in place, so the change is
# reflected in the previously created object; no new object is created.
def SomeListHeck(test_list):
    '''Append new elements in place; the change is visible to the caller.'''
    test_list += [2, 2, 3, 3]
def SomeListHeck2(test_list):
    '''Rebind the local name to a new list; the caller's list is unchanged.'''
    test_list = test_list + [34, 12, 45]
if __name__=="__main__":
#todo est on videos
testlist=[1,2,3,4,4]
print(testlist)
SomeListHeck(testlist)
print(testlist)
SomeListHeck2(testlist)
print(testlist)
test_string="i am a tester"
test_string1=test_string
test_string1+='dsjnd'
print(test_string1)
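    # Added illustration: the original string is unchanged, because str is
    # immutable and the += above rebound test_string1 to a brand-new object.
    print(test_string)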
|
# -*- coding: utf-8 -*-
# @Time : 2018/7/9 20:14
# @Author : QuietWoods
# @FileName: url_config.py
# @Software: PyCharm
# @Email :[email protected]
import requests
url_index = {
'url': 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/tableSearch-showTableSearchIndex.shtml',
'headers': {}
}
# Pre-processing URL; its main purpose is to send our IP to the server
url_pre_execute = {
# 'url': 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/preExecuteSearch!preExecuteSearch.do',
'url': 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/pageIsUesd-pageUsed.shtml',
'headers': {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.8",
"Connection": "keep-alive",
"Content-Length": "0",
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "http://www.pss-system.gov.cn",
"Referer": "http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/tableSearch-showTableSearchIndex.shtml",
"X-Requested-With": "XMLHttpRequest"
}
}
# Main search URL
# This URL changes frequently
mainSearch = {
'url': 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/executeTableSearch0529-executeCommandSearch.shtml',
'headers': {
"Host": "www.pss-system.gov.cn",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate",
"Referer": "http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/tableSearch-showTableSearchIndex.shtml",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache"
},
'form_data': {
"searchCondition.searchExp": '',
"searchCondition.dbId": "VDB",
"searchCondition.searchType": "Sino_foreign",
"searchCondition.extendInfo['MODE']": "MODE_TABLE",
"searchCondition.extendInfo['STRATEGY']": "STRATEGY_CALCULATE",
"searchCondition.originalLanguage": "",
"searchCondition.targetLanguage": "",
"wee.bizlog.modulelevel": "0200201",
"resultPagination.limit": '12'
}
}
# URL for querying a patent abstract
detailSearch = {
'url': 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/viewAbstractInfo0529-viewAbstractInfo.shtml',
'headers': {
"Host": "www.pss-system.gov.cn",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate",
"Referer": "http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache"
},
'form_data': {
'nrdAn': '',
'cid': '',
'sid': '',
'wee.bizlog.modulelevel': '0201101'
}
}
# Patent full text
full_text = {
'url': 'http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showFullText0529-viewFullText.shtml',
'headers': {
"Host": "www.pss-system.gov.cn",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate",
"Referer": "http://www.pss-system.gov.cn/sipopublicsearch/patentsearch/showViewList-jumpToView.shtml",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"X-Requested-With": "XMLHttpRequest",
"Connection": "keep-alive",
"Pragma": "no-cache",
"Cache-Control": "no-cache",
},
'form_data': {
'nrdAn': '',
'cid': '',
'sid': '',
'wee.bizlog.modulelevel': '0201103'
}
}
# Captcha URL
url_captcha = {
'url': 'http://www.pss-system.gov.cn/sipopublicsearch/portal/login-showPic.shtml',
'headers': {}
}
# Login URL
url_login = {
'url': 'http://www.pss-system.gov.cn/sipopublicsearch/wee/platform/wee_security_check',
'headers': {
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate",
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
'Host': 'www.pss-system.gov.cn',
'Origin': 'http://www.pss-system.gov.cn',
'Referer': 'http://www.pss-system.gov.cn/sipopublicsearch/portal/uilogin-forwardLogin.shtml',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
},
'form_data': {
"j_loginsuccess_url": "",
"j_validation_code": '',
"j_username": '',
"j_password": ''
}
}
# Directory of original pharmaceutical patent documents, in text format
PATENT_TEXT_DIR = "G:\\data\\patent\\tmp\\text"
# PATENT_TEXT_DIR = "patent_text"
# Set of patent numbers
PATENT_NUMBER_SET = "patent_number\\PDF原文集.txt"
if __name__ == '__main__':
resp = requests.get(url_index.get('url'), headers=mainSearch.get('headers'))
coo = resp.cookies
print(coo)
    del coo['JSESSIONID']
coo.set('JSESSIONID', '8U9nCtA0LoRYs75ado1-eMcbTsLZINYi2r3aILoqbKjmy9DbWY_v!891074563!-395222046',
domain='www.pss-system.gov.cn')
    del coo['IS_LOGIN']
coo.set('IS_LOGIN', 'true', domain='www.pss-system.gov.cn/sipopublicsearch/patentsearch')
coo.__delitem__("WEE_SID")
coo.set("WEE_SID", '8U9nCtA0LoRYs75ado1-eMcbTsLZINYi2r3aILoqbKjmy9DbWY_v!891074563!-395222046!1522147184692',
domain='www.pss-system.gov.cn/sipopublicsearch/patentsearch')
print(coo)
form_data = detailSearch.get('form_data')
# '''
# 'nrdAn': '',
# 'cid': '',
# 'sid': '',
# 'wee.bizlog.modulelevel': '0201101'
# '''
    form_data['nrdAn'] = 'CN201711283836'
    form_data['cid'] = 'CN201711283836.720180302FM'
    form_data['sid'] = 'CN201711283836.720180302FM'
resp = requests.post(detailSearch.get('url'), headers=detailSearch.get('headers'), cookies=coo, data=form_data)
print(resp.text)
pass
# search_exp_cn = QUERY_LIST[0].search_exp_cn
# form_data = url_search.get('formdata')
# form_data.__setitem__('searchCondition.searchExp', search_exp_cn)
# print(resp.content.decode()) |
#
# Contains State Variables
#
# FT0300 Invalid Statements
# Invalid data / null / max / min defines
import threading
INVALID_DATA_8 = 0x7a # Invalid value (corresponding to 8bit value)
INVALID_DATA_16 = 0x7ffa # Invalid value (corresponding to 16bit value)
INVALID_DATA_32 = 0x7ffffffa # Invalid value (corresponding to 32bit value)
NULL_DATA_8 = 0x7b # Indicates that the field does not exist
NULL_DATA_16 = 0x7ffb
NULL_DATA_32 = 0x7ffffffb
LOW_DATA_8 = 0x7c # Means less than the minimum value that can be expressed
LOW_DATA_16 = 0x7ffc
LOW_DATA_32 = 0x7ffffffc
HIGH_DATA_8 = 0x7d # Means greater than the maximum value that can be expressed
HIGH_DATA_16 = 0x7ffd
HIGH_DATA_32 = 0x7ffffffd
# 0x7e, 0x7f skip
# ===============================================================================
# Maximum and minimum
# ===============================================================================
TEMP_MIN_F = 0 # -40.0F, offset 40.0F
TEMP_MAX_F = 1800 # 140.0F, offset 40.0F
HUMI_MIN = 10 # 10%
HUMI_MAX = 99 # 99%
WIND_MAX = 500 # 50.0m/s
RAIN_MAX = 99999 # 9999.9mm
# WeatherSTEM info
WeatherSTEMHash = ""
# Weather Variable Sensor Reads
######################
# Weather State Variables
######################
# JSON state record
StateJSON = ""
# Weather Variable Sensor Reads
lastMainReading = "Never"
lastIndoorReading = "Never"
previousMainReading = "Never"
previousIndoorReading = "Never"
mainID = ""
insideID = ""
# Weather Variables
OutdoorTemperature = 0.0
OutdoorHumidity = 0.0
IndoorTemperature = 0.0
IndoorHumidity = 0.0
Rain60Minutes = 0.0
SunlightVisible = 0.0
SunlightUVIndex = 0.0
WindSpeed = 0
WindGust = 0
WindDirection = 0.2
TotalRain = 0
BarometricTemperature = 0
BarometricPressure = 0
Altitude = 0
BarometricPressureSeaLevel = 0
barometricTrend = True
pastBarometricReading = 0
AQI = 0.0
Hour24_AQI = 0.0
# WeatherSense AQI Values
WS_AQI = 0.0
WS_Hour24_AQI = 0.0
BatteryOK = "OK"
CPUTemperature = 0.0
# Indoor Temperature Sensor Array
IndoorTH = []
# status Values
Last_Event = "My Last Event"
# Button Variables
runRainbow = False
flashStrip = False
runOLED = True
# Solar Values
batteryVoltage = 0
batteryCurrent = 0
solarVoltage = 0
solarCurrent = 0
loadVoltage = 0
loadCurrent = 0
batteryPower = 0
solarPower = 0
loadPower = 0
batteryCharge = 0
SolarMAXLastReceived = "Never"
SolarMaxInsideTemperature = 0.0
SolarMaxInsideHumidity = 0.0
# Fan State
fanState = False
def printState():
print("-------------")
print("Current State")
print("-------------")
print("-------------")
print("latest MainSensor Reading=", lastMainReading)
print("MainDeviceNumber=", mainID)
print("OutdoorTemperature = ", OutdoorTemperature)
print("OutdoorHumidity = ", OutdoorHumidity)
print("latest Indoor Sensor Reading=", lastIndoorReading)
print("IndoorDeviceNumber=", insideID)
print("IndoorTemperature = ", IndoorTemperature)
print("IndoorHumidity = ", IndoorHumidity)
print("Rain60Minutes = ", Rain60Minutes)
print("SunlightVisible = ", SunlightVisible)
print("SunlightUVIndex = ", SunlightUVIndex)
print("WindSpeed = ", WindSpeed)
print("WindGust = ", WindGust)
print("WindDirection = ", WindDirection)
print("TotalRain = ", TotalRain)
print("BarometricTemperature = ", BarometricTemperature)
print("BarometricPressure = ", BarometricPressure)
print("Altitude = ", Altitude)
print("BarometricPressureSeaLevel = ", BarometricPressureSeaLevel)
print("BarometricTemperature = ", BarometricTemperature)
print("barometricTrend =", barometricTrend)
print("pastBarometricReading = ", pastBarometricReading)
print("AQI = ", AQI)
print("Hour24_AQI = ", Hour24_AQI)
print("WS_AQI = ", WS_AQI)
print("WS_Hour24_AQI = ", WS_Hour24_AQI)
print("Main Battery Status = ", BatteryOK)
print("CPU Temperature = ", CPUTemperature)
print("-------------")
print("runRainbow = ", runRainbow)
print("flashStrip = ", flashStrip)
print("runOLED =", runOLED)
print("-------------")
print("Last_Event = ", Last_Event)
print("-------------")
print("batteryVoltage", batteryVoltage)
print("batteryCurrent", batteryCurrent)
print("solarVoltage", solarVoltage)
print("solarCurrent", solarCurrent)
print("loadVoltage", loadVoltage)
print("loadCurrent", loadCurrent)
print("batteryPower", batteryPower)
print("solarPower", solarPower)
print("loadPower", loadPower)
print("batteryCharge", batteryCharge)
print("SolarMAX Inside Temperature", SolarMaxInsideTemperature)
print("SolarMAX Inside Humidity", SolarMaxInsideHumidity)
print("SolarMAX Last Received", SolarMAXLastReceived)
print("-------------")
print("-------------")
print("-------------")
print("fanState = ", fanState)
print("-------------")
buildJSONSemaphore = threading.Semaphore()
mqtt_client = None
|
import argparse
from pathlib import Path
import sys
import time
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import srt
class Translator(object):
"""
Source: https://github.com/agermanidis/autosub
    Class for translating a sentence from one language to another.
"""
def __init__(self, language, api_key, src, dst):
self.language = language
self.api_key = api_key
self.service = build('translate', 'v2', developerKey=self.api_key)
self.src = src
self.dst = dst
    def __call__(self, sentence):
        for attempt in range(10):
            try:
                if not sentence:
                    return None
                result = self.service.translations().list(
                    source=self.src,
                    target=self.dst,
                    format='text',
                    q=[sentence]
                ).execute()
if 'translations' in result and result['translations'] and \
'translatedText' in result['translations'][0]:
return result['translations'][0]['translatedText']
return None
except HttpError:
print("Retry translation in 5 seconds...")
time.sleep(5)
continue
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', required=True)
parser.add_argument('-o', '--output', required=True)
parser.add_argument('--src-lang', required=True)
parser.add_argument('--dest-lang', required=True)
parser.add_argument('--api-key', required=True)
args = parser.parse_args()
try:
# Open and read the input file
inputPath = Path(args.input)
with open(inputPath) as file:
contents = file.read()
subs = list(srt.parse(contents))
# Create the translator instance
# Start citation
# Source: https://github.com/agermanidis/autosub
src_language = args.src_lang
dst_language = args.dest_lang
google_translate_api_key = args.api_key
translator = Translator(dst_language, google_translate_api_key,
dst=dst_language,
src=src_language)
print("Translating from {0} to {1}: ".format(src_language, dst_language))
# End citation
for sub in subs:
print(sub.content)
translatedString = translator(sub.content)
sub.content = translatedString
print(sub.content)
print('---')
# Write subtitle to output file
            with open(args.output, "w") as f:
                f.write(srt.compose(subs))
except KeyboardInterrupt:
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Dataverse data-types data model."""
from __future__ import absolute_import
from pyDataverse.utils import dict_to_json
from pyDataverse.utils import read_file_json
from pyDataverse.utils import write_file_json
"""
Data-structure to work with data and metadata of Dataverses, Datasets and
Datafiles - coming from different sources.
"""
class Dataverse(object):
"""Base class for Dataverse data model."""
"""Attributes required for Dataverse metadata json."""
__attr_required_metadata = [
'alias',
'name',
'dataverseContacts'
]
"""Attributes valid for Dataverse metadata json."""
__attr_valid_metadata = [
'alias',
'name',
'affiliation',
'description',
'dataverseContacts',
'dataverseType'
]
"""Attributes valid for Dataverse class."""
__attr_valid_class = [
# 'datasets',
# 'dataverses',
'pid'
] + __attr_valid_metadata
def __init__(self):
"""Init a Dataverse() class.
Examples
-------
Create a Dataverse::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
"""
"""Misc"""
self.datasets = []
self.dataverses = []
self.pid = None
"""Metadata"""
self.name = None
self.alias = None
self.dataverseContacts = []
self.affiliation = None
self.description = None
self.dataverseType = None
def __str__(self):
"""Return name of Dataverse() class for users."""
return 'pyDataverse Dataverse() model class.'
def set(self, data):
"""Set class attributes with a flat dict.
Parameters
----------
data : dict
Flat dict with data. Key's must be name the same as the class
attribute, the data should be mapped to.
Examples
-------
Set Dataverse attributes via flat dict::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '[email protected]'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> dv.name
'Test pyDataverse'
"""
for key, val in data.items():
if key in self.__attr_valid_class:
self.__setattr__(key, val)
else:
# TODO: Raise Exception
print('Key {0} not valid.'.format(key))
def import_metadata(self, filename, format='dv_up'):
"""Import Dataverse metadata from file.
This simply parses in data with valid attribute naming as keys.
Data must not be complete, and also attributes required for the
metadata json export can be missing.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format.
Examples
-------
Import metadata coming from json file::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> dv.import_metadata('tests/data/dataverse_min.json')
>>> dv.name
'Test pyDataverse'
"""
data = {}
if format == 'dv_up':
metadata = read_file_json(filename)
# get first level metadata and parse it automatically
for attr in self.__attr_valid_metadata:
if attr in metadata:
data[attr] = metadata[attr]
self.set(data)
elif format == 'dv_down':
metadata = read_file_json(filename)
self.set(data)
else:
# TODO: Exception
print('Data-format not right.')
def is_valid(self):
"""Check if set attributes are valid for Dataverse api metadata creation.
The attributes required are listed in `__attr_required_metadata`.
Returns
-------
bool
True, if creation of metadata json is possible. False, if not.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '[email protected]'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
        >>> dv.is_valid()
        True
        >>> dv.name = None
        >>> dv.is_valid()
        False
"""
is_valid = True
for attr in self.__attr_required_metadata:
if not self.__getattribute__(attr):
is_valid = False
print('attribute \'{0}\' missing.'.format(attr))
return is_valid
def dict(self, format='dv_up'):
"""Create dicts in different data formats.
`dv_up`: Checks if data is valid for the different dict formats.
Parameters
----------
format : string
Data format for dict creation. Available formats are: `dv_up` with
all metadata for Dataverse api upload, and `all` with all attributes
set.
Returns
-------
dict
Data as dict.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '[email protected]'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> data = dv.dict()
>>> data['name']
'Test pyDataverse'
Todo
-------
Validate standards.
"""
data = {}
if format == 'dv_up':
if self.is_valid():
for attr in self.__attr_valid_metadata:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
                # TODO: check whether the required attributes are set; if not, raise an Exception
return data
else:
print('dict can not be created. Data is not valid for format')
return None
elif format == 'all':
for attr in self.__attr_valid_class:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
# TODO: Exception
print('Format not right for dict.')
return None
def json(self, format='dv_up'):
r"""Create json from attributes.
Parameters
----------
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format and `all` with all attributes named in
`__attr_valid_class`.
Returns
-------
string
json-formatted string of Dataverse metadata for api upload.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '[email protected]'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> data = dv.json()
>>> data
'{\n "name": "Test pyDataverse",\n "dataverseContacts": [\n {\n "contactEmail": "[email protected]"\n }\n ],\n "alias": "test-pyDataverse"\n}'
Todo
-------
Validate standards.
"""
if format == 'dv_up':
data = self.dict('dv_up')
if data:
return dict_to_json(data)
else:
return None
elif format == 'all':
data = self.dict('all')
if data:
return dict_to_json(data)
else:
return None
else:
# TODO Exception
print('data format not valid.')
def export_metadata(self, filename, format='dv_up'):
"""Export Dataverse metadata to Dataverse api upload json.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format for export. Available format is: `dv_up` with all
metadata for Dataverse api upload.
Examples
-------
Export Dataverse metadata::
>>> from pyDataverse.models import Dataverse
>>> dv = Dataverse()
>>> data = {
>>> 'dataverseContacts': [{'contactEmail': '[email protected]'}],
>>> 'name': 'Test pyDataverse',
>>> 'alias': 'test-pyDataverse'
>>> }
>>> dv.set(data)
>>> dv.export_metadata('tests/data/dataverse_export.json')
"""
if format == 'dv_up':
return write_file_json(filename, self.dict())
else:
# TODO: Exception
print('Data-format not right.')
class Dataset(object):
"""Base class for the Dataset data model."""
"""Attributes required for Dataset metadata json."""
__attr_required_metadata = [
'title',
'author',
'datasetContact',
'dsDescription',
'subject'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'].
"""
__attr_valid_metadata_datasetVersion = [
'license',
'termsOfUse',
'termsOfAccess'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'citation\'].
"""
__attr_valid_metadata_citation_dicts = [
'title',
'subtitle',
'alternativeTitle',
'alternativeURL',
'subject',
'notesText',
'productionDate',
'productionPlace',
'distributionDate',
'depositor',
'dateOfDeposit',
'kindOfData',
'seriesName',
'seriesInformation',
'relatedMaterial',
'relatedDatasets',
'otherReferences',
'dataSources',
'originOfSources',
'characteristicOfSources',
'accessToSources',
'kindOfData'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'citation\'][\'fields\'].
"""
__attr_valid_metadata_citation_arrays = {
'otherId': ['otherIdAgency', 'otherIdValue'],
'author': ['authorName', 'authorAffiliation', 'authorIdentifierScheme',
'authorIdentifier'],
'datasetContact': ['datasetContactName', 'datasetContactAffiliation',
'datasetContactEmail'],
'dsDescription': ['dsDescriptionValue', 'dsDescriptionDate'],
'keyword': ['keywordValue', 'keywordVocabulary',
'keywordVocabularyURI'],
'producer': ['producerName', 'producerAffiliation',
'producerAbbreviation', 'producerURL', 'producerLogoURL'],
'contributor': ['contributorType', 'contributorName'],
'grantNumber': ['grantNumberAgency', 'grantNumberValue'],
'topicClassification': ['topicClassValue', 'topicClassVocab'],
'publication': ['publicationCitation', 'publicationIDType',
'publicationIDNumber', 'publicationURL'],
'distributor': ['distributorName', 'distributorAffiliation',
'distributorAbbreviation', 'distributorURL',
'distributorLogoURL'],
'timePeriodCovered': ['timePeriodCoveredStart',
'timePeriodCoveredEnd'],
'dateOfCollection': ['dateOfCollectionStart', 'dateOfCollectionEnd'],
'software': ['softwareName', 'softwareVersion']
}
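    # Note (added for clarity): each key is a compound Dataverse field and its
    # list names the sub-fields an entry may carry, e.g. an 'author' entry is a
    # dict like {'authorName': ..., 'authorAffiliation': ...}.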
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'geospatial\'].
"""
__attr_valid_metadata_geospatial_dicts = [
'geographicUnit'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'geospatial\'][\'fields\'].
"""
__attr_valid_metadata_geospatial_arrays = {
'geographicCoverage': ['country', 'state', 'city',
'otherGeographicCoverage'],
'geographicBoundingBox': ['westLongitude', 'eastLongitude',
'northLongitude', 'southLongitude']
}
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'socialscience\'].
"""
__attr_valid_metadata_socialscience_dicts = [
'unitOfAnalysis',
'universe',
'timeMethod',
'dataCollector',
'collectorTraining',
'frequencyOfDataCollection',
'samplingProcedure',
'deviationsFromSampleDesign',
'collectionMode',
'researchInstrument',
'dataCollectionSituation',
'actionsToMinimizeLoss',
'controlOperations',
'weighting',
'cleaningOperations',
'datasetLevelErrorNotes',
'responseRate',
'samplingErrorEstimates',
'otherDataAppraisal',
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'journal\'].
"""
__attr_valid_metadata_journal_dicts = [
'journalArticleType'
]
"""
Dataset metadata attributes of Dataverse api upload inside
[\'datasetVersion\'][\'metadataBlocks\'][\'journal\'][\'fields\'].
"""
__attr_valid_metadata_journal_arrays = {
'journalVolumeIssue': ['journalVolume', 'journalIssue',
'journalPubDate']
}
"""Attributes valid for Dataset class."""
__attr_valid_class = [
'datafiles'
] + __attr_valid_metadata_datasetVersion \
+ __attr_valid_metadata_citation_dicts \
+ list(__attr_valid_metadata_citation_arrays.keys()) \
+ __attr_valid_metadata_geospatial_dicts \
+ list(__attr_valid_metadata_geospatial_arrays.keys()) \
+ __attr_valid_metadata_socialscience_dicts \
+ __attr_valid_metadata_journal_dicts \
        + list(__attr_valid_metadata_journal_arrays.keys())
def __init__(self):
"""Init a Dataset() class.
Examples
-------
Create a Dataverse::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
"""
"""Misc"""
self.datafiles = []
"""Metadata: dataset"""
self.license = None
self.termsOfUse = None
self.termsOfAccess = None
"""Metadata: citation"""
self.citation_displayName = None
self.title = None
self.subtitle = None
self.alternativeTitle = None
self.alternativeURL = None
self.otherId = []
self.author = []
self.datasetContact = []
self.dsDescription = []
self.subject = []
self.keyword = []
self.topicClassification = []
self.publication = []
self.notesText = None
self.producer = []
self.productionDate = None
self.productionPlace = None
self.contributor = []
self.grantNumber = []
self.distributor = []
self.distributionDate = None
self.depositor = None
self.dateOfDeposit = None
self.timePeriodCovered = []
self.dateOfCollection = []
self.kindOfData = []
self.seriesName = None
self.seriesInformation = None
self.software = []
self.relatedMaterial = []
self.relatedDatasets = []
self.otherReferences = []
self.dataSources = []
self.originOfSources = None
self.characteristicOfSources = None
self.accessToSources = None
"""Metadata: geospatial"""
self.geospatial_displayName = None
self.geographicCoverage = []
self.geographicUnit = None
self.geographicBoundingBox = []
"""Metadata: socialscience"""
self.socialscience_displayName = None
self.unitOfAnalysis = []
self.universe = []
self.timeMethod = None
self.dataCollector = None
self.collectorTraining = None
self.frequencyOfDataCollection = None
self.samplingProcedure = None
self.targetSampleActualSize = None
self.targetSampleSizeFormula = None
self.socialScienceNotesType = None
self.socialScienceNotesSubject = None
self.socialScienceNotesText = None
self.deviationsFromSampleDesign = None
self.collectionMode = None
self.researchInstrument = None
self.dataCollectionSituation = None
self.actionsToMinimizeLoss = None
self.controlOperations = None
self.weighting = None
self.cleaningOperations = None
self.datasetLevelErrorNotes = None
self.responseRate = None
self.samplingErrorEstimates = None
self.otherDataAppraisal = None
"""Metadata: journal"""
self.journal_displayName = None
self.journalVolumeIssue = []
self.journalArticleType = None
def __str__(self):
"""Return name of Dataset() class for users."""
return 'pyDataverse Dataset() model class.'
def set(self, data):
"""Set class attributes with a flat dict as input.
Parameters
----------
data : dict
Flat dict with data. Key's must be name the same as the class
attribute, the data should be mapped to.
Examples
-------
Set Dataverse attributes via flat dict::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019'
>>> }
>>> ds.set(data)
>>> ds.title
'pyDataverse study 2019'
"""
for key, val in data.items():
if key in self.__attr_valid_class or key == 'citation_displayName' or key == 'geospatial_displayName' or key == 'socialscience_displayName' or key == 'journal_displayName' or key == 'targetSampleActualSize' or key == 'targetSampleSizeFormula' or key == 'socialScienceNotesType' or key == 'socialScienceNotesText' or key == 'socialScienceNotesSubject':
self.__setattr__(key, val)
else:
# TODO: Raise Exception
print('Key {0} not valid.'.format(key))
def import_metadata(self, filename, format='dv_up'):
"""Import Dataset metadata from file.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
api upload compatible format.
Examples
-------
Set Dataverse attributes via flat dict::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> ds.import_metadata('tests/data/dataset_full.json')
>>> ds.title
'Replication Data for: Title'
"""
data = {}
if format == 'dv_up':
metadata = read_file_json(filename)
"""dataset"""
# get first level metadata and parse it automatically
for key, val in metadata['datasetVersion'].items():
if key in self.__attr_valid_metadata_datasetVersion:
data[key] = val
# get nested metadata and parse it manually
if 'dataverseContacts' in metadata:
data['contactEmail'] = []
for contact in metadata['dataverseContacts']:
for key, val in contact.items():
if key == 'contactEmail':
data['contactEmail'].append(val)
"""citation"""
if 'citation' in metadata['datasetVersion']['metadataBlocks']:
citation = metadata['datasetVersion']['metadataBlocks']['citation']
if 'displayName' in citation:
data['citation_displayName'] = citation['displayName']
for field in citation['fields']:
if field['typeName'] in self.__attr_valid_metadata_citation_dicts:
data[field['typeName']] = field['value']
if field['typeName'] in self.__attr_valid_metadata_citation_arrays:
data[field['typeName']] = self.__parse_dicts(
field['value'],
self.__attr_valid_metadata_citation_arrays[field['typeName']])
if field['typeName'] == 'series':
if 'seriesName' in field['value']:
data['seriesName'] = field['value']['seriesName']['value']
if 'seriesInformation' in field['value']:
data['seriesInformation'] = field['value']['seriesInformation']['value']
else:
# TODO: Exception
print('citation not in json')
"""geospatial"""
if 'geospatial' in metadata['datasetVersion']['metadataBlocks']:
geospatial = metadata['datasetVersion']['metadataBlocks']['geospatial']
if 'displayName' in geospatial:
self.__setattr__('geospatial_displayName',
geospatial['displayName'])
for field in geospatial['fields']:
if field['typeName'] in self.__attr_valid_metadata_geospatial_dicts:
data[field['typeName']] = field['value']
if field['typeName'] in self.__attr_valid_metadata_geospatial_arrays:
data[field['typeName']] = self.__parse_dicts(
field['value'],
self.__attr_valid_metadata_geospatial_arrays[field['typeName']])
else:
# TODO: Exception
print('geospatial not in json')
"""socialscience"""
if 'socialscience' in metadata['datasetVersion']['metadataBlocks']:
socialscience = metadata['datasetVersion']['metadataBlocks']['socialscience']
if 'displayName' in socialscience:
self.__setattr__('socialscience_displayName',
socialscience['displayName'])
for field in socialscience['fields']:
if field['typeName'] in self.__attr_valid_metadata_socialscience_dicts:
data[field['typeName']] = field['value']
if field['typeName'] == 'targetSampleSize':
if 'targetSampleActualSize' in field['value']:
data['targetSampleActualSize'] = field['value']['targetSampleActualSize']['value']
if 'targetSampleSizeFormula' in field['value']:
data['targetSampleSizeFormula'] = field['value']['targetSampleSizeFormula']['value']
if field['typeName'] == 'socialScienceNotes':
if 'socialScienceNotesType' in field['value']:
data['socialScienceNotesType'] = field['value']['socialScienceNotesType']['value']
if 'socialScienceNotesSubject' in field['value']:
data['socialScienceNotesSubject'] = field['value']['socialScienceNotesSubject']['value']
if 'socialScienceNotesText' in field['value']:
data['socialScienceNotesText'] = field['value']['socialScienceNotesText']['value']
else:
# TODO: Exception
print('socialscience not in json')
"""journal"""
if 'journal' in metadata['datasetVersion']['metadataBlocks']:
journal = metadata['datasetVersion']['metadataBlocks']['journal']
if 'displayName' in journal:
self.__setattr__('journal_displayName',
journal['displayName'])
for field in journal['fields']:
if field['typeName'] in self.__attr_valid_metadata_journal_dicts:
data[field['typeName']] = field['value']
if field['typeName'] in self.__attr_valid_metadata_journal_arrays:
data[field['typeName']] = self.__parse_dicts(
field['value'],
self.__attr_valid_metadata_journal_arrays[field['typeName']])
else:
# TODO: Exception
print('journal not in json')
self.set(data)
elif format == 'dv_down':
metadata = read_file_json(filename)
self.set(data)
else:
# TODO: Exception
print('Data-format not right')
def __parse_dicts(self, data, attr_list):
"""Parse out Dataverse api metadata dicts.
Parameters
----------
data : list
List of Dataverse api metadata fields.
attr_list : list
List of attributes to be parsed.
Returns
-------
list
List of dicts with parsed out key-value pairs.
"""
data_tmp = []
for d in data:
tmp_dict = {}
for key, val in d.items():
if key in attr_list:
tmp_dict[key] = val['value']
else:
print('Key \'{0}\' not in attribute list'.format(key))
data_tmp.append(tmp_dict)
return data_tmp
def is_valid(self):
"""Check if attributes available are valid for Dataverse api metadata creation.
The attributes required are listed in `__attr_required_metadata`.
Returns
-------
bool
True, if creation of metadata json is possible. False, if not.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019'
>>> }
>>> ds.set(data)
>>> ds.is_valid()
False
>>> ds.author = [{'authorName': 'LastAuthor1, FirstAuthor1'}]
>>> ds.datasetContact = [{'datasetContactName': 'LastContact1, FirstContact1'}]
>>> ds.subject = ['Engineering']
>>> ds.is_valid()
True
Todo
-------
Test out required fields or ask Harvard.
"""
is_valid = True
# check if all required attributes are set
for attr in self.__attr_required_metadata:
if not self.__getattribute__(attr):
is_valid = False
print('Metadata not valid: attribute \'{0}\' missing.'.format(attr))
# check if attribute sets are complete where necessary
tp_cov = self.__getattribute__('timePeriodCovered')
if tp_cov:
for tp in tp_cov:
if 'timePeriodCoveredStart' in tp or 'timePeriodCoveredEnd' in tp:
if not ('timePeriodCoveredStart' in tp and 'timePeriodCoveredEnd' in tp):
is_valid = False
d_coll = self.__getattribute__('dateOfCollection')
if d_coll:
for d in d_coll:
if 'dateOfCollectionStart' in d or 'dateOfCollectionEnd' in d:
if not ('dateOfCollectionStart' in d and 'dateOfCollectionEnd' in d):
is_valid = False
authors = self.__getattribute__('author')
if authors:
for a in authors:
if 'authorAffiliation' in a or 'authorIdentifierScheme' in a or 'authorIdentifier' in a:
if 'authorName' not in a:
is_valid = False
ds_contac = self.__getattribute__('datasetContact')
if ds_contac:
for c in ds_contac:
if 'datasetContactAffiliation' in c or 'datasetContactEmail' in c:
if 'datasetContactName' not in c:
is_valid = False
producer = self.__getattribute__('producer')
if producer:
for p in producer:
if 'producerAffiliation' in p or 'producerAbbreviation' in p or 'producerURL' in p or 'producerLogoURL' in p:
if not p['producerName']:
is_valid = False
contributor = self.__getattribute__('contributor')
if contributor:
for c in contributor:
if 'contributorType' in c:
if 'contributorName' not in c:
is_valid = False
distributor = self.__getattribute__('distributor')
if distributor:
for d in distributor:
if 'distributorAffiliation' in d or 'distributorAbbreviation' in d or 'distributorURL' in d or 'distributorLogoURL' in d:
if 'distributorName' not in d:
is_valid = False
bbox = self.__getattribute__('geographicBoundingBox')
if bbox:
for b in bbox:
if b:
if not ('westLongitude' in b and 'eastLongitude' in b and 'northLongitude' in b and 'southLongitude' in b):
is_valid = False
return is_valid
def dict(self, format='dv_up'):
"""Create dicts in different data formats.
Parameters
----------
format : string
Data format for dict creation. Available formats are: `dv_up` with
all metadata for Dataverse api upload, and `all` with all attributes
set.
Returns
-------
dict
Data as dict.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
>>> 'dsDescription': 'New study about pyDataverse usage in 2019'
>>> }
>>> ds.set(data)
        >>> data = ds.dict()
>>> data['title']
'pyDataverse study 2019'
Todo
-------
Validate standard
"""
if format == 'dv_up':
if self.is_valid():
data = {}
data['datasetVersion'] = {}
data['datasetVersion']['metadataBlocks'] = {}
citation = {}
citation['fields'] = []
geospatial = {}
geospatial['fields'] = []
socialscience = {}
socialscience['fields'] = []
journal = {}
journal['fields'] = []
"""dataset"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_datasetVersion:
if self.__getattribute__(attr) is not None:
data['datasetVersion'][attr] = self.__getattribute__(attr)
"""citation"""
if self.citation_displayName:
citation['displayName'] = self.citation_displayName
# Generate first level attributes
for attr in self.__attr_valid_metadata_citation_dicts:
if self.__getattribute__(attr) is not None:
citation['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate fields attributes
for key, val in self.__attr_valid_metadata_citation_arrays.items():
if self.__getattribute__(key) is not None:
citation['fields'].append({
'typeName': key,
'value': self.__generate_dicts(key, val)
})
# Generate series attributes
if self.__getattribute__('seriesName') is not None or self.__getattribute__('seriesInformation') is not None:
tmp_dict = {}
tmp_dict['value'] = {}
if self.__getattribute__('seriesName') is not None:
tmp_dict['value']['seriesName'] = {}
tmp_dict['value']['seriesName']['typeName'] = 'seriesName'
tmp_dict['value']['seriesName']['value'] = self.__getattribute__('seriesName')
if self.__getattribute__('seriesInformation') is not None:
tmp_dict['value']['seriesInformation'] = {}
tmp_dict['value']['seriesInformation']['typeName'] = 'seriesInformation'
tmp_dict['value']['seriesInformation']['value'] = self.__getattribute__('seriesInformation')
citation['fields'].append({
'typeName': 'series',
'value': tmp_dict
})
"""geospatial"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_geospatial_dicts:
if self.__getattribute__(attr) is not None:
geospatial['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate fields attributes
for key, val in self.__attr_valid_metadata_geospatial_arrays.items():
# check if attribute exists
if self.__getattribute__(key) is not None:
geospatial['fields'].append({
'typeName': key,
'value': self.__generate_dicts(key, val)
})
"""socialscience"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_socialscience_dicts:
if self.__getattribute__(attr) is not None:
socialscience['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate targetSampleSize attributes
if self.__getattribute__('targetSampleActualSize') is not None or self.__getattribute__('targetSampleSizeFormula') is not None:
tmp_dict = {}
tmp_dict['value'] = {}
                if self.__getattribute__('targetSampleActualSize') is not None:
                    tmp_dict['value']['targetSampleActualSize'] = {}
                    tmp_dict['value']['targetSampleActualSize']['typeName'] = 'targetSampleActualSize'
                    tmp_dict['value']['targetSampleActualSize']['value'] = self.__getattribute__('targetSampleActualSize')
                if self.__getattribute__('targetSampleSizeFormula') is not None:
                    tmp_dict['value']['targetSampleSizeFormula'] = {}
                    tmp_dict['value']['targetSampleSizeFormula']['typeName'] = 'targetSampleSizeFormula'
                    tmp_dict['value']['targetSampleSizeFormula']['value'] = self.__getattribute__('targetSampleSizeFormula')
socialscience['fields'].append({
'typeName': 'targetSampleSize',
'value': tmp_dict
})
# Generate socialScienceNotes attributes
if self.__getattribute__('socialScienceNotesType') is not None or self.__getattribute__('socialScienceNotesSubject') is not None or self.__getattribute__('socialScienceNotesText') is not None:
tmp_dict = {}
tmp_dict['value'] = {}
if self.__getattribute__('socialScienceNotesType') is not None:
tmp_dict['value']['socialScienceNotesType'] = {}
tmp_dict['value']['socialScienceNotesType']['typeName'] = 'socialScienceNotesType'
tmp_dict['value']['socialScienceNotesType']['value'] = self.__getattribute__('socialScienceNotesType')
if self.__getattribute__('socialScienceNotesSubject') is not None:
tmp_dict['value']['socialScienceNotesSubject'] = {}
tmp_dict['value']['socialScienceNotesSubject']['typeName'] = 'socialScienceNotesSubject'
tmp_dict['value']['socialScienceNotesSubject']['value'] = self.__getattribute__('socialScienceNotesSubject')
if self.__getattribute__('socialScienceNotesText') is not None:
tmp_dict['value']['socialScienceNotesText'] = {}
tmp_dict['value']['socialScienceNotesText']['typeName'] = 'socialScienceNotesText'
tmp_dict['value']['socialScienceNotesText']['value'] = self.__getattribute__('socialScienceNotesText')
socialscience['fields'].append({
'typeName': 'socialScienceNotes',
'value': tmp_dict
})
"""journal"""
# Generate first level attributes
for attr in self.__attr_valid_metadata_journal_dicts:
if self.__getattribute__(attr) is not None:
journal['fields'].append({
'typeName': attr,
'value': self.__getattribute__(attr)
})
# Generate fields attributes
for key, val in self.__attr_valid_metadata_journal_arrays.items():
if self.__getattribute__(key) is not None:
journal['fields'].append({
'typeName': key,
'value': self.__generate_dicts(key, val)
})
                # TODO: check whether the required attributes are set; if not, raise an Exception!
data['datasetVersion']['metadataBlocks']['citation'] = citation
data['datasetVersion']['metadataBlocks']['socialscience'] = socialscience
data['datasetVersion']['metadataBlocks']['geospatial'] = geospatial
data['datasetVersion']['metadataBlocks']['journal'] = journal
return data
else:
print('dict can not be created. Data is not valid for format')
return None
        elif format == 'all':
            data = {}
            for attr in self.__attr_valid_class:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
print('dict can not be created. Format is not valid')
return None
def __generate_dicts(self, key, val):
"""Generate dicts for array attributes of Dataverse api metadata upload.
Parameters
----------
key : string
Name of attribute
val : string
Value of attribute.
Returns
-------
list
List of filled dicts of metadata for Dataverse api upload.
"""
# check if attribute exists
tmp_list = []
if self.__getattribute__(key):
# loop over list of attribute dicts()
for d in self.__getattribute__(key):
tmp_dict = {}
# iterate over key-value pairs
for k, v in d.items():
# check if key is in attribute list
if k in val:
tmp_dict[k] = {}
tmp_dict[k]['typeName'] = k
tmp_dict[k]['value'] = v
tmp_list.append(tmp_dict)
return tmp_list
def json(self, format='dv_up'):
"""Create Dataset json from attributes.
Parameters
----------
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format and `all` with all attributes named in
`__attr_valid_class`.
Returns
-------
string
json-formatted string of Dataverse metadata for api upload.
Examples
-------
Get json of Dataverse api upload::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
        >>> 'dsDescription': 'New study about pyDataverse usage in 2019',
>>> 'author': [{'authorName': 'LastAuthor1, FirstAuthor1'}],
>>> 'datasetContact': [{'datasetContactName': 'LastContact1, FirstContact1'}],
>>> 'subject': ['Engineering'],
>>> }
>>> ds.set(data)
>>> data = ds.json()
Todo
-------
TODO: Validate standard
TODO: Link to default json file
"""
if format == 'dv_up':
return dict_to_json(self.dict())
elif format == 'all':
return dict_to_json(self.dict('all'))
else:
# TODO Exception
print('data format not valid.')
def export_metadata(self, filename, format='dv_up'):
"""Export Dataset metadata to Dataverse api upload json.
Parameters
----------
filename : string
Filename with full path.
format : string
Data format for export. Available format is: `dv_up` with all
metadata for Dataverse api upload.
Examples
-------
Export metadata to json file::
>>> from pyDataverse.models import Dataset
>>> ds = Dataset()
>>> data = {
>>> 'title': 'pyDataverse study 2019',
        >>> 'dsDescription': 'New study about pyDataverse usage in 2019',
>>> 'author': [{'authorName': 'LastAuthor1, FirstAuthor1'}],
>>> 'datasetContact': [{'datasetContactName': 'LastContact1, FirstContact1'}],
>>> 'subject': ['Engineering'],
>>> }
>>> ds.export_metadata('tests/data/export_dataset.json')
"""
if format == 'dv_up':
return write_file_json(filename, self.dict())
else:
# TODO: Exception
print('Data-format not right.')
class Datafile(object):
"""Base class for the Datafile model.
Parameters
----------
filename : string
Filename with full path.
pid : type
Description of parameter `pid` (the default is None).
Attributes
----------
description : string
Description of datafile
restrict : bool
Unknown
__attr_required_metadata : list
List with required metadata.
__attr_valid_metadata : list
List with valid metadata for Dataverse api upload.
__attr_valid_class : list
List of all attributes.
pid
filename
"""
"""Attributes required for Datafile metadata json."""
__attr_required_metadata = [
'filename',
'pid'
]
"""Attributes on first level of Datafile metadata json."""
__attr_valid_metadata = [
'description',
'pid',
'restrict'
]
"""Attributes on first level of Datafile metadata json."""
__attr_valid_class = [
'filename'
] + __attr_valid_metadata
def __init__(self, filename=None, pid=None):
"""Init a Datafile() class.
Parameters
----------
filename : string
Filename with full path.
pid : string
Persistend identifier, e.g. DOI.
Examples
-------
Create a Datafile::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> df
<pyDataverse.models.Datafile at 0x7f4dfc0466a0>
"""
"""Misc"""
self.pid = pid
self.filename = filename
"""Metadata"""
self.description = None
self.restrict = None
def __str__(self):
"""Return name of Datafile() class for users."""
return 'pyDataverse Datafile() model class.'
def set(self, data):
"""Set class attributes with a flat dict.
Parameters
----------
data : dict
Flat dict with data. Key's must be name the same as the class
attribute, the data should be mapped to.
Examples
-------
Set Datafile attributes via flat dict::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
>>> df.pid
'doi:10.11587/EVMUHP',
"""
for key, val in data.items():
if key in self.__attr_valid_class:
self.__setattr__(key, val)
else:
# TODO: Raise Exception
print('Key {0} not valid.'.format(key))
def is_valid(self):
"""Check if set attributes are valid for Dataverse api metadata creation.
Returns
-------
bool
True, if creation of metadata json is possible. False, if not.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
        >>> df.is_valid()
        True
        >>> df.filename = None
        >>> df.is_valid()
        False
"""
is_valid = True
for attr in self.__attr_required_metadata:
if self.__getattribute__(attr) is None:
is_valid = False
print('attribute \'{0}\' missing.'.format(attr))
return is_valid
def dict(self, format='dv_up'):
"""Create dict in different data formats.
Parameters
----------
format : string
Data format for dict creation. Available formats are: `dv_up` with
all metadata for Dataverse api upload, and `all` with all attributes
set.
Returns
-------
dict
Data as dict.
Examples
-------
Check if metadata is valid for Dataverse api upload::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
>>> data = df.dict()
>>> data['description']
'Test file'
Todo
-------
Validate standards.
"""
data = {}
if format == 'dv_up':
if self.is_valid():
for attr in self.__attr_valid_metadata:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
print('dict can not be created. Data is not valid')
return None
elif format == 'all':
for attr in self.__attr_valid_class:
if self.__getattribute__(attr) is not None:
data[attr] = self.__getattribute__(attr)
return data
else:
# TODO: Exception
print('Format not right for dict.')
return None
def json(self, format='dv_up'):
r"""Create json from attributes.
Parameters
----------
format : string
Data format of input. Available formats are: `dv_up` for Dataverse
Api upload compatible format and `all` with all attributes named in
`__attr_valid_class`.
Returns
-------
string
json-formatted string of Dataverse metadata for api upload.
Examples
-------
Get dict of Dataverse metadata::
>>> from pyDataverse.models import Datafile
>>> df = Datafile()
>>> data = {
>>> 'pid': 'doi:10.11587/EVMUHP',
>>> 'description': 'Test file',
>>> 'filename': 'tests/data/datafile.txt'
>>> }
>>> df.set(data)
>>> df.dict()
{'description': 'Test file',
'directoryLabel': None,
'restrict': None}
Todo
-------
Validate standards.
Link to default json file
"""
if format == 'dv_up':
data = self.dict('dv_up')
if data:
return dict_to_json(data)
else:
print('Dict can not be created')
return None
elif format == 'all':
data = self.dict('all')
if data:
return dict_to_json(data)
else:
print('Dict can not be created')
return None
else:
# TODO Exception
print('data format not valid.')
return None
|
#! /usr/bin/env python3
import sys
def print_usage_and_exit():
print(f"Usage: {sys.argv[0]} <workload> <ufs_data_dir> <ext4_data_dir>")
exit(1)
if len(sys.argv) != 4:
print_usage_and_exit()
workload = sys.argv[1]
ufs_data_dir = sys.argv[2]
ext4_data_dir = sys.argv[3]
valid_workloads = [f"ycsb-{name}" for name in ["a", "b", "c", "d", "e", "f"]]
NUM_OPS = 100000 if "fill" not in workload \
else 10000000 if "random" not in workload else 2000000
if workload not in valid_workloads:
print_usage_and_exit()
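# Note (added for clarity): collect_data sums per-app throughput for each
# client count from 1 to 10. Each app's throughput comes from the "Timer 0:"
# line of its log; the reported latency is assumed to be in microseconds,
# hence the 1e6 factor converting to operations per second.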
def collect_data(data_dir):
tp_list = []
for num_app in range(1, 11):
tp_sum = 0
for appid in range(1, num_app + 1):
with open(
f"{data_dir}/{workload}_num-app-{num_app}_leveldb/leveldb-{appid}.out",
"r") as f:
for line in f:
split = line.split()
                    if len(split) >= 3 and split[0] == "Timer" and split[1] == "0:":
                        latency = int(split[2])
                        tp_sum += NUM_OPS / latency * 1000000
tp_list.append(tp_sum)
return tp_list
ufs_tp = collect_data(ufs_data_dir)
ext4_tp = collect_data(ext4_data_dir)
print("# num_client\tuFS\t\text4")
for i in range(0, 10):
print(f"{i+1}\t\t{ufs_tp[i]:.2f}\t{ext4_tp[i]:.2f}")
|
__author__ = 'Maxim Dutkin ([email protected])'
import unittest
from m2core import M2Core
from m2core.common import Permission, And, Or, Not, PermissionsEnum
from m2core.data_schemes.db_system_scheme import M2PermissionCheckMixin
class User(M2PermissionCheckMixin):
def __init__(self, permissions: set):
self.permissions = permissions
class PermissionsTest(unittest.TestCase):
def setUp(self):
self.maxDiff = None
m2core = M2Core()
m2core.run_for_test()
class PlatformPerms(PermissionsEnum):
Perm1 = Permission('perm 1')
Perm2 = Permission('perm 2')
Perm3 = Permission('perm 3')
Perm4 = Permission('perm 4')
Perm5 = Permission('perm 5')
Perm6 = Permission('perm 6')
Perm7 = Permission('perm 7')
Perm8 = Permission('perm 8')
Perm9 = Permission('perm 9')
Perm10 = Permission('perm 10')
Perm11 = Permission('perm 11')
Perm12 = Permission('perm 12')
Perm13 = Permission('perm 13')
Perm14 = Permission('perm 14')
Perm15 = Permission('perm 15')
Perm16 = Permission('perm 16')
Perm17 = Permission('perm 17')
class AnotherPlatformPerms(PermissionsEnum):
Perm18 = Permission('perm 18')
Perm19 = Permission('perm 19')
Perm20 = Permission('perm 20')
Perm21 = Permission('perm 21')
Perm22 = Permission('perm 22')
self.pp1 = PlatformPerms
self.pp2 = AnotherPlatformPerms
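        # Each sample below is a 4-tuple (comment added for clarity):
        # (permission expression built with &, | and ~,
        #  the equivalent And/Or/Not tree it should compile to,
        #  a permission set that satisfies the expression,
        #  a permission set that must not satisfy it).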
self.sample_set = [
(
# 0
PlatformPerms.Perm1 & PlatformPerms.Perm2 & PlatformPerms.Perm3 |
PlatformPerms.Perm4 & PlatformPerms.Perm5 & PlatformPerms.Perm6 |
PlatformPerms.Perm7 & PlatformPerms.Perm8 & PlatformPerms.Perm9 |
PlatformPerms.Perm10 | PlatformPerms.Perm11 | PlatformPerms.Perm12,
Or(
And(PlatformPerms.Perm1, PlatformPerms.Perm2, PlatformPerms.Perm3),
And(PlatformPerms.Perm4, PlatformPerms.Perm5, PlatformPerms.Perm6),
And(PlatformPerms.Perm7, PlatformPerms.Perm8, PlatformPerms.Perm9),
PlatformPerms.Perm10,
PlatformPerms.Perm11,
PlatformPerms.Perm12
),
{PlatformPerms.Perm12},
{PlatformPerms.Perm1, PlatformPerms.Perm2, PlatformPerms.Perm4,
PlatformPerms.Perm6, PlatformPerms.Perm7, PlatformPerms.Perm8,
PlatformPerms.Perm13}
),
(
# 1
(~PlatformPerms.Perm1 & ~PlatformPerms.Perm2 & PlatformPerms.Perm3) |
(~PlatformPerms.Perm4 & ~PlatformPerms.Perm5 & PlatformPerms.Perm6) |
(~PlatformPerms.Perm7 & ~PlatformPerms.Perm8 & PlatformPerms.Perm9) |
PlatformPerms.Perm10 | PlatformPerms.Perm11 | PlatformPerms.Perm12,
Or(
And(Not(PlatformPerms.Perm1), Not(PlatformPerms.Perm2), PlatformPerms.Perm3),
And(Not(PlatformPerms.Perm4), Not(PlatformPerms.Perm5), PlatformPerms.Perm6),
And(Not(PlatformPerms.Perm7), Not(PlatformPerms.Perm8), PlatformPerms.Perm9),
PlatformPerms.Perm10,
PlatformPerms.Perm11,
PlatformPerms.Perm12
),
{PlatformPerms.Perm3, PlatformPerms.Perm6},
{PlatformPerms.Perm1, PlatformPerms.Perm2, PlatformPerms.Perm4,
PlatformPerms.Perm6, PlatformPerms.Perm7, PlatformPerms.Perm8,
PlatformPerms.Perm13}
),
(
# 2
PlatformPerms.Perm1 & PlatformPerms.Perm2 & PlatformPerms.Perm3 |
PlatformPerms.Perm4 & ~PlatformPerms.Perm5 & PlatformPerms.Perm6 |
PlatformPerms.Perm7 & PlatformPerms.Perm8 & PlatformPerms.Perm9 |
PlatformPerms.Perm10 | ~PlatformPerms.Perm11 | PlatformPerms.Perm12,
Or(
And(PlatformPerms.Perm1, PlatformPerms.Perm2, PlatformPerms.Perm3),
And(PlatformPerms.Perm4, Not(PlatformPerms.Perm5), PlatformPerms.Perm6),
And(PlatformPerms.Perm7, PlatformPerms.Perm8, PlatformPerms.Perm9),
PlatformPerms.Perm10,
Not(PlatformPerms.Perm11),
PlatformPerms.Perm12
),
{PlatformPerms.Perm4, PlatformPerms.Perm6},
{PlatformPerms.Perm4, PlatformPerms.Perm5, PlatformPerms.Perm6, PlatformPerms.Perm11}
),
(
# 3
~(PlatformPerms.Perm1 & PlatformPerms.Perm2),
Not(
And(PlatformPerms.Perm1, PlatformPerms.Perm2)
),
{PlatformPerms.Perm1},
{PlatformPerms.Perm1, PlatformPerms.Perm2}
),
(
# 4
~PlatformPerms.Perm1,
Not(PlatformPerms.Perm1),
{PlatformPerms.Perm2, PlatformPerms.Perm3},
{PlatformPerms.Perm1, PlatformPerms.Perm2, PlatformPerms.Perm3}
),
(
# 5
~((PlatformPerms.Perm1 | ~PlatformPerms.Perm2) &
(~PlatformPerms.Perm3 | PlatformPerms.Perm4) &
(PlatformPerms.Perm5 | ~PlatformPerms.Perm6)),
Not(
And(
Or(PlatformPerms.Perm1, Not(PlatformPerms.Perm2)),
Or(Not(PlatformPerms.Perm3), PlatformPerms.Perm4),
Or(PlatformPerms.Perm5, Not(PlatformPerms.Perm6)),
)
),
{PlatformPerms.Perm2, PlatformPerms.Perm3, PlatformPerms.Perm6},
{PlatformPerms.Perm1, PlatformPerms.Perm2, PlatformPerms.Perm5, PlatformPerms.Perm6}
)
]
def test_constructions(self):
for num, (sample, right_result, true_perm_set, false_perm_set) in enumerate(self.sample_set):
self.assertEqual(
repr(sample.rule_chain),
repr(right_result),
msg=f'Error in {num} sample'
)
def test_user_can(self):
for num, (sample, right_result, true_perm_set, false_perm_set) in enumerate(self.sample_set):
user = User(true_perm_set)
self.assertTrue(
user.can(sample),
msg=f'Error in {num} sample'
)
def test_user_can_not(self):
for num, (sample, right_result, true_perm_set, false_perm_set) in enumerate(self.sample_set):
user = User(false_perm_set)
self.assertFalse(
user.can(sample),
msg=f'Error in {num} sample'
)
def test_and(self):
perm = self.pp1.Perm1 & self.pp1.Perm2 & self.pp1.Perm3 & self.pp1.Perm4
user = User({self.pp1.Perm1, self.pp1.Perm2, self.pp1.Perm3, self.pp1.Perm4})
self.assertEqual(type(perm.rule_chain), And)
self.assertEqual(len(perm.rule_chain), 4)
self.assertTrue(user.can(perm))
def test_or(self):
perm = self.pp1.Perm1 | self.pp1.Perm2 | self.pp1.Perm3 | self.pp1.Perm4
user = User({self.pp1.Perm10, self.pp1.Perm12, self.pp1.Perm13, self.pp1.Perm4})
self.assertEqual(type(perm.rule_chain), Or)
self.assertEqual(len(perm.rule_chain), 4)
self.assertTrue(user.can(perm))
def test_not(self):
perm = ~(self.pp1.Perm1 & self.pp1.Perm2 & self.pp1.Perm3 & self.pp1.Perm4)
user = User({self.pp1.Perm11, self.pp1.Perm12, self.pp1.Perm13, self.pp1.Perm14})
self.assertEqual(type(perm.rule_chain), Not)
self.assertEqual(len(perm.rule_chain), 1)
self.assertEqual(type(perm.rule_chain[0]), And)
self.assertTrue(user.can(perm))
def test_permission_properties(self):
        with self.assertRaises(AttributeError):
            Permission()
        with self.assertRaises(AttributeError):
            Permission(None)
        with self.assertRaises(AttributeError):
            Permission('')
name = 'permission 1'
sys_name = 'PERMISSION_1'
custom_sys_name = 'COOL_PERMISSION_____1'
p = Permission(name)
self.assertEqual(p.name, name)
p = Permission(name, None)
self.assertEqual(p.sys_name, sys_name)
p = Permission(name, sys_name)
self.assertEqual(p.sys_name, sys_name)
p = Permission(name, custom_sys_name)
self.assertEqual(p.sys_name, custom_sys_name)
def test_permissions_enum_all(self):
result = self.pp1.ALL
expected_result = {
self.pp1.AUTHORIZED,
self.pp1.Perm1,
self.pp1.Perm2,
self.pp1.Perm3,
self.pp1.Perm4,
self.pp1.Perm5,
self.pp1.Perm6,
self.pp1.Perm7,
self.pp1.Perm8,
self.pp1.Perm9,
self.pp1.Perm10,
self.pp1.Perm11,
self.pp1.Perm12,
self.pp1.Perm13,
self.pp1.Perm14,
self.pp1.Perm15,
self.pp1.Perm16,
self.pp1.Perm17,
}
self.assertEqual(expected_result, result)
def test_all_across_instances(self):
result = self.pp1.all_platform_permissions
expected_result = {
self.pp1.AUTHORIZED,
self.pp1.Perm1,
self.pp1.Perm2,
self.pp1.Perm3,
self.pp1.Perm4,
self.pp1.Perm5,
self.pp1.Perm6,
self.pp1.Perm7,
self.pp1.Perm8,
self.pp1.Perm9,
self.pp1.Perm10,
self.pp1.Perm11,
self.pp1.Perm12,
self.pp1.Perm13,
self.pp1.Perm14,
self.pp1.Perm15,
self.pp1.Perm16,
self.pp1.Perm17,
self.pp2.Perm18,
self.pp2.Perm19,
self.pp2.Perm20,
self.pp2.Perm21,
self.pp2.Perm22,
}
self.assertEqual(expected_result, result)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .common import _findRawContent
from telegram_util import matchKey
def getAttrString(attrs):
if not attrs:
return ''
r = []
for k, v in attrs.items():
if k in ['content']:
continue
r.append(k + ': ' + str(v))
return '\n'.join(r)
def _yieldPossibleAuthorItem(soup):
yield soup.find("meta", {"name": "byl"})
yield soup.find("span", {"class" : "byline__name"})
for item in soup.find_all('meta'):
if 'author' in getAttrString(item.attrs):
yield item
for item in soup.find_all('div', class_='news_about'):
yield item.find('p')
yield soup.find("a", {"id" : "js_name"})
yield soup.find('a', class_='author-url')
yield soup.find('span', class_='posted-date')
yield soup.find('a', class_='name')
yield soup.find('div', class_='article-author')
yield soup.find("meta", {"name": "application-name"})
for key in ['articleauthor', 'author', '/people/']:
for item in soup.find_all('a'):
if matchKey(getAttrString(item.attrs), [key]):
yield item
def _yieldPossibleOrgItem(soup):
yield soup.find("meta", {"property": "twitter:site"})
yield soup.find("meta", {"property": "twitter:domain"})
yield soup.find("meta", {"property": "og:site_name"})
def _findPossibleRawContent(item_iterator, words_to_ignore = []):
for item in item_iterator:
if not item:
continue
r = _findRawContent(item)
if r and not matchKey(r, words_to_ignore):
index = r.find(' - ')
if index == -1:
return r
else:
return r[:index]
def _findOrgName(soup):
head = str(soup.find('head'))
if matchKey(head, ['bbc.com']):
return 'BBC', True
if matchKey(head, ['nyt.com', 'new york times']):
return 'NYT', True
if matchKey(head, ['stackoverflow']):
return 'StackOverflow', False
if matchKey(head, ['medium.com']):
return 'Medium', False
	if matchKey(head, ['dw.com']):
return 'DW', True
r = _findPossibleRawContent(_yieldPossibleOrgItem(soup))
if r:
return r, False
return 'Source', False
def _findAuthor(soup):
author_name = _findPossibleRawContent(
_yieldPossibleAuthorItem(soup),
['://', 'http', 'www'])
org, required = _findOrgName(soup)
if not author_name:
return org
if not required or '-' in author_name:
return author_name
return author_name + ' - ' + org |
# -*- coding: utf-8 -*-
# Copyright (c) 2020-2021 Ramon van der Winkel.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
# create a competition match for a specific club, from the command line
from django.core.management.base import BaseCommand
from Competitie.models import Competitie, DeelcompetitieRonde, LAAG_REGIO
from NhbStructuur.models import NhbRegio, NhbCluster, NhbVereniging
from Wedstrijden.models import CompetitieWedstrijd
import datetime
class Command(BaseCommand):
help = "Maak en competitiewedstrijd aan"
def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
super().__init__(stdout, stderr, no_color, force_color)
def add_arguments(self, parser):
parser.add_argument('afstand', nargs=1, help="Competitie afstand: 18 of 25")
parser.add_argument('geo', nargs=1, help="Regio (101) of cluster (101N)")
parser.add_argument('ver_nr', nargs=1, help="Verenigingsnummer")
parser.add_argument('datum', nargs=1, help="Datum in formaat YYYY-MM-DD")
parser.add_argument('tijd', nargs=1, help="Tijdstip in formaat HHMM")
def _maak_regio_wedstrijd(self, comp, geo, ver_nr, datum, tijd):
pass
@staticmethod
def _maak_wedstrijd(plan, ver, datum, tijd):
loc = ver.wedstrijdlocatie_set.all()[0]
wedstrijd = CompetitieWedstrijd(beschrijving='automatisch aangemaakt',
vereniging=ver,
locatie=loc,
datum_wanneer=datum,
tijd_begin_wedstrijd=tijd,
tijd_begin_aanmelden='00:00',
tijd_einde_wedstrijd='00:00')
wedstrijd.save()
plan.wedstrijden.add(wedstrijd)
def handle(self, *args, **options):
afstand = options['afstand'][0]
geo = options['geo'][0]
ver_nr = options['ver_nr'][0]
datum = options['datum'][0]
tijd = options['tijd'][0]
if afstand not in ('18', '25'):
self.stderr.write('afstand moet 18 of 25 zijn')
return
cluster = None
try:
if len(geo) == 4:
cluster = NhbCluster.objects.get(regio__regio_nr=geo[:3],
letter=geo[-1])
regio = cluster.regio
else:
regio = NhbRegio.objects.get(regio_nr=geo)
except (NhbRegio.DoesNotExist, NhbCluster.DoesNotExist):
self.stderr.write('Geen regio of cluster kunnen matchen met %s' % repr(geo))
return
try:
ver = NhbVereniging.objects.get(ver_nr=ver_nr)
except NhbVereniging.DoesNotExist:
self.stderr.write('Kan vereniging %s niet vinden' % repr(ver_nr))
return
        # check that the club is a member of the cluster
if cluster:
if not cluster.nhbvereniging_set.filter(ver_nr=ver_nr).exists():
self.stderr.write('Vereniging %s zit niet in cluster %s' % (repr(ver_nr), repr(geo)))
return
done = False
for comp in Competitie.objects.filter(is_afgesloten=False,
afstand=afstand):
for ronde in (DeelcompetitieRonde
.objects
.filter(deelcompetitie__competitie=comp,
deelcompetitie__laag=LAAG_REGIO,
deelcompetitie__nhb_regio=regio,
cluster=cluster)):
self._maak_wedstrijd(ronde.plan, ver, datum, tijd)
done = True
# for
# for
if not done:
self.stderr.write('Geen competitie kunnen vinden in fase < F')
# end of file
|
# Generated by Django 4.0 on 2021-12-12 04:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CheckersGame', '0003_alter_game_turn'),
]
operations = [
migrations.AlterField(
model_name='game',
name='room_code',
field=models.CharField(max_length=8),
),
]
|
from dateutil.parser import parse
def is_date(string, fuzzy=False):
"""
Return whether the string can be interpreted as a date.
Parameters
----------
string: str
str, string to check for date
fuzzy: bool
ignore unknown tokens in string if True
"""
try:
parse(string, fuzzy=fuzzy)
return True
except ValueError:
return False
def is_numeric(s):
    """test if a string is numeric"""
    if not s:
        return False
    for c in s:
        if c not in "1234567890-.":
            return False
    return True
def change_numeric(data):
"""if the data to be sorted is numeric change to float"""
new_data = []
if is_numeric(data[0][0]):
# change child to a float
for child, col in data:
new_data.append((float(child), col))
return new_data
return data
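
# Quick usage sketch of the helpers above (illustrative values only; is_date
# needs python-dateutil, imported at the top of this module).
if __name__ == "__main__":
    print(is_date("2021-06-01"))                          # expected: True
    print(is_date("not a date"))                          # expected: False
    print(change_numeric([("10", "c1"), ("2.5", "c2")]))  # expected: [(10.0, 'c1'), (2.5, 'c2')]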
|
# Copyright (c) 2018 Red Hat, Inc.
# All Rights Reserved.
# Python
import logging
# Django
from django.utils.translation import gettext_lazy as _
# Django REST Framework
from rest_framework.response import Response
from rest_framework.exceptions import PermissionDenied
# AWX
# from awx.main.analytics import collectors
import awx.main.analytics.subsystem_metrics as s_metrics
from awx.main.analytics.metrics import metrics
from awx.api import renderers
from awx.api.generics import APIView
logger = logging.getLogger('awx.analytics')
class MetricsView(APIView):
name = _('Metrics')
swagger_topic = 'Metrics'
renderer_classes = [renderers.PlainTextRenderer, renderers.PrometheusJSONRenderer, renderers.BrowsableAPIRenderer]
def get(self, request):
'''Show Metrics Details'''
if request.user.is_superuser or request.user.is_system_auditor:
metrics_to_show = ''
if not request.query_params.get('subsystemonly', "0") == "1":
metrics_to_show += metrics().decode('UTF-8')
if not request.query_params.get('dbonly', "0") == "1":
metrics_to_show += s_metrics.metrics(request)
return Response(metrics_to_show)
raise PermissionDenied()
|
from django.apps import AppConfig
from django.conf import settings
from django.utils.autoreload import autoreload_started
def schema_watchdog(sender, **kwargs):
sender.watch_dir(settings.BASE_DIR, "**/*.graphql")
class DirectorConfig(AppConfig):
name = "director"
def ready(self):
autoreload_started.connect(schema_watchdog)
|
from .player import *
from .projectile import *
from .asteroids import *
from .powerups import *
|
from django import forms
from django.conf import settings
from django.core.mail import EmailMessage
from captcha.fields import CaptchaField
from localflavor.us.forms import USPhoneNumberField
class ContactForm(forms.Form):
name = forms.CharField()
email_address = forms.EmailField()
phone_number = USPhoneNumberField(required=False)
message = forms.CharField(widget=forms.Textarea)
captcha = CaptchaField()
def send_mail(self, ip_address):
self.cleaned_data['ip_address'] = ip_address
email_message = EmailMessage()
email_message.subject = 'Message from %s' % self.cleaned_data['name']
email_message.body = '''
Name: %(name)s
Email address: %(email_address)s
Phone number: %(phone_number)s
Message: %(message)s
IP address: %(ip_address)s
'''.strip() % self.cleaned_data
email_message.to = [settings.DEFAULT_FROM_EMAIL]
email_message.send() |
import random
import numpy as np
from .mutate_method_call import MutateMethodCall
def flip_bit(pos, chrom: bytearray):
chrom[pos >> 3] ^= (128 >> (pos & 7))
def get_bit(pos, chrom: bytearray):
return chrom[pos >> 3] >> (7 - (pos & 7)) & 1
def mutation_reserve_bytes(self: MutateMethodCall, chrom: bytearray):
size = len(chrom) * 8
for i in range(size):
if random.random() < self.prob_mutate:
flip_bit(i, chrom)
def mutation_swap_bytes(self: MutateMethodCall, chrom: bytearray):
size = len(chrom) * 8
for i in range(size):
if random.random() < self.prob_mutate:
n = random.randint(0, size - 1)
i_bit = get_bit(i, chrom)
n_bit = get_bit(n, chrom)
if i_bit != n_bit:
flip_bit(i, chrom)
flip_bit(n, chrom)
def mutation_reserve(self: MutateMethodCall, chrom: np.array):
"""
    flip 0/1 genes with probability prob_mutate (bit-flip mutation)
:param self:
:param chrom: 0/1 type chromosome
"""
mask = (np.random.rand(len(chrom)) < self.prob_mutate)
chrom ^= mask
def mutation_swap(self: MutateMethodCall, chrom: np.array):
"""
swap gene with random n position gene according to prob_mutate
:param self:
:param chrom: 0/1 type chromosome
"""
for i in range(len(chrom)):
if random.random() < self.prob_mutate:
            n = np.random.randint(0, len(chrom))
chrom[i], chrom[n] = chrom[n], chrom[i]
def mutation_reverse(self: MutateMethodCall, chrom: np.array):
"""
Reverse n1 to n2
Also called `2-Opt`: removes two random edges, reconnecting them so they cross
Karan Bhatia, "Genetic Algorithms and the Traveling Salesman Problem", 1994
https://pdfs.semanticscholar.org/c5dd/3d8e97202f07f2e337a791c3bf81cd0bbb13.pdf
:param self:
:param chrom: 0/1 type chromosome
"""
n1, n2 = np.random.randint(0, len(chrom) - 1, 2)
if n1 >= n2:
n1, n2 = n2, n1 + 1
chrom[n1:n2] = chrom[n1:n2][::-1]
def mutation_transpose(self: MutateMethodCall, chrom: np.array):
"""
randomly generate n1 < n2 < n3. Notice: not equal
:param self:
:param chrom: 0/1 type chromosome
"""
n1, n2, n3 = sorted(np.random.randint(0, chrom.shape[0] - 2, 3))
n2 += 1
n3 += 2
slice1, slice2, slice3, slice4 = chrom[0:n1], chrom[n1:n2], chrom[n2:n3 + 1], chrom[n3 + 1:]
new_chrom = np.concatenate([slice1, slice3, slice2, slice4])
for i in range(len(chrom)):
chrom[i] = new_chrom[i]
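
# Quick usage sketch of the helpers above. MutateMethodCall is not constructed
# here; a SimpleNamespace with a prob_mutate attribute stands in for it, which
# assumes that is the only attribute these functions read. Because of the
# relative import at the top, run this as a module (python -m ...) if you want
# to try the sketch.
if __name__ == "__main__":
    from types import SimpleNamespace

    stub = SimpleNamespace(prob_mutate=0.05)

    bits = bytearray(b"\x00\x00")
    flip_bit(3, bits)
    print(get_bit(3, bits))  # 1: bit 3 of the first byte is now set

    perm = np.arange(10)
    mutation_reverse(stub, perm)  # reverses one random sub-slice in place (2-Opt)
    print(perm)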
|
#pylint: disable=W0703,R0912,R0904,E1101,R0904,E1124,W0105
"""
Copyright 2014 eBay Software Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Thread to perform creation of a service
"""
import traceback
from agent.lib.errors import Errors
from agent.lib.errors import AgentException
from agent.lib.agent_thread.agent_thread import AgentThread
import logging
import pylons
from agent.lib import manifestutil, contextutils
import os
from agent.lib.utils import readlink
import re
LOG = logging.getLogger(__name__)
class AgentUpdate(AgentThread):
""" This thread will attempt to update agent
"""
THREAD_NAME = 'agent_update'
CAT = 'agent_blocking'
def __init__(self, threadMgr, wisbVersion, wisbSource = None, skipProp = True):
""" Constructor """
AgentThread.__init__(self, threadMgr, cat = [AgentUpdate.CAT], name = AgentUpdate.THREAD_NAME)
self.__wisbVersion = wisbVersion
self.__wisbSource = wisbSource
self.__packages = []
self.__service = 'agent'
self.__manifest = ('agent_selfupdate-%s' % self.__wisbVersion)
self.__skipProp = skipProp
def doRun(self):
""" Main body of the thread """
errorMsg = ""
errorCode = None
failed = False
appGlobal = pylons.config['pylons.app_globals']
appGlobal.agentInfo['update_inprogress'] = True
try:
self._checkStop()
self.__prepManifest()
self._checkStop()
self.__createManifest()
self._checkStop()
self.__activateManifest()
except AgentException as exc:
failed = True
errorMsg = 'Agent update - Agent Exception - %s' % exc.getMsg()
errorCode = exc.getCode()
except Exception as exc:
failed = True
errorMsg = 'Agent update - Unknown error - (%s/%s) - %s - %s' \
% ('agent', self.__manifest, str(exc), traceback.format_exc(5))
errorCode = Errors.UNKNOWN_ERROR
finally:
if failed:
LOG.error(errorMsg)
self._updateStatus(httpStatus = 500, error = errorCode, errorMsg = errorMsg)
self._updateStatus(progress = 100)
appGlobal.agentInfo['update_inprogress'] = False
def __prepManifest(self):
""" prepare data for manifest
"""
try:
if not self.__wisbVersion:
raise AgentException(Errors.MANIFEST_NOT_FOUND, 'wisb version not found')
agtPkg, agtcfgPkg = AgentUpdate.getAgentPkgs(self.__wisbSource, self.__wisbVersion)
pyPkg = AgentUpdate.getLocalPyPkg()
LOG.info('Agent update using local python package %s' % pyPkg)
self.__packages = [agtPkg, agtcfgPkg, pyPkg]
except AgentException as exc:
errorMsg = 'Agent update - prepManifest - Agent exception - %s - %s' \
% (str(exc), traceback.format_exc(2))
raise exc
except Exception as exc:
errorMsg = 'Agent update - prepManifest - Unknown error - %s - %s' \
% (str(exc), traceback.format_exc(2))
error = Errors.UNKNOWN_ERROR
raise AgentException(error, errorMsg)
def __createManifest(self):
""" create a manifest
"""
service = 'agent'
LOG.info("Create Manifest %s - %s - %s" % (service, self.__manifest, str(self.__packages)))
path = manifestutil.manifestPath(service, self.__manifest)
# check to see if the manifest already exists
if (os.path.isdir(path)):
LOG.info('Manifest %s already exist, skip creating' % self.__manifest)
return
from agent.lib.agent_thread.manifest_create import ManifestCreate
manThread = ManifestCreate(self._threadMgr, service, self.__manifest, self.__packages, skipProp = self.__skipProp)
contextutils.copycontexts(self, manThread, contextutils.CTX_NAMES)
manThread.run()
status = manThread.getStatus()
        if ('error' in status and status['error']):
raise AgentException(status['error'], status['errorMsg'])
def __activateManifest(self):
""" activate a manifest
"""
LOG.info("Activate Manifest %s - %s" % (self.__service, self.__manifest))
# make sure manifest to be activated exist
manifestPath = manifestutil.manifestPath(self.__service, self.__manifest)
if (not os.path.exists(manifestPath)):
LOG.error('Manifest %s does not exist, fail activation' % self.__manifest)
raise AgentException(Errors.MANIFEST_NOT_FOUND, 'Manifest %s does not exist' % self.__manifest)
# check to see if the manifest already active
activeManifest = manifestutil.activeManifestPath(self.__service)
if activeManifest == self.__manifest:
LOG.info('Manifest %s already active, skip activation' % self.__manifest)
return
from agent.lib.agent_thread.activate_manifest import ActivateManifest
activateThread = ActivateManifest(self._threadMgr, self.__service, self.__manifest)
contextutils.copycontexts(self, activateThread, contextutils.CTX_NAMES)
activateThread.run()
status = activateThread.getStatus()
        if ('error' in status and status['error']):
raise AgentException(status['error'], status['errorMsg'])
nameRe = re.compile('^[a-zA-Z0-9_]+-[0-9]+\.[0-9]+\.[a-zA-Z0-9_]+\.[a-zA-Z0-9_]+?')
@staticmethod
def getAgentPkgs(wisbSource, wisbVersion):
""" generate agent packages location
"""
if not wisbSource:
wisbSource = pylons.config['selfupdate_source']
agtPkg = '%s/agent-%s.unix.cronus' % (wisbSource, wisbVersion)
agtcfgPkg = '%s/agent_config-%sprod.unix.cronus' % (wisbSource, wisbVersion)
return (agtPkg, agtcfgPkg)
@staticmethod
def getLocalPyPkg():
"""
reuse of local python package instead of download it again from source of truth,
this should be the common use case for selfupdate without needing to update the python package
"""
activeManifest = manifestutil.getActiveManifest('agent')
activePyLink = os.path.join(manifestutil.manifestPath('agent', activeManifest), 'python_package')
if (os.path.exists(activePyLink)):
activePyPath = readlink(activePyLink)
pyVersion = os.path.basename(activePyPath)
pyPkgName = ('python_package-%s' % pyVersion)
if AgentUpdate.nameRe.match(pyPkgName):
return ('http://localhost:12020/%s.cronus' % pyPkgName)
else:
raise AgentException(Errors.PACKAGE_SCHEME_ERROR, 'package name %s is not valid' % pyPkgName)
|
for _ in range(int(input())):
n=int(input())
s=input()
for i in range(5):
if s[0:i]+s[n-(4-i):n]=='2020':
print('YES')
break
else:
print('NO')
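
# The loop above keeps a prefix of length i and a suffix of length 4 - i and
# checks whether together they read "2020". The same idea as a standalone
# helper (illustrative only; assumes len(s) >= 4 as in the original problem):
def can_spell_2020(s: str) -> bool:
    n = len(s)
    return any(s[:i] + s[n - (4 - i):] == '2020' for i in range(5))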
|
import numpy as np
import math
from distance import euclidean, manhattan, cosine
from time import time
VALID_DISTANCE_ARG = {
"euclidean": euclidean,
"manhattan": manhattan,
"cosine": cosine
}
VALID_INIT_CENTROID_ARG = ["random", "naive_sharding"]
class KMeans():
'''
Initialization of KMeans model
params:
    - k : number of clusters
    - init_centroid : strategy to initialize the centroids. valid arguments: "random", "naive_sharding"
    - distance : metric used to calculate the distance between data points. valid arguments: "euclidean", "manhattan", "cosine"
'''
def __init__(self, k=3, init_centroid="random", distance="euclidean"):
self.k = k
if init_centroid in VALID_INIT_CENTROID_ARG:
self.init_centroid = init_centroid
else:
raise Exception("init_centroid is not valid")
if distance in VALID_DISTANCE_ARG.keys():
self.distance = VALID_DISTANCE_ARG[distance]
else:
raise Exception("distance is not valid")
def choose_random_point(self, X):
'''
        Pick a random point with each coordinate drawn uniformly from [min(X), max(X)]
'''
min_val = np.min(X)
max_val = np.max(X)
return np.random.uniform(low=min_val,high=max_val, size=(self.n_features,))
def random_init(self, X):
'''
Initialize each cluster's centroid with random point
'''
initial_centroids = []
for _ in range(self.k):
rand_centroid = self.choose_random_point(X)
initial_centroids.append(rand_centroid)
return initial_centroids
def naive_sharding_init(self, X):
'''
Intuition from this article https://www.kdnuggets.com/2017/03/naive-sharding-centroid-initialization-method.html
1. sum each instance and create new column for it
2. sort by sum column from 1
3. split into k-equal size, we call it shard
4. get mean of each shard and make them the centroids of each cluster
'''
initial_centroids = []
# 1
list_of_instance_sum_tupple = []
for instance in X:
list_of_instance_sum_tupple.append((np.sum(instance), instance))
# 2
list_of_instance_sum_tupple.sort(key=lambda tup: tup[0], reverse=False)
# 3 & 4
segment = math.ceil(len(list_of_instance_sum_tupple) / self.k)
for i in range(self.k):
# 3
shard = list_of_instance_sum_tupple[(i * segment):((i+1) * segment)]
shard = [x[1] for x in shard]
# 4 mean of shard
mean_shard = np.zeros(self.n_features)
for x in shard:
mean_shard = mean_shard + x
mean_shard = mean_shard / len(shard)
initial_centroids.append(mean_shard)
return initial_centroids
def train(self, X, max_iteration = 100, tolerance = 0.001, verbose=False):
'''
Process to train data into K cluster using KMeans
params:
- X : data train (2D array)
        - max_iteration : maximum number of iterations before training stops
        - tolerance : stop iterating when the centroids move less than this amount between iterations
'''
start_time = time()
X = np.array(X)
# Validate: matrix X must be 2D array
if len(X.shape) != 2:
raise Exception("Data must be 2D array")
# save the dimension of features
self.n_features = X[0].shape[0]
# Create k cluster and initialize centroid foreach cluster
self.centroids = []
if self.init_centroid == "random":
self.centroids = self.random_init(X)
else:
self.centroids = self.naive_sharding_init(X)
if verbose:
print("initial centroid", self.centroids)
# Init empty cluster member
self.cluster_members = [[] for _ in range(self.k)]
# Enter the iteration
iteration = 0
total_diff = float("inf")
while iteration < max_iteration:
if verbose:
print("iteration", iteration)
print("centroid", self.centroids)
current_inertia = float(0.0)
current_cluster_members = [[] for _ in range(self.k)]
for data_point in X:
# print()
# print(data_point)
# calculate distance to each centroids
min_distance = float("inf")
cluster = 0
for cluster_idx, centroid_i in enumerate(self.centroids):
distance = self.distance(centroid_i, data_point)
# print("centroid, distance", centroid_i, distance)
if distance <= min_distance:
cluster = cluster_idx
min_distance = distance
# the nearest distance will place the point to corresponding cluster
current_cluster_members[cluster].append(data_point)
current_inertia = current_inertia + min_distance
if verbose:
print("cluster member")
for idx, ccm in enumerate(current_cluster_members):
print("cluster" + str(idx), ccm)
new_centroids = [[] for _ in range(self.k)]
for cluster_i in range(self.k):
# Adjust new centroids
new_centroid_i = np.zeros(self.n_features)
members_of_current_cluster = current_cluster_members[cluster_i]
if len(members_of_current_cluster) > 0:
for member in current_cluster_members[cluster_i]:
new_centroid_i = new_centroid_i + member
new_centroid_i = new_centroid_i / len(members_of_current_cluster) # Get average point from all members
else:
# If cluster has no member then pick random point
new_centroid_i = self.choose_random_point(X)
new_centroids[cluster_i] = new_centroid_i
if verbose:
print("new centroid", new_centroids)
# Stop Iteration if centroids do not change
total_diff = float(0.0)
for cluster_i in range(self.k):
total_diff = total_diff + self.distance(self.centroids[cluster_i], new_centroids[cluster_i])
self.centroids = new_centroids
self.cluster_members = current_cluster_members
self.inertia = current_inertia
if verbose:
print("total diffs:", total_diff)
print()
if total_diff <= tolerance:
break
iteration = iteration + 1
if verbose:
print(self.cluster_members)
for idx, cm in enumerate(self.cluster_members):
print("cluster"+ str(idx), cm)
print("Training time", (time() - start_time) * 100 , "ms")
print("Stopped at iteration", iteration)
self.n_iteration = iteration
return self.predict(X)
def predict(self, X):
result = []
for data_point in X:
# calculate distance to each centroids
min_distance = float("inf")
cluster = None
for cluster_idx, centroid_i in enumerate(self.centroids):
distance = self.distance(centroid_i, data_point)
if distance <= min_distance:
cluster = cluster_idx
min_distance = distance
result.append(cluster)
return result
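
# Minimal usage sketch for the class above; the points and cluster count are
# illustrative, and running it relies on the same local `distance` module
# imported at the top of this file.
if __name__ == "__main__":
    data = [[1.0, 1.0], [1.2, 0.8], [0.9, 1.1],
            [5.0, 5.1], [5.2, 4.9], [4.8, 5.0]]
    model = KMeans(k=2, init_centroid="naive_sharding", distance="euclidean")
    labels = model.train(data, max_iteration=50)
    print("labels:", labels)
    print("centroids:", model.centroids)
    print("inertia:", model.inertia)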
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isValidBST(self, root: TreeNode) -> bool:
def helper(node, lower = float('-inf'), upper = float('inf')):
if node is None:
return True
val = node.val
if val <= lower or val >= upper:
return False
if not helper(node.right, val, upper):
return False
if not helper(node.left, lower, val):
return False
return True
return helper(root) |
import threading
from slackbot.bot import Bot
from app.rtm.emoji_fetcher import connect
import app
def main():
rtm_reaction_loop = threading.Thread(target=connect)
rtm_reaction_loop.start()
bot = Bot()
bot.run()
if __name__ == "__main__":
main()
|
# dict.py
cities = {"CA":'San Francisco', "MI":'Detroit', "FL":'Jacksonville'} # 定义dict
cities['NY'] = 'New York' # 添加元素
cities['OR'] = 'Portland'
def find_city(themap, state): # define a function
if state in themap:
return themap[state]
else:
return "Not found."
cities['_find'] = find_city # store the function find_city in the cities dict under the key '_find'.
# '_find' is the key for the function find_city
while True:
print("State? (ENTER to quit)")
state = input("> ")
if not state: break
city_found = cities['_find'](cities, state)
    # equivalent to: city_found = find_city(cities, state)
print(city_found)
|
from discord.ext import commands
from bolt.optional_cogs.base import OptionalCog
class Example(OptionalCog):
"""
An example file to demonstrate
the function of optional cog usage.
"""
RESTRICTED = False
@commands.command()
async def hello(self, ctx):
await ctx.send("Hello from an optional cog!")
|
#!/usr/bin/env python
#
# Templite+
# A light-weight, fully functional, general purpose templating engine
#
# Copyright (c) 2009 joonis new media
# Author: Thimo Kraemer <[email protected]>
#
# Based on Templite by Tomer Filiba
# http://code.activestate.com/recipes/496702/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import sys, os
import re
class Templite(object):
autowrite = re.compile('(^[\'\"])|(^[a-zA-Z0-9_\[\]\'\"]+$)')
delimiters = ('${', '}$')
cache = {}
def __init__(self, text=None, filename=None,
encoding='utf-8', delimiters=None, caching=False):
"""Loads a template from string or file."""
if filename:
filename = os.path.abspath(filename)
mtime = os.path.getmtime(filename)
self.file = key = filename
elif text is not None:
self.file = mtime = None
key = hash(text)
else:
raise ValueError('either text or filename required')
# set attributes
self.encoding = encoding
self.caching = caching
if delimiters:
start, end = delimiters
if len(start) != 2 or len(end) != 2:
raise ValueError('each delimiter must be two characters long')
self.delimiters = delimiters
# check cache
cache = self.cache
if caching and key in cache and cache[key][0] == mtime:
self._code = cache[key][1]
return
# read file
if filename:
with open(filename) as fh:
text = fh.read()
self._code = self._compile(text)
if caching:
cache[key] = (mtime, self._code)
def _compile(self, source):
offset = 0
tokens = ['# -*- coding: %s -*-' % self.encoding]
start, end = self.delimiters
escaped = (re.escape(start), re.escape(end))
regex = re.compile('%s(.*?)%s' % escaped, re.DOTALL)
for i, part in enumerate(regex.split(source)):
part = part.replace('\\'.join(start), start)
part = part.replace('\\'.join(end), end)
if i % 2 == 0:
if not part: continue
part = part.replace('\\', '\\\\').replace('"', '\\"')
part = '\t' * offset + 'write("""%s""")' % part
else:
part = part.rstrip()
if not part: continue
part_stripped = part.lstrip()
if part_stripped.startswith(':'):
if not offset:
raise SyntaxError('no block statement to terminate: ${%s}$' % part)
offset -= 1
part = part_stripped[1:]
if not part.endswith(':'): continue
elif self.autowrite.match(part_stripped):
part = 'write(%s)' % part_stripped
lines = part.splitlines()
margin = min(len(l) - len(l.lstrip()) for l in lines if l.strip())
part = '\n'.join('\t' * offset + l[margin:] for l in lines)
if part.endswith(':'):
offset += 1
tokens.append(part)
if offset:
raise SyntaxError('%i block statement(s) not terminated' % offset)
return compile('\n'.join(tokens), self.file or '<string>', 'exec')
def render(self, **namespace):
"""Renders the template according to the given namespace."""
stack = []
namespace['__file__'] = self.file
# add write method
def write(*args):
for value in args:
#if isinstance(value, str):
# value = value.encode(self.encoding)
stack.append(str(value))
namespace['write'] = write
# add include method
def include(file):
if not os.path.isabs(file):
if self.file:
base = os.path.dirname(self.file)
else:
base = os.path.dirname(sys.argv[0])
file = os.path.join(base, file)
t = Templite(None, file, self.encoding,
self.delimiters, self.caching)
stack.append(t.render(**namespace))
namespace['include'] = include
# execute template code
exec(self._code, namespace)
return ''.join(stack)
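
# Minimal usage sketch for the class above, using the default ${ }$ delimiters;
# the template text and variable names are illustrative only.
if __name__ == "__main__":
    t = Templite("Hello ${name}$!\n${for i in range(2):}$line ${i}$\n${:}$")
    print(t.render(name="world"))
    # expected output:
    # Hello world!
    # line 0
    # line 1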
|
import collections
import re
from typing import List

class Solution:
def mostCommonWord(self, paragraph: str, banned: List[str]) -> str:
banset = set(banned)
return collections.Counter([w for w in re.findall(r'\w+', paragraph.lower()) if w not in banset]).most_common(1)[0][0]
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# False and True are not distinct from 0 and 1 under Python 2.2,
# and we want to handle boolean options differently.
class MyBool(object):
def __init__(self, value):
self.value = value
def __repr__(self):
if self.value:
return 'True'
return 'False'
def __nonzero__(self):
return self.value
MYTRUE = MyBool(True)
MYFALSE = MyBool(False)
import os
### add your favorite here
BAD_LIBC_WORKAROUND_DEFAULT = MYFALSE
if os.name == 'posix':
if os.uname()[0] in ['Darwin']:
BAD_LIBC_WORKAROUND_DEFAULT = MYTRUE
MIN_INCOMPLETE = 100
if os.name == 'nt':
from BitTorrent.platform import win_version_num
# starting in XP SP2 the incomplete outgoing connection limit was set to 10
if win_version_num >= (2, 5, 1, 2, 0):
MIN_INCOMPLETE = 10
from BitTorrent import languages
basic_options = [
('data_dir', '',
_("directory under which variable data such as fastresume information "
"and GUI state is saved. Defaults to subdirectory 'data' of the "
"bittorrent config directory.")),
('filesystem_encoding', '',
_("character encoding used on the local filesystem. "
"If left empty, autodetected. "
"Autodetection doesn't work under python versions older than 2.3")),
('language', '',
_("ISO Language code to use") + ': ' + ', '.join(languages)),
]
common_options = [
('ip', '',
_("ip to report to the tracker (has no effect unless you are on the same "
"local network as the tracker)")),
('forwarded_port', 0,
_("world-visible port number if it's different from the one the client "
"listens on locally")),
('minport', 6881,
_("minimum port to listen on, counts up if unavailable")),
('maxport', 6999,
_("maximum port to listen on")),
('bind', '',
_("ip to bind to locally")),
('display_interval', .5,
_("seconds between updates of displayed information")),
('rerequest_interval', 5 * 60,
_("minutes to wait between requesting more peers")),
('min_peers', 20,
_("minimum number of peers to not do rerequesting")),
('max_initiate', 60,
_("number of peers at which to stop initiating new connections")),
('max_incomplete', MIN_INCOMPLETE,
_("max number of outgoing incomplete connections")),
('max_allow_in', 80,
_("maximum number of connections to allow, after this new incoming "
"connections will be immediately closed")),
('check_hashes', MYTRUE,
_("whether to check hashes on disk")),
('max_upload_rate', 20,
_("maximum kB/s to upload at, 0 means no limit")),
('min_uploads', 2,
_("the number of uploads to fill out to with extra optimistic unchokes")),
('max_files_open', 50,
_("the maximum number of files in a multifile torrent to keep open at a "
"time, 0 means no limit. Used to avoid running out of file descriptors.")),
('start_trackerless_client', MYTRUE,
_("Initialize a trackerless client. This must be enabled in order to download trackerless torrents.")),
('upnp', MYTRUE,
_("Enable automatic port mapping")+' (UPnP)'),
]
rare_options = [
('keepalive_interval', 120.0,
_("number of seconds to pause between sending keepalives")),
('download_slice_size', 2 ** 14,
_("how many bytes to query for per request.")),
('max_message_length', 2 ** 23,
_("maximum length prefix encoding you'll accept over the wire - larger "
"values get the connection dropped.")),
('socket_timeout', 300.0,
_("seconds to wait between closing sockets which nothing has been "
"received on")),
('timeout_check_interval', 60.0,
_("seconds to wait between checking if any connections have timed out")),
('max_slice_length', 16384,
_("maximum length slice to send to peers, close connection if a larger "
"request is received")),
('max_rate_period', 20.0,
_("maximum time interval over which to estimate the current upload and download rates")),
('max_rate_period_seedtime', 100.0,
_("maximum time interval over which to estimate the current seed rate")),
('max_announce_retry_interval', 1800,
_("maximum time to wait between retrying announces if they keep failing")),
('snub_time', 30.0,
_("seconds to wait for data to come in over a connection before assuming "
"it's semi-permanently choked")),
('rarest_first_cutoff', 4,
_("number of downloads at which to switch from random to rarest first")),
('upload_unit_size', 1380,
_("how many bytes to write into network buffers at once.")),
('retaliate_to_garbled_data', MYTRUE,
_("refuse further connections from addresses with broken or intentionally "
"hostile peers that send incorrect data")),
('one_connection_per_ip', MYTRUE,
_("do not connect to several peers that have the same IP address")),
('peer_socket_tos', 8,
_("if nonzero, set the TOS option for peer connections to this value")),
('bad_libc_workaround', BAD_LIBC_WORKAROUND_DEFAULT,
_("enable workaround for a bug in BSD libc that makes file reads very slow.")),
('tracker_proxy', '',
_("address of HTTP proxy to use for tracker connections")),
('close_with_rst', 0,
_("close connections with RST and avoid the TCP TIME_WAIT state")),
('twisted', -1,
_("Use Twisted network libraries for network connections. 1 means use twisted, 0 means do not use twisted, -1 means autodetect, and prefer twisted")),
]
def get_defaults(ui):
assert ui in ("bittorrent" , "bittorrent-curses", "bittorrent-console" ,
"maketorrent", "maketorrent-console",
"launchmany-curses", "launchmany-console" ,
)
r = []
if ui.startswith('bittorrent') or ui.startswith('launchmany'):
r.extend(common_options)
if ui == 'bittorrent':
r.extend([
('save_as', '',
_("file name (for single-file torrents) or directory name (for "
"batch torrents) to save the torrent as, overriding the default "
"name in the torrent. See also --save_in, if neither is "
"specified the user will be asked for save location")),
('advanced', MYFALSE,
_("display advanced user interface")),
('next_torrent_time', 300,
_("the maximum number of minutes to seed a completed torrent "
"before stopping seeding")),
('next_torrent_ratio', 80,
_("the minimum upload/download ratio, in percent, to achieve "
"before stopping seeding. 0 means no limit.")),
('last_torrent_ratio', 0,
_("the minimum upload/download ratio, in percent, to achieve "
"before stopping seeding the last torrent. 0 means no limit.")),
('seed_forever', MYFALSE,
_("Seed each completed torrent indefinitely "
"(until the user cancels it)")),
('seed_last_forever', MYTRUE,
_("Seed the last torrent indefinitely "
"(until the user cancels it)")),
('pause', MYFALSE,
_("start downloader in paused state")),
('start_torrent_behavior', 'replace',
_('specifies how the app should behave when the user manually '
'tries to start another torrent: "replace" means always replace '
'the running torrent with the new one, "add" means always add '
'the running torrent in parallel, and "ask" means ask the user '
'each time.')),
('open_from', '',
'local directory to look in for .torrent files to open'),
('ask_for_save', MYFALSE,
'whether or not to ask for a location to save downloaded files in'),
('start_minimized', MYFALSE,
_("Start BitTorrent minimized")),
('new_version', '',
_("override the version provided by the http version check "
"and enable version check debugging mode")),
('current_version', '',
_("override the current version used in the version check "
"and enable version check debugging mode")),
('geometry', '',
_("specify window size and position, in the format: "
"WIDTHxHEIGHT+XOFFSET+YOFFSET")),
])
if os.name == 'nt':
r.extend([
('launch_on_startup', MYTRUE,
_("Launch BitTorrent when Windows starts")),
('minimize_to_tray', MYTRUE,
_("Minimize to system tray")),
])
if ui in ('bittorrent-console', 'bittorrent-curses'):
r.append(
('save_as', '',
_("file name (for single-file torrents) or directory name (for "
"batch torrents) to save the torrent as, overriding the "
"default name in the torrent. See also --save_in")))
if ui.startswith('bittorrent'):
r.extend([
('max_uploads', -1,
_("the maximum number of uploads to allow at once. -1 means a "
"(hopefully) reasonable number based on --max_upload_rate. "
"The automatic values are only sensible when running one "
"torrent at a time.")),
('save_in', '',
_("local directory where the torrent contents will be saved. The "
"file (single-file torrents) or directory (batch torrents) will "
"be created under this directory using the default name "
"specified in the .torrent file. See also --save_as.")),
('responsefile', '',
_("deprecated, do not use")),
('url', '',
_("deprecated, do not use")),
('ask_for_save', 0,
_("whether or not to ask for a location to save downloaded files in")),
])
if ui.startswith('launchmany'):
r.extend([
('max_uploads', 6,
_("the maximum number of uploads to allow at once. -1 means a "
"(hopefully) reasonable number based on --max_upload_rate. The "
"automatic values are only sensible when running one torrent at "
"a time.")),
('save_in', '',
_("local directory where the torrents will be saved, using a "
"name determined by --saveas_style. If this is left empty "
"each torrent will be saved under the directory of the "
"corresponding .torrent file")),
('parse_dir_interval', 60,
_("how often to rescan the torrent directory, in seconds") ),
('launch_delay', 0,
_("wait this many seconds after noticing a torrent before starting it, to avoid race with tracker")),
('saveas_style', 4,
_("How to name torrent downloads: "
"1: use name OF torrent file (minus .torrent); "
"2: use name encoded IN torrent file; "
"3: create a directory with name OF torrent file "
"(minus .torrent) and save in that directory using name "
"encoded IN torrent file; "
"4: if name OF torrent file (minus .torrent) and name "
"encoded IN torrent file are identical, use that "
"name (style 1/2), otherwise create an intermediate "
"directory as in style 3; "
"CAUTION: options 1 and 2 have the ability to "
"overwrite files without warning and may present "
"security issues."
) ),
('display_path', ui == 'launchmany-console' and MYTRUE or MYFALSE,
_("whether to display the full path or the torrent contents for "
"each torrent") ),
])
if ui.startswith('launchmany') or ui == 'maketorrent':
r.append(
('torrent_dir', '',
_("directory to look for .torrent files (semi-recursive)")),)
if ui in ('bittorrent-curses', 'bittorrent-console'):
r.append(
('spew', MYFALSE,
_("whether to display diagnostic info to stdout")))
if ui.startswith('maketorrent'):
r.extend([
('piece_size_pow2', 18,
_("which power of two to set the piece size to")),
('tracker_name', 'http://my.tracker:6969/announce',
_("default tracker name")),
('tracker_list', '', ''),
('use_tracker', MYTRUE,
_("if false then make a trackerless torrent, instead of "
"announce URL, use reliable node in form of <ip>:<port> or an "
"empty string to pull some nodes from your routing table")),
])
r.extend(basic_options)
if ui.startswith('bittorrent') or ui.startswith('launchmany'):
r.extend(rare_options)
return r
|
from openstuder import SIAsyncGatewayClient, SIProtocolError, SIStatus
def on_error(error: SIProtocolError):
print(f'Unable to connect: {error.reason()}')
def on_connected(access_level: str, gateway_version: str):
client.find_properties('*.*.3136')
def on_properties_found(status: SIStatus, id_: str, count: int, properties: list):
print(f'Found properties for {id_}, status = {status}, count = {count} : {properties}')
client.disconnect()
client = SIAsyncGatewayClient()
client.on_error = on_error
client.on_connected = on_connected
client.on_properties_found = on_properties_found
client.connect('localhost', background=False) |
import argparse
import sdkhttp
DEFAULT_HOST = "https://salkku.co"
options = {
'verbose': False
}
def parse_command_line():
# type: () -> Namespace
parser = argparse.ArgumentParser(description='CLI commands for paaomat.fi.')
parser.add_argument('command', help='Main command name')
parser.add_argument('--data', help='Post data file')
parser.add_argument('--host', help='Salkku host address', default=DEFAULT_HOST)
parser.add_argument('--user', help='HTTP auth user')
parser.add_argument('--password', help='HTTP auth password')
parser.add_argument('--id', help='Optional resource id', type=int)
parser.add_argument('--email', help='Optional user email')
parser.add_argument('--date', help='Begin date')
parser.add_argument('--name', help='Optional resource name')
parser.add_argument('--format', help='API response format')
parser.add_argument('-v', '--verbose', action='store_true',
dest="verbose", help='Verbose output')
parser.add_argument('-d', '--dev', action='store_true',
dest="debug", help='Development mode')
args = parser.parse_args()
return args
def main():
args = parse_command_line()
if args.verbose:
options['verbose'] = True
if args.debug:
options['verify'] = False
else:
options['verify'] = True
api_url = '{}/api/v1'.format(args.host)
client = sdkhttp.HTTPClient(api_url, args.user, args.password, options)
if args.command == "currency":
stdout(client.get_currencies())
elif args.command == "exchange":
stdout(client.get_exchanges())
elif args.command == "exchange-security":
if args.id is not None:
stdout(client.get_exchange_securities(args.id))
else:
raise Exception("Need exchange id")
elif args.command == "transaction-type":
stdout(client.get_transaction_types())
elif args.command == "security":
if args.name is not None:
stdout(client.search_security(args.name))
elif args.id is not None:
stdout(client.get_security(args.id))
elif args.command == "portfolio":
if args.data is not None:
stdout(client.post_portfolio(args.data))
elif args.id is not None:
stdout(client.get_portfolio(args.id))
else:
stdout(client.get_portfolios())
elif args.command == "portfolio-history":
if args.id is not None:
stdout(client.get_portfolio_history(args.id))
else:
raise Exception("Need portfolio id")
elif args.command == "portfolio-performance":
if args.id is not None:
stdout(client.get_portfolio_performance(args.id))
else:
raise Exception("Need portfolio id")
elif args.command == "ping":
stdout(client.ping())
elif args.command == "portfolio-transaction":
if args.id is not None:
if args.data is not None:
client.post_portfolio_transactions(args.id, args.data)
else:
stdout(client.get_portfolio_transactions(args.id, args.format))
else:
raise Exception("Need portfolio id")
elif args.command == "portfolio-dividend":
if args.id is not None:
if args.data is not None:
client.post_portfolio_dividends(args.id, args.data)
else:
stdout(client.get_portfolio_dividends(args.id, args.format))
else:
raise Exception("Need portfolio id")
def stdout(response):
print(response.text)
if __name__ == "__main__":
main()
|
import hashlib
import random
from pathlib import Path
def current_data_hash():
paths = [str(p) for p in Path("data").iterdir()]
path_strs = " ".join(paths)
hex = hashlib.md5(path_strs.encode()).hexdigest()
return hex
def random_shuffle(str_list: list):
data = " ".join(str_list)
digit = from_string(data, len(data))
random.seed(digit)
random.shuffle(str_list)
return str_list
def from_string(data: str, scale: int) -> int:
hash_object = hashlib.md5(data.encode()).hexdigest()
digit = int(hash_object, 16) % scale
return digit
class HashGenerator:
def __init__(self, data: str, scale: int):
self.num = 0
self.scale = scale
self.data = data
def __iter__(self):
return self
def __next__(self) -> int:
data = self.data + str(self.num)
result = from_string(data, self.scale)
self.num += 1
return result
def sample(self, size: int, unique=True) -> list:
if unique: assert size <= self.scale, "Cannot generate unique sample with given sample size"
arr = []
while len(arr) < size:
sample = next(self)
if unique:
if sample not in arr:
arr.append(sample)
else:
arr.append(sample)
return arr
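
# Minimal usage sketch for the helpers above; the seed string and scale are
# illustrative only.
if __name__ == "__main__":
    print(from_string("example", 100))           # deterministic digit in [0, 100)
    gen = HashGenerator("example-seed", scale=10)
    print(gen.sample(5))                         # five unique values in [0, 10)
    print(random_shuffle(["a", "b", "c", "d"]))  # same order every run for this input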
|
#!/usr/bin/env python
from __future__ import print_function
import os
import subprocess
import sys
import numpy
import contextlib
from distutils.command.build_ext import build_ext
from distutils.sysconfig import get_python_inc
from distutils import ccompiler, msvccompiler
from setuptools import Extension, setup, find_packages
PACKAGE_DATA = {'': ['*.pyx', '*.pxd']}
PACKAGES = find_packages()
MOD_NAMES = ['neuralcoref.neuralcoref']
@contextlib.contextmanager
def chdir(new_dir):
old_dir = os.getcwd()
try:
os.chdir(new_dir)
sys.path.insert(0, new_dir)
yield
finally:
del sys.path[0]
os.chdir(old_dir)
def generate_cython(root, source):
print('Cythonizing sources')
p = subprocess.call([sys.executable,
os.path.join(root, 'bin', 'cythonize.py'),
source], env=os.environ)
if p != 0:
raise RuntimeError('Running cythonize failed')
def is_source_release(path):
return os.path.exists(os.path.join(path, 'PKG-INFO'))
def setup_package():
root = os.path.abspath(os.path.dirname(__file__))
with chdir(root):
if not is_source_release(root):
generate_cython(root, 'neuralcoref')
include_dirs = [
get_python_inc(plat_specific=True),
os.path.join(root, 'neuralcoref', 'cli', 'include')]
if (ccompiler.new_compiler().compiler_type == 'msvc'
and msvccompiler.get_build_version() == 9):
include_dirs.append(os.path.join(root, 'neuralcoref', 'cli', 'include', 'msvc9'))
ext_modules = []
for mod_name in MOD_NAMES:
mod_path = mod_name.replace('.', '/') + '.cpp'
extra_link_args = []
# ???
# Imported from patch from @mikepb
# See Issue #267. Running blind here...
if sys.platform == 'darwin':
dylib_path = ['..' for _ in range(mod_name.count('.'))]
dylib_path = '/'.join(dylib_path)
dylib_path = '@loader_path/%s/neuralcoref/platform/darwin/lib' % dylib_path
extra_link_args.append('-Wl,-rpath,%s' % dylib_path)
ext_modules.append(
Extension(mod_name, [mod_path],
language='c++', include_dirs=include_dirs,
extra_link_args=extra_link_args))
setup(name='neuralcoref',
version='3.0',
description="State-of-the-art coreference resolution using neural nets",
url='https://github.com/huggingface/neuralcoref',
download_url='https://github.com/huggingface/neuralcoref/archive/3.0.tar.gz',
author='Thomas Wolf',
author_email='[email protected]',
ext_modules=ext_modules,
include_dirs=[numpy.get_include()],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
],
install_requires=[
'spacy',
'falcon'],
packages=PACKAGES,
package_data=PACKAGE_DATA,
keywords='NLP chatbots coreference resolution',
license='MIT',
zip_safe=False,
platforms='any')
if __name__ == '__main__':
setup_package()
|
"""
Class that facilitates FITS header standardization to keys required by models.
"""
from abc import ABC, abstractmethod
import warnings
import logging
import numpy as np
from astropy.io.fits import PrimaryHDU, CompImageHDU
from astropy.io import fits
from astropy.wcs import WCS
import astropy.units as u
from astroquery.astrometry_net import AstrometryNet
from trail.settings import ASTROMETRY_KEY, ASTROMETRY_TIMEOUT
import upload.models as models
__all__ = ["HeaderStandardizer", ]
logger = logging.getLogger(__name__)
ASTRONET_CLIENT = AstrometryNet()
if ASTROMETRY_KEY:
ASTRONET_CLIENT.api_key = ASTROMETRY_KEY
class StandardizeWcsException(Exception):
"""Exception raised when error in WCS processing
Attributes:
message -- explanation of the error
"""
def __init__(self, message="The WCS is not valid"):
self.message = message
super().__init__(self.message)
class HeaderStandardizer(ABC):
"""Supports standardization of various headers.
Standardization consists of:
* standardizing WCS data by projecting WCS onto a unit sphere
* standardizing PrimaryHDU data to find time and location of observer,
and select optional values such as filter, science program etc.
Parameters
----------
header : `object`
The header, Astropy HDU and its derivatives.
**kwargs : `dict`
Additional keyword arguments
Keyword arguments
-----------------
filename : `str`
Name of the file from which the HDU was read from, sometimes can encode
additional metadata.
Notes
-----
Standardizers are intended to operate on primary headers, but not all
primary headers contain all of the required metadata. Standardizers must be
instantiated with the header containing at least the time and location of
the observer and can be dynamically given a different HDU from which to
standardize WCS.
"""
standardizers = dict()
"""All registered header standardizers."""
name = None
"""Standardizer's name. Only named standardizers will be registered."""
priority = 0
"""Priority. Standardizers with high priority are prefered over
standardizers with low priority when processing header metadata.
"""
def __init__(self, header, **kwargs):
self.header = header
self._kwargs = kwargs
def __init_subclass__(cls, **kwargs):
name = getattr(cls, "name", False)
if name and name is not None:
super().__init_subclass__(**kwargs)
HeaderStandardizer.standardizers[cls.name] = cls
@staticmethod
def _computeStandardizedWcs(header, dimX, dimY):
"""Given an Header containing WCS data and the dimensions of an image
calculates the values of world coordinates at image corner and image
center and projects them to a unit sphere in Cartesian coordinate
system.
Parameters
----------
header : `object`
The header, Astropy HDU and its derivatives.
dimX : `int`
Image dimension in x-axis.
dimY : `int`
Image dimension in y-axis.
Returns
-------
standardizedWcs : `dict`
            Calculated coordinate values, a dict with wcs_radius,
wcs_center_[x, y, z] and wcs_corner_[x, y, z]
Notes
-----
The center point is assumed to be at the (dimX/2, dimY/2) pixel
location. Corner is taken to be the (0,0)-th pixel.
"""
standardizedWcs = {}
centerX, centerY = int(dimX/2), int(dimY/2)
# TODO: test if a header doesn't actually have a valid WCS
# what is the error raised?
with warnings.catch_warnings(record=True) as warns:
wcs = WCS(header)
if warns:
for w in warns:
logger.warning(w.message)
centerSkyCoord = wcs.pixel_to_world(centerX, centerY)
centerRa = centerSkyCoord.ra.to(u.rad)
centerDec = centerSkyCoord.dec.to(u.rad)
cornerSkyCoord = wcs.pixel_to_world(0, 0)
cornerRa = cornerSkyCoord.ra.to(u.rad)
cornerDec = cornerSkyCoord.dec.to(u.rad)
unitSphereCenter = np.array([
np.cos(centerDec) * np.cos(centerRa),
np.cos(centerDec) * np.sin(centerRa),
np.sin(centerDec)
])
unitSphereCorner = np.array([
np.cos(cornerDec) * np.cos(cornerRa),
np.cos(cornerDec) * np.sin(cornerRa),
np.sin(cornerDec)
])
unitRadius = np.linalg.norm(unitSphereCenter - unitSphereCorner)
standardizedWcs["wcs_radius"] = unitRadius
standardizedWcs["wcs_center_x"] = unitSphereCenter[0]
standardizedWcs["wcs_center_y"] = unitSphereCenter[1]
standardizedWcs["wcs_center_z"] = unitSphereCenter[2]
standardizedWcs["wcs_corner_x"] = unitSphereCorner[0]
standardizedWcs["wcs_corner_y"] = unitSphereCorner[1]
standardizedWcs["wcs_corner_z"] = unitSphereCorner[2]
return standardizedWcs
# wow, do not flip these two decorators around...
@classmethod
@abstractmethod
def canStandardize(self, header, **kwargs):
"""Returns `True` when the standardizer knows how to handle given
upload.
Parameters
----------
header : `object`
The header, Astropy HDU and its derivatives.
**kwargs : `dict`
Additional keyword arguments
Keyword arguments
-----------------
filename : `str`
Name of the file from which the HDU was read from, sometimes can encode
additional metadata.
Returns
-------
canProcess : `bool`
`True` when the processor knows how to handle uploaded file and
`False` otherwise
Notes
-----
Implementation is standardizer-specific.
"""
raise NotImplementedError()
@classmethod
def getStandardizer(cls, header, **kwargs):
"""Get the standardizer class that can handle given header.
Parameters
----------
header : `object`
The header, Astropy HDU and its derivatives.
**kwargs : `dict`
Additional keyword arguments
Keyword arguments
-----------------
filename : `str`
Name of the file from which the HDU was read from, sometimes can encode
additional metadata.
Returns
-------
standardizerCls : `cls`
Standardizer class that can process the given upload.`
"""
standardizers = []
for standardizer in cls.standardizers.values():
if standardizer.canStandardize(header):
standardizers.append(standardizer)
def get_priority(standardizer):
"""Return standardizers priority."""
return standardizer.priority
standardizers.sort(key=get_priority, reverse=True)
if standardizers:
if len(standardizers) > 1:
# I think this should never be an issue really, but just in case
names = [proc.name for proc in standardizers]
logger.info("Multiple standardizers declared ability to process "
f"the given upload: {names}. Using {names[-1]} "
"to process FITS.")
return standardizers[0]
else:
raise ValueError("None of the known standardizers can handle this upload.\n "
f"Known standardizers: {list(cls.standardizers.keys())}")
@classmethod
def fromHeader(cls, header, **kwargs):
"""Get the standardizer instance from a given header.
Parameters
----------
        header : `object`
            The header, an Astropy HDU or one of its derivatives.
**kwargs : `dict`
Additional keyword arguments
Keyword arguments
-----------------
            filename : `str`
                Name of the file from which the HDU was read; it can sometimes
                encode additional metadata.
Returns
-------
        standardizer : `HeaderStandardizer`
            Standardizer instance that can process the given upload.
Raises
------
ValueError
            None of the registered standardizers can process the upload.
"""
# TODO: get some error handling here
standardizerCls = cls.getStandardizer(header, **kwargs)
return standardizerCls(header, **kwargs)
@abstractmethod
def standardizeMetadata(self):
"""Normalizes FITS header information of the primary header unit and
returns a dictionary with standardized, as understood by trailblazer,
keys.
Returns
-------
standardizedHeaderMetadata : `upload.model.Metadata`
Metadata object containing standardized values.
Notes
-----
Implementation is instrument-specific.
"""
raise NotImplementedError()
def standardizeWcs(self, hdu=None):
"""Standardize WCS data a given header.
Parameters
----------
hdu : `object` or `None`, optional
An Astropy image-like HDU unit. Useful when dealing with
            multi-extension FITS files where metadata is in the PrimaryHDU but
the WCS and image data are stored in the extensions.
Returns
-------
standardizedWCS : `upload.models.Wcs`
Standardized WCS keys and values.
Raises
------
StandardizeWcsException
There was a problem in standardizing the header
Notes
-----
        Tries to read the WCS from the header; if that does not work, the file
        is handed to astrometry.net to solve for the WCS.
"""
try:
try:
header, dimX, dimY = self._astropyWcsReader(hdu)
except (ValueError, TypeError):
header, dimX, dimY = self._astrometryNetSolver(self.filepath)
except (ValueError, RuntimeError, TypeError) as err:
raise StandardizeWcsException("Failed to standardize WCS") from err
return models.Wcs(**self._computeStandardizedWcs(header, dimX, dimY))
def _astropyWcsReader(self, hdu=None):
"""Standardize WCS data a given header.
Parameters
----------
hdu : `object` or `None`, optional
An Astropy image-like HDU unit. Useful when dealing with
            multi-extension FITS files where metadata is in the PrimaryHDU but
the WCS and image data are stored in the extensions.
Returns
-------
        header : `object`
            Header containing the WCS data.
        dimX : `int`
            Image dimension in x-axis.
        dimY : `int`
            Image dimension in y-axis.
Raises
------
ValueError
Header contains no image dimension keys (NAXIS1, NAXIS2) but an
additional HDU was not provided.
ValueError
An additional image-like header was provided but contains no image
data.
TypeError
Provided additional HDU is not image-like HDU.
Notes
-----
        Standardized values are the Cartesian components of the central and
        corner pixels of the image, projected onto a unit sphere, and the
        distance between them.
        The center pixel coordinates are determined from the header `NAXIS`
        keys when possible, and otherwise from the dimensions of the image in
        the given HDU.
The (0, 0) pixel is taken as the corner pixel.
"""
dimX = self.header.get("NAXIS1", False)
dimY = self.header.get("NAXIS2", False)
if not dimX or not dimY:
if hdu is None:
raise ValueError("Header contains no image dimension keys "
"(NAXIS1, NAXIS2) but an additional HDU was "
"not provided")
if not (isinstance(hdu, PrimaryHDU) or isinstance(hdu, CompImageHDU)):
raise TypeError(f"Expected image-like HDU, got {type(hdu)} instead.")
if hdu.data is None:
raise ValueError("Given image-type HDU contains no image to take"
"image dimensions from.")
dimX, dimY = hdu.data.shape
header = hdu.header
else:
header = self.header
return header, dimX, dimY
@staticmethod
def _astrometryNetSolver(path_to_file):
"""Given a fits file it will process and send to astrometry.net
where it will obtain the WCS data for the file if it is able to.
Otherwise will raise relevant errors.
Parameters
----------
        path_to_file : `str`
            Path to the FITS file.
Returns
-------
        header : `dict`
            Header containing the WCS data, if a solution was found.
dimX : `int`
width of the image
dimY : `int`
height of the image
Raises
------
ValueError
Found no solution from astrometry.net
RuntimeError
There is no astrometry.net key
        TimeoutError
            No solution was found within the given time. This does not mean
            that astrometry.net cannot solve it.
        TypeError
            Some error in the FITS file prevented astrometry.net from working.
        FileNotFoundError
            No file was found at the given path.
Notes
-----
        Sends the file to astrometry.net to find the WCS from the locations of the stars in the image.
"""
        with fits.open(path_to_file) as hdul:
            dimX, dimY = hdul[0].data.shape
if ASTROMETRY_KEY:
header = ASTRONET_CLIENT.solve_from_image(path_to_file, False, solve_timeout=ASTROMETRY_TIMEOUT)
if header == {}:
raise ValueError("Could not find WCS from fits file")
else:
logger.info("Astrometry.net api key not found")
raise RuntimeError("There is no astrometry.net key")
return header, dimX, dimY
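# Illustrative sketch (not part of the original module): a concrete
# standardizer only needs a class attribute `name`, because __init_subclass__
# records it in HeaderStandardizer.standardizers and getStandardizer picks the
# matching class with the highest `priority`. The class name, priority and
# header keys below are hypothetical.
# class MyInstrumentStandardizer(HeaderStandardizer):
#     name = "my_instrument"
#     priority = 1
#     @classmethod
#     def canStandardize(cls, header, **kwargs):
#         return header.get("INSTRUME", "") == "MyInstrument"
#     def standardizeMetadata(self):
#         return {"instrument": self.header.get("INSTRUME")}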
|
# -*- coding: utf-8 -*-
from expects import *
from expects.aliases import *
from expects.testing import failure
with describe('have_len'):
with it('passes if string has the expected length'):
expect('foo').to(have_len(3))
with it('passes if string has length matching'):
expect('foo').to(have_len(above_or_equal(3)))
with it('passes if iterable has the expected length'):
expect(iter('foo')).to(have_len(3))
with it('fails if string does not have the expected length'):
with failure("but: was 3"):
expect('foo').to(have_len(2))
with it('fails if string does not have length matching'):
with failure("but: was 3"):
expect('foo').to(have_len(below(3)))
with it('fails if iterable does not have the expected length'):
with failure("but: was 3"):
expect(iter('foo')).to(have_len(2))
with context('when negated'):
with it('passes if string does not have the expected length'):
expect('foo').not_to(have_len(2))
with it('fails if string has the expected length'):
with failure("but: was 3"):
expect('foo').not_to(have_len(3))
with describe('have_length'):
with it('passes if string has the expected length'):
expect('foo').to(have_length(3))
with it('fails if string does not have the expected length'):
with failure("but: was 3"):
expect('foo').to(have_length(2))
|
__version__ = "0.1.0"
from .main import Optic, OpticConfig
|
#!/usr/bin/python3
from collections import OrderedDict
from uuid import uuid1
import dash
import json
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Event, State, Input, Output
from pprint import pprint
from app import app
from app import app_controller
import filestore
from utils import *
from dash.exceptions import PreventUpdate
from magic_defines import *
import datetime
from localstorage_writer import LocalStorageWriter
from localstorage_reader import LocalStorageReader
from autolink import Redirect
import dash_table_experiments as dt
import coloredlogs, logging
logger = logging.getLogger(__name__)
coloredlogs.install(level='DEBUG', logger=logger)
from dateutil.relativedelta import *
import gettext
zh = gettext.translation("club_order", locale_d(), languages=["zh_CN"])
zh.install(True)
_ = zh.gettext
def gen_id(name):
# user module as name prefix
s_id = g_id(__name__, name)
return s_id
def filter_and_map_dict(d):
#change column name
map_d = {
"id": "ID",
"paid": _("paid"),
"total_price": _("total_price"),
"time_order": _("time_order")
}
new_d = {}
for k, v in d.items():
if k in map_d.keys():
new_key = map_d[k]
new_d[new_key] = v
return new_d
def generate_order_data(orders):
orders_data = [{"id":item.id,
"paid": item.paid,
"total_price": item.total_price(),
"time_order":item.time} for item in orders]
return [filter_and_map_dict(item) for item in orders_data]
def generate_layout(orders):
orders_data = generate_order_data(orders)
return html.Div([
html.H4(_("All orders")),
dcc.Dropdown(
id=gen_id('all-day-month-year'),
options=[
{'label': _("All"), 'value': 'all'},
{'label': _("Today"), 'value': 'day'},
{'label': _("This Month"), 'value': 'month'},
{'label': _("This year"), 'value': 'year'}
],
value='all'
),
dt.DataTable(
rows= orders_data if orders_data else [{"No order": "No order"}],
# optional - sets the order of columns
#columns=sorted(DF_GAPMINDER.columns),
resizable=True,
row_selectable=True,
filterable=True,
sortable=True,
editable=False,
selected_row_indices=[],
id=gen_id(TABLE)
),
html.Hr(),
html.Div(_("click one order to view detail")),
html.Div(id=gen_id("order-cards")),
html.Hr()
])
def layout():
club = app_controller.get_club_by_name(CLUB_NAME)
if not club or not club.orders:
return html.H1(_("No orders!"))
return generate_layout(club.orders)
def generate_order_detail(order_detail):
return html.Div(className="container-fluid", children = [
html.Div(className="d-flex",children=[
html.Div(className="p-2",children =[
html.Img(src=order_detail.get_img_link(MAJOR_IMG),
width="100px",
height="100px",
alt=order_detail.name,
className="img-responsive")
]),
html.Div(className="p-2",children=[
html.H5(order_detail.name,className="nomargin"),
html.P(order_detail.description)
])
]),
html.Div(className="d-flex justify-content-between",children=[
html.Div(className="p-2", children=[_("price: {}*{}={}").format(order_detail.price,
order_detail.discount_percent_str(),
order_detail.final_price())
]),
html.Div(className="p-2", children=[
html.Span(_("Qty: {}").format(order_detail.quantity))
]),
html.Div(className="p-2",children=[
html.Div(children=[_("Subtotal:"),order_detail.calc_price()]),
])
])
])
def generate_order(order):
detail = []
header = html.Div(className="d-flex justify-content-between",children=[
html.Div(className="p-2", children=[
html.Span("order id: {}".format(order.id))
]),
html.Div(className="p-2", children=[
html.Span("Total: {}".format(order.total_price()))
]),
html.Div(className="p-2", children=[
html.Span("Paid: {}".format(order.paid))
]),
html.Div(className="p-2",children=[
html.Div(children=[_("time:"),order.time]),
])
])
detail.append(header)
detail.append(html.Hr())
for item in order.details:
detail.append(generate_order_detail(item))
detail.append(html.Hr())
user = order.user
footer = html.Div(className="d-flex justify-content-between",children=[
html.Div(className="p-2", children=[
html.Span(_("user id: {}").format(user.id))
]),
html.Div(className="p-2", children=[
html.Span(_("Email: {}").format(user.email))
]),
])
detail.append(footer)
return html.Div(children=detail)
def generate_order_card(order_id):
order = app_controller.get_club_order_by_id(order_id)
return html.Div(className='container-fluid border border-info', children=[
generate_order(order)
])
@app.callback(
Output(gen_id("order-cards"), 'children'),
[Input(gen_id(TABLE), 'rows'),
Input(gen_id(TABLE), 'selected_row_indices')])
def update_order_cards(rows, selected_row_indices):
all_cards = []
for i in selected_row_indices:
logger.debug(rows[i]["ID"])
all_cards.append(generate_order_card(rows[i]["ID"]))
all_cards.append(html.Br())
return all_cards
@app.callback(
Output(gen_id(TABLE), 'rows'),
[Input(gen_id('all-day-month-year'), 'value')]
)
def update_order_table(value):
logger.debug(value)
all_order = app_controller.get_club_order_list(CLUB_NAME)
ret_order = None
first_day = None
last_day = None
if value.lower() == "all":
ret_order = all_order
elif value.lower() == "day":
first_day = datetime.datetime.utcnow().replace(day=1, hour=0, minute=0, second=0)
last_day = first_day + relativedelta(days=1)
elif value.lower() == 'month':
first_day = datetime.datetime.utcnow().replace(day=1, hour=0, minute=0, second=0)
last_day = first_day + relativedelta(months=1)
elif value.lower() == 'year':
first_day = datetime.datetime.utcnow().replace(day=1, hour=0, minute=0, second=0)
last_day = first_day + relativedelta(years=1)
ret_order = [item for item in all_order if item.between_time(first_day, last_day)]
return generate_order_data(ret_order)
|
# -*- coding: utf-8 -*-
import random
import re
import struct
import six
from py_zipkin.util import generate_random_64bit_string
from py_zipkin.zipkin import ZipkinAttrs
from pyramid.interfaces import IRoutesMapper
DEFAULT_REQUEST_TRACING_PERCENT = 0.5
def get_trace_id(request):
"""Gets the trace id based on a request. If not present with the request,
create a custom (depending on config: `zipkin.trace_id_generator`) or a
completely random trace id.
:param: current active pyramid request
:returns: a 64-bit hex string
"""
if 'X-B3-TraceId' in request.headers:
trace_id = _convert_signed_hex(request.headers['X-B3-TraceId'])
# Tolerates 128 bit X-B3-TraceId by reading the right-most 16 hex
# characters (as opposed to overflowing a U64 and starting a new trace).
trace_id = trace_id[-16:]
elif 'zipkin.trace_id_generator' in request.registry.settings:
trace_id = _convert_signed_hex(request.registry.settings[
'zipkin.trace_id_generator'](request))
else:
trace_id = generate_random_64bit_string()
return trace_id
def _convert_signed_hex(s):
"""Takes a signed hex string that begins with '0x' and converts it to
a 16-character string representing an unsigned hex value.
Examples:
'0xd68adf75f4cfd13' => 'd68adf75f4cfd13'
'-0x3ab5151d76fb85e1' => 'c54aeae289047a1f'
"""
if s.startswith('0x') or s.startswith('-0x'):
s = '{0:x}'.format(struct.unpack('Q', struct.pack('q', int(s, 16)))[0])
return s.zfill(16)
def should_not_sample_path(request):
"""Decided whether current request path should be sampled or not. This is
checked previous to `should_not_sample_route` and takes precedence.
:param: current active pyramid request
:returns: boolean whether current request path is blacklisted.
"""
blacklisted_paths = request.registry.settings.get(
'zipkin.blacklisted_paths', [])
# Only compile strings, since even recompiling existing
# compiled regexes takes time.
regexes = [
re.compile(r) if isinstance(r, six.string_types) else r
for r in blacklisted_paths
]
return any(r.match(request.path) for r in regexes)
def should_not_sample_route(request):
"""Decided whether current request route should be sampled or not.
:param: current active pyramid request
:returns: boolean whether current request route is blacklisted.
"""
blacklisted_routes = request.registry.settings.get(
'zipkin.blacklisted_routes', [])
if not blacklisted_routes:
return False
route_mapper = request.registry.queryUtility(IRoutesMapper)
route_info = route_mapper(request).get('route')
return (route_info and route_info.name in blacklisted_routes)
def should_sample_as_per_zipkin_tracing_percent(tracing_percent):
"""Calculate whether the request should be traced as per tracing percent.
:param tracing_percent: value between 0.0 to 100.0
:type tracing_percent: float
:returns: boolean whether current request should be sampled.
"""
return (random.random() * 100) < tracing_percent
def is_tracing(request):
"""Determine if zipkin should be tracing
1) Check whether the current request path is blacklisted.
2) If not, check whether the current request route is blacklisted.
3) If not, check if specific sampled header is present in the request.
    4) If not, use a tracing percent (default: 0.5%) to decide.
:param request: pyramid request object
:returns: boolean True if zipkin should be tracing
"""
if should_not_sample_path(request):
return False
elif should_not_sample_route(request):
return False
elif 'X-B3-Sampled' in request.headers:
return request.headers.get('X-B3-Sampled') == '1'
else:
zipkin_tracing_percent = request.registry.settings.get(
'zipkin.tracing_percent', DEFAULT_REQUEST_TRACING_PERCENT)
return should_sample_as_per_zipkin_tracing_percent(
zipkin_tracing_percent)
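# Illustrative configuration sketch (values are hypothetical): the checks in
# is_tracing() are driven entirely by registry settings, e.g. in a Pyramid
# settings file:
#   zipkin.blacklisted_paths = ^/status
#   zipkin.blacklisted_routes = healthcheck
#   zipkin.tracing_percent = 1.0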
def create_zipkin_attr(request):
"""Create ZipkinAttrs object from a request with sampled flag as True.
Attaches lazy attribute `zipkin_trace_id` with request which is then used
throughout the tween.
Consumes custom is_tracing function to determine if the request is traced
if one is set in the pyramid registry.
:param request: pyramid request object
:rtype: :class:`pyramid_zipkin.request_helper.ZipkinAttrs`
"""
settings = request.registry.settings
if 'zipkin.is_tracing' in settings:
is_sampled = settings['zipkin.is_tracing'](request)
else:
is_sampled = is_tracing(request)
span_id = request.headers.get(
'X-B3-SpanId', generate_random_64bit_string())
parent_span_id = request.headers.get('X-B3-ParentSpanId', None)
flags = request.headers.get('X-B3-Flags', '0')
# Store zipkin_trace_id and zipkin_span_id in the request object so that
# they're still available once we leave the pyramid_zipkin tween. An example
# is being able to log them in the pyramid exc_logger, which runs after all
# tweens have been exited.
request.zipkin_trace_id = get_trace_id(request)
request.zipkin_span_id = span_id
return ZipkinAttrs(
trace_id=request.zipkin_trace_id,
span_id=span_id,
parent_span_id=parent_span_id,
flags=flags,
is_sampled=is_sampled,
)
def get_binary_annotations(request, response):
"""Helper method for getting all binary annotations from the request.
:param request: the Pyramid request object
:param response: the Pyramid response object
:returns: binary annotation dict of {str: str}
"""
route = request.matched_route.pattern if request.matched_route else ''
annotations = {
'http.uri': request.path,
'http.uri.qs': request.path_qs,
'http.route': route,
'response_status_code': str(response.status_code),
}
settings = request.registry.settings
if 'zipkin.set_extra_binary_annotations' in settings:
annotations.update(
settings['zipkin.set_extra_binary_annotations'](request, response)
)
return annotations
|
import os
import pdb
import re
import sys
empirical_base_path = "../../empirical_results/"
formal_base_path = "../../../synthesis_analysis/formal_results"
devices = ["Quadro RTX 4000 (IWS)", "Quadro RTX 4000"]
device_alias = {"Quadro RTX 4000 (IWS)": "CUDA/toucan_quadro_rtx4000_subgroup_sm70.csv",
"Quadro RTX 4000": "CUDA/toucan_quadro_rtx4000_subgroup.csv"}
config = ["2_thread_2_instruction",
"2_thread_3_instruction",
"2_thread_4_instruction",
"3_thread_3_instruction",
"3_thread_4_instruction"]
def get_data(fname):
f = open(fname, 'r')
ret = f.read()
f.close()
return ret
def find_ff_d_tests(c):
ret = []
p = os.path.join(formal_base_path,c,"schedulers/WEAK_FAIR_results.csv")
data = get_data(p).split("\n")[1:-2]
for t in data:
if "P" in t:
num = int(t.split(",")[0])
ret.append(num)
return ret
def find_ff_d_tests2(c):
ret = []
p = os.path.join(formal_base_path,c,"schedulers/LOBE_STRONG_results.csv")
data = get_data(p).split("\n")[1:-2]
for t in data:
if "P" in t:
num = int(t.split(",")[0])
ret.append(num)
return ret
def get_failed_and_total(s):
tt = re.match('.*\((\d+)/(\d+)\).*',s)
assert(tt)
return int(tt[1]),int(tt[2])
def check_res(res):
res = res.split(",")[1:4]
for r in res:
if "P" not in r:
#pdb.set_trace()
return "F",res
return "P",res
def get_csv_path(d,c):
da = device_alias[d]
return os.path.join(empirical_base_path,c,da)
def split_d(d):
d = d.split("\n")
while d[-1] == '':
d = d[:-1]
assert("Total" in d[-1])
assert("Test File" in d[0])
return d[1:-1]
verbose = False
if "-v" in sys.argv:
verbose = True
def pp_status(s):
return s.replace("P","0").replace("(","").replace(")","").replace(" ","").replace("F","")
print("checking weak fairness")
def pp_device_prop(d):
if ("IWS" in d):
return "IWS"
else:
return "non-IWS"
for d in devices:
c_f = 0
for c in config:
r = find_ff_d_tests(c)
dp = device_alias[d]
p = get_csv_path(d,c)
data = split_d(get_data(p))
for t in r:
v,vv = check_res(data[t])
if v in ["F"]:
assert("(IWS)" not in d)
if verbose:
print("found failed test")
print("config:",c)
print("id:",t)
print("plain fails:",pp_status(vv[0]))
print("--")
c_f += 1
if c_f == 0:
print("passed all weak fairness for the " + pp_device_prop(d) + " setting")
else:
print("failed " + str(c_f) + " weak fairness tests for the " + pp_device_prop(d) + " setting")
if not verbose:
print("run with -v to see failure test information")
print("------------------")
print("checking strong fairness")
for d in devices:
c_f = 0
for c in config:
r = find_ff_d_tests2(c)
dp = device_alias[d]
p = get_csv_path(d,c)
data = split_d(get_data(p))
for t in r:
v,vv = check_res(data[t])
if v in ["F"]:
c_f+=1
if verbose:
print("found failed test")
print("config:",c)
print("id:",t)
print("plain fails:",pp_status(vv[0]))
print("--")
if c_f == 0:
print("passed all strong fairness for the " + pp_device_prop(d) + " setting")
else:
print("failed " + str(c_f) + " strong fairness tests for the " + pp_device_prop(d) + " setting")
if not verbose:
print("run with -v to see failure test information")
|
from ..datasets.ss_dataset import SSDataset
from ..features.ss_feature import SSFeature
from ..models.ss_crf import SS_CRF
train_set = SSDataset('ViNLP/data/sentence-segmentation/train.txt')
dev_set = SSDataset('ViNLP/data/sentence-segmentation/dev.txt')
ss_feature = SSFeature()
X_train, y_train = ss_feature.transform(train_set.data)
X_dev, y_dev = ss_feature.transform(dev_set.data)
model = SS_CRF(
c1=1.0,
c2=1e-3,
max_iterations=100,
all_possible_transitions=True,
verbose=True,
)
model.fit(X_train, y_train, X_dev, y_dev)
model.save("ViNLP/pipeline/bin/ss.crfsuite") |
#!/usr/bin/env python
import diamond.plist as plist
import unittest
class PyListModule(unittest.TestCase):
''' #1: A simple list of integers, with cardinality ''. (One element only). '''
def testSimple(self):
l = plist.List(int)
self.assertEqual(l.__str__(), "list of <type 'int'> of cardinality: ")
self.assertEqual(l.__repr__(), "list of <type 'int'> of cardinality: ")
self.assertEqual(l("0"), "0")
''' #2: A simple list of integers, with cardinality '+'. '''
def testOneOrMore(self):
l = plist.List(int, '+')
self.assertEqual(l.__str__(), "list of <type 'int'> of cardinality: +")
self.assertEqual(l.__repr__(), "list of <type 'int'> of cardinality: +")
self.assertEqual(l("3,4,5"), "3 4 5")
''' #3: A list of two strings, with cardinality 2. '''
def testTwoStrings(self):
l = plist.List(str, "2")
self.assertEqual(l.__str__(), "list of <type 'str'> of cardinality: 2")
self.assertEqual(l.__repr__(), "list of <type 'str'> of cardinality: 2")
self.assertEqual(l("first second"), "first second")
''' #4: A list of none type, which should throw an non-callable exception when called. '''
def testNoneType(self):
l = plist.List(None)
try:
l("3,4,5")
self.fail()
except:
pass
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(PyListModule)
unittest.TextTestRunner(verbosity=3).run(suite)
|
from llvmlite import ir, binding
class CodeGen():
def __init__(self):
self.binding = binding
self.binding.initialize()
self.binding.initialize_native_target()
self.binding.initialize_native_asmprinter()
self._setup_llvm()
self._setup_engine()
def _setup_llvm(self):
self.module = ir.Module(name=__file__)
self.module.triple = self.binding.get_default_triple()
function_type = ir.FunctionType(ir.IntType(32), [], False)
main_function = ir.Function(self.module, function_type, name="main")
block = main_function.append_basic_block(name="entry")
self.builder = ir.IRBuilder(block)
def _setup_engine(self):
target = self.binding.Target.from_default_triple()
target_machine = target.create_target_machine()
backing_mod = binding.parse_assembly("")
engine = binding.create_mcjit_compiler(backing_mod, target_machine)
self.engine = engine
def create_ir(self):
llvm_ir = str(self.module)
mod = self.binding.parse_assembly(llvm_ir)
mod.verify()
self.engine.add_module(mod)
self.engine.finalize_object()
self.engine.run_static_constructors()
return mod
def save_ir(self, filename):
with open(filename, 'w') as output_file:
output_file.write(str(self.module))
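# Illustrative usage sketch (not executed here): terminate the "main" function
# set up in _setup_llvm with a return so the module verifies, then JIT-compile
# and dump the IR. The output filename is hypothetical.
# codegen = CodeGen()
# codegen.builder.ret(ir.Constant(ir.IntType(32), 0))
# codegen.create_ir()
# codegen.save_ir("out.ll")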
|
import torch
import torch.nn as nn
from model import recons_video
from model import flow_pwc
from utils import utils
def make_model(args):
device = 'cpu' if args.cpu else 'cuda'
load_flow_net = True
load_recons_net = False
flow_pretrain_fn = args.pretrain_models_dir + 'network-default.pytorch'
recons_pretrain_fn = ''
is_mask_filter = True
return CDVD_TSP(in_channels=args.n_colors, n_sequence=args.n_sequence, out_channels=args.n_colors,
n_resblock=args.n_resblock, n_feat=args.n_feat,
load_flow_net=load_flow_net, load_recons_net=load_recons_net,
flow_pretrain_fn=flow_pretrain_fn, recons_pretrain_fn=recons_pretrain_fn,
is_mask_filter=is_mask_filter, device=device)
class CDVD_TSP(nn.Module):
def __init__(self, in_channels=3, n_sequence=3, out_channels=3, n_resblock=3, n_feat=32,
load_flow_net=False, load_recons_net=False, flow_pretrain_fn='', recons_pretrain_fn='',
is_mask_filter=False, device='cuda'):
super(CDVD_TSP, self).__init__()
print("Creating CDVD-TSP Net")
self.n_sequence = n_sequence
self.device = device
assert n_sequence == 5, "Only support args.n_sequence=5; but get args.n_sequence={}".format(n_sequence)
self.is_mask_filter = is_mask_filter
        print('Apply mean filter to images when computing the mask:', 'True' if is_mask_filter else 'False')
extra_channels = 1
print('Select mask mode: concat, num_mask={}'.format(extra_channels))
self.flow_net = flow_pwc.Flow_PWC(load_pretrain=load_flow_net, pretrain_fn=flow_pretrain_fn, device=device)
self.recons_net = recons_video.RECONS_VIDEO(in_channels=in_channels, n_sequence=3, out_channels=out_channels,
n_resblock=n_resblock, n_feat=n_feat,
extra_channels=extra_channels)
if load_recons_net:
self.recons_net.load_state_dict(torch.load(recons_pretrain_fn))
print('Loading reconstruction pretrain model from {}'.format(recons_pretrain_fn))
def get_masks(self, img_list, flow_mask_list):
num_frames = len(img_list)
img_list_copy = [img.detach() for img in img_list] # detach backward
if self.is_mask_filter: # mean filter
img_list_copy = [utils.calc_meanFilter(im, n_channel=3, kernel_size=5) for im in img_list_copy]
delta = 1.
mid_frame = img_list_copy[num_frames // 2]
diff = torch.zeros_like(mid_frame)
for i in range(num_frames):
diff = diff + (img_list_copy[i] - mid_frame).pow(2)
diff = diff / (2 * delta * delta)
diff = torch.sqrt(torch.sum(diff, dim=1, keepdim=True))
luckiness = torch.exp(-diff) # (0,1)
sum_mask = torch.ones_like(flow_mask_list[0])
for i in range(num_frames):
sum_mask = sum_mask * flow_mask_list[i]
sum_mask = torch.sum(sum_mask, dim=1, keepdim=True)
sum_mask = (sum_mask > 0).float()
luckiness = luckiness * sum_mask
return luckiness
def forward(self, x):
frame_list = [x[:, i, :, :, :] for i in range(self.n_sequence)]
        # Iteration 1
warped01, _, _, flow_mask01 = self.flow_net(frame_list[1], frame_list[0])
warped21, _, _, flow_mask21 = self.flow_net(frame_list[1], frame_list[2])
warped12, _, _, flow_mask12 = self.flow_net(frame_list[2], frame_list[1])
warped32, _, _, flow_mask32 = self.flow_net(frame_list[2], frame_list[3])
warped23, _, _, flow_mask23 = self.flow_net(frame_list[3], frame_list[2])
warped43, _, _, flow_mask43 = self.flow_net(frame_list[3], frame_list[4])
one_mask = torch.ones_like(flow_mask01)
frame_warp_list = [warped01, frame_list[1], warped21]
flow_mask_list = [flow_mask01, one_mask.detach(), flow_mask21]
luckiness = self.get_masks(frame_warp_list, flow_mask_list)
concated = torch.cat([warped01, frame_list[1], warped21, luckiness], dim=1)
recons_1, _ = self.recons_net(concated)
frame_warp_list = [warped12, frame_list[2], warped32]
flow_mask_list = [flow_mask12, one_mask.detach(), flow_mask32]
luckiness = self.get_masks(frame_warp_list, flow_mask_list)
concated = torch.cat([warped12, frame_list[2], warped32, luckiness], dim=1)
recons_2, _ = self.recons_net(concated)
frame_warp_list = [warped23, frame_list[3], warped43]
flow_mask_list = [flow_mask23, one_mask.detach(), flow_mask43]
luckiness = self.get_masks(frame_warp_list, flow_mask_list)
concated = torch.cat([warped23, frame_list[3], warped43, luckiness], dim=1)
recons_3, _ = self.recons_net(concated)
        # Iteration 2
warped_recons12, _, _, flow_mask_recons12 = self.flow_net(recons_2, recons_1)
warped_recons32, _, _, flow_mask_recons32 = self.flow_net(recons_2, recons_3)
frame_warp_list = [warped_recons12, recons_2, warped_recons32]
flow_mask_list = [flow_mask_recons12, one_mask.detach(), flow_mask_recons32]
luckiness = self.get_masks(frame_warp_list, flow_mask_list)
concated = torch.cat([warped_recons12, recons_2, warped_recons32, luckiness], dim=1)
out, _ = self.recons_net(concated)
mid_loss = None
return recons_1, recons_2, recons_3, out, mid_loss
|
import unittest
import simplejson
class TestSimpleJson(unittest.TestCase):
def testUseSimpleJson(self):
loaded = simplejson.loads('{"foo": 1}')
self.assertEqual(loaded, {"foo": 1})
if __name__ == "__main__":
unittest.main()
|
from django.apps import AppConfig
class GolocalsConfig(AppConfig):
name = 'golocals'
|
# author: Fei Gao
#
# Search In Rotated Sorted Array
#
# Suppose a sorted array is rotated at some pivot unknown to you beforehand.
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
# You are given a target value to search. If found in the array return its
# index, otherwise return -1.
# You may assume no duplicate exists in the array.
import bisect
class Solution:
# @param A, a list of integers
# @param target, an integer to be searched
# @return an integer
def search(self, A, target):
if A is None or len(A) == 0:
return -1
min_index = self.find_min(A)
if min_index == 0:
return self.index(A, target)
if A[0] <= target:
index = self.index(A[:min_index], target)
return index
else:
index = self.index(A[min_index:], target)
if index != -1:
index += min_index
return index
def index(self, a, x):
'Locate the leftmost value exactly equal to x'
i = bisect.bisect_left(a, x)
if i != len(a) and a[i] == x:
return i
# raise ValueError
return -1
def find_min(self, A):
"""
# @param A, a rotated sorted list without duplicate elements
# @return the index of minimum element
"""
if A[0] <= A[-1]:
return 0
lo = 0
hi = len(A) - 1
while lo < hi:
mi = (lo + hi) // 2
if A[mi] > A[hi]:
lo = mi + 1
else:
hi = mi
return lo
def main():
solver = Solution()
lst = list(range(5))
for i in range(5):
rlst = lst[i:] + lst[:i]
print(rlst, " -> ", solver.find_min(rlst))
t = 5
print(t, solver.search(rlst, t))
pass
if __name__ == '__main__':
main()
pass
|
#!/usr/bin/python3.8
import sys
import os
import time
from os.path import join, dirname
from dotenv import load_dotenv
sys.path.append('/home/pit/Documents/py_env/general_env/lib/python3.8/site-packages')
from selenium import webdriver
def github_login():
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
my_browser = webdriver.Firefox()
    # send the browser to a specific URL
my_browser.get('https://github.com/login')
time.sleep(1)
my_browser.find_element_by_css_selector('#login_field').send_keys(os.environ.get('GITHUB_USERNAME'))
my_browser.find_element_by_css_selector('#password').send_keys(os.environ.get('GITHUB_PWD'))
my_browser.find_element_by_css_selector('.btn').click()
github_login()
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Channels.
"""
from abc import ABCMeta
from qiskit.pulse.exceptions import PulseError
class Channel(metaclass=ABCMeta):
"""Base class of channels."""
prefix = None
def __init__(self, index: int = None, buffer: int = 0):
"""Channel class.
Args:
index: Index of channel
buffer: Buffer that should be placed between instructions on channel
Raises:
PulseError: If integer index or buffer not supplied
"""
if not isinstance(index, int):
raise PulseError('Channel index must be integer')
self._index = index
if not isinstance(buffer, int):
raise PulseError('Channel buffer must be integer')
self._buffer = buffer
@property
def index(self) -> int:
"""Return the index of this channel."""
return self._index
@property
def buffer(self) -> int:
"""Return the buffer for this channel."""
return self._buffer
@property
def name(self) -> str:
"""Return the name of this channel."""
return '%s%d' % (self.__class__.prefix, self._index)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self._index)
def __eq__(self, other):
"""Two channels are the same if they are of the same type, and have the same index.
Args:
other (Channel): other Channel
Returns:
bool: are self and other equal.
"""
if type(self) is type(other) and \
self._index == other._index:
return True
return False
def __hash__(self):
return hash((type(self), self._index))
class AcquireChannel(Channel):
"""Acquire channel."""
prefix = 'a'
class SnapshotChannel(Channel):
"""Snapshot channel."""
prefix = 's'
def __init__(self):
"""Create new snapshot channel."""
super().__init__(0)
class MemorySlot(Channel):
"""Memory slot channel."""
prefix = 'm'
class RegisterSlot(Channel):
"""Classical resister slot channel."""
prefix = 'c'
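# Illustrative usage sketch (not part of the original module): channel names
# combine the subclass prefix with the index, e.g.
# AcquireChannel(2).name    -> 'a2'
# AcquireChannel(2).buffer  -> 0 (the default)
# AcquireChannel(2) == AcquireChannel(2)  -> True (same type and index)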
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import explode, col, lit, rand, asc, regexp_replace, lower, array, udf, collect_set, rank, collect_list, log
from pyspark.sql.window import Window
# `spark` is provided automatically in pyspark/spark-submit shells; create it here so the script also runs standalone
spark = SparkSession.builder.getOrCreate()
hdfs_mpdDir = "hdfs:///recsys_spotify_2018/mpd.v1/mpd.slice.*.json"
hdfs_challengeDir = "hdfs:///recsys_spotify_2018/challenge.v1/*.json"
hdfs_userDir = "hdfs:///user/XXX"
hdfs_trackFactorPath = hdfs_userDir + "/factorTracks"
df = spark.read.json(hdfs_mpdDir, multiLine=True)
playlists = df.select(explode("playlists").alias("playlist"))
pid_track_uri = playlists.select("playlist.pid",explode("playlist.tracks.track_uri").alias("track_uri")).groupBy('pid','track_uri').count()
trackList = pid_track_uri.groupBy("track_uri").count()
trackListFactor = trackList.withColumn("factor",1000000/col("count")).select("track_uri","factor")
trackListFactor.write.mode('overwrite').parquet(hdfs_trackFactorPath)
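# Illustrative follow-up (not part of the original script): the factor written
# above is an inverse-popularity weight (1000000 / number of playlists that
# contain the track). A later job could load it back with, for example:
# trackListFactor = spark.read.parquet(hdfs_trackFactorPath)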
|
# Doubly LinkedList
'''
head second third
| | |
| | |
+----+------+ +----+------+ +----+------+
| 1 | o-------->| 2 | o-------->| 3 | null |
| | o<--------| | o<--------| | |
+----+------+ +----+------+ +----+------+
'''
# Node class of a linked list
class DoubleNode:
# Constructor to create a new node
def __init__(self, data):
self.data = data
self.next = None # Reference to next node
self.previous = None # Reference to the previous node
class DoublyLinkedList:
# Constructor for empty linked list
def __init__(self):
self.head = None
self.tail = None
# Given a reference to the head
# appends a new node at the end
def append(self, data):
# Allocates node and put in the data
new_node = DoubleNode(data)
# This new node is going to be the last node
new_node.next = None
# If the Linked List is empty,
# make the new node as head
if self.head == None:
new_node.previous = None
self.head = new_node
return
last_node = self.head
while last_node.next:
last_node = last_node.next
last_node.next = new_node
new_node.previous = last_node
return
# Returns the length of the linked list.
def length(self):
if self.head == None:
return 0
current_node = self.head
total = 0 # Init count
# Loop while end of linked list is not reached
while current_node:
total += 1
current_node = current_node.next
return total
# Converts a linked list back into a Python list
def to_list(self):
# Init as null
node_data = []
current_node = self.head
while current_node:
node_data.append(current_node.data)
current_node = current_node.next
return node_data
def reverse_linked_list(self):
if self.head is None:
print("The list has no element to reverse")
return 0
# The next reference of the start node
# should be set none because the first node will
# become the last node in the reversed list.
# The previous reference of the last node should be set to None
# since the last node will become the previous node
# The next references of the nodes (except the first and last node) in the original list
# Should be swapped with the previous references.
current_node = self.head
new_node = current_node.next
current_node.next = None
current_node.previous = new_node
while new_node != None:
            new_node.previous = new_node.next
            new_node.next = current_node
            current_node = new_node
            new_node = new_node.previous
self.head = current_node
def display(self):
contents = self.head
# If the list is null
if contents is None:
print("List has no element")
while contents:
print(contents.data)
contents = contents.next
print("----------") # to see better the lists
# This insert a node at the start
def insert_at_start(self, data):
if self.head == None:
new_node = DoubleNode(data)
self.head = new_node
print("Node inserted")
return
new_node = DoubleNode(data)
new_node.next = self.head
self.head.previous = new_node
self.head = new_node
# This insert a node at the end
def insert_at_end(self, data):
if self.head == None:
new_node = DoubleNode(data)
self.head = new_node
return
current_node = self.head
while current_node.next != None:
current_node = current_node.next
new_node = DoubleNode(data)
current_node.next = new_node
new_node.previous = current_node
# Deleting Elements from the Start
def remove_at_start(self):
if self.head == None:
print("The list has no element to delete")
return
if self.head.next == None:
self.head = None
return
self.head = self.head.next
        self.head.previous = None
# Deleting Elements from the end
def remove_at_end(self):
if self.head == None:
print("The list has no element to delete")
return
if self.head.next == None:
self.head = None
return
current_node = self.head
while current_node.next != None:
current_node = current_node.next
current_node.previous.next = None
# This remove a node with the specified value
def remove_element_by_value(self, value):
if self.head == None:
print("The list has no element to delete")
return
if self.head.next == None:
            if self.head.data == value:
self.head = None
else:
print("Item not found")
return
if self.head.data == value:
self.head = self.head.next
self.head.previous = None
return
current_node = self.head
while current_node.next != None:
if current_node.data == value:
break
current_node = current_node.next
if current_node.next != None:
current_node.previous.next = current_node.next
current_node.next.previous = current_node.previous
else:
if current_node.data == value:
current_node.previous.next = None
else:
print("Element not found")
# Test
my_list = DoublyLinkedList()
my_list.display()
# Add the elements
my_list.append(3)
my_list.append(2)
my_list.append(7)
my_list.append(1)
my_list.display()
print("The total number of elements are: " + str(my_list.length()))
print(my_list.to_list()) # Python list
print("---------")
my_list.reverse_linked_list() # Reverse linked list
my_list.display()
my_list.remove_at_start()
my_list.display()
my_list.remove_at_end()
my_list.display()
my_list.insert_at_start(1)
my_list.display()
my_list.insert_at_end(3)
my_list.display()
my_list.remove_element_by_value(7)
my_list.display()
|
"""
Usage:
python main.py parse_raw_athlete_events_to_csv
python main.py identify_unique_games_to_json > olympics/import_json/games.json
python main.py parse_countries_csv_to_json > olympics/import_json/countries.json
python main.py generate_games_script_commands > tmp/games_script_commands.txt
python main.py parse_athlete_events_csv_to_json <game> > olympics/import_json/<game>.json
python main.py generate_games_import_commands > tmp/games_import_commands.txt
python main.py generate_mongo_init_commands > tmp/mongo_init_commands.txt
-
python main.py parse_openflights_data airports > olympics/import_json/airports.json
python main.py parse_openflights_data airlines > olympics/import_json/airlines.json
python main.py parse_openflights_data routes > olympics/import_json/routes.json
python main.py parse_openflights_data planes > olympics/import_json/planes.json
python main.py parse_openflights_data countries > olympics/import_json/countries.json
-
Options:
-h --help Show this screen.
--version Show version.
"""
import csv
import json
import os
import sys
import time
import traceback
import uuid
from datetime import datetime
from docopt import docopt
RAW_ATHLETTE_EVENTS = 'olympics/raw/athlete_events.csv'
RAW_COUNTRIES = 'olympics/raw/noc_regions.csv'
PARSED_ATHLETTE_EVENTS = 'olympics/raw/athlete_events_parsed.csv'
GAMES_JSON = 'olympics/import_json/games.json'
PARSED_CSV_FIELD_DELIM = '|'
VERSION = 'May 2021'
def print_options(msg):
print(msg)
arguments = docopt(__doc__, version=VERSION)
print(arguments)
def parse_raw_athlete_events_to_csv():
infile = RAW_ATHLETTE_EVENTS
outfile = PARSED_ATHLETTE_EVENTS
print('parse_raw_athlete_events_to_csv: {}'.format(infile))
fields = "id|name|sex|age|height|weight|team|noc|games|year|season|city|sport|event|medal|medal_value".split('|')
rows, row = list(), list()
print("field count: {} {}".format(str(len(fields)), fields))
# header row
for field in fields:
row.append(field)
rows.append(row)
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile)
for idx, obj in enumerate(rdr):
row = list()
if idx < 300000:
#print(obj)
row.append(parse_int(obj['id']))
row.append(parse_str(obj['name']))
row.append(parse_str(obj['sex']).lower())
row.append(parse_int(obj['age']))
row.append(parse_float(obj['height']))
row.append(parse_float(obj['weight']))
row.append(parse_str(obj['team']).lower())
row.append(parse_str(obj['noc']).lower())
row.append(parse_str(obj['games']).lower().replace(' ','_'))
row.append(parse_int(obj['year']))
row.append(parse_str(obj['season']).lower())
row.append(parse_str(obj['city']).lower())
row.append(parse_str(obj['sport']).lower())
row.append(parse_str(obj['event']).lower())
row.append(parse_str(obj['medal']).lower())
row.append(medal_value(obj['medal']))
rows.append(row)
with open(outfile, 'w') as f:
for row in rows:
line = PARSED_CSV_FIELD_DELIM.join(row)
#print(line)
f.write(line)
f.write("\n")
print('file written: {} count: {}'.format(outfile, len(rows)))
def parse_countries_csv_to_json():
infile = RAW_COUNTRIES
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile)
for idx, obj in enumerate(rdr):
j = obj_to_mongoimport_json(obj)
print(j)
def identify_unique_games_to_json():
infile = PARSED_ATHLETTE_EVENTS
unique_games = dict()
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile, delimiter='|')
for idx, obj in enumerate(rdr):
game = dict()
game['games'] = obj['games']
game['city'] = obj['city']
key = obj['games']
unique_games[key] = game
games_keys = sorted(unique_games.keys())
for key in games_keys:
game = unique_games[key]
j = obj_to_mongoimport_json(game)
print(j)
def generate_games_script_commands():
infile = PARSED_ATHLETTE_EVENTS
unique_games = dict()
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile, delimiter='|')
for idx, obj in enumerate(rdr):
key = obj['games']
unique_games[key] = 1
games_keys = sorted(unique_games.keys())
for game in games_keys:
cmd = 'python main.py parse_athlete_events_csv_to_json {} > olympics/import_json/g{}.json'
print('')
print("echo 'parsing game {}'".format(game))
print(cmd.format(game, game))
def generate_games_import_commands():
infile = PARSED_ATHLETTE_EVENTS
unique_games = dict()
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile, delimiter='|')
for idx, obj in enumerate(rdr):
key = obj['games']
unique_games[key] = 1
games_keys = sorted(unique_games.keys())
for game in games_keys:
print('')
print("echo 'importing game g{}'".format(game))
print("mongoimport --db olympics \\")
print(" --collection g{} \\".format(game))
print(" --file olympics/import_json/{}.json \\".format(game))
print(" --numInsertionWorkers 1 \\")
print(" --batchSize 24")
print('sleep 3')
def generate_mongo_init_commands():
infile = PARSED_ATHLETTE_EVENTS
unique_games = dict()
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile, delimiter='|')
for idx, obj in enumerate(rdr):
key = obj['games']
unique_games[key] = 1
print('')
print('// generated by: main.py generate_mongo_init_commands()')
print('')
games_keys = sorted(unique_games.keys())
print('')
for game in games_keys:
print('db.g{}.drop()'.format(game))
print('')
for game in games_keys:
print('db.createCollection("g{}")'.format(game))
# print('db.g{}.ensureIndex(<"name" : 1>, <"unique" : false>)'.format(game).replace('<','{').replace('>','}'))
# print('db.g{}.ensureIndex(<"noc" : 1>, <"unique" : false>)'.format(game).replace('<','{').replace('>','}'))
# print('db.g{}.ensureIndex(<"event" : 1>, <"unique" : false>)'.format(game).replace('<','{').replace('>','}'))
print('')
for game in games_keys:
print('db.g{}.count()'.format(game))
def parse_athlete_events_csv_to_json(game):
infile = PARSED_ATHLETTE_EVENTS
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile, delimiter='|')
for idx, obj in enumerate(rdr):
if game == obj['games']:
j = obj_to_mongoimport_json(obj)
print(j)
def parse_openflights_data(name):
infile = 'openflights/raw/{}.dat'.format(name)
row_count = 0
with open(infile, 'rt') as csvfile:
rdr = csv.DictReader(csvfile)
for idx, obj in enumerate(rdr):
row_count = row_count + 1
j = obj_to_mongoimport_json(obj)
print(j)
def obj_to_mongoimport_json(obj):
# example: {"_id":{"$oid":"5cb21be890d09ce938a7b3b7"},"name":"Putnam County Airport","city":"Greencastle","country":"United States","iata_code":"4I7","latitude":"39.6335556","longitude":"-86.8138056","altitude":"842","timezone_num":"-5","timezone_code":"America/New_York","location":{"type":"Point","coordinates":[-86.8138056,39.6335556]}}
id = uuid.uuid4()
return json.dumps(obj, separators=(',', ':'))
def parse_int(s):
try:
return str(int(s.strip()))
except:
return '-1'
def parse_float(s):
try:
return str(float(s.strip()))
except:
return '-1'
def parse_str(s):
try:
s1 = s.strip()
if s1 == 'NA':
s1 = ''
s1 = s1.replace('"',"")
s1 = s1.replace("'","")
s1 = s1.replace(",","")
s1 = s1.replace("|","")
return s1
except:
return '?'
def medal_value(s):
    # gold, silver, bronze
try:
s1 = s.strip().lower()
if s1.startswith('g'):
return '3'
if s1.startswith('s'):
return '2'
if s1.startswith('b'):
return '1'
return '0'
except:
return '-1'
def load_json_file(infile):
with open(infile) as json_file:
return json.load(json_file)
def write_obj_as_json_file(outfile, obj):
txt = json.dumps(obj, sort_keys=False, indent=2)
with open(outfile, 'wt') as f:
f.write(txt)
print("file written: " + outfile)
if __name__ == "__main__":
if len(sys.argv) > 1:
func = sys.argv[1].lower()
if func == 'parse_raw_athlete_events_to_csv':
parse_raw_athlete_events_to_csv()
elif func == 'identify_unique_games_to_json':
identify_unique_games_to_json()
elif func == 'parse_countries_csv_to_json':
parse_countries_csv_to_json()
elif func == 'generate_games_script_commands':
generate_games_script_commands()
elif func == 'generate_games_import_commands':
generate_games_import_commands()
elif func == 'generate_mongo_init_commands':
generate_mongo_init_commands()
elif func == 'parse_athlete_events_csv_to_json':
game = sys.argv[2]
parse_athlete_events_csv_to_json(game)
elif func == 'parse_openflights_data':
name = sys.argv[2]
parse_openflights_data(name)
else:
print_options('Error: invalid function: {}'.format(func))
else:
print_options('Error: no function argument provided.')
|
#########################
# Imports
#########################
import requests, json, base64
from secrets import *
#########################
# Headers
#########################
headers = {
'Authorization': "Basic " + str(base64.urlsafe_b64encode((CLIENT_ID + ':' + CLIENT_SECRET).encode())).decode()
}
data = [
('grant_type', 'authorization_code'),
('code', AUTHORIZATION_CODE),
('redirect_uri', REDIRECT_URI),
]
#########################
# Requests
#########################
response = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
token_data = json.loads(response.text)
#########################
# Populating
#########################
f = open("secrets.py", 'r')
i = f.readlines()
o = ""
f.close()
for line in i:
if line == "ACCESS_TOKEN = \"ACCESS_TOKEN\"\n":
line = "ACCESS_TOKEN = \"%s\"\n" % json["access_token"]
elif line == "REFRESH_TOKEN = \"REFRESH_TOKEN\"\n":
line = "REFRESH_TOKEN = \"%s\"\n" % json["refresh_token"]
o += line
f = open("secrets.py", 'w')
f.write(o)
f.close()
|
import os
from categorias import *
from proveedores import *
from productos import *
from clientes import *
Datos = []
Email = []
def logueo():
os.system("clear")
print(":::: MENU ACCESO ::::")
print("[1.] INGRESAR")
print("[2.] CREAR CUENTA DE USUARIO")
print("[3.] SALIR")
op = input("SELECCIONA UNA OPCION: ")
if op == '1' :
ingresar()
elif op == '2' :
registrar()
elif op == '3' :
salir()
else :
print("::: Vuelve pronto :::")
def ingresar():
os.system("clear")
if len(Datos) == 0 :
print("::: LA LISTA ESTÁ VACÍA :::")
key = input("Presione cualquier tecla para volver al menú")
logueo()
    else :
        usuario = input("USUARIO: ")
        contraseña = input("CONTRASEÑA: ")
        i = 0
        encontrado = False
        while i < len(Datos) :
            # the username and password are stored as the last two fields of each record
            if usuario == Datos[i][-2] and contraseña == Datos[i][-1] :
                encontrado = True
            i += 1
        if encontrado == False :
            print("::: USUARIO O CONTRASEÑA INCORRECTOS :::")
            logueo()
        else :
            print(":::: MENU ADMINISTRADOR ::::")
            print("[1.] CATEGORIAS")
            print("[2.] PROVEEDORES")
            print("[3.] PRODUCTOS")
            print("[4.] CLIENTES")
            print("[5.] REPORTES")
            print("[6.] SALIR")
            op = input("SELECCIONA UNA OPCION: ")
            if op == '1' :
                categorias()
            elif op == '2' :
                proveedores()
            elif op == '3' :
                productos()
            elif op == '4':
                clientes()
            elif op == '5':
                reportes()
            elif op == '6':
                salir()
            else :
                print("::: Vuelve pronto :::")
                logueo()
def registrar ():
    os.system("clear")
    nombres = input("DIGITE SUS NOMBRES: ")
    apellidos = input("DIGITE SUS APELLIDOS: ")
    celular = int(input("DIGITE NUMERO CELULAR: "))
    email = input("DIGITE SU E-MAIL: ")
    usuario = input("DIGITE SU USUARIO: ")
    contraseña = input("DIGITE SU CONTRASEÑA: ")
    i = 0
    encontrado = False
    # check whether the e-mail address is already registered
    while i < len(Email) :
        if email == Email[i] :
            encontrado = True
        i += 1
    if encontrado == True :
        print("::: EMAIL FUE ENCONTRADO EN LA LISTA")
    else :
        Email.append(email)
        Datos.append([nombres, apellidos, celular, email, usuario, contraseña])
    logueo()
def salir ():
print(Datos)
#logueo() |
# MIT License
#
# Copyright (c) 2020 Jonathan Zernik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import ecpy
from ecpy.curves import ECPyException
from ecpy.ecschnorr import ECSchnorr
from ecpy.keys import ECPrivateKey
from ecpy.keys import ECPublicKey
from squeak.core.elliptic import bytes_to_payment_point
from squeak.core.elliptic import CURVE
from squeak.core.elliptic import payment_point_to_bytes
from squeak.core.hashing import sha256
SIGNER = ECSchnorr(hashlib.sha256,"LIBSECP","ITUPLE")
PRIV_KEY_LENGTH = 32
PUB_KEY_LENGTH = 33
SIGNATURE_LENGTH = 64
class SqueakPublicKey:
"""Represents a squeak public key.
"""
def __init__(self, pub_key):
self.pub_key = pub_key
def verify(self, msg, sig):
r = int.from_bytes(sig[:32], "big")
s = int.from_bytes(sig[32:], "big")
sig_tuple = r, s
return SIGNER.verify(msg, sig_tuple, self.pub_key)
def to_bytes(self):
return payment_point_to_bytes(self.pub_key.W)
@classmethod
def from_bytes(cls, pub_key_bytes):
if len(pub_key_bytes) != PUB_KEY_LENGTH:
raise InvalidPublicKeyError()
try:
point = bytes_to_payment_point(pub_key_bytes)
pub_key = ECPublicKey(point)
return cls(pub_key)
except ECPyException:
raise InvalidPublicKeyError()
def __eq__(self, other):
return other.to_bytes() == self.to_bytes()
def __ne__(self, other):
return other.to_bytes() != self.to_bytes()
def __hash__(self):
return hash(self.to_bytes())
def __repr__(self):
return 'SqueakPublicKey(%r)' % (
self.to_bytes().hex(),
)
class SqueakPrivateKey:
"""Represents a squeak private key.
"""
def __init__(self, priv_key):
self.priv_key = priv_key
@classmethod
def generate(cls):
priv_key_bytes = ecpy.ecrand.rnd(CURVE.order)
priv_key = ECPrivateKey(priv_key_bytes, CURVE)
return cls(priv_key=priv_key)
def sign(self, msg):
r, s = SIGNER.sign(msg, self.priv_key)
r = r.to_bytes(32, 'big')
s = s.to_bytes(32, 'big')
return r+s
def get_public_key(self):
pubkey = self.priv_key.get_public_key()
return SqueakPublicKey(pub_key=pubkey)
def get_shared_key(self, public_key: SqueakPublicKey) -> bytes:
point = self.priv_key.d * public_key.pub_key.W
x_bytes = point.x.to_bytes(32, 'big')
return sha256(x_bytes)
def to_bytes(self):
return self.priv_key.d.to_bytes(32, 'big')
@classmethod
def from_bytes(cls, priv_key_bytes):
if len(priv_key_bytes) != PRIV_KEY_LENGTH:
raise InvalidPrivateKeyError()
priv_key_int = int.from_bytes(priv_key_bytes, "big")
priv_key = ECPrivateKey(priv_key_int, CURVE)
return cls(priv_key)
def __eq__(self, other):
return other.to_bytes() == self.to_bytes()
def __ne__(self, other):
return other.to_bytes() != self.to_bytes()
def __hash__(self):
return hash(self.to_bytes())
def __repr__(self):
return 'SqueakPrivateKey(%r)' % (
self.to_bytes().hex(),
)
class InvalidPrivateKeyError(Exception):
""" Invalid private key error.
"""
class InvalidPublicKeyError(Exception):
""" Invalid public key error.
"""
|
# Fetch product information using Amazon's Product Advertising API.
# Use of the retrieved product information is limited to directing end users to Amazon's site and promoting product sales.
# Using the API requires registering for the Amazon Associates program. Note that access may be revoked if no sales occur for a certain period.
# How to run:
# forego run python amazon_product_search.py
# When run as above, forego reads environment variables from a file named ".env" in the current directory and passes them to the program.
# Running this prints the retrieved product information one after another. Press Ctrl-C to cancel.
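# Example of the .env file read by forego (all values below are placeholders):
#   AMAZON_ACCESS_KEY=AKIAXXXXXXXXXXXXXXXX
#   AMAZON_SECRET_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
#   AMAZON_ASSOCIATE_TAG=yourtag-22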
import os
from amazon.api import AmazonAPI # pip install python-amazon-simple-product-api
# Read the authentication credentials from environment variables.
AMAZON_ACCESS_KEY = os.environ['AMAZON_ACCESS_KEY']
AMAZON_SECRET_KEY = os.environ['AMAZON_SECRET_KEY']
AMAZON_ASSOCIATE_TAG = os.environ['AMAZON_ASSOCIATE_TAG']
# Create an AmazonAPI object. Pass Region='JP' to select Amazon.co.jp.
amazon = AmazonAPI(AMAZON_ACCESS_KEY, AMAZON_SECRET_KEY, AMAZON_ASSOCIATE_TAG, Region='JP')
# Search for product information with the search() method, which uses the ItemSearch operation.
# The Keywords argument specifies the search terms and SearchIndex the product category to search in.
# SearchIndex='All' means searching across all categories.
products = amazon.search(Keywords='kindle', SearchIndex='All')
for product in products:  # Iterate over the returned products (AmazonProduct objects).
    print(product.title)  # Print the product name.
    print(product.offer_url)  # Print the product URL.
    price, currency = product.price_and_currency
    print(price, currency)  # Print the price and currency.
|
# Authors: Jonas Schluter <[email protected]>, Grant Hussey <[email protected]>
# License: MIT
import os
import shutil
import unittest
import warnings
from pathlib import Path
import logging
import taxumap.taxumap as t
# The data for unittests is from Olin and Axel:
# Olin, Axel (2018), “Stereotypic Immune System Development in Newborn Children”, Mendeley Data, v1
# setup logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
stream_format = logging.Formatter("%(funcName)s:%(levelname)s\n%(message)s\n")
sh = logging.StreamHandler()
sh.setFormatter(stream_format)
sh.setLevel(logging.WARNING) # this level and ABOVE
class TestTaxumap(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Makes temp directory to save everything"""
unique_folder_name = Path("./temp").resolve() # maybe beef this up later lol
logger.warning("Making temp directory at {}.".format(unique_folder_name))
try:
os.mkdir(unique_folder_name)
except FileExistsError:
try:
os.rmdir(unique_folder_name)
except OSError:
                warnings.warn("Please delete the temp directory in taxumap/taxumap/unittests")
@classmethod
    def tearDownClass(cls):
        """Deletes temp directory"""
unique_folder_name = Path("./temp").resolve() # maybe beef this up later lol
shutil.rmtree(unique_folder_name)
pass
def setUp(self):
self.broke_name = t.Taxumap(
fpt="taxonomy.csv",
fpx="microbiota_table.csv",
name=3465,
)
self.t1 = t.Taxumap(
fpt="taxonomy.csv",
fpx="microbiota_table.csv",
name="test1",
)
self.t2 = t.Taxumap(
fpt="taxonomy.csv",
fpx="microbiota_table.csv",
name="test2_transformed",
)
self.t2.transform_self()
def test_interactive_loading(self):
with self.assertRaises(NameError):
t.Taxumap(rel_abundances={}, taxonomy={})
with self.assertRaises(ValueError):
t.Taxumap(rel_abundances=None, taxonomy=None)
self.assertIsNone(self.broke_name.name)
def test_fp_loading(self):
pass
def test_is_loaded_from_logic(self):
pass
if __name__ == "__main__":
unittest.main()
|
import argparse
import logging
import sys
from .fetch import download_fns
logger = logging.getLogger("mne")
AVAILABLE_DATASETS = set(download_fns.keys())
def download_dataset(output_dir, n_first=None, cohort="eegbci"):
download_fns[cohort](output_dir, n_first)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o", "--output_dir", type=str, required=True, help="Path to output directory.\nWill be created if not available.",
)
parser.add_argument(
"-c",
"--cohort",
type=str,
help="Choice of EEG dataset (default 'eegbci').",
default="eegbci",
choices=AVAILABLE_DATASETS,
)
parser.add_argument("-n", "--n_first", default=109, type=int, help="Number of recordings to download.")
parser.add_argument("--log", action="store_true")
args = parser.parse_args()
if args.log:
file_handler = logging.FileHandler("logs/fetch_data.log", mode="w")
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.info(f'Usage: {" ".join([x for x in sys.argv])}\n')
logger.info("Settings:")
logger.info("---------------------------")
for idx, (k, v) in enumerate(sorted(vars(args).items())):
if idx == (len(vars(args)) - 1):
logger.info(f"{k:>15}\t{v}\n")
else:
logger.info(f"{k:>15}\t{v}")
download_dataset(args.output_dir, args.n_first, args.cohort)
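# Example invocation (illustrative; package and module names are placeholders, and the
# relative import of .fetch means this must be run with -m from inside its package):
#   python -m eeg_data.fetch_data -o ./data -c eegbci -n 5 --log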
|
#%%
#! python
import h5py
import matplotlib.pyplot as plt
import mcmc.image_cupy as im
import mcmc.plotting as p
import numpy as np
import scipy.linalg as sla
import scipy.special as ssp
import mcmc.util_cupy as util
import cupy as cp
import importlib
import datetime
import pathlib,os
import argparse
import parser_help as ph
import post_analysis as pa
from skimage.transform import iradon
def initialize_using_FBP(sim):
#Set initial condition of fourier index as the fourier index of FBP
FBP_image = iradon(cp.asnumpy(sim.measurement.sinogram),theta=cp.asnumpy(sim.measurement.theta),circle=True)
fbp_fHalf = sim.fourier.rfft2(cp.asarray(FBP_image,dtype=cp.float32))
fbp_fSym2D = util.symmetrize_2D(fbp_fHalf)
fbp_fSym = fbp_fSym2D.ravel(im.ORDER)
sim.Layers[-1].current_sample_sym = fbp_fSym
sim.Layers[-1].current_sample = sim.Layers[-1].current_sample_sym[sim.fourier.basis_number_2D_ravel-1:]
sim.Layers[-1].record_sample()
#%%
if __name__=='__main__':
# n_samples = 1000,n = 2**6,beta = 2e-1,num = 2**8,uHalfInit=None,
# kappa = 1e17,sigma_u = 5e6,sigma_v = 10,printInterval = 100,
# seed=1,burnPercentage = 5,useLaTeX=True,randVectInitiated=True,
# showFigures=True
parser = argparse.ArgumentParser()
parser.add_argument('--n-layers',default=2,type=int,help='number SPDE layers, Default=2')
    parser.add_argument('--n-theta',default=45,type=int,help='number theta, Default=45')
    parser.add_argument('--n',default=32,type=int,help='number of Fourier basis, Default=32')
parser.add_argument('--seed',default=1,type=int,help='random generator seed, Default=1')
parser.add_argument('--n-extended',default=64,type=int,help='number of point per axis, Default=64')
parser.add_argument('--n-samples',default=100,type=int,help='number of MCMC samples per computer core, Default=100')
parser.add_argument('--evaluation-interval',default=100,type=int,help='interval to print and reevaluate beta, Default=100')
parser.add_argument('--beta',default=1,type=float,help='preconditioned Crank Nicholson beta parameter, Default=1')
    parser.add_argument('--kappa',default=5e9,type=float,help='kappa constant for u_t, Default=5e9')
parser.add_argument('--chol-epsilon',default=0,type=float,help='epsilon to ensure cholesky factorization always result in PD, Default=0')
    parser.add_argument('--sigma-0',default=4e7,type=float,help='Sigma_u constant, Default=4e7')
    parser.add_argument('--sigma-v',default=3.2e4,type=float,help='Sigma_v constant, Default=3.2e4')
parser.add_argument('--meas-std',default=0.2,type=float,help='Measurement stdev, Default=0.2')
    parser.add_argument('--sigma-scaling',default=4e-1,type=float,help='Sigma_scaling constant, Default=4e-1')
parser.add_argument('--burn-percentage',default=25.0,type=float,help='Burn Percentage, Default=25.0')
parser.add_argument('--variant',default="dunlop",type=str,help='preconditioned Crank Nicholson multilayered algorithm variant, Default=dunlop')
parser.add_argument('--phantom-name',default="shepp.png",type=str,help='Phantom name, Default=shepp.png')
parser.add_argument('--meas-type',default="tomo",type=str,help='Two D Measurement, Default=tomo')
parser.add_argument('--init-folder',default="",type=str,help='Initial condition for the states, Default=empty')
ph.add_boolean_argument(parser,'enable-beta-feedback',default=True,messages='Whether beta-feedback will be enabled, Default=True')
ph.add_boolean_argument(parser,'print-progress',default=True,messages='Whether progress is printed, Default=True')
ph.add_boolean_argument(parser,'verbose',default=True,messages='Verbose mode, Default=True')
ph.add_boolean_argument(parser,'hybrid',default=False,messages='Use both GPU and CPU memory, Default=False')
args = parser.parse_args()
sim = im.Simulation(n_layers=args.n_layers,n_samples = args.n_samples,n = args.n,n_extended = args.n_extended,beta = args.beta,
kappa = args.kappa,sigma_0 = args.sigma_0,sigma_v = args.sigma_v,sigma_scaling=args.sigma_scaling,meas_std=args.meas_std,evaluation_interval = args.evaluation_interval,printProgress=args.print_progress,
seed=args.seed,burn_percentage = args.burn_percentage,enable_beta_feedback=args.enable_beta_feedback,pcn_variant=args.variant,phantom_name=args.phantom_name
,meas_type=args.meas_type,n_theta=args.n_theta,verbose=args.verbose,hybrid_GPU_CPU=args.hybrid)
folderName = 'result-'+ datetime.datetime.now().strftime('%d-%b-%Y_%H_%M_%S')
if 'WRKDIR' in os.environ:
simResultPath = pathlib.Path(os.environ['WRKDIR']) / 'SimulationResult'/folderName
elif 'USER' in os.environ and pathlib.Path('/scratch/work/'+os.environ['USER']+'/SimulationResult').exists():
simResultPath = pathlib.Path('/scratch/work/'+os.environ['USER']+'/SimulationResult')/folderName
else:
simResultPath = pathlib.Path.home() / 'Documents' / 'SimulationResult'/folderName
if not simResultPath.exists():
simResultPath.mkdir()
#set pcn epsilon for cholesky
sim.pcn.set_chol_epsilon(args.chol_epsilon)
if not args.init_folder:
initialize_using_FBP(sim)
else:
#TODO:HARD CODED relative path BADDD
relative_path = pathlib.Path("/scratch/work/emzirm1/SimulationResult")
# relative_path = pathlib.Path("//data.triton.aalto.fi/work/emzirm1/SimulationResult")
init_folder = relative_path /args.init_folder
init_file = init_folder/'result.hdf5'
if not init_file.exists():
initialize_using_FBP(sim)
else:
with h5py.File(init_file,mode='r') as file:
#take the latest sample from the folder
samples_history = file['Layers 1/samples_history'][()]
init_Sym = util.symmetrize(cp.asarray(samples_history[-1]))
del samples_history
u_samples_history = file['Layers 0/samples_history'][()]
u_init_Sym = util.symmetrize(cp.asarray(u_samples_history[-1]))
del u_samples_history
sim.Layers[-1].current_sample_sym = init_Sym
sim.Layers[-1].current_sample = sim.Layers[-1].current_sample_sym[sim.fourier.basis_number_2D_ravel-1:]
sim.Layers[-1].record_sample()
sim.Layers[0].current_sample_sym = u_init_Sym
sim.Layers[0].current_sample = sim.Layers[0].current_sample_sym[sim.fourier.basis_number_2D_ravel-1:]
sim.Layers[0].record_sample()
print("Used bytes so far, before even running the simulation {}".format(sim.mempool.used_bytes()))
sim.run()
sim.save(str(simResultPath/'result.hdf5'))
print('simulation Result path is {}'.format(simResultPath))
#
#do analysis offline
pa.post_analysis(folderName,simResultPath.parent)
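# Example invocation (illustrative; the script name is a placeholder and the flags
# mirror the argparse defaults above):
#   python run_simulation.py --n 32 --n-theta 45 --n-samples 100 --phantom-name shepp.png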
|
import json
import requests
import time
import multiprocessing as mp
import src.PathSolving.path_solver as ps
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
class Package:
coordinates: (float, float, float)=None
weight: float = None
id :str = None
def __init__(self, coordinates:(float, float, float), weight:float, id:str):
self.coordinates = coordinates
self.weight = weight
self.id = id
class Action:
duration: float = None
relative: bool = None
target_x: float = None
target_y: float = None
target_yaw: float = None
target_z: float = None
waited_dur: bool = False
def __init__(self, action: dict):
att_list = ["duration", "relative", "target_x", "target_y", "target_z", "target_yaw", "battery_percentage"]
for x in action:
if(x in att_list):
setattr(self, x, action[x])
else:
raise Exception("Action got an unexpected attribute: "+x+"\n val: "+str(action[x]))
self.waited_dur = False
class Api:
server_id:str = None
def __init__(self, server_id:str="http://10.4.14.248:5000/api"):
self.server_id=server_id
arena = self.get_arena()
self.min_x = arena["min_x"]
self.min_y = arena["min_y"]
self.min_z = arena["min_z"]
self.max_x = arena["max_x"]
self.max_y = arena["max_y"]
self.max_z = arena["max_z"]
self.buildings = arena["buildings"]
def _command(self, adress: str, command: str) -> dict or bool:
try:
print(self.server_id + "/" + command)
r = requests.get(url=adress + "/" + command, )
print(r.text)
except Exception as err:
raise Exception(
"Could not execute:\n " + adress + "/" + command + "\n got a problem from server: \n" + str(
err.args))
try:
if ("404 Not Found" in r.text):
result = False
elif ("500 Internal Server Error" in r.text):
raise Exception("got 505 error from server!:\n" + r.text)
else:
result = json.loads(r.text)
except Exception as err:
raise Exception(
"Could not convert json:\n " + adress + "/" + command + "\n got a following text: \n" + r.text)
return result
def _api_command(self, command: str) -> dict or bool:
return self._command(adress=self.server_id, command=command)
def get_arena(self) -> dict:
command="arena"
return self._api_command(command=command)
class Swarm(Api):
id:str = None
swarm_adress:str = None
droneIDs:list = []
packages: list = []
drone_jobs:dict = {}
drones:list =[]
swarm_thread_pool:mp.Pool = None
processes = []
def __init__(self, swarm_id:str, server_id:str = "http://10.4.14.248:5000/api", swarm_drones=[34,35,36], arena:int=2):
super().__init__(server_id=server_id)
self.id = swarm_id
self.swarm_adress = server_id+"/"+self.id
self.droneIDs = swarm_drones
self.drone_jobs = {x:"FREE" for x in self.droneIDs}
#self.swarm_thread_pool = mp.Pool(processes=len(self.droneIDs))
try:
self.register(arena=arena)
except Exception as err:
print("Was already Registered")
def init_drones(self):
print("connect drones")
for ind,x in enumerate(self.droneIDs):
print("Drone: "+str(x))
tmp_drone = Drone(droneID=x, swarm=self)
self.drones.append(tmp_drone)
tmp_drone.connect()
def shutdown(self):
for drone in self.drones:
try:
drone.land()
time.sleep(1)
drone.disconnect()
except:
continue
def schedule_jobs(self,job):
print("new JOB")
print(job)
print(self.drone_jobs)
assigned = False
for ind,droneID in enumerate(self.droneIDs):
if(self.drone_jobs[droneID] == "FREE" or not droneID in self.drone_jobs):
self.drone_jobs.update({droneID:"BUSY"})
p=mp.Process(target=self.drones[ind].assign_job, args=job)
self.processes.append(p)
print(self.drone_jobs)
return p
if not(assigned):
time.sleep(7) #parameter!
return self.schedule_jobs(job)
def run_processes(self):
print(self.processes)
for p in self.processes:
print("START")
p.start()
time.sleep(2)
self.wait_for_jobs()
def swarm_assign_job(self, drone_ID, job):
self.drones[self.droneIDs.index(drone_ID)].assign_job(job)
def __getstate__(self):
self_dict = self.__dict__.copy()
del self_dict['swarm_thread_pool']
return self_dict
def wait_for_jobs(self):
for p in self.processes:
p.join()
print("Wait")
def _swarm_command(self, command:str)->dict or bool:
return self._command(adress=self.swarm_adress, command=command)
#SWARM FUNCS
def get_package(self) -> Package:
command = "package"
packages = self._swarm_command(command=command)
return Package(coordinates=packages["coordinates"], weight=packages["weight"], id=packages["id"])
def print_deliveries(self) -> dict:
command="print_deliveries"
return self._swarm_command(command=command)
def reset_packageList(self, seed:int=1)->bool:
command="reset_package_generator?seed="+str(seed)
return self._swarm_command(command=command)["success"]
def register(self, arena:int = 2)->bool:
command="register_swarm?arena_id="+str(arena)
register = self._swarm_command(command=command)
return register["success"]
def status(self)->dict:
command="status"
return self._swarm_command(command=command)
class Drone(Api):
ID:int = None
swarm: Swarm = None
swarm_adress:str = None
capacity:float = None
action:Action = None
status:str = None
sleep_update: float = 0.5
accepting_ration:float = 0.75
accepted_variance = 0.2
packages:Package = []
load = 0
flight_heigth = 0.3
def __init__(self, droneID:int, swarm:Swarm, capacity:float=1.0):
super().__init__(server_id=swarm.server_id)
self.swarm = swarm
self.swarm_adress = swarm.swarm_adress
self.ID = droneID
self.capacity = capacity
def load_Package(self, package:Package):
self._wait_for_task()
ret=self.pickup(package.id)
if(ret):
self.packages.append(package)
self.load += package.weight
return ret
def _reached_target(self):
targets = {x:getattr(self.action, x) for x in vars(self.action) if "target" in x and not "yaw" in x}
actual_pos = {"x":self.x, "y":self.y, "z":self.z, "yaw":self.yaw}
var_pos = [self.var_x, self.var_y, self.var_z]
print("Target params: "+str(targets))
print("actual_vars: "+str(actual_pos))
accepted_var=self.accepted_variance
#check if pos is reached
for target in targets:
if(targets[target] >= actual_pos[target.replace("target_", "")]-accepted_var and targets[target] <= actual_pos[target.replace("target_", "")]+accepted_var ):
continue
else:
return False
print("Reached Targets")
return True
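    # Polls the drone until the previously commanded Action reaches its target pose
    # (within accepted_variance); raises if the drone reports an OFFLINE status.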
def _wait_for_task(self):
if(self.action == None):
print("DID not find previous Action")
return True
while True:
if(not self._reached_target()):
if(self.action.waited_dur):
sleep_time = self.sleep_update
else:
sleep_time = self.accepting_ration*self.action.duration
setattr(self.action, "waited_dur", True)
print("Waiting for " + str(sleep_time) + "s with status: " + self.status)
time.sleep(sleep_time)
self._update_status()
time.sleep(1)
continue
elif any([self.status == x for x in ["OFFLINE"]]):
raise Exception("DRONE - ACTION FAILED drone is : "+self.status)
else:
print("next")
break
def _update_status(self)->dict:
command="status"
status = self._drone_command(command=command)
#update class attributes
try:
for x in status:
if x in ["id", "var_x", "var_y", "var_z", "x", "y", "z", "yaw", "status", "battery_voltage", "battery_percentage"]:
setattr(self, x, status[x])
except Exception as err:
raise Exception("Could not update. got: "+str(status))
return status
def _drone_command(self, command:str)->dict or bool:
return self._command(adress=self.swarm_adress+"/"+str(self.ID), command=command)
#funcs
def assign_job(self, *job):
print("assign job")
print(job)
(delivery_paths, package) = job
self.swarm.drone_jobs.update({self.ID:"BUSY"})
time.sleep(5*self.swarm.droneIDs.index(self.ID))
if(self.z < 0.2):
self.takeoff()
if(self.x != delivery_paths[0][0] and self.y != delivery_paths[0][1]):
self.goto(delivery_paths[0])
self.lower()
while True:
if(self.load_Package(package)):
print("loaded Package: "+str(package.id))
break
time.sleep(self.sleep_update)
for node in delivery_paths:
print("go to node: "+str(ps.get_point_or_idx(node)))
self.goto((float(node[0]), float(node[1])))
self.do_delivery(self.packages.pop())
for node in reversed(delivery_paths):
self.goto((float(node[0]), float(node[1])))
self.swarm.drone_jobs.update({self.ID:"FREE"})
return 0
def do_delivery(self, package:Package):
self.lower()
while True:
if(self.deliver(package)):
print("Delivered package to: "+str(ps.get_point_or_idx(package.coordinates[:2])))
break
time.sleep(self.sleep_update)
def lower(self):
self._wait_for_task()
# check bugs
if (self.z == 0):
raise Exception("I'm not in the air!")
command = "goto?x=" + str(float(self.x)) + "&y=" + str(float(self.y)) + "&z=" + str(
float(0.1)) + "&yaw=" + str(0.0) + "&v=" + str(0.3)
action_dict = self._drone_command(command)
print("Drone " + str(self.ID) + " Lowering")
action = Action(action_dict)
setattr(self, "action", action)
return action
#API
def connect(self, radio:int=0):
command="connect?r="+str(radio)+"&c=98&a=E7E7E7E7"+str(self.ID)+"&dr=2M"
self._drone_command(command=command)
self._update_status()
def disconnect(self)->bool:
self._wait_for_task()
command="disconnect"
register = self._drone_command(command=command)
return register["success"]
def takeoff(self, height:float=flight_heigth, vel:float=1)->dict:
self._wait_for_task()
command="takeoff?z="+str(height)+"&v="+str(vel)
register = self._drone_command(command)
setattr(self, "action", Action(register))
return register
def land(self, height:float=0, vel:float=1)->dict:
self._wait_for_task()
command="land?z="+str(height)+"&v="+str(vel)
register = self._drone_command(command)
setattr(self, "action", Action(register))
print("Drone: "+str(self.ID)+"\t Landing")
return register
    def _other_drone(self, droneID):
        # _command() joins adress and command with a "/", so pass them separately here.
        adress = str(self.swarm_adress)+"/"+str(droneID)
        status = self._command(adress=adress, command="status")
        return status
    def _check_others(self, pos:(float, float)):
        droneIDs=self.swarm.droneIDs
        found_clash = True
        max_iter=3
        i=0
        # Re-check the other drones until no clash is found or max_iter is reached.
        while found_clash and (i < max_iter):
            found_clash = False
            for id in droneIDs:
                od_status = self._other_drone(id)
                od = next((d for d in self.swarm.drones if d.ID == id), None)
                current_coords = pos[0] == od_status["x"] and pos[1] == od_status["y"]
                future_coords = False
                if(od is not None and od.action != None):
                    future_coords = (od.action.target_x == pos[0] and od.action.target_y == pos[1])
                if(self.ID > od_status["id"] and (current_coords or future_coords)):
                    time.sleep(5)
                    found_clash = True
                    print("WAIT FOUND CLASH!")
            i += 1
def goto(self, pos:(float,float)=(1,1), vel:float=0.5, yaw:float = 0.0)->float:
self._wait_for_task()
self._check_others(pos)
#check bugs
if(self.z == 0):
raise Exception("I'm not in the air!")
command="goto?x="+str(float(pos[0]))+"&y="+str(float(pos[1]))+"&z="+str(float(self.flight_heigth))+"&yaw="+str(yaw)+"&v="+str(vel)
action_dict = self._drone_command(command)
print("Drone "+str(self.ID)+" is navigating to: "+str(ps.get_point_or_idx(pos)))
action = Action(action_dict)
setattr(self, "action", action)
return action
def stop(self)->dict:
command="stop"
register = self._drone_command(command=command)
setattr(self, "action", Action(register))
return register
def status(self)->dict:
status = self._update_status()
return status
def deliver(self, package:Package)->dict:
self._wait_for_task()
command="deliver?package_id="+str(package.id)
register = self._drone_command(command=command)
self.load -= package.weight
setattr(self, "action", None)
return register["success"]
def calibrate(self)->bool:
command="calibrate"
register = self._drone_command(command)
return register["success"]
def pickup(self, packageID:str):
command="pickup?package_id="+packageID
register = self._drone_command(command=command)
setattr(self, "action", None)
return register["success"] |
# -*- coding: utf-8 -*-
"""
This module contains a HTMLExportDefinition Model
"""
import re
from loglan_db import db
from loglan_db.model_db.base_definition import BaseDefinition
from loglan_db.model_html import DEFAULT_HTML_STYLE
from loglan_db.model_html.html_word import HTMLExportWord
class DefinitionFormatter:
"""
Additional methods for definition's formatting
"""
@staticmethod
def format_body(body: str) -> str:
"""
Substitutes tags in the definition's body
Formats punctuation signs
:param body:
:return:
"""
to_key = '<k>' # key
tc_key = '</k>'
to_log = '<l>' # log
tc_log = '</l>'
        return body \
            .replace("<", "&lt;").replace(">", "&gt;") \
            .replace("«", to_key).replace("»", tc_key) \
            .replace("{", to_log).replace("}", tc_log) \
            .replace("...", "…").replace("--", "—")
@staticmethod
def highlight_key(def_body, word, case_sensitive: bool = False) -> str:
"""
Highlights the current key from the list, deselecting the rest
:param def_body:
:param word:
:param case_sensitive:
:return:
"""
to_key = '<k>' # key
tc_key = '</k>'
to_del = '<do_not_delete>'
tc_del = '</do_not_delete>'
key_pattern = f"{to_key}{word.replace('*', '.*')}{tc_key}"
list_of_keys = def_body.replace("</k>", "</k>@").split("@")
for key in list_of_keys:
res = re.search(key_pattern, key, flags=0 if case_sensitive else re.IGNORECASE)
if not res:
continue
original_key = res[0]
replace_key = original_key.replace(to_key, to_del).replace(tc_key, tc_del)
def_body = def_body.replace(original_key, replace_key)
def_body = def_body.replace(tc_key, str()).replace(to_key, str())
def_body = def_body.replace(to_del, to_key).replace(tc_del, tc_key)
return def_body
@staticmethod
def tagged_word_origin_x(d_source_word, tag: str) -> str:
"""
Generate Word.origin_x as HTML tag
Args:
d_source_word:
tag:
Returns:
"""
w_origin_x = d_source_word.origin_x \
if d_source_word.origin_x and d_source_word.type.group == "Cpx" else str()
return tag % w_origin_x if w_origin_x else str()
@staticmethod
def tagged_word_name(usage: str, d_source_word, tag: str) -> str:
"""
Generate Word.name as HTML tag
Args:
usage:
d_source_word:
tag:
Returns:
"""
w_name = d_source_word.name if not usage \
else usage.replace("%", d_source_word.name)
return tag % w_name
@classmethod
def tagged_definition_body(cls, body: str, key_word: str, tag: str) -> str:
"""
Generate Definition.body as HTML tag with highlighted key word
Args:
body:
key_word:
tag:
Returns:
"""
definition_body = cls.format_body(body)
definition_body = cls.highlight_key(definition_body, key_word)
definition_body = tag % definition_body
return definition_body
class HTMLExportDefinition(BaseDefinition, DefinitionFormatter):
"""
HTMLExportDefinition Class
"""
_source_word = db.relationship(
HTMLExportWord.__name__, back_populates="_definitions", viewonly=True)
def export_for_english(self, word: str, style: str = DEFAULT_HTML_STYLE) -> str:
"""
:param word:
:param style:
:return:
"""
# de = definition english
tags = {
"normal": [
'<span class="dg">(%s)</span>',
'<span class="dt">[%s]</span> ',
' <span class="db">%s</span>',
f'<span class="definition eng" id={self.id}>%s</span>',
'<div class="d_line">%s</div>',
'<span class="w_name">%s</span>, ',
'<span class="w_origin"><%s></span> ',
],
"ultra": [
                '(%s)', '[%s] ', ' %s', '<de>%s</de>',
                '<ld>%s</ld>', '<wn>%s</wn>, ', '<o>&lt;%s&gt;</o> ', ],
}
t_d_gram, t_d_tags, t_d_body, t_def, t_def_line, t_word_name, t_word_origin = tags[style]
gram_form = self.stringer(self.slots) + self.grammar_code
def_gram = t_d_gram % gram_form if gram_form else ''
def_tags = t_d_tags % self.case_tags.replace("-", "‍-‍") if self.case_tags else ''
def_body = self.tagged_definition_body(self.body, word, t_d_body)
word_name = self.tagged_word_name(self.usage, self.source_word, t_word_name)
word_origin_x = self.tagged_word_origin_x(self.source_word, t_word_origin)
definition = t_def % f'{def_tags}{def_gram}{def_body}'
return t_def_line % f'{word_name}{word_origin_x}{definition}'
def export_for_loglan(self, style: str = DEFAULT_HTML_STYLE) -> str:
"""
:param style:
:return:
"""
tags = {
# usage, gram, body, tags, definition
"normal": [
'<span class="du">%s</span> ', '<span class="dg">(%s)</span> ',
'<span class="db">%s</span>', ' <span class="dt">[%s]</span>',
f'<div class="definition log" id={self.id}>%s</div>', ],
"ultra": ['<du>%s</du> ', '(%s) ', '%s', ' [%s]', '<dl>%s</dl>', ],
}
t_d_usage, t_d_gram, t_d_body, t_d_tags, t_definition = tags[style]
def_usage = t_d_usage % self.usage.replace("%", "—") if self.usage else ''
gram_form = f"{str(self.slots) if self.slots else ''}" + self.grammar_code
def_gram = t_d_gram % gram_form if gram_form else ''
def_body = t_d_body % self.format_body(self.body)
def_tags = t_d_tags % self.case_tags.replace("-", "‍-‍") if self.case_tags else ''
return t_definition % f'{def_usage}{def_gram}{def_body}{def_tags}'
|
##
# File: PdbxReadWriteTests.py
# Author: jdw
# Date: 9-Oct-2011
# Version: 0.001
#
# Updated:
# 24-Oct-2012 jdw update path details and reorganize.
#
##
""" Various tests caess for PDBx/mmCIF data file and dictionary reader and writer.
"""
from __future__ import absolute_import
__docformat__ = "restructuredtext en"
__author__ = "John Westbrook"
__email__ = "[email protected]"
__license__ = "Creative Commons Attribution 3.0 Unported"
__version__ = "V0.01"
import sys, unittest, traceback
import sys, time, os, os.path, shutil
from simtk.openmm.app.internal.pdbx.reader.PdbxReader import PdbxReader
from simtk.openmm.app.internal.pdbx.writer.PdbxWriter import PdbxWriter
from simtk.openmm.app.internal.pdbx.reader.PdbxContainers import *
class PdbxReadWriteTests(unittest.TestCase):
def setUp(self):
self.lfh=sys.stdout
self.verbose=False
self.pathPdbxDataFile = "../tests/1kip.cif"
self.pathOutputFile = "testOutputDataFile.cif"
def tearDown(self):
pass
def testSimpleInitialization(self):
"""Test case - Simple initialization of a data category and data block
"""
self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
sys._getframe().f_code.co_name))
try:
#
fn="test-simple.cif"
attributeNameList=['aOne','aTwo','aThree','aFour','aFive','aSix','aSeven','aEight','aNine','aTen']
rowList=[[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10]
]
nameCat='myCategory'
#
#
curContainer=DataContainer("myblock")
aCat=DataCategory(nameCat,attributeNameList,rowList)
aCat.printIt()
curContainer.append(aCat)
curContainer.printIt()
#
myContainerList=[]
myContainerList.append(curContainer)
ofh = open(fn, "w")
pdbxW=PdbxWriter(ofh)
pdbxW.write(myContainerList)
ofh.close()
myContainerList=[]
ifh = open(fn, "r")
pRd=PdbxReader(ifh)
pRd.read(myContainerList)
ifh.close()
for container in myContainerList:
for objName in container.getObjNameList():
name,aList,rList=container.getObj(objName).get()
self.lfh.write("Recovered data category %s\n" % name)
self.lfh.write("Attribute list %r\n" % repr(aList))
self.lfh.write("Row list %r\n" % repr(rList))
except:
traceback.print_exc(file=self.lfh)
self.fail()
def testWriteDataFile(self):
"""Test case - write data file
"""
self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
sys._getframe().f_code.co_name))
try:
#
myDataList=[]
ofh = open("test-output.cif", "w")
curContainer=DataContainer("myblock")
aCat=DataCategory("pdbx_seqtool_mapping_ref")
aCat.appendAttribute("ordinal")
aCat.appendAttribute("entity_id")
aCat.appendAttribute("auth_mon_id")
aCat.appendAttribute("auth_mon_num")
aCat.appendAttribute("pdb_chain_id")
aCat.appendAttribute("ref_mon_id")
aCat.appendAttribute("ref_mon_num")
aCat.append([1,2,3,4,5,6,7])
aCat.append([1,2,3,4,5,6,7])
aCat.append([1,2,3,4,5,6,7])
aCat.append([1,2,3,4,5,6,7])
aCat.append([7,6,5,4,3,2,1])
aCat.printIt()
curContainer.append(aCat)
curContainer.printIt()
#
myDataList.append(curContainer)
pdbxW=PdbxWriter(ofh)
pdbxW.write(myDataList)
ofh.close()
except:
traceback.print_exc(file=self.lfh)
self.fail()
def testUpdateDataFile(self):
"""Test case - update data file
"""
self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
sys._getframe().f_code.co_name))
try:
            # Create an initial data file --
#
myDataList=[]
curContainer=DataContainer("myblock")
aCat=DataCategory("pdbx_seqtool_mapping_ref")
aCat.appendAttribute("ordinal")
aCat.appendAttribute("entity_id")
aCat.appendAttribute("auth_mon_id")
aCat.appendAttribute("auth_mon_num")
aCat.appendAttribute("pdb_chain_id")
aCat.appendAttribute("ref_mon_id")
aCat.appendAttribute("ref_mon_num")
aCat.append([9,2,3,4,5,6,7])
aCat.append([10,2,3,4,5,6,7])
aCat.append([11,2,3,4,5,6,7])
aCat.append([12,2,3,4,5,6,7])
#self.lfh.write("Assigned data category state-----------------\n")
#aCat.dumpIt(fh=self.lfh)
curContainer.append(aCat)
myDataList.append(curContainer)
ofh = open("test-output-1.cif", "w")
pdbxW=PdbxWriter(ofh)
pdbxW.write(myDataList)
ofh.close()
#
#
# Read and update the data -
#
myDataList=[]
ifh = open("test-output-1.cif", "r")
pRd=PdbxReader(ifh)
pRd.read(myDataList)
ifh.close()
#
myBlock=myDataList[0]
myBlock.printIt()
myCat=myBlock.getObj('pdbx_seqtool_mapping_ref')
myCat.printIt()
            for iRow in range(0,myCat.getRowCount()):
myCat.setValue('some value', 'ref_mon_id',iRow)
myCat.setValue(100, 'ref_mon_num',iRow)
ofh = open("test-output-2.cif", "w")
pdbxW=PdbxWriter(ofh)
pdbxW.write(myDataList)
ofh.close()
#
except:
traceback.print_exc(file=self.lfh)
self.fail()
def testReadDataFile(self):
"""Test case - read data file
"""
self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
sys._getframe().f_code.co_name))
try:
#
myDataList=[]
ifh = open(self.pathPdbxDataFile, "r")
pRd=PdbxReader(ifh)
pRd.read(myDataList)
ifh.close()
except:
traceback.print_exc(file=self.lfh)
self.fail()
def testReadWriteDataFile(self):
"""Test case - data file read write test
"""
self.lfh.write("\nStarting %s %s\n" % (self.__class__.__name__,
sys._getframe().f_code.co_name))
try:
myDataList=[]
ifh = open(self.pathPdbxDataFile, "r")
pRd=PdbxReader(ifh)
pRd.read(myDataList)
ifh.close()
ofh = open(self.pathOutputFile, "w")
pWr=PdbxWriter(ofh)
pWr.write(myDataList)
ofh.close()
except:
traceback.print_exc(file=self.lfh)
self.fail()
def simpleSuite():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(PdbxReadWriteTests("testSimpleInitialization"))
suiteSelect.addTest(PdbxReadWriteTests("testUpdateDataFile"))
suiteSelect.addTest(PdbxReadWriteTests("testReadWriteDataFile"))
return suiteSelect
if __name__ == '__main__':
#
mySuite=simpleSuite()
unittest.TextTestRunner(verbosity=2).run(mySuite)
#
|
#!/usr/bin/env python
"""
:module: contextManager
:platform: None
:synopsis: This module contains classes related to module attributes
:plans:
"""
__author__ = "Andres Weber"
__email__ = "[email protected]"
__version__ = 1.0
#py import
import collections as _collections
import os as _os
import re as _re
# NOTE: this module also relies on mpc-internal helpers (_uri, _exceptions,
# _regexConstants and the Asset class) that the original source imports elsewhere.
#mpc import
import mpc.logging as _logging
#import ftrack
_log = _logging.getLogger()
class Services(object):
def findJobs(cls):
pass
class _AbstractContext(object):
""" Base context object.
The base context presents a service provider throughout the project
"""
# The type of child nodes from this object
__childType__ = None
# The levels of the context's hierarchy
# This provides the possible levels to the hierarchy
__levelHierarchy__ = ()
# Sets the level that this particular object represents within the hierarchy
# If set to None this object is not tied to a particular level
__levelIndex__ = None
########################################
# Implement class behaviour
#
def __init__(self, services=Services(), **contextDictIn):
        # services is exposed through a read-only property below, so only set the private attribute.
        self.__services = services
super(_AbstractContext, self).__init__()
@classmethod
def fromDict(cls, contextDict):
""" Alternative constructor, creating a new context object from a dict
containing the keys of the context levels.
For the facility (optional), job, scene and shot.
"""
# Copy contextDict and build the keyword args
kwargs = dict((key.encode('ascii'), val) for key, val in contextDict.items() if val is not None)
return (cls)(**kwargs)
def __iter__(self):
""" Return an iterable set of tuples, suitable for use with dict.
The is order is important and reflects the hierarchy of the context
"""
levels = [ (level, getattr(self, '_' + level, None)) for level in self.__levelHierarchy__ ]
return iter(levels)
def __eq__(self, obj):
""" Provides equality tests between facility objects
"""
return isinstance(obj, type(self)) and tuple(self) == tuple(obj)
def __ne__(self, obj):
""" Provides inequality tests between facility objects
"""
return not (self == obj)
def __cmp__(self, other):
""" Provides comparison between context objects
"""
return cmp(tuple(self), tuple(other))
def __repr__(self):
""" Returns a string representation of the context object
"""
keywords = []
# Get an iterator to walk over our state backwards
stateIter = reversed(list(self))
# Wind over the initial 'None' values
for level, value in stateIter:
if value is not None:
keywords = [ '%s=%r' % (level, str(value)) ]
break
# Continue by putting the results in the dict
# Note, any 'None' values mixed up in the hierarchy get included
keywords += ['%s=%r' % (level, str(value)) for level, value in stateIter]
# Undo the reverse we did to begin with
keywords.reverse()
# Build the repr string
return "%s(%s, services=%r)" % ( self.__class__.__name__, ", ".join(keywords), self.services )
def __str__(self):
""" The context's name
"""
return self.name or ''
def __hash__(self):
return hash(tuple(self))
# Properties
#
@property
def services(self):
""" Provides the services that are to be used in this context
"""
return self.__services
########################################
# Public interface
#
@property
def name(self):
""" The context's name
"""
return getattr(self, '_' + self.__levelHierarchy__[self.__levelIndex__], None)
def fullName(self):
""" A fullname for ths object, to be presented in application interfaces.
"""
# Override this if the desired fullName is different from the context class name
labels = []
for item in self:
labels.append("%s: %s" % (item[0], item[1]))
return ', '.join(labels)
@classmethod
def label(cls):
""" Label for this context
"""
return cls.__name__
def findChildren(self,
withReleases=True,
assetTypeFilter=None,
assetGroupFilter=None,
jobTypeFilter=None,
jobFilter=None):
""" Provides a list of the child contexts
"""
raise NotImplementedError # pragma: no cover
def hasChildren(self):
""" Provide a cheap way to determine whether this node has children
"""
return (self.__levelHierarchy__ is None
or self.__levelIndex__ < len(self.__levelHierarchy__) - 1)
class _AbstractJobContext(_AbstractContext): # pylint: disable=C0103
""" Base context object for job contexts
"""
# comment please
__levelHierarchy__ = ('facility', 'job', 'scene', 'shot')
# Cache the facility short names
_facilityShortNames = _collections.defaultdict(lambda: None)
def __init__(self,
job=None,
scene=None,
shot=None,
services=None,
facility=None):
super(_AbstractJobContext, self).__init__(services,
facility=facility,
job=job,
scene=scene,
shot=shot)
# Store context on the instance
self._job = job.name if type(job) == Job else job
self._scene = scene.name if type(scene) == Scene else scene
self._shot = shot.name if type(shot) == Shot else shot
# A default facility is used from config if none is given
if facility is None:
facilityShortName = self.__class__._facilityShortNames[self.services]
if facilityShortName is None:
facilityShortName = self.services.context.getConfig("facility")['shortName']
self.__class__._facilityShortNames[self.services] = facilityShortName
self._facility = facilityShortName
else:
self._facility = facility.name if type(facility) == Facility else facility
self._validateContextLevels()
def _validateContextLevels(self):
""" Validate all context levels, ensuring that appropriate ones are
valid strings or None.
"""
currentLevelIndex = self.__class__.__levelIndex__
# We check each level in the level hierarchy. For levels up to and including the
# current level, the name must not be None and must match the appropriate
# regular expression from the regexConstants module. E.g. for a Scene context,
# all of facility, job and scene must not be None and must be valid strings;
# moreover shot *has* to be None.
for levelIndex, levelName in enumerate(self.__class__.__levelHierarchy__):
levelValue = getattr(self, "_%s" % levelName)
if levelIndex <= currentLevelIndex:
# This is the case of levels before and including self's level
# All values should not be None and valid strings.
if levelValue is None:
raise _exceptions.ContextException(
"In %r, value for %r should not be None for %r contexts" % (
tuple(self), levelName, self.__class__.__name__,
)
)
self._validateContextLevel(levelName, levelValue)
else:
# This is the case of levels after self's level. All values
# must be None
if levelValue is not None:
raise _exceptions.ContextException(
"In %r, value %r must be None for %r contexts." % (
tuple(self), levelValue, self.__class__.__name__,
)
)
def _validateContextLevel(self, levelName, levelValue):
""" Validate the level string against the given level regex
"""
assert levelName in self.__class__.__levelHierarchy__, "Unknown level name %r given" % (levelName,)
levelRegexStr = _regexConstants.PATTERN_DICT["%s" % (levelName,)]
levelRegex = _re.compile("^%s$" % (levelRegexStr,))
if not levelRegex.match(levelValue):
raise _exceptions.ContextException(
"Context level %r with value %r does not match expected regex pattern %r for context %r." % (
levelName, levelValue, levelRegexStr, tuple(self)
),
)
def findChildren(self,
withReleases=True,
assetTypeFilter=None,
assetGroupFilter=None,
jobTypeFilter=None,
jobFilter=None):
""" Provides a list of the child contexts
"""
raise NotImplementedError # pragma: no cover
def _findChildContextNames(self, assetTypeFilter=None, assetGroupFilter=None):
""" Returns a set containing all of the child Contexts
"""
childRecords = self.services.asset.findChildContexts(
_uri.fromContext(self),
assetGroupFilter,
assetTypeFilter,
True)
contexts = (_uri.toContext(context, services=self.services) for context, _ in childRecords)
return set(contexts)
# Redefine the fromDict to give a more complete public docstring
@classmethod
def fromDict(cls, contextDict, services=None):
""" Alternative constructor, creating a new context object from a dict
containing the keys of the context levels.
Possible context levels include:
facility (optional), job, scene and shot.
"""
        # The base fromDict() only takes the dict, so fold the services in as a keyword.
        contextDict = dict(contextDict)
        if services is not None:
            contextDict['services'] = services
        return super(_AbstractJobContext, cls).fromDict(contextDict)
@property
def __defaultLevel(self): #pylint: disable=R0201
""" The default value for an unused part of the JobContext hierarchy
"""
return None
facility = job = scene = shot = __defaultLevel
class Shot(_AbstractJobContext):
    __childType__ = Asset
    __levelIndex__ = 3
    def __init__(self, job=None, scene=None, shot=None, services=None, facility=None):
        # The facility/job/scene/shot names below are read-only properties derived from
        # the base class state, so simply forward everything to the base constructor.
        super(Shot, self).__init__(job=job, scene=scene, shot=shot, services=services, facility=facility)
@property
def facility(self):
""" The facility associated with this context
"""
return Facility(services=self.services, facility=self._facility)
@property
def job(self):
""" The job associated with this context
"""
return Job(job=self._job, facility=self._facility, services=self.services)
@property
def scene(self):
""" The scene associated with this context
"""
return Scene(job=self._job, scene=self._scene, facility=self._facility, services=self.services)
@property
def shot(self):
""" The shot associated with this context
"""
return self
@classmethod
def validate(cls):
pass
def findChildren(self, withReleases=False, assetTypeFilter=None, assetGroupFilter=None, jobTypeFilter=None,
jobFilter=None):
""" Returns an empty list, as the are no children to a shot
"""
return list()
class Scene(_AbstractJobContext):
__childType__ = Shot
__levelIndex__ = 2
_nonSceneDirectories = _collections.defaultdict(lambda: None)
@property
def facility(self):
""" The facility associated with this context
"""
return Facility(services=self.services, facility=self._facility)
@property
def job(self):
""" The job associated with this context
"""
return Job(job=self._job, facility=self._facility, services=self.services)
@property
def scene(self):
""" The scene associated with this context
"""
return self
@classmethod
def validate(cls):
pass
def findChildren(self, withReleases=False, assetTypeFilter=None, assetGroupFilter=None, jobTypeFilter=None,
jobFilter=None):
""" Returns a list of the shots for a scene
Generally:
* withReleases=True will be used when performing 'gather' operations
* withReleases=False will be used when performing 'release' operations
"""
ctxUri = _uri.fromContext(self)
# The backend that needs to be queried to find the scenes is different depending on how
# what's being requested. We provide an interface to both through this mechanism since it
# is very common within asset-based applications.
if withReleases is True:
names = self._findChildContextNames(
assetTypeFilter=assetTypeFilter,
assetGroupFilter=assetGroupFilter)
else:
namesOnDisc = self._findChildContextNames()
names = set(namesOnDisc).union(
_uri.toContext(ctx, services=self.services) for ctx in
set(self.services.context.findShots(ctxUri))
)
return sorted(set(name.shot for name in names if name.shot is not None), key=lambda x: x.name)
findShots = findChildren
class Job(_AbstractJobContext):
__childType__ = Scene
__levelIndex__ = 1
    def __init__(self, job=None, services=None, facility=None):
        # As with Shot, the level attributes are exposed through read-only properties,
        # so forward construction to the base class.
        super(Job, self).__init__(job=job, services=services, facility=facility)
@property
def facility(self):
""" The facility associated with this context
"""
return Facility(services=self.services, facility=self._facility)
@property
def job(self):
""" The job associated with this context
"""
return self
@classmethod
def validate(cls):
pass
def findChildren(self, withReleases=False, assetTypeFilter=None, assetGroupFilter=None, jobTypeFilter=None,
jobFilter=None):
""" Returns a list of the scenes under a job.
Generally:
* withReleases=True will be used when performing 'gather' operations
* withReleases=False will be used when performing 'release' operations
"""
ctxUri = _uri.fromContext(self)
# The backend that needs to be queried to find the scenes is different depending on how
# what's being requested. We provide an interface to both through this mechanism since it
# is very common within asset-based applications.
if withReleases is True:
names = self._findChildContextNames(
assetTypeFilter=assetTypeFilter,
assetGroupFilter=assetGroupFilter)
else:
names = self._findChildContextNames()
names = set(names).union(
_uri.toContext(ctxUri, services=self.services)
for ctxUri in set(self.services.context.findScenes(ctxUri))
)
return sorted(set(name.scene for name in names if name.scene is not None), key=lambda x: x.name)
findScenes = findChildren
class Facility(_AbstractJobContext):
""" A Facility context object representing, for example, MPC.
Note: The facility should not be confused with the geographical sites in which a
facility might operate.
"""
__childType__ = Job
__levelIndex__ = 0
def __init__(self, services=None, facility=None):
super(Facility, self).__init__(services=services, facility=facility)
@property
def facility(self):
""" The facility associated with this context
"""
return self
def findChildren(self, withReleases=False, assetTypeFilter=None,
assetGroupFilter=None, jobTypeFilter=None, jobFilter=None):
""" Returns a list of the child contexts
Generally:
* withReleases=True will be used when performing 'gather' operations
* withReleases=False will be used when performing 'release' operations
"""
# The backend that needs to be queried to find the scenes is different depending on how
# what's being requested. We provide an interface to both through this mechanism since it
# is very common within asset-based applications.
ctxUri = _uri.fromContext(self)
if withReleases is True:
names = self.services.asset.findJobsWithReleases(jobTypeFilter, jobFilter)
else:
names = self.services.asset.findJobsWithReleases(jobTypeFilter, jobFilter)
names = set(names).union(set(self.services.context.findJobs(ctxUri, True, jobTypeFilter, jobFilter)))
res = set()
for name in names:
conUri = _uri.toContext(name, services=self.services)
if conUri.job is not None:
res.add(conUri.job)
return sorted(res, key=lambda x: x.name)
findJobs = findChildren
def contextFactory(contextIn, services=None):
""" Returns a context object from the given input.
Args:
contextIn (dict|context object): If using a dict, it must be a 'context' dict
Returns:
context object
"""
# We have been supplied a context, we can use this as it is
if isinstance(contextIn, _AbstractJobContext):
return contextIn
# Easier to work with the context as a dict
contextIn = dict(contextIn)
if contextIn.get('shot') is not None:
return Shot.fromDict(contextIn, services=services)
elif contextIn.get('scene') is not None:
return Scene.fromDict(contextIn, services=services)
elif contextIn.get('job') is not None and contextIn.get('job') != ".mpc":
return Job.fromDict(contextIn, services=services)
if contextIn.has_key("job"):
contextIn.pop("job", None)
return Facility.fromDict(contextIn, services=services)
def fromEnvironment(services=None):
""" Provide a context object from the environment.
Uses the environment variables: JOB, SCENE and SHOTNAME to create a new context
object.
Kwargs:
services (mpc.pyCore.services.ServiceProvider): The services to use
Returns:
context object
"""
jobEnv = _os.getenv('JOB') or None
sceneEnv = _os.getenv('SCENE') or None if jobEnv is not None else None
shotEnv = _os.getenv('SHOTNAME') or None if sceneEnv is not None else None
# Need to ensure that all context levels are valid. If shot context, then we have
# to have job and scene defined and valid.
return contextFactory({'job': jobEnv,
'scene': sceneEnv,
'shot': shotEnv},
services=services)
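# Illustrative usage (values are placeholders):
#   ctx = contextFactory({'job': 'myJob', 'scene': 'sc010', 'shot': 'sh010'})
#   ctx = fromEnvironment()   # builds a context from the JOB/SCENE/SHOTNAME env vars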
def validateContext(context):
""" Validates that a context object is valid
Arguments:
context: and instance of Facility, Job, Scene or Shot to be
validated
Returns:
True if the context exists, False otherwise
"""
if not isinstance(context, (Facility, Job, Scene, Shot)):
raise ValueError('Invalid argument type: %r, context object expected' % (type(context),))
contextUri = URIFactory.fromContext(context)
    services = context.services
valid = Services.validateContext(contextUri)
# Only cache valid contexts, since invalid ones can become valid at any time
if valid:
return _validContextsCache.setdefault(contextUri, valid)
return valid
class URIFactory(object):
    order = {'contextTypes': ['job', 'scene', 'shot', 'asset']}
@classmethod
def parse_uri(cls, uri):
pass
@classmethod
def build_uri(cls, context):
pass
@classmethod
def fromContext(cls, context):
pass
@classmethod
def toContext(cls, dict):
pass
@classmethod
def fromDict(cls, dict):
pass
@classmethod
def toDict(cls, context):
pass
@classmethod
def fromAsset(cls, context):
pass
@classmethod
def toAsset(cls, dict):
pass |
import time
import numpy
import json
import utils
import pypot.robot
import pypot.dynamixel
import motorTrajectoryManager
listOfTraj = [
[0.0029282637700648157, 532.49287882689293, 29.07849099701108, -1058.1470413492355, 459.3664329672057],
[169.9973127875532, 1.2118904739507608, -859.49525560910968, 109.93882674890278, 489.17556618589202],
[-0.0029282637700933502, -532.49287882689202, -29.078490997017791, 1058.1470413492527, -459.36643296722087],
[-169.99731278755326, -1.2118904739506096, 859.49525560910888, -109.93882674889758, -489.17556618590021]
]
listOfTorques = []
robot = pypot.robot.from_json("robot_config.json")
params = utils.Parameters(robot, 0, 50)
trajMan = motorTrajectoryManager.MotorTrajectoryManager()
trajMan.createMotorTrajectory(params.robot.motors[0], listOfTraj, listOfTorques, [5000, 5000, 5000, 5000], mode=2, repeat=False, delay=0.1)
trajMan.start()
while(True):
try:
trajMan.tick()
time.sleep(0.1)
except KeyboardInterrupt:
        exit() |
import sys
sys.path.append(".")
from ai.action.movement.movements.basic import *
import ai.actionplanner
speed_one = 0.6
def main(mars, times=6):
stretchingInit(mars)
for i in range(times):
stretching(mars)
#init(mars)
stretchingEnd(mars)
def stretchingInit(mars):
mars.setHeadAngle(1, 20, speed_one)
mars.setHeadAngle(2, 0, speed_one)
mars.setLegAngle(1, 1, 0, speed_one)
mars.setLegAngle(1, 2, -20, speed_one)
mars.setLegAngle(1, 3, 70, speed_one)
mars.setLegAngle(2, 1, 0, speed_one)
mars.setLegAngle(2, 2, -20, speed_one)
mars.setLegAngle(2, 3, 70, speed_one)
mars.setLegAngle(3, 1, 0, speed_one)
mars.setLegAngle(3, 2, 30, speed_one)
mars.setLegAngle(3, 3, -80, speed_one)
mars.setLegAngle(4, 1, 0, speed_one)
mars.setLegAngle(4, 2, 30, speed_one)
mars.setLegAngle(4, 3, -80, speed_one)
ai.actionplanner.ActionPlanner.sleep(1)
def full_stretching(mars):
    # Stretch forward
    # Straighten the left front leg
mars.setLegAngle(1, 2, 80, 0.3)
mars.setLegAngle(1, 3, -30, 0.3)
mars.setLegAngle(2, 2, 80, 0.3)
mars.setLegAngle(2, 3, -30, 0.3)
ai.actionplanner.ActionPlanner.sleep(1)
    # Straighten the right front leg
    # Press the front legs down, lift the rear legs, and raise the head at the same time
mars.setLegAngle(1, 2, 80, 0.5)
mars.setLegAngle(1, 3, 0, 0.5)
mars.setLegAngle(2, 2, 80, 0.5)
mars.setLegAngle(2, 3, 0, 0.5)
ai.actionplanner.ActionPlanner.sleep(4)
    # Lift the rear legs first
rand_speed_1 = get_rand_speed(0.2, 0.7)
mars.setLegAngle(3, 2, -60, rand_speed_1)
mars.setLegAngle(4, 2, -60, rand_speed_1)
get_rand_delay_time(0.3, 0.7)
mars.setLegAngle(3, 3, -30, rand_speed_1)
mars.setLegAngle(4, 3, -30, rand_speed_1)
get_rand_delay_time(0.3, 0.7)
    # Lift the rear legs further
rand_speed_2 = get_rand_speed(0.2, 0.7)
mars.setLegAngle(3, 2, -80, rand_speed_2)
mars.setLegAngle(3, 3, -30, rand_speed_2)
get_rand_delay_time(0.3, 0.7)
mars.setLegAngle(4, 2, -80, rand_speed_2)
mars.setLegAngle(4, 3, -30, rand_speed_2)
get_rand_delay_time(0.7, 1)
def stretching(mars):
speed_one = 0.3
speed_two = 0.3
    # Stretch forward
    # Straighten the left front leg, then the right front leg
mars.setLegAngle(1, 1, 20, speed_two)
mars.setLegAngle(1, 2, 80, speed_two)
mars.setLegAngle(1, 3, -30, speed_two)
mars.setLegAngle(2, 1, 20, speed_two)
mars.setLegAngle(2, 2, 80, speed_two)
mars.setLegAngle(2, 3, -30, speed_two)
ai.actionplanner.ActionPlanner.sleep(2)
    # Press the front legs down, lift the rear legs, and raise the head at the same time
mars.setLegAngle(1, 2, 70, speed_one)
mars.setLegAngle(1, 3, 30, speed_one)
mars.setLegAngle(2, 2, 70, speed_one)
mars.setLegAngle(2, 3, 30, speed_one)
ai.actionplanner.ActionPlanner.sleep(1)
mars.setLegAngle(3, 2, 30, speed_one)
mars.setLegAngle(3, 3, -30, speed_one)
mars.setLegAngle(4, 2, 30, speed_one)
mars.setLegAngle(4, 3, -30, speed_one)
    # Sway the head left and right, then return to normal
move_head_tail(mars, 4)
    # Stretch backward
    # Front legs stand, rear legs bend, head returns to normal
mars.setLegAngle(1, 1, 0, speed_two)
mars.setLegAngle(1, 2, -20, speed_two)
mars.setLegAngle(1, 3, 50, speed_two)
mars.setLegAngle(2, 1, 0, speed_two)
mars.setLegAngle(2, 2, -20, speed_two)
mars.setLegAngle(2, 3, 50, speed_two)
ai.actionplanner.ActionPlanner.sleep(0.8)
mars.setLegAngle(3, 2, 0, speed_two)
mars.setLegAngle(4, 2, 0, speed_two)
mars.setLegAngle(3, 3, -60, speed_two)
mars.setLegAngle(4, 3, -60, speed_two)
move_head_tail(mars, 2)
def stretchingEnd(mars):
mars.setHeadAngle(1, 20, speed_one)
mars.setHeadAngle(2, 0, speed_one)
mars.setLegAngle(1, 1, 0, speed_one)
mars.setLegAngle(1, 2, -20, speed_one)
mars.setLegAngle(1, 3, 70, speed_one)
mars.setLegAngle(2, 1, 0, speed_one)
mars.setLegAngle(2, 2, -20, speed_one)
mars.setLegAngle(2, 3, 70, speed_one)
mars.setLegAngle(3, 1, 0, speed_one)
mars.setLegAngle(3, 2, 30, speed_one)
mars.setLegAngle(3, 3, -80, speed_one)
mars.setLegAngle(4, 1, 0, speed_one)
mars.setLegAngle(4, 2, 30, speed_one)
mars.setLegAngle(4, 3, -80, speed_one)
ai.actionplanner.ActionPlanner.sleep(1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010-2011, Antons Rebguns.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns'
__license__ = 'BSD'
__maintainer__ = 'Antons Rebguns'
__email__ = '[email protected]'
import sys
import os
from optparse import OptionParser
import rospy
from dynamixel_controllers.srv import StartController
from dynamixel_controllers.srv import StopController
from dynamixel_controllers.srv import RestartController
parser = OptionParser()
def manage_controller(controller_name, port_namespace, controller_type, command, deps, start, stop, restart):
try:
controller = rospy.get_param(controller_name + '/controller')
package_path = controller['package']
module_name = controller['module']
class_name = controller['type']
except KeyError as ke:
rospy.logerr('[%s] configuration error: could not find controller parameters on parameter server' % controller_name)
sys.exit(1)
except Exception as e:
rospy.logerr('[%s]: %s' % (controller_name, e))
sys.exit(1)
if command.lower() == 'start':
try:
response = start(port_namespace, package_path, module_name, class_name, controller_name, deps)
if response.success: rospy.loginfo(response.reason)
else: rospy.logerr(response.reason)
except rospy.ServiceException as e:
rospy.logerr('Service call failed: %s' % e)
elif command.lower() == 'stop':
try:
response = stop(controller_name)
if response.success: rospy.loginfo(response.reason)
else: rospy.logerr(response.reason)
except rospy.ServiceException as e:
rospy.logerr('Service call failed: %s' % e)
elif command.lower() == 'restart':
try:
response = restart(port_namespace, package_path, module_name, class_name, controller_name, deps)
if response.success: rospy.loginfo(response.reason)
else: rospy.logerr(response.reason)
except rospy.ServiceException as e:
rospy.logerr('Service call failed: %s' % e)
else:
rospy.logerr('Invalid command.')
parser.print_help()
if __name__ == '__main__':
try:
rospy.init_node('controller_spawner', anonymous=True)
parser.add_option('-m', '--manager', metavar='MANAGER',
help='specified serial port is managed by MANAGER')
parser.add_option('-p', '--port', metavar='PORT',
help='motors of specified controllers are connected to PORT')
parser.add_option('-t', '--type', metavar='TYPE', default='simple', choices=('simple','meta'),
help='type of controller to be loaded (simple|meta) [default: %default]')
parser.add_option('-c', '--command', metavar='COMMAND', default='start', choices=('start','stop','restart'),
help='command to perform on specified controllers: start, stop or restart [default: %default]')
(options, args) = parser.parse_args(rospy.myargv()[1:])
if len(args) < 1:
parser.error('specify at least one controller name')
manager_namespace = options.manager
port_namespace = options.port
controller_type = options.type
command = options.command
joint_controllers = args
if controller_type == 'meta': port_namespace = 'meta'
start_service_name = '%s/%s/start_controller' % (manager_namespace, port_namespace)
stop_service_name = '%s/%s/stop_controller' % (manager_namespace, port_namespace)
restart_service_name = '%s/%s/restart_controller' % (manager_namespace, port_namespace)
parent_namespace = 'global' if rospy.get_namespace() == '/' else rospy.get_namespace()
rospy.loginfo('%s controller_spawner: waiting for controller_manager %s to startup in %s namespace...' % (port_namespace, manager_namespace, parent_namespace))
rospy.wait_for_service(start_service_name)
rospy.wait_for_service(stop_service_name)
rospy.wait_for_service(restart_service_name)
start_controller = rospy.ServiceProxy(start_service_name, StartController)
stop_controller = rospy.ServiceProxy(stop_service_name, StopController)
restart_controller = rospy.ServiceProxy(restart_service_name, RestartController)
rospy.loginfo('%s controller_spawner: All services are up, spawning controllers...' % port_namespace)
if controller_type == 'simple':
for controller_name in joint_controllers:
manage_controller(controller_name, port_namespace, controller_type, command, [], start_controller, stop_controller, restart_controller)
elif controller_type == 'meta':
controller_name = joint_controllers[0]
dependencies = joint_controllers[1:]
manage_controller(controller_name, port_namespace, controller_type, command, dependencies, start_controller, stop_controller, restart_controller)
except rospy.ROSInterruptException: pass
|
from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='Research into bias in Google Ads conducted for CS520 at Umass-Amherst',
author='Jenn Halbleib',
license='MIT',
)
|
# -*- coding: utf-8 -*-
"""
Facades for accessing the configuration data.
"""
import os
import csv
import yaml
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
class _FileReader(object):
"""
Offers methods to read the data files.
"""
# Singleton control object
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
def __path(self):
"""
Returns the path to the folder in which this class is contained.
        As this class is to be in the same folder as the data files, this will
be the data files folder.
:return: path to the data files folder.
"""
return os.path.dirname(__file__)
def read_csv_file(self, file_name):
"""
Parses a CSV file into a list.
:param file_name: name of the CSV file
:return: a list with the file's contents
"""
result = []
with open(os.path.join(self.__path(), os.path.basename(file_name)),
'rt') as csvfile:
headers_reader = csv.reader(csvfile, delimiter=',', quotechar='|')
for type_row in headers_reader:
for t in type_row:
result.append(t)
return result
def read_yaml_file(self, file_name):
"""
Parses a YAML file into a matrix.
:param file_name: name of the YAML file
:return: a matrix with the file's contents
"""
with open(os.path.join(self.__path(), os.path.basename(file_name)),
'rt') as yamlfile:
            return yaml.safe_load(yamlfile)  # safe_load: works with modern PyYAML, no arbitrary object construction
class CWRTables(object):
"""
Accesses the data inside CWR table files.
    This is used by the Lookup fields to know which values are valid.
The files are read only once, and then the data is stored to be returned
each time it is required.
"""
def __init__(self):
self._file_values = {}
# Reader for the files
self._reader = _FileReader()
def get_data(self, file_id):
"""
Acquires the data from the table identified by the id.
The file is read only once, consecutive calls to this method will
        return the same collection.
:param file_id: identifier for the table
:return: all the values from the table
"""
if file_id not in self._file_values:
file_contents = 'cwr_%s.csv' % file_id
self._file_values[file_id] = self._reader.read_csv_file(
file_contents)
return self._file_values[file_id]
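# Typical use (the table id below is illustrative):
#   CWRTables().get_data('record_types')
# reads 'cwr_record_types.csv' once and returns the cached values on later calls.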
|
""" Check whether a commit message contains the jira ticket in the message """
import re
JIRA_KEYS = 'VFTEST|VFG|VFA|VFS|VFW'
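# A message passes the check when it contains one of the project keys above
# followed by a dash and an issue number, e.g. "VFG-123: fix login flow".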
def check_message(message):
""" Check whether a message contains the Jira-specific header """
match_str = ".*({0})-[0-9]+".format(JIRA_KEYS)
match = re.match(match_str, message)
if not match:
        print("ERROR: Commit message is missing Jira issue number: %s" % message)
return match
def check_messages(messages):
""" Check whethe a list of messages contains the Jira-specific header """
errors = 0
for message in messages:
if not check_message(message):
errors += 1
return errors == 0
|
# decorator for registering the survey footprint loaders and projections
projection_register = {}
def register_projection(cls):
projection_register[cls.__name__] = cls
survey_register = {}
def register_survey(cls):
survey_register[cls.__name__] = cls
# [blatant copy from six to avoid dependency]
# python 2 and 3 compatible metaclasses
# see http://python-future.org/compatible_idioms.html#metaclasses
class Meta(type):
def __new__(meta, name, bases, class_dict):
cls = type.__new__(meta, name, bases, class_dict)
        # don't register classes derived directly from BaseProjection (abstract bases)
if BaseProjection not in bases:
register_projection(cls)
return cls
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
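# Submodule imports are done last so that the registries and Meta defined above
# already exist when the map/projection/survey modules load and register their
# classes.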
from .map import *
from .projection import *
from . import survey
|
# Generated by Django 2.1.5 on 2019-01-31 21:47
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
class Migration(migrations.Migration):
dependencies = [("question", "0002_auto_20190131_2055")]
operations = [
migrations.CreateModel(
name="QuestionTag",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"name",
models.CharField(max_length=100, unique=True, verbose_name="Name"),
),
(
"slug",
models.SlugField(max_length=100, unique=True, verbose_name="Slug"),
),
("deleted", models.UUIDField(blank=True, default=None, null=True)),
],
options={"abstract": False},
),
migrations.CreateModel(
name="TaggedQuestion",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
)
],
options={"abstract": False},
),
migrations.AlterField(
model_name="question",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="question.TaggedQuestion",
to="question.QuestionTag",
verbose_name="Tags",
),
),
migrations.AddField(
model_name="taggedquestion",
name="content_object",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to="question.Question"
),
),
migrations.AddField(
model_name="taggedquestion",
name="tag",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="question_taggedquestion_items",
to="question.QuestionTag",
),
),
]
|
from setuptools import setup, find_packages
def readme():
with open('README.rst') as f:
return f.read()
setup(name='dupfileremover',
version='0.2.2',
description='A command line utility that helps you find and remove duplicate files',
long_description=readme(),
url='http://github.com/kpeterstech/dupfileremover',
author='Kit Peterson',
author_email='[email protected]',
license='MIT',
packages=find_packages(exclude=['tests']),
install_requires=[
'Click',
'tqdm',
],
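      # console_scripts entry point: installing the package creates a
      # `dupfileremover` command that dispatches to dupfileremover.command_line:cli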
entry_points={
'console_scripts': ['dupfileremover=dupfileremover.command_line:cli']
})
|
"""Test parsing arguments.
Test target:
- :py:meth:`lmp.script.gen_txt.parse_args`.
"""
import lmp.infer
import lmp.script.gen_txt
from lmp.infer import Top1Infer, TopKInfer, TopPInfer
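# ``ckpt``, ``exp_name``, ``max_seq_len`` and ``seed`` are assumed to be pytest
# fixtures supplied elsewhere in the test suite (e.g. a shared conftest.py).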
def test_top_1_parse_results(ckpt: int, exp_name: str, max_seq_len: int, seed: int) -> None:
"""Must correctly parse all arguments for :py:class:`lmp.infer.Top1Infer`."""
txt = 'Hello world'
args = lmp.script.gen_txt.parse_args(
argv=[
Top1Infer.infer_name,
'--ckpt',
str(ckpt),
'--exp_name',
exp_name,
'--max_seq_len',
str(max_seq_len),
'--seed',
str(seed),
'--txt',
txt,
]
)
assert args.ckpt == ckpt
assert args.exp_name == exp_name
assert args.infer_name == Top1Infer.infer_name
assert args.max_seq_len == max_seq_len
assert args.seed == seed
assert args.txt == txt
def test_top_k_parse_results(ckpt: int, exp_name: str, max_seq_len: int, seed: int) -> None:
"""Must correctly parse all arguments for :py:class:`lmp.infer.TopKInfer`."""
k = 5
txt = 'Hello world'
args = lmp.script.gen_txt.parse_args(
argv=[
TopKInfer.infer_name,
'--ckpt',
str(ckpt),
'--exp_name',
exp_name,
'--k',
str(k),
'--max_seq_len',
str(max_seq_len),
'--seed',
str(seed),
'--txt',
txt,
]
)
assert args.ckpt == ckpt
assert args.exp_name == exp_name
assert args.infer_name == TopKInfer.infer_name
assert args.k == k
assert args.max_seq_len == max_seq_len
assert args.seed == seed
assert args.txt == txt
def test_top_p_parse_results(ckpt: int, exp_name: str, max_seq_len: int, seed: int) -> None:
"""Must correctly parse all arguments for :py:class:`lmp.infer.TopPInfer`."""
p = 0.9
txt = 'Hello world'
args = lmp.script.gen_txt.parse_args(
argv=[
TopPInfer.infer_name,
'--ckpt',
str(ckpt),
'--exp_name',
exp_name,
'--max_seq_len',
str(max_seq_len),
'--p',
str(p),
'--seed',
str(seed),
'--txt',
txt,
]
)
assert args.ckpt == ckpt
assert args.exp_name == exp_name
assert args.infer_name == TopPInfer.infer_name
assert args.max_seq_len == max_seq_len
assert args.p == p
assert args.seed == seed
assert args.txt == txt
|
from .se_gcn import SE_GCN
|
from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 1.1, s, t 3.1, s, t 4.1, s, q"
tags = "RotateBy, Reverse"
autotest = 0
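# testinfo/tags/autotest are metadata for the cocos test harness; when autotest
# is enabled, the SPACE key press below is triggered automatically.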
import pyglet
import cocos
from cocos.actions import *
from cocos.director import director
key = pyglet.window.key
class DebugLabel(cocos.text.Label):
def draw(self):
sprite = self.parent.sprite
self.element.text = "* (%.0f, %.0f) %.0f" % (sprite.position
+ (sprite.rotation,))
super(DebugLabel, self).draw()
class Logo(cocos.layer.Layer):
is_event_handler = True
def __init__(self):
super(Logo, self).__init__()
self.wx, self.wy = director.get_window_size()
self.sprite = cocos.sprite.Sprite('grossini.png')
self.sprite.position = self.wx//2, self.wy//2
self.label = DebugLabel()
self.add(self.label)
self.add(self.sprite)
self.schedule(lambda x: 0)
if autotest:
self.do(CallFunc(self.on_key_press, key.SPACE, 0))
def on_key_press(self, k, m):
if k == key.SPACE:
action = RotateBy(180, 1.0) + Delay(1.0)
self.sprite.do(action + Reverse(action))
def main():
print("press space to initiate action")
director.init(fullscreen=0, width=800, height=600)
scene = cocos.scene.Scene()
scene.add(Logo())
director.run(scene)
if __name__ == '__main__':
main()
|
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import os
current_dir = os.path.dirname(os.path.realpath(__file__))
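# The extension is built from the Cython wrapper plus the C implementation in
# ../libpngpack, and is linked against the system libpng ("png" in `libraries`).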
setup(name='pngpack-py',
version='1.0.0',
url='https://github.com/axiom-data-science/pngpack/',
author='Axiom Data Science',
author_email='[email protected]',
description="library to pack floating point values into 16-bit PNG images",
packages=[],
ext_modules=cythonize(
[
Extension("pngpack",
sources=["pngpack.pyx", "../libpngpack/pngpack.c"],
define_macros=[('_GNU_SOURCE', None)],
libraries=["png"],
),
],
compiler_directives={'language_level': '3'},
),
zip_safe=False,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
])
|