from torch.utils.data import Sampler

class HardNegativeSampler(Sampler):
    """Skeleton sampler meant to yield indices of hard negative examples."""
    def __init__(self, data_source):
        super().__init__(data_source)
        self.data_source = data_source
    def __len__(self):
        return len(self.data_source)
    def __iter__(self):
        # Placeholder ordering until a hard-negative mining strategy is plugged in.
        return iter(range(len(self.data_source)))
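
# Hedged usage sketch (the stand-in dataset below is an assumption for illustration only):
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    data = list(range(10))  # stand-in dataset
    loader = DataLoader(data, sampler=HardNegativeSampler(data), batch_size=4)
    print([batch.tolist() for batch in loader])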
|
import requests
def initdb():
groot = "http://localhost:8081/db"
# Create container
resp = requests.post(
groot,
auth=("root", "root"),
json={"@type": "Container", "id": "web", "title": "Guillotina CMS Site"},
)
assert resp.status_code in (200, 409)
# Install CMS package
resp = requests.post(
"{}/web/@addons".format(groot), auth=("root", "root"), json={"id": "cms"}
)
assert resp.status_code in (200, 412)
# Install DB users package
resp = requests.post(
"{}/web/@addons".format(groot), auth=("root", "root"), json={"id": "dbusers"}
)
assert resp.status_code in (200, 412)
# Create initial user
payload = {
"@type": "User",
"username": "admin",
"email": "[email protected]",
"password": "admin",
}
resp = requests.post("{}/web/users".format(groot), auth=("root", "root"), json=payload)
assert resp.status_code == 201
# Grant initial permissions to admin user
    payload = {
"roleperm": [
{
"setting": "AllowSingle",
"role": "guillotina.Anonymous",
"permission": "guillotina.ViewContent",
},
{
"setting": "AllowSingle",
"role": "guillotina.Anonymous",
"permission": "guillotina.AccessContent",
},
],
"prinrole": [
{"setting": "Allow", "role": "guillotina.Manager", "principal": "admin"},
{"setting": "Allow", "role": "guillotina.Owner", "principal": "admin"},
],
}
resp = requests.post("{}/web/@sharing".format(groot), auth=("root", "root"), json=payload)
assert(resp.status_code == 200)
def deletedb():
groot = "http://localhost:8081/db"
resp = requests.delete("{}/web".format(groot), auth=("root", "root"))
assert(resp.status_code == 200)
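# Hedged usage sketch (not part of the original script): bootstrap a local
# Guillotina instance when this module is executed directly.
if __name__ == "__main__":
    initdb()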
|
# plugin by @deleteduser420
# ported to telethon by @mrconfused (@sandy1709)
import os
from usercodex import codex
from usercodex.core.logger import logging
from ..Config import Config
from ..core.managers import edit_or_reply
from ..helpers import humanbytes, post_to_telegraph
from ..helpers.utils import _codutils, _format
plugin_category = "utils"
LOGS = logging.getLogger(__name__)
async def file_data(reply):
hmm = ""
if reply.file.name:
hmm += f"Name : {reply.file.name}<br>"
if reply.file.mime_type:
hmm += f"Mime type : {reply.file.mime_type}<br>"
if reply.file.size:
hmm += f"Size : {humanbytes(reply.file.size)}<br>"
if reply.date:
hmm += f"Date : {_format.yaml_format(reply.date)}<br>"
if reply.file.id:
hmm += f"Id : {reply.file.id}<br>"
if reply.file.ext:
hmm += f"Extension : '{reply.file.ext}'<br>"
if reply.file.emoji:
hmm += f"Emoji : {reply.file.emoji}<br>"
if reply.file.title:
hmm += f"Title : {reply.file.title}<br>"
if reply.file.performer:
hmm += f"Performer : {reply.file.performer}<br>"
if reply.file.duration:
hmm += f"Duration : {reply.file.duration} seconds<br>"
if reply.file.height:
hmm += f"Height : {reply.file.height}<br>"
if reply.file.width:
hmm += f"Width : {reply.file.width}<br>"
if reply.file.sticker_set:
hmm += f"Sticker set :\
\n {_format.yaml_format(reply.file.sticker_set)}<br>"
try:
if reply.media.document.thumbs:
hmm += f"Thumb :\
\n {_format.yaml_format(reply.media.document.thumbs[-1])}<br>"
except Exception as e:
LOGS.info(str(e))
return hmm
@codex.cod_cmd(
pattern="minfo$",
command=("minfo", plugin_category),
info={
"header": "To get media information.",
"description": "reply to media to get information about it",
"usage": "{tr}minfo",
},
)
async def mediainfo(event):
"Media information"
X_MEDIA = None
reply = await event.get_reply_message()
if not reply:
await edit_or_reply(event, "reply to media to get info")
return
if not reply.media:
await edit_or_reply(event, "reply to media to get info")
return
codevent = await edit_or_reply(event, "`Gathering ...`")
X_MEDIA = reply.file.mime_type
if (not X_MEDIA) or (X_MEDIA.startswith(("text"))):
return await codevent.edit("Reply To a supported Media Format")
hmm = await file_data(reply)
file_path = await reply.download_media(Config.TEMP_DIR)
out, err, ret, pid = await _codutils.runcmd(f"mediainfo '{file_path}'")
if not out:
out = "Not Supported"
body_text = f"""
<h2>JSON</h2>
<code>
{hmm}
</code>
<h2>DETAILS</h2>
<code>
{out}
</code>"""
link = await post_to_telegraph(f"{X_MEDIA}", body_text)
await codevent.edit(
f"ℹ️ <b>MEDIA INFO: <a href ='{link}' > {X_MEDIA}</a></b>",
parse_mode="HTML",
link_preview=True,
)
os.remove(file_path)
|
class Node:
    def __init__(self, key):
        self.left = self.right = None
        self.val = key

def preorder(root):
    if root:
        print(root.val)
        preorder(root.left)
        preorder(root.right)

root = Node(1)
root.left = Node(2)
root.right = Node(3)
preorder(root)
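# Expected output of the preorder traversal above: 1, 2, 3 (one value per line).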
|
# Implementation of Proxy-based deep Graph Metric Learning (ProxyGML) approach
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn import init
import numpy as np
class ProxyGML(nn.Module):
def __init__(self, opt, dim=512):
super(ProxyGML, self).__init__()
self.opt=opt
dim=self.opt.dim
self.C = opt.C
self.N = opt.N
self.Proxies = Parameter(torch.Tensor(dim, opt.C*opt.N))
self.instance_label = torch.tensor(np.repeat(np.arange(opt.C), opt.N)).to(self.opt.device)
        self.y_instance_onehot = self.to_one_hot(self.instance_label, n_dims=self.C).to(self.opt.device)
self.class_label = torch.tensor(np.repeat(np.arange(opt.C), 1)).to(self.opt.device)
init.kaiming_uniform_(self.Proxies, a=math.sqrt(5))
self.index = 0
print("#########")
print("GraphLoss trained on dataset {}, |weight_lambda is {}, N is {}, r is {}, |center lr is {}, rate is {}, epoch_to_decay is {}|".format(opt.dataset,opt.weight_lambda,opt.N,opt.r,opt.centerlr,opt.rate,opt.new_epoch_to_decay))
return
def to_one_hot(self, y, n_dims=None):
''' Take integer tensor y with n dims and convert it to 1-hot representation with n+1 dims. '''
y_tensor = y.type(torch.LongTensor).view(-1, 1)
n_dims = n_dims if n_dims is not None else int(torch.max(y_tensor)) + 1
y_one_hot = torch.zeros(y_tensor.size()[0], n_dims).scatter_(1, y_tensor, 1)
y_one_hot = y_one_hot.view(*y.shape, -1)
return y_one_hot
def scale_mask_softmax(self,tensor,mask,softmax_dim,scale=1.0):
#scale = 1.0 if self.opt.dataset != "online_products" else 20.0
scale_mask_exp_tensor = torch.exp(tensor* scale) * mask.detach()
scale_mask_softmax_tensor = scale_mask_exp_tensor / (1e-8 + torch.sum(scale_mask_exp_tensor, dim=softmax_dim)).unsqueeze(softmax_dim)
return scale_mask_softmax_tensor
def forward(self, input, target):
self.index += 1
centers = F.normalize(self.Proxies, p=2, dim=0)
#constructing directed similarity graph
similarity= input.matmul(centers)
#relation-guided sub-graph construction
positive_mask=torch.eq(target.view(-1,1).to(self.opt.device)-self.instance_label.view(1,-1),0.0).float().to(self.opt.device) #obtain positive mask
topk = math.ceil(self.opt.r * self.C * self.N)
_, indices = torch.topk(similarity + 1000 * positive_mask, topk, dim=1) # "1000*" aims to rank faster
mask = torch.zeros_like(similarity)
mask = mask.scatter(1, indices, 1)
        prob_a = mask * similarity
        # reverse label propagation (including classification process)
        logits = torch.matmul(prob_a, self.y_instance_onehot)
y_target_onehot = self.to_one_hot(target, n_dims=self.C).to(self.opt.device)
logits_mask=1-torch.eq(logits,0.0).float().to(self.opt.device)
predict=self.scale_mask_softmax(logits, logits_mask,1).to(self.opt.device)
# classification loss
lossClassify=torch.mean(torch.sum(-y_target_onehot* torch.log(predict + 1e-20),dim=1))
#regularization on proxies
if self.opt.weight_lambda > 0:
simCenter = centers.t().matmul(centers)
            centers_logits = torch.matmul(simCenter, self.y_instance_onehot)
reg=F.cross_entropy(centers_logits, self.instance_label)
return lossClassify+self.opt.weight_lambda*reg, lossClassify
else:
return lossClassify,torch.tensor(0.0).to(self.opt.device)
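
# Hedged usage sketch (illustrative only; the option values below are assumptions,
# not settings taken from the original training script):
if __name__ == "__main__":
    from types import SimpleNamespace
    opt = SimpleNamespace(dim=512, C=10, N=4, r=0.5, weight_lambda=0.3,
                          device="cpu", dataset="toy", centerlr=0.03, rate=0.1,
                          new_epoch_to_decay=[20, 40])
    criterion = ProxyGML(opt)
    embeddings = F.normalize(torch.randn(8, opt.dim), p=2, dim=1)
    labels = torch.randint(0, opt.C, (8,))
    loss, loss_classify = criterion(embeddings, labels)
    print(loss.item(), loss_classify.item())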
|
from getpass import getpass
from owl import app
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt(app)
def generate_pw_hash(password, file):
pw_hash = bcrypt.generate_password_hash(password).decode('utf-8')
with open(file, 'w') as pwfile:
pwfile.write(pw_hash)
if __name__ == '__main__':
with app.app_context():
file = input('Enter password file name (default: .pw): ')
if not file:
file = '.pw'
password = getpass('Enter new password: ')
confirm = getpass('Confirm password: ')
if password != confirm:
print('Abort! Password mismatch.')
exit()
generate_pw_hash(password, file)
print('Success! New password file created: {}'.format(file))
if file != '.pw':
            print('Don\'t forget to change the password file name in `config.py`.') |
import numpy as np
from collections import OrderedDict
from gym.spaces import Space
class DiscreteBox(Space):
"""
    A discrete action space, but each discrete action is parameterized by a [a, b]^d vector,
where d is a non-negative integer that can be different for each discrete action.
Each discrete action can be named, or if not provided, default numbering will be used.
Example:
>>> DiscreteBox(low=-1.0, high=1.0, dimensions=[2, 3, 0])
    DiscreteBox(OrderedDict([(0, 2), (1, 3), (2, 0)]))
>>> DiscreteBox(low=0.0, high=1.0,
dimensions={
"move": 2,
"grasp": 5, # 5 dof arm
})
DiscreteBox(OrderedDict([('move', 2), ('grasp', 5)]))
"""
def __init__(self, low=-1.0, high=1.0, dimensions=None, dtype=np.float32):
        assert dimensions is not None
assert isinstance(dimensions, (list, dict))
assert dtype is not None, 'dtype must be explicitly provided. '
self.dtype = dtype
if isinstance(dimensions, list):
self.dimensions = OrderedDict(enumerate(dimensions))
elif isinstance(dimensions, dict):
self.dimensions = OrderedDict(dimensions)
assert all(isinstance(d, int) and d >= 0 for d in self.dimensions.values()), 'dimensions must be non-negative integers'
self.discrete_keys = list(self.dimensions.keys())
self.low = self.dtype(low)
self.high = self.dtype(high)
self.num_discrete = len(self.dimensions)
self.num_continuous = sum(self.dimensions.values())
self.cumulative_dimension = np.cumsum(list(self.dimensions.values())).tolist()
@property
def shape(self):
return self.dimensions
def sample(self):
"""
Chooses a random discrete action uniformly and then sample a random vector from the corresponding dimension.
Returns a tuple of the action name and the vector.
Example:
>>> s = DiscreteBox(low=0.0, high=1.0, dimensions={"move": 2, "grasp": 1, "shutoff": 0})
>>> s.sample()
("move", array([0.1132, 0.8323], dtype=float32))
>>> s.sample()
("shutoff", None)
"""
key = np.random.choice(self.discrete_keys)
dim = self.dimensions[key]
if dim == 0:
return (key, None)
else:
sample = np.random.uniform(low=self.low, high=self.high, size=(dim,)).astype(self.dtype)
return (key, sample)
def contains(self, x):
if not (isinstance(x, (list, tuple)) and len(x) == 2):
return False
key = x[0]
sample = x[1]
if key not in self.dimensions:
return False
dim = self.dimensions[key]
if dim == 0:
return sample is None
sample = np.array(sample)
if sample.shape != (dim,):
return False
return np.all(sample >= self.low) and np.all(sample <= self.high)
def from_onehot(self, x):
"""
Convert sample from a onehot encoding representation.
Example:
>>> s = DiscreteBox(low=-1.0, high=1.0, dimensions=OrderedDict((("move", 2), ("grasp", 1), ("shutoff", 0))))
>>> s.from_onehot(np.array([
0.0, 1.0, 0.0, # one-hot encoding for the 3 discrete actions ["move", "grasp", "shutoff"]
-0.2, 1.0, # "move" has 2 dims
0.5, # "grasp" has 1 dims
# "shutoff" has 0 dims
]))
("grasp", array([0.5], dtype=float32))
"""
onehot = x[:self.num_discrete]
i = np.argmax(onehot)
key = self.discrete_keys[i]
dim = self.dimensions[key]
if dim == 0:
return (key, None)
sample = x[self.num_discrete + self.cumulative_dimension[i] - dim: self.num_discrete + self.cumulative_dimension[i]]
return (key, np.array(sample, self.dtype))
def to_jsonable(self, sample_n):
return [[sample[0], np.array(sample[1]).tolist() if sample[1] is not None else None] for sample in sample_n]
def from_jsonable(self, sample_n):
return [(sample[0], np.asarray(sample[1]) if sample[1] is not None else None) for sample in sample_n]
def __repr__(self):
return f"DiscreteBox({self.dimensions})"
def __eq__(self, other):
return isinstance(other, DiscreteBox) and dict(self.dimensions) == dict(other.dimensions) \
and np.isclose(self.low, other.low) and np.isclose(self.high, other.high) |
#import RPi.GPIO as GPIO
import time
import numpy
import keyboard
# import servo
# import UltraSonic
# import gps
import sys
from PyQt5.QtWidgets import *
from PyQt5 import uic, QtGui, QtCore
from PyQt5.QtCore import *
import get_phone_number as phone
import error
import example
#for gui
app = QApplication(sys.argv)
#Variables
send_count = 0
depth1 = 0
depth2 = 0
#Functions
# Load Cell function
# return True/False
# True : No inner water
# False : There is some water, process it first
def check_load_cell():
return False
# Classify the cup using TensorFlow Lite
# return True/False
# True : Plastic
# False : Paper
def tensor_flow():
return True
def point_add_gui() -> str:  # returns the phone number entered by the user
myWindow = phone.PhoneWindow()
myWindow.show()
app.exec_()
return myWindow.result_number
def do_empty_gui():
myWindow = error.ErrorWindow()
myWindow.show()
app.exec_()
# main
if __name__ == "__main__":
#servo.servo_init()
#UltraSonic.ultra_init()
#gps.gps_init()
while 1:
try:
time.sleep(2)
# send_count = send_count+1
# if send_count == 10:
# # update value of ultra and gps info,
# # and send info to server
# (d1, d2) = UltraSonic.get_distance()
#
# send_count = 0
#
if check_load_cell() == True :
do_empty_gui()
time.sleep(3)
continue
#
# classify_result = tensor_flow()
# if classify_result == True:
# servo.to_left()
#
# else :
# servo.to_right()
            # Accumulate points
phone_number = point_add_gui()
print("received phone number : ",phone_number)
except KeyboardInterrupt:
print("keyboard interrupt")
# servo.p.stop()
# servo.GPIO.cleanup()
break
|
from ...Model.SQLModelAbstract import SQLModelAbstract
class Model(SQLModelAbstract):
def __init__(
self,
table_name: str,
) -> None:
super(Model, self).__init__(
table_name=table_name,
)
def get_item(self, condition: dict) -> 'SQLModelAbstract':
pass
def get_items(self, condition: dict) -> list['SQLModelAbstract']:
pass
def first_or_create(self, condition: dict) -> 'SQLModelAbstract':
pass
def update_item(self, condition: dict, values: dict) -> 'SQLModelAbstract':
pass
def update_items(self, condition: dict, values: dict) -> list['SQLModelAbstract']:
pass
def upsert_item(self, condition: dict, values: dict) -> 'SQLModelAbstract':
pass
def upsert_items(self, condition: dict, values: dict) -> list['SQLModelAbstract']:
pass
def execute_raw(self, query: str) -> object:
pass
|
from dcf_test_app.models import Brand, Product
from django.contrib.auth.models import User
from django.test import TestCase
from rest_framework.test import APIClient
from django_client_framework import permissions as p
class TestPatchPerms(TestCase):
def setUp(self) -> None:
self.user = User.objects.create_user(username="testuser")
self.user_client = APIClient()
self.user_client.force_authenticate(self.user)
self.br1 = Brand.objects.create(name="br1")
self.br2 = Brand.objects.create(name="br2")
self.pr1 = Product.objects.create(barcode="pr1", brand=self.br1)
self.pr2 = Product.objects.create(barcode="pr2", brand=self.br2)
def test_patch_no_permission(self) -> None:
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.assertEqual(404, resp.status_code)
def test_patch_incorrect_permission(self) -> None:
p.add_perms_shortcut(self.user, Product, "rcd")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.assertEqual(403, resp.status_code)
def test_patch_only_parent_permission(self) -> None:
p.add_perms_shortcut(self.user, Product, "w")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.assertEqual(404, resp.status_code)
def test_patch_parent_but_incorrect_related_perms(self) -> None:
p.add_perms_shortcut(self.user, Product, "w")
p.add_perms_shortcut(self.user, Brand, "rcd")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.assertEqual(403, resp.status_code)
def test_correct_patch_perms_no_read(self) -> None:
p.add_perms_shortcut(self.user, Product, "w")
p.add_perms_shortcut(self.user, Brand, "w")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.pr1.refresh_from_db()
self.assertEqual(self.br2, self.pr1.brand)
self.assertEqual(resp.status_code, 200)
self.assertDictContainsSubset(
{
"message": "Action was successful but you have no permission to view the result."
},
resp.json(),
)
def test_correct_patch_perms_no_read_v2(self) -> None:
p.add_perms_shortcut(self.user, Product, "w")
p.add_perms_shortcut(self.user, Brand, "wr")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.pr1.refresh_from_db()
self.assertEqual(self.br2, self.pr1.brand)
self.assertEqual(resp.status_code, 200)
self.assertDictContainsSubset(
{
"message": "Action was successful but you have no permission to view the result."
},
resp.json(),
)
def test_correct_patch_perms_can_read(self) -> None:
p.add_perms_shortcut(self.user, Brand, "rw")
p.add_perms_shortcut(self.user, Product, "rw")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.pr1.refresh_from_db()
self.assertEqual(self.br2, self.pr1.brand)
self.assertDictContainsSubset({"id": str(self.br2.id)}, resp.json())
def test_correct_patch_perms_can_read_v2(self) -> None:
p.add_perms_shortcut(self.user, self.pr1, "rw")
p.add_perms_shortcut(self.user, self.br1, "w")
p.add_perms_shortcut(self.user, self.br2, "rw")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.pr1.refresh_from_db()
self.assertEqual(self.br2, self.pr1.brand)
self.assertDictContainsSubset({"id": str(self.br2.id)}, resp.json())
def test_correct_patch_perms_can_read_v3(self) -> None:
p.add_perms_shortcut(self.user, self.pr1, "rw", field_name="brand")
p.add_perms_shortcut(self.user, self.br1, "w", field_name="products")
p.add_perms_shortcut(self.user, self.br2, "r")
p.add_perms_shortcut(self.user, self.br2, "w", field_name="products")
resp = self.user_client.patch(
f"/product/{self.pr1.id}/brand",
data=self.br2.id,
format="json",
)
self.pr1.refresh_from_db()
self.assertEqual(self.br2, self.pr1.brand)
self.assertDictContainsSubset(
{"id": str(self.br2.id)}, resp.json(), resp.content
)
def test_assign_from_null(self) -> None:
"""PATCH Product.brand from None to existing"""
product = Product.objects.create()
p.add_perms_shortcut(self.user, product, "r")
p.add_perms_shortcut(self.user, product, "w", field_name="brand")
brand = Brand.objects.create(name="new branch")
p.add_perms_shortcut(self.user, brand, "r")
p.add_perms_shortcut(self.user, brand, "w", field_name="products")
resp = self.user_client.patch(
f"/product/{product.id}/brand",
data=brand.id,
format="json",
)
product.refresh_from_db()
self.assertEqual(product.brand, brand, resp.content)
|
#!/usr/bin/env python
from pathlib import Path
from argparse import ArgumentParser, Namespace, ArgumentDefaultsHelpFormatter
from chris_plugin import chris_plugin
import subprocess as sp
Gstr_title = r"""
_ _ _____ _ _
| | | | / __ \ (_|_)
_ __ | |______ __| | ___ _ __ ___ `' / /'_ __ _ ___ __
| '_ \| |______/ _` |/ __| '_ ` _ \ / / | '_ \| | \ \/ /
| |_) | | | (_| | (__| | | | | |./ /__| | | | | |> <
| .__/|_| \__,_|\___|_| |_| |_|\_____/_| |_|_|_/_/\_\
| |
|_| DICOM to NIFTI converter
"""
parser = ArgumentParser(description='ChRIS ds plugin wrapper around dcm2niix. '
'Converts a directory of DICOM files to NIFTI.',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-b', default='y', choices=('y', 'n', 'o'),
help='BIDS sidecar')
parser.add_argument('-d', default=5, choices=range(10), type=int,
help='directory search depth. Convert DICOMs in sub-folders of in_folder?')
parser.add_argument('-f', default='%f_%p_%t_%s', type=str,
help='filename (%%a=antenna (coil) name, %%b=basename, %%c=comments, %%d=description, '
'%%e=echo number, %%f=folder name, %%g=accession number, %%i=ID of patient, '
'%%j=seriesInstanceUID, %%k=studyInstanceUID, %%m=manufacturer, %%n=name of patient, '
'%%o=mediaObjectInstanceUID, %%p=protocol, %%r=instance number, %%s=series number, '
'%%t=time, %%u=acquisition number, %%v=vendor, %%x=study ID; %%z=sequence name;)')
parser.add_argument('-m', default='2', choices=('n', 'y', '0', '1', '2'),
help='merge 2D slices from same series regardless of echo, exposure, etc. [no, yes, auto]')
parser.add_argument('-v', default='0', choices=('n', 'y', '0', '1', '2'),
help='verbose [no, yes, logorrheic]')
parser.add_argument('-x', default='n', choices=('y', 'n', 'i'),
help='crop 3D acquisitions')
parser.add_argument('-z', default='n', choices=('y', 'o', 'i', 'n', '3'),
help='gz compress images [y=pigz, o=optimal pigz, i=internal:miniz, n=no, 3=no,3D]')
@chris_plugin(
parser=parser,
title='dcm2niix',
category='MRI Processing',
min_memory_limit='100Mi',
min_cpu_limit='1000m',
)
def main(options: Namespace, inputdir: Path, outputdir: Path):
cmd = (
'dcm2niix',
'-b', options.b,
'-d', str(options.d),
'-f', options.f,
'-m', options.m,
'-v', options.v,
'-x', options.x,
'-z', options.z,
'-o', outputdir, inputdir
)
print(Gstr_title)
print(f'Command: {" ".join(map(str, cmd))}')
sp.run(cmd, check=True)
if __name__ == '__main__':
main()
|
import pandas as pd
def load_stream(mongo, activity_id, stream_type):
index_stream = mongo.db.streams.find_one({'activity_id': activity_id, 'type': 'time'})
data_stream = mongo.db.streams.find_one({'activity_id': activity_id, 'type': stream_type})
return pd.DataFrame({stream_type: data_stream['data']},
index=index_stream['data']).groupby(level=0).last() \
if (index_stream and data_stream and len(data_stream['data']) > 0) \
else None
|
#!/usr/bin/env python3
import pandas as pd
import sys
arguments = sys.argv
if len(arguments) == 1:
sys.exit('no excel file given')
elif len(arguments) > 2:
    sys.exit(f'too many files given: {arguments[1:]}')
file = pd.read_excel(sys.argv[1])
file.to_csv(sys.stdout, sep = '\t')
|
# Copyright (c) 2019 Adam Dodd
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import sqlite3
from source import SourceServer
class SurfMap(object):
"""Class for Valve GoldSrc/Source surf maps"""
def __init__(self, name, tier = -1, rating = -1, stages = -1, bonus = -1,
complete = False, favourite = False):
self._name = name.lower()
self.tier = tier
self.rating = rating
self.stages = stages
self.bonus = bonus
self.complete = bool(complete)
self.favourite = bool(favourite)
def __str__(self):
return ("SurfMap:\n-> name : \"%s\"\n-> tier : %d\n" + \
"-> rating : %d\n-> stages : %d/%d\n-> comp : %s\n" + \
"-> fave : %s") % (self._name, self.tier, self.rating,
self.stages, self.bonus, self.complete, self.favourite)
def __repr__(self):
return str(self)
def get(self):
"""Look up map by name"""
conn = sqlite3.connect("surf.db")
conn.isolation_level = None
cur = conn.cursor()
cur.execute("SELECT `tier`, `rating`, `stages`, `bonus`, " \
"`complete`, `favourite` FROM maps WHERE `name`=?",
(self._name,))
row = cur.fetchone()
if row == None:
ret = False
else:
self.tier = int(row[0])
self.rating = int(row[1])
self.stages = int(row[2])
self.bonus = int(row[3])
self.complete = bool(row[4])
self.favourite = bool(row[5])
ret = True
cur.close()
conn.close()
return ret
def insert(self):
"""Insert map by name"""
conn = sqlite3.connect("surf.db")
conn.isolation_level = None
cur = conn.cursor()
cur.execute("INSERT INTO maps (`name`, `tier`, `rating`, `stages`, " \
"`bonus`, `complete`, `favourite`) VALUES " \
"(?, ?, ?, ?, ?, ?, ?)", (self._name, self.tier,
self.rating, self.stages, self.bonus, self.complete,
self.favourite))
cur.close()
conn.close()
def update(self):
"""Update map by name"""
conn = sqlite3.connect("surf.db")
conn.isolation_level = None
cur = conn.cursor()
cur.execute("UPDATE maps SET `tier`=?, `rating`=?, `stages`=?, " \
"`bonus`=?, `complete`=?, `favourite`=? WHERE `name`=?",
(self.tier, self.rating, self.stages, self.bonus,
self.complete, self.favourite, self._name))
cur.close()
conn.close()
def delete(self, confirm = True):
"""Delete map by name"""
conn = sqlite3.connect("surf.db")
conn.isolation_level = None
cur = conn.cursor()
if confirm:
sys.stdout.write("Are you sure you want to delete map \"%s\" (y/n)? "
% (self._name,))
sys.stdout.flush()
if sys.stdin.read(1).lower() == 'y':
cur.execute("DELETE FROM maps WHERE `name`=?", (self._name,))
print("Map \"%s\" deleted" % (self._name,))
else:
print("Delete aborted")
else:
cur.execute("DELETE FROM maps WHERE `name`=?", (self._name,))
cur.close()
conn.close()
class SurfDb(object):
"""Static database class for surf servers and maps"""
lastServers = None
@staticmethod
def join(sID):
if SurfDb.lastServers == None:
SurfDb.lastServers = SurfDb.getServers()
SurfDb.lastServers[sID].join()
@staticmethod
def getServers():
conn = sqlite3.connect("surf.db")
conn.isolation_level = None
cur = conn.cursor()
cur.execute("SELECT `id`, `name`, `address`, `port` FROM servers")
ret = list()
for row in cur:
ret.append(SourceServer(row[0], row[1], row[2], row[3]))
cur.close()
conn.close()
SurfDb.lastServers = ret
return ret
@staticmethod
def getMaps():
conn = sqlite3.connect("surf.db")
conn.isolation_level = None
cur = conn.cursor()
cur.execute("SELECT `name`, `tier`, `rating`, `stages`, `bonus`, " \
"`complete`, `favourite` FROM maps")
ret = list()
for row in cur:
ret.append(SurfMap(row[0], row[1], row[2], row[3], row[4], row[5],
row[6]))
cur.close()
conn.close()
return ret
@staticmethod
def prettyPrint():
maps = SurfDb.getMaps()
servers = SurfDb.getServers()
SourceServer.pingAll(servers)
first = True
maxIDLen = 0
maxNickLen = 0
maxMapLen = 10
for i, server in enumerate(servers):
nickLen = len(server.nick)
if nickLen > maxNickLen:
maxNickLen = nickLen
idLen = len(str(i))
if idLen > maxIDLen:
maxIDLen = idLen
if server._online:
mapLen = len(server._map)
if mapLen > maxMapLen:
maxMapLen = mapLen
print("=" * (maxNickLen + 7 + maxMapLen + 7) \
+ "==============================================")
print(" ID | Server" + " " * (maxNickLen + 1) + "| Game | Map" + " " * \
(maxMapLen + 4) + "| Tier | Rating | Comp | Ping")
for i, server in enumerate(servers):
if first:
print("====|" + "=" * (maxNickLen + 8) + "|======|" + "=" \
* (maxMapLen + 8) + "|======|========|=======|======")
first = False
else:
print("----|" + "-" * (maxNickLen + 8) + "|------|" + "-" \
* (maxMapLen + 8) + "|------|--------|-------|------")
if server._online:
# Find map the server is on!!
thisMap = None
for map_ in maps:
if map_._name == server._map:
thisMap = map_
break
if thisMap == None:
outStr = "{:>3d} | {:<" + str(maxNickLen) + "s} {:>2d}/{:<2d} " \
"| {:>4d} | {:<" + str(maxMapLen) + "s} -/- | - " \
"| - | - | {:>4}"
print(outStr.format(i, server.nick, server._players,
server._max_players, server._gameID,
server._map, server._latency))
else:
outStr = "{:>3d} | {:<" + str(maxNickLen) + "s} {:>2d}/{:<2d} " \
"| {:>4d} | {:<" + str(maxMapLen) + "s} {:>2d}/{:<2d}" \
" | {:>4d} | {:>6d} | {:<5s} | {:>4}"
print(outStr.format(i, server.nick, server._players,
server._max_players, server._gameID,
server._map, thisMap.stages, thisMap.bonus,
thisMap.tier, thisMap.rating,
str(thisMap.complete), server._latency))
else:
outStr = "{:>3d} | {:<" + str(maxNickLen + 6) + "s} | | {:<" \
+ str(maxMapLen + 6) + "s} | | | |"
print(outStr.format(i, server.nick, "(offline!)"))
print("=" * (maxNickLen + 7 + maxMapLen + 7) \
+ "==============================================")
@staticmethod
def pp():
SurfDb.prettyPrint()
@staticmethod
def monitor(delay = 30):
while True:
SurfDb.prettyPrint()
time.sleep(delay)
@staticmethod
def mon(delay = 30):
SurfDb.monitor(delay)
@staticmethod
def getNextServerID():
"""Get next free server ID"""
conn = sqlite3.connect("surf.db")
conn.isolation_level = None
cur = conn.cursor()
cur.execute("SELECT MAX(`id`) + 1 FROM servers")
row = cur.fetchone()
if row == None:
print("Couldn't get next free ID!")
ret = -1
else:
ret = int(row[0])
cur.close()
conn.close()
return ret
@staticmethod
def getServer(sID):
SurfDb.lastServers = SurfDb.getServers()
return SurfDb.lastServers[sID]
@staticmethod
def getMap(name):
m = SurfMap(name)
if not m.get():
print("Map not found; returning defaults")
return m
assert sys.version_info[0] >= 3
|
from twilio.rest import TwilioRestClient
TWILIO_ACCOUNT_SID = ''
TWILIO_AUTH_TOKEN = ''
TO_NUMBER = '' # Your verified phone number
FROM_NUMBER = '' # Your Twilio phone number
TWIML_URL = 'http://twimlets.com/message?Message=Hello+World'
client = TwilioRestClient(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
client.calls.create(to=TO_NUMBER, from_=FROM_NUMBER, url=TWIML_URL)
|
# Copyright (c) 2018, Toby Slight. All rights reserved.
# ISC License (ISCL) - see LICENSE file for details.
import curses
from .actions import Actions
from os import environ
environ.setdefault('ESCDELAY', '12') # otherwise it takes an age!
class Keys(Actions):
def getpadkeys(self):
self.screen.refresh()
self.pad.refresh(self.pos, 0, 0, 0, self.y - 2, self.x - 1)
while True:
key = self.screen.getch()
keys = {
27: self.quit,
curses.KEY_DOWN: self.pad_dn,
curses.KEY_UP: self.pad_up,
                curses.KEY_NPAGE: self.pad_pgdn,
                curses.KEY_PPAGE: self.pad_pgup,
curses.KEY_RESIZE: self.resize,
ord('q'): self.quit,
ord('j'): self.pad_dn,
ord('k'): self.pad_up,
ord('f'): self.pad_pgdn,
ord('b'): self.pad_pgup,
}
try:
if keys[key]():
break
except KeyError:
pass
self.pad.refresh(self.pos, 0, 0, 0, self.y - 2, self.x - 1)
self.screen.erase()
self.screen.refresh()
def parse_curline(self, action):
line = 0
for child, depth in self.traverse():
if depth == 0:
continue
if line == self.curline:
{
'expand': lambda: child.expand(self),
'expand_all': lambda: child.expand_all(self),
'toggle_expand': child.toggle_expand,
'collapse': child.collapse,
'collapse_all': lambda: child.collapse_all(self, depth),
'toggle_pick': lambda: child.pick(self),
'nextparent': lambda: child.nextparent(self, depth),
'prevparent': lambda: child.prevparent(self, depth),
'getsize': lambda: child.getsize(self),
}[action]()
break
line += 1
def getkeys(self):
while True:
self.drawtree()
key = self.screen.getch()
keys = {
27: self.quit,
curses.KEY_F1: self.mkkeypad,
curses.KEY_F2: self.mkpickpad,
curses.KEY_F5: self.reset_all,
curses.KEY_F4: self.reset_picked,
curses.KEY_UP: self.up,
curses.KEY_DOWN: self.dn,
curses.KEY_PPAGE: self.pgup,
curses.KEY_NPAGE: self.pgdn,
curses.KEY_LEFT: lambda: self.parse_curline('collapse'),
curses.KEY_RIGHT: lambda: self.parse_curline('expand'),
curses.KEY_SRIGHT: lambda: self.parse_curline('expand_all'),
curses.KEY_SLEFT: lambda: self.parse_curline('collapse_all'),
curses.KEY_HOME: self.top,
curses.KEY_END: self.bottom,
curses.KEY_RESIZE: self.resize,
ord('q'): self.quit,
ord('?'): self.mkkeypad,
ord('p'): self.mkpickpad,
ord('R'): self.reset_all,
ord('r'): self.reset_picked,
ord('j'): self.dn,
ord('k'): self.up,
ord('b'): self.pgup,
ord('f'): self.pgdn,
ord('l'): lambda: self.parse_curline('expand'),
ord('h'): lambda: self.parse_curline('collapse'),
ord('L'): lambda: self.parse_curline('expand_all'),
ord('H'): lambda: self.parse_curline('collapse_all'),
ord('g'): self.top,
ord('G'): self.bottom,
ord('\t'): lambda: self.parse_curline('toggle_expand'),
ord('\n'): lambda: self.parse_curline('toggle_expand'),
ord(' '): lambda: self.parse_curline('toggle_pick'),
ord('J'): lambda: self.parse_curline('nextparent'),
ord('K'): lambda: self.parse_curline('prevparent'),
ord('s'): lambda: self.parse_curline('getsize'),
ord('S'): self.getsizeall,
ord('.'): self.toggle_hidden,
ord('/'): self.find,
ord('n'): self.findnext,
ord('N'): self.findprev,
ord('v'): self.pickall,
ord(':'): self.pickglobs,
}
try:
if keys[key]():
return self.picked
except KeyError:
pass
self.curline %= self.line
|
"""
This module handles loading data from csv files and set data to objects.
"""
import csv
import ast
from django.db import models
from django.db.models.loading import get_model
from django.conf import settings
from evennia.utils import create, utils, search, logger
DATA_INFO_CATEGORY = "data_info"
def import_csv(file_name, app_name, model_name):
"""
Import data from a csv file to the db model
Args:
file_name: (string) CSV file's name.
app_name: (string) Db app's name.
model_name: (string) Db model's name.
"""
try:
# load file
csvfile = open(file_name, 'r')
reader = csv.reader(csvfile)
# get model
model_obj = get_model(app_name, model_name)
# clear old data
model_obj.objects.all().delete()
# read title
title = reader.next()
# get field types
"""
type = 0 means common field
type = 1 means ForeignKey field
type = 2 means ManyToManyField field, not support
type = -1 means field does not exist
"""
types = []
related_fields = []
for field_name in title:
type = -1
related_field = 0
try:
# get field info
field = model_obj._meta.get_field(field_name)
if isinstance(field, models.ForeignKey):
type = 1
related_field = field.related_field
elif isinstance(field, models.ManyToManyField):
type = 2
else:
type = 0
except Exception, e:
logger.log_errmsg("Field error: %s" % e)
pass
types.append(type)
related_fields.append(related_field)
# import values
# read next line
values = reader.next()
while values:
try:
record = {}
for item in zip(title, types, values, related_fields):
field_name = item[0]
type = item[1]
value = item[2]
related_field = item[3]
# set field values
if type == 0:
record[field_name] = value
elif type == 1:
arg = {}
arg[related_field.name] = value
record[field_name] = related_field.model.objects.get(**arg)
# create new record
data = model_obj.objects.create(**record)
data.save()
except Exception, e:
logger.log_errmsg("Can't load data: %s" % e)
# read next line
values = reader.next()
except StopIteration:
# reach the end of file, pass this exception
pass
################################################################
#
# These methods set data to an object
#
################################################################
def set_obj_data_info(obj, app, model, key):
"""
Set data_info's database. It saves info to attributes of data_info category, then load these data.
Args:
obj: Object in game.
app: (string) Db app's name.
model: (string) Db model's name.
key: (string) Key of the data info.
"""
obj.attributes.add("app", app, category=DATA_INFO_CATEGORY, strattr=True)
obj.attributes.add("model", model, category=DATA_INFO_CATEGORY, strattr=True)
obj.attributes.add("key", key, category=DATA_INFO_CATEGORY, strattr=True)
if (not app) or (not model) or (not key):
return True
return load_data(obj)
def load_data(obj):
"""
    Load object data from db, and set them to the obj.
Args:
obj: Object in game.
"""
if not obj:
return
# Get app, model and key names.
app = obj.attributes.get(key="app", category=DATA_INFO_CATEGORY, strattr=True)
if not app:
return False
model = obj.attributes.get(key="model", category=DATA_INFO_CATEGORY, strattr=True)
if not model:
return False
key = obj.attributes.get(key="key", category=DATA_INFO_CATEGORY, strattr=True)
if not key:
return False
# Get db model
model_obj = get_model(app, model)
if not model_obj:
logger.log_errmsg("%s can not open model %s" % (key, model))
return False
# Get data record.
data_info = model_obj.objects.filter(key=key)
if not data_info:
logger.log_errmsg("%s can not find key %s" % (key, key))
return False
info = data_info[0]
if info.typeclass:
set_obj_typeclass(obj, info.typeclass)
if info.name:
set_obj_name(obj, info.name)
if info.alias:
set_obj_alias(obj, info.alias)
if info.location:
set_obj_location(obj, info.location)
if info.home:
set_obj_home(obj, info.home)
if info.desc:
set_obj_desc(obj, info.desc)
if info.lock:
set_obj_lock(obj, info.lock)
if info.destination:
set_obj_destination(obj, info.destination)
# Set attributes.
attributes = {}
if info.attributes:
try:
            # info.attributes: (string) Attributes in the form of a python dict, such as: "{'age':'22', 'career':'warrior'}"
# Convert string to dict
attributes = ast.literal_eval(info.attributes)
except Exception, e:
logger.log_errmsg("%s can't load attributes %s: %s" % (get_info_key(obj), info.attributes, e))
# Add other fields to attributes.
known_fields = {"key",
"name",
"alias",
"typeclass",
"location",
"home",
"desc",
"lock",
"destination",
"attributes"}
for field in model_obj._meta.fields:
if not field.name in known_fields:
attributes[field.name] = info.serializable_value(field.name)
set_obj_attributes(obj, attributes)
return True
def set_obj_typeclass(obj, typeclass):
"""
Set object's typeclass.
Args:
obj: Object in game.
typeclass: (string) Typeclass's name.
"""
if not obj:
return
if not typeclass:
typeclass = settings.BASE_OBJECT_TYPECLASS
if obj.is_typeclass(typeclass, exact=True):
# No change.
return
if not hasattr(obj, 'swap_typeclass'):
logger.log_errmsg("%s cannot have a type at all!" % get_info_key(obj))
return
obj.swap_typeclass(typeclass, clean_attributes=False)
def set_obj_name(obj, name):
"""
Set object's name.
Args:
obj: Object in game.
name: (string) Name of the object.
"""
if name == obj.name:
# No change.
return
obj.name = name
# we need to trigger this here, since this will force
# (default) Exits to rebuild their Exit commands with the new
# aliases
#obj.at_cmdset_get(force_init=True)
if obj.destination:
obj.flush_from_cache()
def set_obj_alias(obj, aliases):
"""
Set object's alias.
Args:
obj: Object in game.
aliases: (string) Aliases of the object.
"""
# merge the old and new aliases (if any)
new_aliases = [alias.strip().lower() for alias in aliases.split(';')
if alias.strip()]
set_new_aliases = set(new_aliases)
set_current_aliases = set(obj.aliases.all())
if set_new_aliases == set_current_aliases:
# No change.
return
obj.aliases.clear()
obj.aliases.add(new_aliases)
# we need to trigger this here, since this will force
# (default) Exits to rebuild their Exit commands with the new
# aliases
#obj.at_cmdset_get(force_init=True)
if obj.destination:
obj.flush_from_cache()
def set_obj_location(obj, location):
"""
Set object's location.
Args:
obj: Object in game.
location: (string) Location's name. Must be the key of data info.
"""
location_obj = None
if location:
# If has location, search location object.
location_obj = search_obj_info_key(location)
if not location_obj:
logger.log_errmsg("%s can't find location %s!" % (get_info_key(obj), location))
return
location_obj = location_obj[0]
if obj.location == location_obj:
# No change.
return
if obj == location_obj:
# Can't set location to itself.
logger.log_errmsg("%s can't teleport itself to itself!" % get_info_key(obj))
return
# try the teleport
obj.move_to(location_obj, quiet=True, to_none=True)
def set_obj_home(obj, home):
"""
Set object's home.
Args:
obj: Object in game.
home: (string) Home's name. Must be the key of data info.
"""
home_obj = None
if home:
# If has home, search home object.
home_obj = search_obj_info_key(home)
if not home_obj:
logger.log_errmsg("%s can't find home %s!" % (get_info_key(obj), home))
return
home_obj = home_obj[0]
if obj.home == home_obj:
# No change.
return
if obj == home_obj:
# Can't set home to itself.
logger.log_errmsg("%s can't set home to itself!" % get_info_key(obj))
return
obj.home = home_obj
def set_obj_desc(obj, desc):
"""
Set object's description.
Args:
obj: Object in game.
desc: (string) Description.
"""
obj.db.desc = desc
def set_obj_lock(obj, lock):
"""
Set object's lock.
Args:
obj: Object in game.
lock: (string) Object's lock string.
"""
if lock:
try:
obj.locks.add(lock)
except:
logger.log_errmsg("%s can't set lock %s." % (get_info_key(obj), lock))
def set_obj_attributes(obj, attributes):
"""
Set object's attribute.
Args:
obj: Object in game.
        attributes: (dict) Object's attributes.
"""
if not attributes:
return
for key in attributes:
# Add attributes.
try:
obj.attributes.add(key, attributes[key])
except:
logger.log_errmsg("%s can't set attribute %s!" % (get_info_key(obj), key))
def set_obj_destination(obj, destination):
"""
Set object's destination
Args:
obj: Object in game.
destination: (string) Destination's name. Must be the key of data info.
"""
destination_obj = None
if destination:
# If has destination, search destination object.
destination_obj = search_obj_info_key(destination)
if not destination_obj:
logger.log_errmsg("%s can't find destination %s!" % (get_info_key(obj), destination))
return
destination_obj = destination_obj[0]
if obj.destination == destination_obj:
# No change.
return
if obj == destination_obj:
# Can't set destination to itself.
logger.log_errmsg("%s can't set destination to itself!" % get_info_key(obj))
return
obj.destination = destination_obj
def set_obj_detail(obj, key, detail):
"""
Set object's detail.
Args:
obj: Object in game.
key: (string) Detail's key.
detail: (string) Detail's info.
"""
if hasattr(obj, "set_detail"):
obj.set_detail(key, detail)
def get_info_key(obj):
"""
Get an object's data info key.
Args:
obj: Object in game.
"""
return obj.attributes.get(key="key", category=DATA_INFO_CATEGORY, strattr=True)
def search_obj_info_key(key):
"""
Search objects which have the given key.
Args:
key: (string) Data info key.
"""
obj = search.search_object_attribute(key="key", strvalue=key, category=DATA_INFO_CATEGORY)
return obj
def search_obj_info_model(model):
"""
Search objects which have the given model.
Args:
model: (string) Data model's name.
"""
obj = search.search_object_attribute(key="model", strvalue=model, category=DATA_INFO_CATEGORY)
return obj
|
from telegram.ext import Updater
updater = Updater(token='1435594962:AAE3UHSB2I7XxpQKEGkzcvHzUnTrkYKpclY', use_context=True)
dispatcher = updater.dispatcher
import logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
def start(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="I'm a bot, please talk to me!")
from telegram.ext import CommandHandler
start_handler = CommandHandler('start', start)
dispatcher.add_handler(start_handler)
updater.start_polling()
def echo(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
from telegram.ext import MessageHandler, Filters
echo_handler = MessageHandler(Filters.text & (~Filters.command), echo)
dispatcher.add_handler(echo_handler)
def caps(update, context):
text_caps = ' '.join(context.args).upper()
context.bot.send_message(chat_id=update.effective_chat.id, text=text_caps)
caps_handler = CommandHandler('caps', caps)
dispatcher.add_handler(caps_handler)
from telegram import InlineQueryResultArticle, InputTextMessageContent
def inline_caps(update, context):
query = update.inline_query.query
if not query:
return
results = list()
results.append(
InlineQueryResultArticle(
id=query.upper(),
title='Caps',
input_message_content=InputTextMessageContent(query.upper())
)
)
context.bot.answer_inline_query(update.inline_query.id, results)
from telegram.ext import InlineQueryHandler
inline_caps_handler = InlineQueryHandler(inline_caps)
dispatcher.add_handler(inline_caps_handler)
def unknown(update, context):
context.bot.send_message(chat_id=update.effective_chat.id, text="Sorry, I didn't understand that command.")
unknown_handler = MessageHandler(Filters.command, unknown)
dispatcher.add_handler(unknown_handler)
updater.stop()
|
#! /usr/bin/env python
"""
Scan a list of genome files and create individual "info file" CSVs
for genome-grist to use for private genomes.
"""
import sys
import argparse
import screed
import csv
import os
import shutil
def main():
p = argparse.ArgumentParser()
p.add_argument('info_csv')
args = p.parse_args()
info_d = {}
with open(args.info_csv, 'r', newline="") as fp:
r = csv.DictReader(fp)
for row in r:
ident = row['ident']
info_d[ident] = row
print(f"loaded {len(info_d)} info files from '{args.info_csv}'")
n = 0
for ident, item_d in info_d.items():
# write .info.csv.
dirname = os.path.dirname(item_d['genome_filename'])
info_filename = os.path.join(dirname, f"{ident}.info.csv")
name = item_d['display_name']
with open(info_filename, 'wt') as fp:
w2 = csv.DictWriter(fp, fieldnames=['ident',
'display_name'])
w2.writeheader()
w2.writerow(dict(ident=ident, display_name=name))
print(f"Created info CSV '{info_filename}'")
n += 1
print(f"wrote {n} info files.")
return 0
if __name__ == '__main__':
sys.exit(main())
|
def summation_of_primes_below_two_million():
def sieve(scope):
prime = [True] * scope
p = 2
while p * p <= scope:
if prime[p]:
for i in range(p * 2, scope, p):
prime[i] = False
p += 1
return prime[2:]
sum_of_primes = 0
for index, number in enumerate(sieve(2000000)):
if number:
sum_of_primes += index + 2
return sum_of_primes
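
# Minimal usage sketch (assumed entry point, not in the original snippet); the
# printed value should be 142913828922, the known sum of the primes below 2,000,000.
if __name__ == "__main__":
    print(summation_of_primes_below_two_million())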
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""IPython -- An enhanced Interactive Python
The actual ipython script to be installed with 'python setup.py install' is
in './scripts' directory. This file is here (ipython source root directory)
to facilitate non-root 'zero-installation' (just copy the source tree
somewhere and run ipython.py) and development. """
# Ensure that the imported IPython is the local one, not a system-wide one
import os, sys
this_dir = os.path.dirname(sys.argv[0])
sys.path.insert(0, this_dir)
# Now proceed with execution
execfile(os.path.join(
this_dir, 'IPython', 'scripts', 'ipython'
))
|
import dateutil.parser
from botocore.vendored import requests
def lambda_handler(request, context):
insert_nps_responses, last_nps_response_date = api_response(request['state'], request['secrets'])
insert = dict()
insert['nps_responses'] = insert_nps_responses
delete = dict()
delete['nps_responses'] = list()
state = dict()
state['nps_responsesCursor'] = last_nps_response_date
nps_responses_schema = dict()
nps_responses_schema['primary_key'] = ['id']
schema = dict()
schema['nps_responses'] = nps_responses_schema
return dict(
state=state,
insert=insert,
delete=delete,
schema=schema,
hasMore='false'
)
def api_response(state, secrets):
page = 1
if 'nps_responsesCursor' in state:
last_datetime_str = state['nps_responsesCursor']
last_datetime = dateutil.parser.parse(last_datetime_str)
else:
last_datetime_str = None
last_datetime = None
if last_datetime is not None:
add_request = dict(startDate=int(last_datetime.replace(microsecond=0).timestamp()))
else:
add_request = dict()
new_datetime_str = last_datetime_str
new_datetime = last_datetime
return_data = list()
while True:
response = requests.get('https://app.retently.com/api/v2/nps/customers/response', dict(
page=page,
limit=50,
sort='createdDate',
**add_request
), headers={
'Content-Type': 'application/json',
'Authorization': f"api_key={secrets['api_key']}"
})
data = response.json()
        if 'data' in data and 'responses' in data['data'] and len(data['data']['responses']) > 0:
            pages = data['data']['pages']
            response_data = data['data']['responses']
for datum in response_data:
created_datetime = dateutil.parser.parse(datum['createdDate'])
if new_datetime is None or created_datetime > new_datetime:
new_datetime = created_datetime
new_datetime_str = datum['createdDate']
if last_datetime is None or created_datetime > last_datetime:
return_data.append(datum)
else:
break
if page + 1 > pages:
break
page = page + 1
return return_data, new_datetime_str
|
"""
This benchmark was adapted from
https://bitbucket.org/pypy/benchmarks/src/34f06618ef7f29d72d9c19f1e3894607587edc76/unladen_swallow/performance/bm_django.py?at=default
"""
__author__ = "[email protected] (Collin Winter)"
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../test/integration/django"))
from django.conf import settings
settings.configure()
from django.template import Context, Template
import time
DJANGO_TMPL = Template("""<table>
{% for row in table %}
<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
{% endfor %}
</table>
""")
def test_django(count):
table = [xrange(50) for _ in xrange(50)]
context = Context({"table": table})
times = []
for _ in xrange(count):
t0 = time.time()
data = DJANGO_TMPL.render(context)
t1 = time.time()
times.append(t1 - t0)
return times
test_django(15)
|
import tarfile
import os
class TARFile:
def __init__(self, path, tarfilename):
self.path = path
self.tarfilename = tarfilename
def extract_all(self):
print "Extracting files from (" + self.path + "/" + self.tarfilename + ") ..."
tfile = tarfile.open(self.path + "/" + self.tarfilename, 'r')
extract = False
for member in tfile.getmembers():
if not os.path.exists(self.path + "/" + member.name):
print "Extracting (" + member.name + " -> " + self.path + "/" + member.name + ")"
extract = True
if extract == True:
tfile.extractall(self.path)
|
class HCF4094Capture(object):
"""
This class is used to emulate the data that is shifted to the HCF4094.
A callback method is registered and will be called with a list of tuples (index, bit_state) for each
output that has changed since last strobe. This allows you to simulate hardware that occurs when the
bit state changes.
An example of this would be if the bit is controlling power to a device. With a `1` bit state, you
could simulate what actions occur when power is applied to the device.
"""
def __init__(self, gpio_ref, data_gpio, clock_gpio, strobe_gpio, out_enable_gpio,
bits_list, callback):
"""
Initialization
Callback method details:
One argument is given to callback method, a list of tuples (index, 0 or 1).
index: index of bits_list which has changed since last strobe event.
0 or 1: new state of bit.
It is expected that you maintain old state to see if you need to trigger simulation events for what
electrical event corresponds with that bit changing.
:param gpio_ref: reference to Mock.GPIO object
:param data_gpio: data pin number
:param clock_gpio: clock pin number
:param strobe_gpio: strobe pin number
:param out_enable_gpio: output enable pin number
:param bits_list: list of bit states for current output, order is furthest to nearest bit. As shifting
occurs at index 0 and finished at index ``n``.
This will determine initial state to trigger callbacks and bit length to maintain for data
Initial state usually all 0, so ``[0] * bit_depth`` might be easy initialization
:param callback: method to call when bits change
"""
self._gpio = gpio_ref
self._data_pin = data_gpio
self._clock_pin = clock_gpio
self._strobe_pin = strobe_gpio
self._out_enable_pin = out_enable_gpio
test_list = [bit for bit in bits_list if bit not in (0, 1)]
if len(test_list) != 0:
raise ValueError('bits_list may only contain 0 or 1. Found {}'.format(test_list))
self._bit_count = len(bits_list)
self.current_data = tuple(bits_list)
self._buffered_data = []
self._callback = callback
# Register with Mock GPIO to call methods when clock or strobe occurs
# We are using FALLING instead of RISING, because logic is backwards due to
# MOSFET for output.
self._gpio.add_event_callback(self._clock_pin, self._gpio.FALLING, self._clocked)
self._gpio.add_event_callback(self._strobe_pin, self._gpio.FALLING, self._strobed)
def _clocked(self):
# Have to reverse data, as MOSFET output is backwards
bit_value = (1, 0)[self._gpio._simulate_read_out_pin(self._data_pin)]
self._buffered_data.append(bit_value)
def _strobed(self):
self._send_data()
def _send_data(self):
# Make list with last `self._bit_count` number of bits shifted.
# May be called with only a few bits shifted, so need to include old data.
new_data = (list(self.current_data) + self._buffered_data)[-self._bit_count:]
changes = [(index, new)
for (index, (old, new)) in enumerate(zip(self.current_data, new_data))
if old != new]
self.current_data = tuple(new_data)
self._callback(changes)
self._buffered_data = self._buffered_data[-self._bit_count:]
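
# Hedged usage sketch: the stand-in GPIO object, pin numbers and bit depth below are
# assumptions for illustration; the real Mock.GPIO API is not shown in this snippet.
class _FakeGPIO(object):
    FALLING = 0
    def add_event_callback(self, pin, edge, handler):
        # A real Mock.GPIO object would invoke `handler` on the matching pin edge.
        pass

def _on_outputs_changed(changes):
    for index, state in changes:
        print("output {} changed to {}".format(index, state))

_capture = HCF4094Capture(_FakeGPIO(), data_gpio=23, clock_gpio=24, strobe_gpio=25,
                          out_enable_gpio=12, bits_list=[0] * 16,
                          callback=_on_outputs_changed)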
|
import os
TOKEN = os.environ.get("BOT_TOKEN")
START_MESSAGE = os.environ.get("START_MESSAGE", "*Hi ! I am a simple torrent searcher using @sjprojects's Torrent Searcher api.\n\n\nMade with 🐍 by @KeralasBots*")
FOOTER_TEXT = os.environ.get("FOOTER_TEXT", "*Made with ❤️ by @KeralasBots*") |
# generated from catkin/cmake/template/cfg-extras.context.py.in
DEVELSPACE = 'FALSE' == 'TRUE'
INSTALLSPACE = 'TRUE' == 'TRUE'
CATKIN_DEVEL_PREFIX = '/root/ros_catkin_ws/devel_isolated/genmsg'
CATKIN_GLOBAL_BIN_DESTINATION = 'bin'
CATKIN_GLOBAL_ETC_DESTINATION = 'etc'
CATKIN_GLOBAL_INCLUDE_DESTINATION = 'include'
CATKIN_GLOBAL_LIB_DESTINATION = 'lib'
CATKIN_GLOBAL_LIBEXEC_DESTINATION = 'lib'
CATKIN_GLOBAL_PYTHON_DESTINATION = 'lib/python2.7/dist-packages'
CATKIN_GLOBAL_SHARE_DESTINATION = 'share'
CATKIN_PACKAGE_BIN_DESTINATION = 'lib/genmsg'
CATKIN_PACKAGE_ETC_DESTINATION = 'etc/genmsg'
CATKIN_PACKAGE_INCLUDE_DESTINATION = 'include/genmsg'
CATKIN_PACKAGE_LIB_DESTINATION = 'lib'
CATKIN_PACKAGE_LIBEXEC_DESTINATION = ''
CATKIN_PACKAGE_PYTHON_DESTINATION = 'lib/python2.7/dist-packages/genmsg'
CATKIN_PACKAGE_SHARE_DESTINATION = 'share/genmsg'
CMAKE_BINARY_DIR = '/root/ros_catkin_ws/build_isolated/genmsg'
CMAKE_CURRENT_BINARY_DIR = '/root/ros_catkin_ws/build_isolated/genmsg'
CMAKE_CURRENT_SOURCE_DIR = '/root/ros_catkin_ws/src/genmsg'
CMAKE_INSTALL_PREFIX = '/root/ros_catkin_ws/install_isolated'
CMAKE_SOURCE_DIR = '/root/ros_catkin_ws/src/genmsg'
PKG_CMAKE_DIR = '${genmsg_DIR}'
PROJECT_NAME = 'genmsg'
PROJECT_BINARY_DIR = '/root/ros_catkin_ws/build_isolated/genmsg'
PROJECT_SOURCE_DIR = '/root/ros_catkin_ws/src/genmsg'
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-27 15:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('users', '0009_card_is_active'),
]
operations = [
migrations.RemoveField(
model_name='tradeinfo',
name='from_card',
),
migrations.RemoveField(
model_name='tradeinfo',
name='to_card',
),
migrations.RemoveField(
model_name='tradeinfo',
name='trade_status',
),
migrations.AddField(
model_name='tradeinfo',
name='from_user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='from_user', to=settings.AUTH_USER_MODEL, verbose_name='\u8f6c\u51fa\u7528\u6237'),
),
migrations.AddField(
model_name='tradeinfo',
name='to_user',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='to_user', to=settings.AUTH_USER_MODEL, verbose_name='\u8f6c\u5165\u7528\u6237'),
),
]
|
from __future__ import print_function, unicode_literals
import os
import warnings
import tensorflow as tf
from dragnn.protos import spec_pb2
from dragnn.python import graph_builder, spec_builder
from google.protobuf import text_format
from syntaxnet import sentence_pb2
from syntaxnet.ops import gen_parser_ops
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
warnings.filterwarnings("ignore", message="Conversion of the second argument")
class SyntaxNet(object):
def __init__(self, lang="English", model_dir="/usr/local/tfmodels/"):
tf.logging.set_verbosity(tf.logging.ERROR)
segmenter_path = os.path.join(model_dir, lang, "segmenter")
parser_path = os.path.join(model_dir, lang)
self.segmenter = self._load_model(segmenter_path, "spec.textproto")
self.parser = self._load_model(parser_path, "parser_spec.textproto")
def parse(self, sentence):
return [
{
"word": token.word,
"label": token.label,
"attributes": self._parse_attribute(token.tag),
"head": token.head + 1,
}
for token in self._annotate(sentence).token
]
def _load_model(self, base_dir, master_spec_name):
master_spec = spec_pb2.MasterSpec()
with open(os.path.join(base_dir, master_spec_name)) as f:
text_format.Merge(f.read(), master_spec)
spec_builder.complete_master_spec(master_spec, None, base_dir)
graph = tf.Graph()
with graph.as_default():
hyperparam_config = spec_pb2.GridPoint()
builder = graph_builder.MasterBuilder(
master_spec,
hyperparam_config
)
annotator = builder.add_annotation(enable_tracing=True)
builder.add_saver()
sess = tf.Session(graph=graph)
with graph.as_default():
builder.saver.restore(sess, os.path.join(base_dir, "checkpoint"))
def annotate_sentence(sentence):
with graph.as_default():
return sess.run(
[annotator['annotations'], annotator['traces']],
feed_dict={annotator['input_batch']: [sentence]}
)
return annotate_sentence
def _parse_attribute(self, attributed_tag):
'''
ex) attribute { name: \"Case\" value: \"Nom\" }
attribute { name: \"Number\" value: \"Sing\" }
attribute { name: \"Person\" value: \"1\" }
attribute { name: \"PronType\" value: \"Prs\" }
attribute { name: \"fPOS\" value: \"PRP++PRP\" }
=>
{'Case':'Nom', ..., 'fPOS':'PRP++PRP'}
'''
return {
line.strip().split('\"')[1]: line.strip().split('\"')[3]
for line in attributed_tag.split("attribute")
if line
}
def _annotate(self, text):
sentence = sentence_pb2.Sentence(
text=text,
token=[sentence_pb2.Token(word=text, start=-1, end=-1)]
)
with tf.Session(graph=tf.Graph()) as tmp_session:
char_input = gen_parser_ops.char_token_generator([
sentence.SerializeToString()
])
preprocessed = tmp_session.run(char_input)[0]
segmented, _ = self.segmenter(preprocessed)
annotations, traces = self.parser(segmented[0])
assert len(annotations) == 1
assert len(traces) == 1
return sentence_pb2.Sentence.FromString(annotations[0])
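# A minimal usage sketch (not from the original source); the sentence text is an
# illustrative assumption and the paths match the defaults of the class above:
# parser = SyntaxNet(lang="English", model_dir="/usr/local/tfmodels/")
# print(parser.parse("John loves Mary."))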
|
import random
import time
class Goalkeeper():
"""A definition that produces the attributes for a goalkeeper on the basis of the players overall rating"""
def __init__(self, name, reflexes, jumping, bravery, kicking):
self.name = name
self.reflexes = reflexes
self.jumping = jumping
self.bravery = bravery
self.kicking = kicking
self.overall = int((reflexes + jumping + bravery + kicking)/4)
self.maxipgk = 320
if self.reflexes + self.jumping + self.bravery + self.kicking > self.maxipgk:
raise ValueError("Points maximum exceeded! You points maximum is " + str(self.maxipgk))
if self.reflexes > 100:
raise ValueError("Attacking Points maximum exceeded! You Attacking Points maximum is 100")
if self.jumping > 100:
raise ValueError("Defending Points maximum exceeded! You Defending Points maximum is 100")
if self.bravery > 100:
raise ValueError("Fitness Points maximum exceeded! You Fitness Points maximum is 100")
if self.kicking > 100:
raise ValueError("Pace Points maximum exceeded! You Pace Points maximum is 100")
def __repr__(self):
return repr((self.name, self.reflexes, self.jumping, self.bravery, self.kicking))
class Outfield_Player():
""" A class for a player, Attributes: Name (a string), Position (a definition, string), Overall (a string) """
def __init__(self, name, position, attacking, defending, fitness, pace, passing, skill):
self.name = name
self.position = position
self.attacking = attacking
self.defending = defending
self.fitness = fitness
self.pace = pace
self.passing = passing
self.skill = skill
self.overall = int((attacking + defending + fitness + pace + passing + skill)/6)
self.maxip = 480
if self.attacking + self.defending + self.fitness + self.pace + self.passing + self.skill > self.maxip:
raise ValueError("Points maximum exceeded! You points maximum is " + str(self.maxip))
if self.attacking > 100:
raise ValueError("Attacking Points maximum exceeded! You Attacking Points maximum is 100")
if self.defending > 100:
raise ValueError("Defending Points maximum exceeded! You Defending Points maximum is 100")
if self.fitness > 100:
raise ValueError("Fitness Points maximum exceeded! You Fitness Points maximum is 100")
if self.pace > 100:
raise ValueError("Pace Points maximum exceeded! You Pace Points maximum is 100")
if self.passing > 100:
raise ValueError("Passing Points maximum exceeded! You Passing Points maximum is 100")
if self.skill > 100:
raise ValueError("Skill Points maximum exceeded! You Skill Points maximum is 100")
if self.position not in ['DF','MF','FW']:
raise ValueError("Position not valid. Select from " + "'DF','MF','FW'")
def __repr__(self):
return repr((self.name, self.position, self.attacking, self.defending, self.fitness, self.pace, self.passing, self.skill))
class Team:
""" A class for creating a team with an attacking, defending and overall attribute"""
def __init__(self, name, player1, player2, player3, player4, player5):
self.name = name
self.player1 = player1
self.player2 = player2
self.player3 = player3
self.player4 = player4
self.player5 = player5
self.overall = int((player1.overall + player2.overall + player3.overall + player4.overall + player5.overall)/5)
self.defending = int((player1.overall + player2.defending + player3.defending + player4.defending + player5.defending)/5)
self.attacking = int((player2.attacking + player3.attacking + player4.attacking + player5.attacking)/4)
def __repr__(self):
return repr((self.name, self.overall, self.player1, self.player2, self.player3, self.player4, self.player5))
Vivaldi = Goalkeeper('Juan Vivaldi', 83, 77, 72, 82)
Peillat = Outfield_Player('Gonzalo Peillat', 'DF', 70, 89, 78, 73, 79, 67)
Ortiz = Outfield_Player('Ignacio Ortiz', 'MF', 79, 78, 77, 80, 75, 81)
Rey = Outfield_Player('Matias Rey', 'MF', 81, 77, 74, 72, 87, 72)
Vila = Outfield_Player('Lucas Vila', 'FW', 87, 50, 80, 82, 74, 85)
ARG = Team('Argentina', Vivaldi, Peillat, Ortiz, Rey, Vila)
Charter = Goalkeeper('Andrew Charter', 84, 80, 75, 78)
Dawson = Outfield_Player('Mattew Dawson', 'DF', 74, 86, 80, 79, 80, 81)
Wickham = Outfield_Player('Tom Wickham', 'MF', 79, 78, 80, 82, 80, 81)
Edwards = Outfield_Player('Jeremy Edwards', 'MF', 80, 81, 76, 82, 80, 75)
Craig = Outfield_Player('Tom Craig', 'FW', 95, 65, 81, 82, 77, 80)
AUS = Team('Australia', Charter, Dawson, Wickham, Edwards, Craig)
Mantler = Goalkeeper('Michael Mantler', 64, 67, 62, 69)
Podpera = Outfield_Player('Mathias Podpera', 'DF', 63, 74, 67, 64, 65, 68)
Binder = Outfield_Player('Oliver Binder', 'MF', 76, 70, 62, 74, 66, 67)
Schmidt = Outfield_Player('Bernhard Schmidt', 'MF', 68, 77, 71, 67, 66, 76)
Bele = Outfield_Player('Robert Bele', 'FW', 76, 68, 69, 87, 62, 68)
AUT = Team('Austria', Mantler, Podpera, Binder, Schmidt, Bele)
Vanasch = Goalkeeper('Vincent Vanasch', 80, 77, 70, 79)
Briels = Outfield_Player('Thomas Briels', 'DF', 68, 87, 75, 70, 75, 71)
Boccard = Outfield_Player('Gautheir Boccard', 'MF', 75, 77, 79, 78, 76, 80)
Dockier = Outfield_Player('Sebastian Dockier', 'MF', 79, 78, 70, 71, 81, 70)
Charlier = Outfield_Player('Cedric Charlier', 'FW', 82, 68, 74, 79, 71, 82)
BEL = Team('Belgium', Vanasch, Briels, Boccard, Dockier, Charlier)
Pinner = Goalkeeper('George Pinner', 76, 77, 74, 79)
Dixon = Outfield_Player('Adam Dixon', 'DF', 45, 77, 79, 65, 81, 52)
Middleton = Outfield_Player('Barry Middleton', 'MF', 75, 81, 79, 76, 77, 73)
Martin = Outfield_Player('Harry Martin', 'MF', 79, 78, 73, 79, 81, 78)
Jackson = Outfield_Player('Ashley Jackson', 'FW', 85, 65, 74, 77, 73, 78)
ENG = Team('England', Pinner, Dixon, Middleton, Martin, Jackson)
Cortes = Goalkeeper('Francisco Cortes', 79, 74, 79, 69)
Enrique = Outfield_Player('Sergio Enrique', 'DF', 51, 79, 77, 73, 79, 69)
Alegre = Outfield_Player('David Alegre', 'MF', 75, 68, 75, 73, 74, 76)
Carrera = Outfield_Player('Jardi Carrera', 'MF', 71, 73, 76, 74, 79, 78)
Lleonart = Outfield_Player('Xavi Lleonart', 'FW', 78, 50, 70, 78, 77, 85)
ESP = Team('Spain', Cortes, Enrique, Alegre, Carrera, Lleonart)
Jacobi = Goalkeeper('Niclas Jacobi', 80, 73, 78, 77)
Butt = Outfield_Player('Linus Butt', 'DF', 60, 87, 76, 75, 70, 75)
Tompertz = Outfield_Player('Moritz Tompertz', 'MF', 70, 69, 73, 80, 77, 73)
Herzbruch = Outfield_Player('Timm Herzbruch', 'MF', 81, 73, 72, 74, 75, 73)
Grambusch = Outfield_Player('Tom Grambusch', 'FW', 78, 68, 72, 73, 72, 74)
GER = Team('Germany', Jacobi, Butt, Tompertz, Herzbruch, Grambusch)
Carless = Goalkeeper('Ben Carless', 68, 65, 66, 67)
Kyriakides = Outfield_Player('Dan Kyriakides', 'DF', 63, 74, 67, 63, 66, 65)
Cornick = Outfield_Player('Andrew Cornick', 'MF', 67, 66, 68, 63, 69, 65)
Brignull = Outfield_Player('Liam Brignull', 'MF', 62, 69, 65, 69, 67, 65)
Furlong = Outfield_Player('Gareth Furlong', 'FW', 77, 59, 66, 64, 67, 63)
WAL = Team('Wales', Carless, Kyriakides, Cornick, Brignull, Furlong)
Pieterse = Goalkeeper('Erasmus Pieterse', 75, 69, 74, 71)
Malgraff = Outfield_Player('Ignatius Malgraff', 'DF', 74, 64, 70, 75, 65, 69)
Madsen = Outfield_Player('Lloyd Madsen', 'MF', 65, 67, 66, 73, 79, 70)
Paton = Outfield_Player('Wade Paton', 'MF', 66, 73, 68, 65, 66, 68)
Hykes = Outfield_Player('Julian Hykes', 'FW', 79, 65, 72, 68, 79, 66)
RSA = Team('South Africa', Pieterse, Malgraff, Madsen, Paton, Hykes)
Singh = Goalkeeper('Harmanpreet Singh', 79, 72, 77, 74)
Tirkey = Outfield_Player('Dipsan Tirkey', 'DF', 61, 79, 75, 78, 68, 70)
Sharma = Outfield_Player('Nilakanta Sharma', 'MF', 72, 68, 72, 79, 78, 74)
Qureshi = Outfield_Player('Armaan Qureshi', 'MF', 76, 68, 77, 72, 75, 73)
Yousuf = Outfield_Player('Affan Yousuf', 'FW', 80, 70, 70, 74, 76, 73)
IND = Team('India', Singh, Tirkey, Sharma, Qureshi, Yousuf)
Harte = Goalkeeper('David Harte', 71, 77, 73, 68)
Gormley = Outfield_Player('Ronan Gormley', 'DF', 69, 77, 72, 70, 71, 68)
Watt = Outfield_Player('Michael Watt', 'MF', 61, 78, 79, 77, 73, 70)
Cargo = Outfield_Player('Chris Cargo', 'MF', 80, 64, 71, 74, 67, 73)
Bell = Outfield_Player('Jonny Bell', 'FW', 84, 59, 73, 80, 71, 84)
IRL = Team('Ireland', Harte, Gormley, Watt, Cargo, Bell)
Othman = Goalkeeper('Hafizuddin Othman', 74, 72, 68, 70)
Rahim = Outfield_Player('Razie Rahim', 'DF', 70, 71, 71, 72, 69, 73)
Hassan = Outfield_Player('Azi Hassan', 'MF', 77, 73, 76, 71, 74, 72)
Saari = Outfield_Player('Fitri Saari', 'MF', 76, 71, 67, 68, 70, 72)
Saabah = Outfield_Player('Shahril Saabah', 'FW', 71, 73, 75, 70, 76, 73)
MAL = Team('Malaysia', Othman, Rahim, Hassan, Saari, Saabah)
Ali = Goalkeeper('Amjad Ali', 66, 71, 67, 68)
Mahmood = Outfield_Player('Abu Mahmood', 'DF', 63, 73, 70, 78, 67, 63)
Shaked = Outfield_Player('Ammad Shaked', 'MF', 79, 69, 69, 66, 69, 74)
Ashfaq = Outfield_Player('Nawaz Ashfaq', 'MF', 68, 70, 69, 74, 63, 79)
Abbas = Outfield_Player('Tasawar Abbas', 'FW', 79, 63, 68, 77, 69, 77)
PAK = Team('Pakistan', Ali, Mahmood, Shaked, Ashfaq, Abbas)
Manchester = Goalkeeper('Devon Manchester', 74, 78, 71, 70)
Hilton = Outfield_Player('Blair Hilton', 'DF', 56, 78, 71, 70, 73, 72)
Archibald = Outfield_Player('Ryan Archibald', 'MF', 79, 65, 78, 77, 75, 72)
Child = Outfield_Player('Simon Child', 'MF', 67, 72, 75, 73, 68, 76)
Shaw = Outfield_Player('Bradley Shaw', 'FW', 76, 62, 77, 75, 74, 79)
NZL = Team('New Zealand', Manchester, Hilton, Archibald, Child, Shaw)
Stockmann = Goalkeeper('Jaap Stockmann', 79, 75, 78, 78)
Schuurman = Outfield_Player('Glenn Schuurman', 'DF', 63, 85, 77, 74, 68, 67)
Verga = Outfield_Player('Valetin Verga', 'MF', 72, 73, 74, 75, 75, 76)
Hertzberger = Outfield_Player('Jeroen Hertzberger', 'MF', 78, 78, 72, 73, 80, 71)
Pruyser = Outfield_Player('Micro Pruyser', 'FW', 83, 68, 72, 76, 72, 80)
NED = Team('Netherlands', Stockmann, Schuurman, Verga, Hertzberger, Pruyser)
def CPU_match(team1, team2):
minute = 1
team1players = [team1.player2.name, team1.player3.name, team1.player4.name, team1.player5.name]
team2players = [team2.player2.name, team2.player3.name, team2.player4.name, team2.player5.name]
team1score = []
team2score = []
potentacy = ((team1.attacking+team2.attacking)*(team1.defending+team2.defending))/(10*(team1.overall+team2.overall)**2)
while minute <= 70:
attackingteam = scoring_chance(team1, team2)
if attackingteam is team1.name:
team1goal = random.randint(1, team1.attacking)*potentacy
team2defend = random.randint(1, team2.defending)
if team1goal > team2defend:
team1score.append('team1')
if attackingteam is team2.name:
team2goal = random.randint(1, team2.attacking)*potentacy
team1defend = random.randint(1, team1.defending)
if team2goal > team1defend:
team2score.append('team2')
minute += 1
if len(team1score) != len(team2score):
print(team1.name, len(team1score)," - ", len(team2score),team2.name)
else:
while len(team1score) == len(team2score):
attackingteam = scoring_chance(team1, team2)
if attackingteam is team1.name:
team1goal = random.randint(1, team1.attacking)*potentacy
team2defend = random.randint(1, team2.defending)
if team1goal > team2defend:
team1score.append("team1")
if attackingteam is team2.name:
team2goal = random.randint(1, team2.attacking)*potentacy
team1defend = random.randint(1, team1.defending)
if team2goal > team1defend:
team2score.append("team2")
minute += 1
if len(team1score) != len(team2score):
print(team1.name, len(team1score)," - ", len(team2score),team2.name, " (after extra-time)")
break
if len(team1score) < len(team2score):
return team2
if len(team1score) > len(team2score):
return team1
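# Hypothetical usage sketch (not part of the original file): simulate a one-off
# match between two of the teams defined above and keep the returned winner.
# winner = CPU_match(ARG, AUS)   # prints the final score and returns the winning Team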
def create_quarterfinalists():
return []
quarterfinalists=create_quarterfinalists()
def round_of_16_draw(team):
if len(quarterfinalists) < 1:
roundof16teams=[WAL,AUT,RSA,PAK,MAL,IRL,NZL,ESP,ENG,IND,GER,NED,BEL,ARG,AUS]
roosmatchday=[]
opponent = random.choice(roundof16teams)
roundof16teams.pop(roundof16teams.index(opponent))
otherteams= sorted(roundof16teams, key=lambda team: team.name)
i = 0
print(team.name, " v ", opponent.name)
time.sleep(1)
while i<6:
otherteams.reverse()
hometeam = otherteams.pop(int((len(otherteams)-1)/2))
awayteam = otherteams.pop(int((len(otherteams)+1)/2))
roosmatchday.append((hometeam, awayteam))
print(hometeam.name, " v ", awayteam.name)
time.sleep(1)
i += 1
lasthometeam = otherteams.pop()
lastawayteam = otherteams.pop()
roosmatchday.append((lasthometeam, lastawayteam))
print(lasthometeam.name, " v ", lastawayteam.name)
teamt1,teamt2 = zip(*roosmatchday)
team1 = list(teamt1)
team2 = list(teamt2)
q = 0
print()
print("Results:")
while q < 7:
time.sleep(1)
print()
roogames = CPU_match(team1.pop(), team2.pop())
quarterfinalists.append(roogames)
q += 1
else:
quarterfinalists.clear()
raise ValueError('You already have the Quarter-finalists')
def create_semifinalists():
return []
semifinalists=create_semifinalists()
def quarter_finals_draw(team):
if len(semifinalists) < 1:
quarterfinalteams=list(quarterfinalists)
sfmatchday=[]
opponent = random.choice(quarterfinalteams)
quarterfinalteams.pop(quarterfinalteams.index(opponent))
otherteams2 = sorted(quarterfinalteams, key=lambda team: team.name)
i = 0
print(team.name, " v ", opponent.name)
time.sleep(1)
while i<2:
otherteams2.reverse()
hometeam = otherteams2.pop(int((len(otherteams2)-1)/2))
awayteam = otherteams2.pop(int((len(otherteams2)+1)/2))
sfmatchday.append((hometeam, awayteam))
print(hometeam.name, " v ", awayteam.name)
time.sleep(1)
i += 1
lasthometeam = otherteams2.pop()
lastawayteam = otherteams2.pop()
sfmatchday.append((lasthometeam, lastawayteam))
print(lasthometeam.name, " v ", lastawayteam.name)
teamt1,teamt2 = zip(*sfmatchday)
team1 = list(teamt1)
team2 = list(teamt2)
q = 0
print()
print("Results:")
while q < 3:
time.sleep(1)
print()
quarterfinalgames = CPU_match(team1.pop(),team2.pop())
semifinalists.append(quarterfinalgames)
q += 1
else:
semifinalists.clear()
raise ValueError('You already have the Semi-finalists')
def create_finalists():
return []
finalists =create_finalists()
def semi_finals_draw(team):
if len(finalists) < 1:
semifinalteams=list(semifinalists)
fmatchday=[]
opponent = random.choice(semifinalteams)
semifinalteams.pop(semifinalteams.index(opponent))
otherteams3 = sorted(semifinalteams, key=lambda team: team.name)
i = 0
time.sleep(1)
print(team.name, " v ", opponent.name)
lasthometeam = otherteams3.pop()
lastawayteam = otherteams3.pop()
fmatchday.append((lasthometeam, lastawayteam))
time.sleep(1)
print(lasthometeam.name, " v ", lastawayteam.name)
teamt1,teamt2 = zip(*fmatchday)
team1 = list(teamt1)
team2 = list(teamt2)
print()
print("Results:")
print()
semifinalgames = CPU_match(team1.pop(),team2.pop())
finalists.append(semifinalgames)
else:
finalists.clear()
raise ValueError('You already have the results for your finalists')
def commentary(team, otherteam):
teamplayers = [team.player2.name, team.player3.name, team.player4.name, team.player5.name]
otherteamplayers = [otherteam.player2.name, otherteam.player3.name, otherteam.player4.name, otherteam.player5.name]
probs = [0.1,0.225,0.225,0.45]
probs2 = [0.1,0.2,0.2,0.2,0.2,0.1,0.1,0.1]
GSFPC = [' with just the keeper to beat!',' hits it from the top of the D.',' there to tap it in at the back post.']
    PFCC = [' in possession at the moment passing it round the back.', ' win a long corner.', ' under pressure now.']
PFPC = [' plays a long ball forward', ' cuts in from the right.', ' cuts in from the left.']
PEPPC = [' goes round ', ' intercepts the ball due to a poor pass by ']
APFPC = [' centres it from the baseline.',' slaps it to the back post.',' wins a penalty corner.']
teamplayer = str(random.choices(teamplayers, weights=probs, k=1)).replace('[','').replace(']','').replace("'",'')
otherteamplayer = str(random.choices(otherteamplayers, weights=probs, k=1)).replace('[','').replace(']','').replace("'",'')
goalscoring1 = str(random.choices(GSFPC)).replace('[','').replace(']','').replace("'",'')
possession2 = str(random.choices(PFCC)).replace('[','').replace(']','').replace("'",'')
possession1 = str(random.choices(PFPC)).replace('[','').replace(']','').replace("'",'')
possession6 = str(random.choices(APFPC)).replace('[','').replace(']','').replace("'",'')
possession5 = str(random.choices(PEPPC)).replace('[','').replace(']','').replace("'",'')
scoringchancecom1 = teamplayer + goalscoring1
possessioncom3 = 'Lovely bit of skill from ' + teamplayer + ' to get out of a sticky situation.'
possessioncom2 = team.name + possession2
possessioncom1 = teamplayer + possession1
possessioncom4 = 'Great pass from ' + teamplayer
possessioncom5 = teamplayer + possession5 + otherteamplayer
possessioncom6 = teamplayer + possession6
scoringchancecom2 = teamplayer + ' gives away a foul in the D! Penalty corner to ' + otherteam.name
possessioncomlist = [possessioncom1, possessioncom2, possessioncom3, possessioncom4, possessioncom5, possessioncom6, scoringchancecom1, scoringchancecom2]
print(" ".join(random.choices(possessioncomlist, weights=probs2, k=1)))
def scoring_chance(team1, team2):
team1chance = random.randint(1, team1.overall)
team2chance = random.randint(1, team2.overall)
if team1chance > team2chance:
return team1.name
if team1chance < team2chance:
return team2.name
def Match(team1, team2):
"""Simulates a match in real-time with teams inputted, if there is a draw at the end of the game the result will be decided by a next goal wins format"""
minute = 1
team1players = [team1.player2.name, team1.player3.name, team1.player4.name, team1.player5.name]
team2players = [team2.player2.name, team2.player3.name, team2.player4.name, team2.player5.name]
probs = [0.1,0.225,0.225,0.45]
team1score = []
team2score = []
GSFPC = [" with just the keeper to beat!"," hits it from the top of the D."," there to tap it in at the back post."]
potentacy = ((team1.attacking+team2.attacking)*(team1.defending+team2.defending))/(10*(team1.overall+team2.overall)**2)
print("Kick-Off")
print()
while minute <= 70:
attackingteam = scoring_chance(team1, team2)
if attackingteam is team1.name:
if minute % 5 == 0:
commentary(team1, team2)
print()
time.sleep(1)
team1goal = random.randint(1, team1.attacking)*potentacy
team2defend = random.randint(1, team2.defending)
if team1goal > team2defend:
team1score.append("team1")
scorer1 = str(random.choices(team1players, weights=probs)).replace('[','').replace(']','').replace("'",'')
comment1 = str(random.choices(GSFPC)).replace('[','').replace(']','').replace("'",'')
scorercommentary = str(scorer1 + comment1)
print(scorercommentary)
print()
time.sleep(1)
print(scorer1, minute,"'")
print()
time.sleep(1)
if attackingteam is team2.name:
if minute % 5 == 0:
commentary(team2, team1)
print()
time.sleep(1)
team2goal = random.randint(1, team2.attacking)*potentacy
team1defend = random.randint(1, team1.defending)
if team2goal > team1defend:
team2score.append("team2")
scorer2 = str(random.choices(team2players, weights=probs)).replace('[','').replace(']','').replace("'",'')
comment1 = str(random.choices(GSFPC)).replace('[','').replace(']','').replace("'",'')
scorercommentary = str(scorer2 + comment1)
print(scorercommentary)
print()
time.sleep(1)
print(" ", minute,"'", scorer2)
print()
time.sleep(1)
minute += 1
time.sleep(0.5)
if minute == 35:
print("Half-time: ", team1.name, len(team1score)," - ", len(team2score),team2.name)
time.sleep(5)
print()
print("We are underway here for the second half of", team1.name, " v ", team2.name)
time.sleep(2)
print()
print("Full-time: ", team1.name, len(team1score)," - ", len(team2score),team2.name)
print()
time.sleep(2)
if len(team1score) == len(team2score):
print("It's all square here after full time. We are going to golden goal!")
print()
while len(team1score) == len(team2score):
attackingteam = scoring_chance(team1, team2)
if attackingteam is team1.name:
if minute % 5 == 0:
                    commentary(team1, team2)
print()
time.sleep(1)
team1goal = random.randint(1, team1.attacking)*potentacy
team2defend = random.randint(1, team2.defending)
if team1goal > team2defend:
team1score.append("team1")
scorer1 = str(random.choices(team1players, weights=probs)).replace('[','').replace(']','').replace("'",'')
comment1 = str(random.choices(GSFPC)).replace('[','').replace(']','').replace("'",'')
scorercommentary = str(scorer1 + comment1)
print(scorercommentary)
print()
time.sleep(1)
print(scorer1, minute,"'")
print()
time.sleep(1)
if attackingteam is team2.name:
if minute % 5 == 0:
commentary(team2, team1)
print()
time.sleep(1)
team2goal = random.randint(1, team2.attacking)*potentacy
team1defend = random.randint(1, team1.defending)
if team2goal > team1defend:
team2score.append("team2")
scorer2 = str(random.choices(team2players, weights=probs)).replace('[','').replace(']','').replace("'",'')
comment1 = str(random.choices(GSFPC)).replace('[','').replace(']','').replace("'",'')
scorercommentary = str(scorer2 + comment1)
print(scorercommentary)
print()
time.sleep(1)
print(" ", minute,"'", scorer2)
print()
time.sleep(1)
minute += 1
time.sleep(0.5)
if len(team1score) > len(team2score):
print(team1.name, "have won it in extra time unbelievable scenes!")
print()
if len(team1score) < len(team2score):
print(team2.name, "have won it in extra time unbelievable scenes!")
print()
print("Final Score: ", team1.name, len(team1score)," - ", len(team2score),team2.name) |
#!/usr/bin/env python
from distutils.core import setup
setup(name='dae_RelayBoard',
version='1.5.2',
description='Denkovi Relay Board Controller',
author='Peter Bingham',
author_email='[email protected]',
url='https://code.google.com/p/dae-py-relay-controller/',
packages=['dae_RelayBoard']
) |
"""
A slack client with much better async support
"""
from slackclient import SlackClient
import websockets
import asyncio
import ssl
import json
ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
# os x is dumb so this fixes the openssl cert import
try:
ssl_context.load_verify_locations('/usr/local/etc/openssl/cert.pem')
except:
pass
# we need to redefine these because the slackclient library is bad
class SlackLoginError(Exception):
pass
class SlackConnectionError(Exception):
pass
class BetterSlack(SlackClient):
""" a better slack client with async/await support """
def __init__(self, *args, **kwargs):
SlackClient.__init__(self, *args, **kwargs)
self.known_users = {}
self._conn = None
self.message_queue = []
self._should_reconnect = False
self._in_count = 0
async def __aenter__(self):
reply = self.server.api_requester.do(self.token, "rtm.start")
if reply.status_code != 200:
raise SlackConnectionError
else:
login_data = reply.json()
if login_data["ok"]:
self.ws_url = login_data['url']
if not self._should_reconnect:
self.server.parse_slack_login_data(login_data)
self._conn = websockets.connect(self.ws_url, ssl=ssl_context)
else:
raise SlackLoginError
self.websocket = await self._conn.__aenter__()
return self
async def __aexit__(self, *args, **kwargs):
await self._conn.__aexit__(*args, **kwargs)
async def main_loop(self, parser=None, on_tick=None):
async with self as self:
while True:
while len(self.message_queue) > 0:
await self.websocket.send(self.message_queue.pop(0))
if parser is not None:
incoming = await self.get_message()
try:
parser(incoming)
except Exception as e:
print(f'Error: {e}')
                if on_tick is not None:
                    on_tick()
self._in_count += 1
if self._in_count > (0.5 * 60 * 3):
self.ping()
self._in_count = 0
                await asyncio.sleep(0.5)
async def get_message(self):
incoming = await self.websocket.recv()
json_data = ""
json_data += "{0}\n".format(incoming)
json_data = json_data.rstrip()
data = []
if json_data != '':
for d in json_data.split('\n'):
data.append(json.loads(d))
for item in data:
self.process_changes(item)
return data
def ping(self):
return self.send_to_websocket({"type": "ping"})
def send_to_websocket(self, data):
"""
Send a JSON message directly to the websocket. See
        `RTM documentation <https://api.slack.com/rtm>`_ for allowed types.
:Args:
data (dict) the key/values to send the websocket.
"""
data = json.dumps(data)
self.message_queue.append(data)
def set_known_users(self):
response = self.api_call('users.list')
if not response['ok']:
return
for member in response['members']:
self.known_users[member['name']] = member['id']
def user_name_from_id(self, my_id):
for (name, id) in self.known_users.items():
if id == my_id:
return name
return None
def open_chat(self, name: str) -> str:
if name not in self.known_users:
self.set_known_users()
person = self.known_users[name]
response = self.api_call('im.open', user=person)
return response['channel']['id']
def send_message(self, name: str, message: str) -> None:
id = self.open_chat(name)
json = {"type": "message", "channel": id, "text": message}
self.send_to_websocket(json)
def send_channel_message(self, channel: str, message: str) -> None:
json = {"type": "message", "channel": channel, "text": message}
self.send_to_websocket(json)
def connected_user(self, username: str) -> str:
if username not in self.known_users:
self.set_known_users()
return self.known_users[username]
def attachment_strings(self, attachment):
strings = []
for (k, v) in attachment.items():
if isinstance(v, str):
strings.append(v)
for field in attachment.get('fields', []):
strings.append(field['title'])
strings.append(field['value'])
return strings
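# A minimal usage sketch, assuming a valid Slack API token; the token value and
# the message parser below are illustrative placeholders, not part of the original file.
# if __name__ == '__main__':
#     client = BetterSlack('xoxb-your-token-here')
#     client.set_known_users()
#     asyncio.get_event_loop().run_until_complete(
#         client.main_loop(parser=print, on_tick=None)
#     )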
|
import struct
import json
import os
import sys
import time
import shutil
import re
import logging
class PatchAsar:
asar_file_path = os.getenv("LocalAppData") + \
"\\Programs\\Termius\\resources\\app.asar"
backup_filename_base = "app.asar.bak"
def __init__(self, logger=None):
if logger is not None:
self.logger = logger
else:
self.logger = getLogger()
self.logger.info(f"打开ASAR:{self.asar_file_path}")
with open(self.asar_file_path, "rb") as asar_file:
asar_file.seek(4)
header_len = struct.unpack("<I", asar_file.read(4))[0]
asar_file.seek(8)
header_json_bytes = asar_file.read(header_len)[8:]
header_json_bytes = header_json_bytes.split(b'\0', 1)[0]
try:
self.header = json.loads(header_json_bytes)
except ValueError as e:
                self.logger.debug(header_json_bytes)
                self.logger.error("Failed to parse the header JSON")
raise e
asar_file.seek(8 + header_len)
self.body = asar_file.read()
def make_backup(self):
backup_filename = "{}.{}".format(
self.backup_filename_base,
str(int(time.time())))
try:
self.logger.info(f"创建备份:{backup_filename}")
shutil.copyfile(self.asar_file_path, backup_filename)
except OSError as e:
            self.logger.error(
                f"An error occurred while trying to create the backup. "
                f"Check that the file '{self.asar_file_path}' is accessible "
                f"and that the working directory is writable.")
raise e
return backup_filename
def get_file_obj(self, file_path):
file_obj = self.header
for name in file_path:
file_obj = file_obj["files"][name]
return file_obj
def get_file_loc_length(self, file_path):
file_obj = self.get_file_obj(file_path)
loc = int(file_obj["offset"])
length = int(file_obj["size"])
return loc, length
def get_file_content(self, file_path):
loc, length = self.get_file_loc_length(file_path)
file_content = self.body[loc: loc + length]
return file_content
def update_file(self, file_path, new_content):
loc, length = self.get_file_loc_length(file_path)
self.body = self.body[:loc] + \
new_content + self.body[loc+length:]
file_obj = self.get_file_obj(file_path)
new_length = len(new_content)
file_obj["size"] = new_length
PatchAsar.update_header(self.header, loc, new_length - length)
@staticmethod
def update_header(header, loc, diff):
for key in header:
if key == "offset":
offset = int(header["offset"])
if offset > loc:
header["offset"] = str(offset + diff)
elif key != "size" and key != "executable" and key != "unpacked":
# print(key)
PatchAsar.update_header(header[key], loc, diff)
def regenerate_header(self):
header_json_bytes = json.dumps(self.header).encode("ascii")
def int_to_bytes(num):
return num.to_bytes(4, byteorder='little', signed=False)
len4 = int_to_bytes(len(header_json_bytes))
len3 = int_to_bytes(len(header_json_bytes) + 6)
len2 = int_to_bytes(len(header_json_bytes) + 10)
len1 = int_to_bytes(len(len2))
suf = b"\00\00"
return len1 + len2 + len3 + len4 + header_json_bytes + suf
def write_asar_file(self):
new_asar_content = self.regenerate_header() + self.body
try:
self.logger.info(f"写入ASAR:{self.asar_file_path}")
with open(self.asar_file_path, "wb") as asar_file:
asar_file.write(new_asar_content)
except OSError as e:
self.logger.error(f"写入文件'{self.asar_file_path}'失败。")
raise e
def getLogger(debug=True):
logger = logging.getLogger("termius_patches")
formatter = logging.Formatter("%(levelname)-6s %(message)s")
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
return logger
def do_patch(patches):
"""
    Takes a dict: {(file path tuple): patch functions defined in patches}
"""
logger = getLogger()
patch = PatchAsar(logger=logger)
for file_path in patches:
content = patch.get_file_content(file_path)
for patch_func in patches[file_path]:
content = patch_func(content)
patch.update_file(file_path, content)
patch.make_backup()
patch.write_asar_file()
logging.info("完成!")
|
import torch
import numpy as np
from path import Path
import matplotlib.pyplot as plt
from sampler import Sampler
from random import random
def main1():
file = Path('./0000002.png')
img = plt.imread(file)
img = torch.tensor(img)
mask1 = torch.ones(img.shape,dtype=torch.uint8)
for i in range(mask1.shape[0]):
for j in range(mask1.shape[1]):
if i%2==0 and j%2 ==0:
mask1[i][j] = True
else:
mask1[i][j]=False
s1 = img[mask1].reshape(300,400)
s1 = s1.data.numpy()
#plt.imshow(mask1)
print(img)
plt.imshow(s1)
plt.show()
def main2():
    # note down how boolean tensor indexing works
a = torch.tensor([1,2,3,4,5,6,7,8,9,10]).reshape(1,1,2,5)
b = torch.tensor([1,0,1,0,1,0,1,0,1,0]).reshape(1,1,2,5)
d = torch.tensor([True,False,True,False,True,False,True,False,True,False]).reshape(1,1,2,5)
c = a[d]
print(c)
pass
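# Added note for clarity: indexing a tensor with a boolean mask of the same shape,
# as in main2 above, returns a 1-D tensor of only the selected elements
# (here the values 1, 3, 5, 7, 9), not a tensor of the original shape.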
def main3():
file = Path('./0000002.png')
img = plt.imread(file)
img = torch.tensor(img)
img = img.unsqueeze(dim=0)
img = img.unsqueeze(dim=0)
img2 = torch.ones(4, 1, 128, 192)
sam = Sampler(batch=1,channels=1,height=128,width=192, scales=6)
scale_list = sam.down_resolution_sampling([img])
#ims show
nps = []
for i in range(len(scale_list)):
nps.append(scale_list[i].data.numpy())
plt.subplot(2,3,1)
plt.imshow(scale_list[0][0][0])
plt.subplot(2, 3, 2)
plt.imshow(scale_list[1][0][0])
plt.subplot(2, 3, 3)
plt.imshow(scale_list[2][0][0])
plt.subplot(2, 3, 4)
plt.imshow(scale_list[3][0][0])
plt.subplot(2, 3, 5)
plt.imshow(scale_list[4][0][0])
plt.subplot(2, 3, 6)
plt.imshow(scale_list[5][0][0])
plt.show()
print('ok')
if __name__=="__main__":
main3()
pass
|
from drqa import retriever
from tqdm import tqdm
from transformers import BertTokenizer
from multiprocessing import Pool
import argparse
import logging
import json
import random
import nltk.data
import drqa.drqa_tokenizers
import math
import os
from multiprocessing.util import Finalize
from collections import Counter
logger = logging.getLogger()
logger.setLevel(logging.INFO)
fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
parser = argparse.ArgumentParser()
parser.add_argument('--max_query_len', type=int, default=128)
args = parser.parse_args()
def generate_train_sample(trace_question):
question = trace_question['question']
table_id = trace_question['table_id']
answer_node = trace_question['answer-node']
# treat the answer row with most answer nodes as the ground-truth answer row
answer_row = []
for node in answer_node:
answer_row.append(node[1][0])
answer_row = Counter(answer_row).most_common(1)[0][0]
ground_truth_block = fused_blocks[table_id + f'_{answer_row}']
# add [CLS] token to front of the fused block
block_tokens = ["[CLS]"] + ground_truth_block[0]
block_type = [0] + ground_truth_block[1]
block_mask = [1] + ground_truth_block[2]
block_repr = [block_tokens, block_type, block_mask]
# preprocess the question
query_tokens = '[CLS] ' + question + ' [SEP]'
query_tokens = bert_tokenizer.tokenize(query_tokens)
query_types = [0] * len(query_tokens)
query_masks = [1] * len(query_tokens)
# truncate query length
if len(query_tokens) > args.max_query_len:
query_tokens = query_tokens[:args.max_query_len]
query_tokens[-1] = '[SEP]'
query_types = query_types[:args.max_query_len]
        query_masks = query_masks[:args.max_query_len]
query = [query_tokens, query_types, query_masks]
return query, block_repr
if __name__ == '__main__':
n_threads = os.cpu_count()
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir='/tmp/')
with open('released_data/train.traced.json', 'r') as f:
data = json.load(f)
with open('preprocessed_data/train_fused_blocks.json', 'r') as f:
fused_blocks = json.load(f)
with Pool(n_threads) as p:
results = list(
tqdm(
p.imap_unordered(generate_train_sample, data),
total=len(data),
desc='Generate fine-tune samples for retriever',
)
)
with open('retriever/fine_tune_pretrain_data.json', 'w') as f:
json.dump(results, f, indent=2) |
"""
Define the API versions.
"""
class APIVersion:
V1 = "/v1"
|
# cosine similarity and duplicate detection implementation
import numpy
from pathlib import Path
from collections import defaultdict
from copy import deepcopy
similarityThreshold = 0.99 # customizable similarity threshold
debug = True # for debug print
def cosineSimilarity(dictX: dict, dictY: dict):
"""
Source code can be found on
www.biaodianfu.com/cosine-similarity.html
"""
listX = convertToVector(dictX)
listY = convertToVector(dictY)
vectorX = numpy.array(listX)
vectorY = numpy.array(listY)
dotProduct = numpy.dot(vectorX, vectorY)
normalizedX = numpy.linalg.norm(vectorX)
normalizedY = numpy.linalg.norm(vectorY)
if normalizedX * normalizedY == 0:
# print(dictX,"\n",dictY)
# print(listX,"\n",listY)
# print(vectorX,"\n", vectorY)
# print(normalizedX, "\n", normalizedY)
# print("cannot divide by 0")
return -1
return dotProduct / (normalizedX * normalizedY)
# print(f"cos sim for [1,1,1] and [1,1,1] = {cosineSimilarity([1,1,1], [1,1,1])}\tshould be 1")
# print(f"cos sim for [1,0,1] and [0,1,0] = {cosineSimilarity([1,0,1], [0,1,0])}\tshould be 0")
def readText(file) -> list:
with file.open() as f:
url = f.readline()
text = list(map(lambda x: x.rstrip(), f.readlines()))
# if(len(text) == 0):
# print(f"{url} is empty!!")
return text
def calculateTermFreq(tokenList: list) -> dict:
termFreqDict = defaultdict(int)
for token in tokenList:
termFreqDict[token] += 1
return termFreqDict
def convertToVector(tokenDict: dict) -> list:
rawVector = sorted(tokenDict.items())
tokenVector = [x[1] for x in rawVector] # only preserve the frequency values
return tokenVector
def addMissingTerms(tokenDictX: dict, tokenDictY: dict) -> (dict, dict):
for key in tokenDictX.keys():
if not (key in tokenDictY):
tokenDictY[key] = 0
for key in tokenDictY.keys():
if not (key in tokenDictX):
tokenDictX[key] = 0
return (tokenDictX, tokenDictY)
def queryDocSimiliarity(query: [str], fileName: str) -> float:
queryDict = calculateTermFreq(query)
tokenList = readText(Path(fileName))
tokenDict = calculateTermFreq(tokenList)
queryDict, tokenDict = addMissingTerms(queryDict, tokenDict)
score = cosineSimilarity(queryDict, tokenDict)
# print(score)
return score
def duplicateCheck(allFiles) -> list:
global similarityThreshold
# allFiles = iterdir(Path("op"))
fileList = list(allFiles)
dupeList = list()
emptyList = list()
count = 0
x = 0
while(x != len(fileList)):
fileX = fileList[x]
y = x + 1
tokenListX = readText(fileX)
if len(tokenListX) == 0:
if debug: print(f"remove {fileX.name}, current x: {x}")
fileList.remove(fileX)
emptyList.append(fileX)
continue
while(y != len(fileList)):
if fileList[x] != fileList[y]:
fileY = fileList[y]
tokenListX = readText(fileX)
tokenListY = readText(fileY)
if len(tokenListY) == 0:
if debug: print(f"remove {fileY.name}, current y: {y}")
fileList.remove(fileY)
emptyList.append(fileY)
continue
tokenDictX = calculateTermFreq(tokenListX)
tokenDictY = calculateTermFreq(tokenListY)
tokenDictX, tokenDictY = addMissingTerms(tokenDictX, tokenDictY)
score = cosineSimilarity(tokenDictX, tokenDictY)
if debug:
# print(f"{fileX.name} compare to {fileY.name}\nScore is {score}")
if count % 1000 == 0: print(f"current count: {count}")
# print(count)
count+= 1
if (score >= similarityThreshold):
if debug: print(f"remove {fileY.name}, current y: {y}")
fileList.remove(fileY)
dupeList.append(fileY)
y -= 1
# end if !=
y += 1
# end inner while
x += 1
# end outer while
print(dupeList)
return fileList
if __name__ == '__main__':
testFolder = Path("C:\\Users\\hower\\Documents\\CS 121\\project3\\testFolder")
outputFolder = Path("C:\\Users\\hower\\Documents\\CS 121\\project3\\op")
noDupeList = duplicateCheck(outputFolder.iterdir())
print(noDupeList)
with open("C:\\Users\\hower\\Documents\\CS 121\\project3\\noDupeList.txt", 'w') as f:
for filePath in noDupeList:
            f.write(filePath.name + "\n")
# if debug: print(fileList)
# score = queryDocSimiliarity(["fuck"],"C:\\Users\\hower\\Documents\\CS 121\\project3\\op\\10237.txt")
# query = ["comput"]
# result = []
# for file in outputFolder.iterdir():
# # print(file.name)
# score = queryDocSimiliarity(query, file)
# if score == -1:
# print(f"doc id {file.name} is empty")
# result.append(score)
# print(result)
|
from typing import Optional
import hashlib
import random
import base64
from pydantic import HttpUrl, BaseModel
from fastapi import FastAPI, Depends, Body, HTTPException
from fastapi.responses import RedirectResponse, HTMLResponse, JSONResponse
app = FastAPI()
posts = []
class Post(BaseModel):
description: str
num_rooms: int
price: float
location: str
def create_postid():
random_number = str(hex(random.randint(1000,9999)))
print(random_number)
return random_number
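# For illustration only: create_postid() returns something like '0x1a2b', the hex
# string of a random 4-digit number; collisions are possible since ids are not checked.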
# For testing and showcasing purpose only. Real function will get data from centralized data source
@app.post('/createpost', status_code = 201)
async def post_info(post: Post):
id = create_postid()
posts.append({"pid":id, "posting":post})
    return JSONResponse(status_code=201, content={"message": "Post created successfully"})
@app.get('/postings')
def get_posts():
return posts |
# ------------------------------------------------------------------------------------------------
from astrodbkit2.astrodb import create_database
from astrodbkit2.astrodb import Database
from simple.schema import *
from astropy.table import Table
import numpy as np
import re
import os
from utils import convert_spt_string_to_code
from pathlib import Path
DRY_RUN = True
RECREATE_DB = True
VERBOSE = False
verboseprint = print if VERBOSE else lambda *a, **k: None
db_file = 'SIMPLE.db'
db_file_path = Path(db_file)
db_connection_string = 'sqlite:///SIMPLE.db' # SQLite
if RECREATE_DB and db_file_path.exists():
os.remove(db_file)
if not db_file_path.exists():
create_database(db_connection_string)
db = Database(db_connection_string)
db.load_database('data')
# try:
# db_file_path = db_file.resolve(strict=True)
# except:
# # SIMPLE.db file does not exist so create it
# create_database(db_connection_string)
# db = Database(db_connection_string)
# db.load_database('data')
# else:
# # SIMPLE.db file does exist
# if RECREATE_DB: # Recreate database anyway
# os.remove(db_file)
# create_database(db_connection_string)
# db = Database(db_connection_string)
# db.load_database('data')
# else: # Use pre-existing database
# db = Database(db_connection_string)
# ===============================================================
# Ingest new reference if missing
# ===============================================================
# Adding new reference Manj19 to publications table in database
manj19_search = db.query(db.Publications).filter(db.Publications.c.name == 'Manj19').table()
if len(manj19_search) == 0 and not DRY_RUN:
new_ref = [{'name': 'Manj19'}]
# Should have included bibcode and doi
# new_ref = [{'name': 'Manj19', 'bibcode': '2019AJ....157..101M', 'doi': '10.3847/1538-3881/aaf88f'}]
db.Publications.insert().execute(new_ref)
#add DOI and Bibcode after Manj19 already added
add_doi_bibcode = db.Publications.update().where(db.Publications.c.name == 'Manj19').\
values(bibcode='2019AJ....157..101M', doi='10.3847/1538-3881/aaf88f', description='Cloud Atlas: HST nir spectral library')
db.engine.execute(add_doi_bibcode)
# ===============================================================
# load table of sources to ingest
ingest_table = Table.read("scripts/ingests/ATLAS_table.vot")
ingest_table_df = ingest_table.to_pandas()
names = ingest_table['Name']
n_sources = len(names)
spectral_types_unknown = ingest_table['SpType'] # pre-existing spectral types
spectral_types_spex = ingest_table['SpTSpeX'] # new spectral types
# fetch primary name identifier from database
db_names = []
for name in names:
db_name = db.search_object(name, output_table='Sources')[0].source
db_names.append(db_name)
# ===============================================================
# Ingest new spectral type estimates
# from the SpTSpeX column
# ===============================================================
db_names_spex = []
spex_types_string = []
for i, db_name in enumerate(db_names):
if spectral_types_spex[i] != "":
db_names_spex.append(db_name)
spex_types_string.append(spectral_types_spex[i])
spex_types_codes = convert_spt_string_to_code(spex_types_string, verbose=False)
regime = ['nir'] * len(db_names_spex)
spt_ref = ['Manj19'] * len(db_names_spex)
# adopted = False * len(db_names_spex) # Should have specified adopted column
SpT_table_spex = Table([db_names_spex, spex_types_string, spex_types_codes, regime, spt_ref],
names=('source', 'spectral_type_string', 'spectral_type_code', 'regime', 'reference'))
SpT_table_spex_df = SpT_table_spex.to_pandas() # make a Pandas dataframe to explore with Pycharm
# Report results
print("\n",len(db_names_spex),"Spex SpTypes to be added")
verboseprint(SpT_table_spex_df)
# Add to database
if not DRY_RUN:
db.add_table_data(SpT_table_spex, table='SpectralTypes', fmt='astropy')
# Update adopted field after spectral types were already added
update_adopted = db.SpectralTypes.update().where(db.SpectralTypes.c.reference == 'Manj19').values(adopted=False)
db.engine.execute(update_adopted)
# Verify results
n_Manj19_types = db.query(db.SpectralTypes).filter(db.SpectralTypes.c.reference == 'Manj19').count()
print("\n",n_Manj19_types, 'spectral types referenced to Manj19 now found database')
verboseprint(db.query(db.SpectralTypes).filter(db.SpectralTypes.c.reference == 'Manj19').table())
# Deletion example (use with caution!)
# db.SpectralTypes.delete().where(db.SpectralTypes.c.reference == 'Manj19').execute()
# ===============================================================
# ===============================================================
# Ingest spectral types from unknown sources
# if sources have no other spectral type
# ===============================================================
# Find out which sources don't have spectral types
db_names_needs_spectral_type = []
spectral_types_to_add = []
for i, db_name in enumerate(db_names):
db_spectral_types = db.query(db.SpectralTypes).filter(db.SpectralTypes.c.source == db_name).table()
if db_spectral_types is None or len(db_spectral_types) == 0:
db_names_needs_spectral_type.append(db_name)
spectral_types_to_add.append(spectral_types_unknown[i])
# Convert SpT string to code
spectral_type_codes_unknown = convert_spt_string_to_code(spectral_types_to_add, verbose=False)
regime = ['unknown'] * len(db_names_needs_spectral_type)
spt_ref = ['Missing'] * len(db_names_needs_spectral_type)
comments = ['From ATLAS Table Manjavacas etal. 2019']*len(db_names_needs_spectral_type)
# adopted = False * len(db_names_needs_spectral_type) # Should have specified adopted column
SpT_table_unknown = Table([db_names_needs_spectral_type, spectral_types_to_add, spectral_type_codes_unknown, regime,
spt_ref, comments],
names=('source', 'spectral_type_string', 'spectral_type_code', 'regime', 'reference',
'comments'))
# Report the results
print("\n",len(db_names_needs_spectral_type),"Spectral types with Missing reference to be added")
# Add to database
if not DRY_RUN:
db.add_table_data(SpT_table_unknown, table='SpectralTypes', fmt='astropy')
update_adopted = db.SpectralTypes.update().where(db.SpectralTypes.c.reference == 'Missing').values(adopted=False)
db.engine.execute(update_adopted)
# Deletion example. Undos add_table_data. (use with caution!)
# db.SpectralTypes.delete().where(db.SpectralTypes.c.reference == 'Missing').execute()
# ===============================================================
#if not DRY_RUN:
db.save_db('data')
|
# -*- coding: utf-8 -*-
"""
A number of generic default fixtures to use with tests.
All model-related fixtures defined here require the database, and should imply as much by
including ``db`` fixture in the function resolution scope.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import sys
import yaml
import sentry
import pytest
import six
from datetime import datetime
DEFAULT_EVENT_DATA = {
'extra': {
'loadavg': [0.97607421875, 0.88330078125, 0.833984375],
'sys.argv': [
'/Users/dcramer/.virtualenvs/sentry/bin/raven', 'test',
'https://ebc35f33e151401f9deac549978bda11:[email protected]/1'
],
'user':
'dcramer'
},
'modules': {
'raven': '3.1.13'
},
'request': {
'cookies': {},
'data': {},
'env': {},
'headers': {},
'method': 'GET',
'query_string': '',
'url': 'http://example.com',
},
'stacktrace': {
'frames': [
{
'abs_path':
'www/src/sentry/models/foo.py',
'context_line':
' string_max_length=self.string_max_length)',
'filename':
'sentry/models/foo.py',
'function':
'build_msg',
'in_app':
True,
'lineno':
29,
'module':
'raven.base',
'post_context': [
' },', ' })', '',
" if 'stacktrace' in data:",
' if self.include_paths:'
],
'pre_context': [
'', ' data.update({',
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
' list_max_length=self.list_max_length,'
],
'vars': {
'culprit': 'raven.scripts.runner',
'date': 'datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)',
'event_id': '598fb19363e745ec8be665e6ba88b1b2',
'event_type': 'raven.events.Message',
'frames': '<generator object iter_stack_frames at 0x103fef050>',
'handler': '<raven.events.Message object at 0x103feb710>',
'k': 'logentry',
'public_key': None,
'result': {
'logentry':
"{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
'self': '<raven.base.Client object at 0x104397f10>',
'stack': True,
'tags': None,
'time_spent': None,
},
},
{
'abs_path':
'/Users/dcramer/.virtualenvs/sentry/lib/python2.7/site-packages/raven/base.py',
'context_line':
' string_max_length=self.string_max_length)',
'filename':
'raven/base.py',
'function':
'build_msg',
'in_app':
False,
'lineno':
290,
'module':
'raven.base',
'post_context': [
' },', ' })', '',
" if 'stacktrace' in data:",
' if self.include_paths:'
],
'pre_context': [
'', ' data.update({',
" 'stacktrace': {",
" 'frames': get_stack_info(frames,",
' list_max_length=self.list_max_length,'
],
'vars': {
'culprit': 'raven.scripts.runner',
'date': 'datetime.datetime(2013, 2, 14, 20, 6, 33, 479471)',
'event_id': '598fb19363e745ec8be665e6ba88b1b2',
'event_type': 'raven.events.Message',
'frames': '<generator object iter_stack_frames at 0x103fef050>',
'handler': '<raven.events.Message object at 0x103feb710>',
'k': 'logentry',
'public_key': None,
'result': {
'logentry':
"{'message': 'This is a test message generated using ``raven test``', 'params': []}"
},
'self': '<raven.base.Client object at 0x104397f10>',
'stack': True,
'tags': None,
'time_spent': None,
},
},
],
},
'tags': [],
'platform': 'python',
}
@pytest.mark.django_db
@pytest.fixture
def factories():
# XXX(dcramer): hack to prevent recursive imports
from sentry.testutils.factories import Factories
return Factories
@pytest.fixture(scope='function')
def session(factories):
    return factories.create_session()
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_user(factories):
return factories.create_user(email='admin@localhost', is_superuser=True)
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_organization(factories, default_user):
# XXX(dcramer): ensure that your org slug doesnt match your team slug
# and the same for your project slug
return factories.create_organization(
name='baz',
slug='baz',
owner=default_user,
)
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_team(factories, default_organization):
from sentry.models import OrganizationMember, OrganizationMemberTeam
team = factories.create_team(
organization=default_organization,
name='foo',
slug='foo',
)
# XXX: handle legacy team fixture
queryset = OrganizationMember.objects.filter(
organization=default_organization,
)
for om in queryset:
OrganizationMemberTeam.objects.create(
team=team,
organizationmember=om,
is_active=True,
)
return team
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_project(factories, default_team):
return factories.create_project(
name='Bar',
slug='bar',
teams=[default_team],
)
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_projectkey(factories, default_project):
return factories.create_project_key(project=default_project)
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_environment(factories, default_project):
return factories.create_environment(
name='development',
project=default_project,
)
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_group(factories, default_project):
return factories.create_group(
project=default_project,
message=u'\u3053\u3093\u306b\u3061\u306f',
)
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_event(factories, default_group):
return factories.create_event(
group=default_group,
event_id='a' * 32,
message=u'\u3053\u3093\u306b\u3061\u306f',
)
@pytest.mark.django_db
@pytest.fixture(scope='function')
def default_activity(default_group, default_project, default_user):
from sentry.models import Activity
return Activity.objects.create(
group=default_group, project=default_project, type=Activity.NOTE, user=default_user, data={}
)
_snapshot_writeback = os.environ.get("SENTRY_SNAPSHOTS_WRITEBACK") or '0'
if _snapshot_writeback in ('true', '1', 'overwrite'):
_snapshot_writeback = 'overwrite'
elif _snapshot_writeback != 'new':
_snapshot_writeback = None
_test_base = os.path.realpath(os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(sentry.__file__)))))
_yaml_snap_re = re.compile(r'(?s)^---\r?\n(.*?)\r?\n---\r?\n(.*)$')
@pytest.fixture
def log():
def inner(x):
return sys.stdout.write(x + '\n')
return inner
@pytest.fixture
def insta_snapshot(request, log):
def inner(output, reference_file=None, subname=None):
if reference_file is None:
name = request.node.name
for c in ('::', '-', '[', ']'):
name = name.replace(c, '/')
name = name.strip('/')
reference_file = os.path.join(
os.path.dirname(six.text_type(request.node.fspath)),
'snapshots',
os.path.splitext(os.path.basename(request.node.parent.name))[0],
name + '.pysnap'
)
elif subname is not None:
raise ValueError(
"subname only works if you don't provide your own entire reference_file")
if not isinstance(output, six.string_types):
output = yaml.safe_dump(output, indent=2, default_flow_style=False)
try:
with open(reference_file) as f:
match = _yaml_snap_re.match(f.read().decode('utf-8'))
if match is None:
raise IOError()
_header, refval = match.groups()
except IOError:
refval = ''
refval = refval.rstrip()
output = output.rstrip()
if _snapshot_writeback is not None and refval != output:
if not os.path.isdir(os.path.dirname(reference_file)):
os.makedirs(os.path.dirname(reference_file))
source = os.path.realpath(six.text_type(request.node.fspath))
if source.startswith(_test_base + os.path.sep):
source = source[len(_test_base) + 1:]
if _snapshot_writeback == 'new':
reference_file += '.new'
with open(reference_file, "w") as f:
f.write('---\n%s\n---\n%s\n' % (yaml.safe_dump({
'created': datetime.utcnow().isoformat() + 'Z',
'creator': 'sentry',
'source': source,
}, indent=2, default_flow_style=False).rstrip(), output))
else:
log("Run with SENTRY_SNAPSHOTS_WRITEBACK=1 to update snapshots.")
assert refval == output
yield inner
|
try:
from pydicom.filewriter import *
except ImportError:
from dicom.filewriter import *
|
import numpy
import six.moves
import cellprofiler_core.image
import cellprofiler_core.measurement
import cellprofiler_core.modules
from cellprofiler_core.constants.measurement import FF_COUNT, COLTYPE_INTEGER, M_LOCATION_CENTER_X, COLTYPE_FLOAT, \
M_LOCATION_CENTER_Y, M_NUMBER_OBJECT_NUMBER, FF_CHILDREN_COUNT, FF_PARENT, R_FIRST_IMAGE_NUMBER, \
R_SECOND_IMAGE_NUMBER, R_FIRST_OBJECT_NUMBER, R_SECOND_OBJECT_NUMBER
import cellprofiler.modules.identifytertiaryobjects
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.workspace
import tests.modules
PRIMARY = "primary"
SECONDARY = "secondary"
TERTIARY = "tertiary"
OUTLINES = "Outlines"
def on_pipeline_event(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
def make_workspace(primary_labels, secondary_labels):
"""Make a workspace that has objects for the input labels
returns a workspace with the following
object_set - has object with name "primary" containing
the primary labels
has object with name "secondary" containing
the secondary labels
"""
isl = cellprofiler_core.image.ImageSetList()
module = cellprofiler.modules.identifytertiaryobjects.IdentifyTertiarySubregion()
module.set_module_num(1)
module.primary_objects_name.value = PRIMARY
module.secondary_objects_name.value = SECONDARY
module.subregion_objects_name.value = TERTIARY
workspace = cellprofiler_core.workspace.Workspace(
cellprofiler_core.pipeline.Pipeline(),
module,
isl.get_image_set(0),
cellprofiler_core.object.ObjectSet(),
cellprofiler_core.measurement.Measurements(),
isl,
)
workspace.pipeline.add_module(module)
for labels, name in ((primary_labels, PRIMARY), (secondary_labels, SECONDARY)):
objects = cellprofiler_core.object.Objects()
objects.segmented = labels
workspace.object_set.add_objects(objects, name)
return workspace
def test_zeros():
"""Test IdentifyTertiarySubregion on an empty image"""
primary_labels = numpy.zeros((10, 10), int)
secondary_labels = numpy.zeros((10, 10), int)
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
module.run(workspace)
measurements = workspace.measurements
assert "Image" in measurements.get_object_names()
count_feature = "Count_%s" % TERTIARY
assert count_feature in measurements.get_feature_names("Image")
value = measurements.get_current_measurement("Image", count_feature)
assert numpy.product(value.shape) == 1
assert value == 0
assert TERTIARY in workspace.object_set.get_object_names()
output_objects = workspace.object_set.get_objects(TERTIARY)
assert numpy.all(output_objects.segmented == primary_labels)
columns = module.get_measurement_columns(workspace.pipeline)
for object_name in (
"Image",
PRIMARY,
SECONDARY,
TERTIARY,
):
ocolumns = [x for x in columns if x[0] == object_name]
features = measurements.get_feature_names(object_name)
assert len(ocolumns) == len(features)
assert all([column[1] in features for column in ocolumns])
def test_one_object():
"""Test creation of a single tertiary object"""
primary_labels = numpy.zeros((10, 10), int)
secondary_labels = numpy.zeros((10, 10), int)
primary_labels[3:6, 4:7] = 1
secondary_labels[2:7, 3:8] = 1
expected_labels = numpy.zeros((10, 10), int)
expected_labels[2:7, 3:8] = 1
expected_labels[4, 5] = 0
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
module.run(workspace)
measurements = workspace.measurements
assert "Image" in measurements.get_object_names()
count_feature = "Count_%s" % TERTIARY
assert count_feature in measurements.get_feature_names("Image")
value = measurements.get_current_measurement("Image", count_feature)
assert numpy.product(value.shape) == 1
assert value == 1
assert TERTIARY in measurements.get_object_names()
child_count_feature = "Children_%s_Count" % TERTIARY
for parent_name in (PRIMARY, SECONDARY):
parents_of_feature = "Parent_%s" % parent_name
assert parents_of_feature in measurements.get_feature_names(TERTIARY)
value = measurements.get_current_measurement(TERTIARY, parents_of_feature)
        assert numpy.product(value.shape) == 1
        assert value[0] == 1
assert child_count_feature in measurements.get_feature_names(parent_name)
value = measurements.get_current_measurement(parent_name, child_count_feature)
        assert numpy.product(value.shape) == 1
        assert value[0] == 1
for axis, expected in (("X", 5), ("Y", 4)):
feature = "Location_Center_%s" % axis
assert feature in measurements.get_feature_names(TERTIARY)
value = measurements.get_current_measurement(TERTIARY, feature)
        assert numpy.product(value.shape) == 1
assert value[0] == expected
assert TERTIARY in workspace.object_set.get_object_names()
output_objects = workspace.object_set.get_objects(TERTIARY)
assert numpy.all(output_objects.segmented == expected_labels)
def test_two_objects():
"""Test creation of two tertiary objects"""
primary_labels = numpy.zeros((10, 20), int)
secondary_labels = numpy.zeros((10, 20), int)
expected_primary_parents = numpy.zeros((10, 20), int)
expected_secondary_parents = numpy.zeros((10, 20), int)
centers = ((4, 5, 1, 2), (4, 15, 2, 1))
for x, y, primary_label, secondary_label in centers:
primary_labels[x - 1 : x + 2, y - 1 : y + 2] = primary_label
secondary_labels[x - 2 : x + 3, y - 2 : y + 3] = secondary_label
expected_primary_parents[x - 2 : x + 3, y - 2 : y + 3] = primary_label
expected_primary_parents[x, y] = 0
expected_secondary_parents[x - 2 : x + 3, y - 2 : y + 3] = secondary_label
expected_secondary_parents[x, y] = 0
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
module.run(workspace)
measurements = workspace.measurements
count_feature = "Count_%s" % TERTIARY
value = measurements.get_current_measurement("Image", count_feature)
assert value == 2
child_count_feature = "Children_%s_Count" % TERTIARY
output_labels = workspace.object_set.get_objects(TERTIARY).segmented
for parent_name, idx, parent_labels in (
(PRIMARY, 2, expected_primary_parents),
(SECONDARY, 3, expected_secondary_parents),
):
parents_of_feature = "Parent_%s" % parent_name
cvalue = measurements.get_current_measurement(parent_name, child_count_feature)
assert numpy.all(cvalue == 1)
pvalue = measurements.get_current_measurement(TERTIARY, parents_of_feature)
for value in (pvalue, cvalue):
            assert numpy.product(value.shape) == 2
#
# Make an array that maps the parent label index to the
# corresponding child label index
#
label_map = numpy.zeros((len(centers) + 1,), int)
for center in centers:
label = center[idx]
label_map[label] = pvalue[center[idx] - 1]
expected_labels = label_map[parent_labels]
assert numpy.all(expected_labels == output_labels)
def test_overlapping_secondary():
"""Make sure that an overlapping tertiary is assigned to the larger parent"""
expected_primary_parents = numpy.zeros((10, 20), int)
expected_secondary_parents = numpy.zeros((10, 20), int)
primary_labels = numpy.zeros((10, 20), int)
secondary_labels = numpy.zeros((10, 20), int)
primary_labels[3:6, 3:10] = 2
primary_labels[3:6, 10:17] = 1
secondary_labels[2:7, 2:12] = 1
expected_primary_parents[2:7, 2:12] = 2
expected_primary_parents[4, 4:12] = 0 # the middle of the primary
expected_primary_parents[4, 9] = 2 # the outline of primary # 2
expected_primary_parents[4, 10] = 2 # the outline of primary # 1
expected_secondary_parents[expected_primary_parents > 0] = 1
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
assert isinstance(
module, cellprofiler.modules.identifytertiaryobjects.IdentifyTertiarySubregion
)
module.run(workspace)
measurements = workspace.measurements
output_labels = workspace.object_set.get_objects(TERTIARY).segmented
for parent_name, parent_labels in (
(PRIMARY, expected_primary_parents),
(SECONDARY, expected_secondary_parents),
):
parents_of_feature = "Parent_%s" % parent_name
pvalue = measurements.get_current_measurement(TERTIARY, parents_of_feature)
label_map = numpy.zeros((numpy.product(pvalue.shape) + 1,), int)
label_map[1:] = pvalue.flatten()
mapped_labels = label_map[output_labels]
assert numpy.all(parent_labels == mapped_labels)
def test_wrong_size():
"""Regression test of img-961, what if objects have different sizes?
Slightly bizarre use case: maybe if user wants to measure background
outside of cells in a plate of wells???
"""
expected_primary_parents = numpy.zeros((20, 20), int)
expected_secondary_parents = numpy.zeros((20, 20), int)
primary_labels = numpy.zeros((10, 30), int)
secondary_labels = numpy.zeros((20, 20), int)
primary_labels[3:6, 3:10] = 2
primary_labels[3:6, 10:17] = 1
secondary_labels[2:7, 2:12] = 1
expected_primary_parents[2:7, 2:12] = 2
expected_primary_parents[4, 4:12] = 0 # the middle of the primary
expected_primary_parents[4, 9] = 2 # the outline of primary # 2
expected_primary_parents[4, 10] = 2 # the outline of primary # 1
expected_secondary_parents[expected_primary_parents > 0] = 1
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
assert isinstance(
module, cellprofiler.modules.identifytertiaryobjects.IdentifyTertiarySubregion
)
module.run(workspace)
def test_get_measurement_columns():
"""Test the get_measurement_columns method"""
module = cellprofiler.modules.identifytertiaryobjects.IdentifyTertiarySubregion()
module.primary_objects_name.value = PRIMARY
module.secondary_objects_name.value = SECONDARY
module.subregion_objects_name.value = TERTIARY
columns = module.get_measurement_columns(None)
expected = (
(
"Image",
FF_COUNT % TERTIARY,
COLTYPE_INTEGER,
),
(
TERTIARY,
M_LOCATION_CENTER_X,
COLTYPE_FLOAT,
),
(
TERTIARY,
M_LOCATION_CENTER_Y,
COLTYPE_FLOAT,
),
(
TERTIARY,
M_NUMBER_OBJECT_NUMBER,
COLTYPE_INTEGER,
),
(
PRIMARY,
FF_CHILDREN_COUNT % TERTIARY,
COLTYPE_INTEGER,
),
(
SECONDARY,
FF_CHILDREN_COUNT % TERTIARY,
COLTYPE_INTEGER,
),
(
TERTIARY,
FF_PARENT % PRIMARY,
COLTYPE_INTEGER,
),
(
TERTIARY,
FF_PARENT % SECONDARY,
COLTYPE_INTEGER,
),
)
assert len(columns) == len(expected)
for column in columns:
assert any([all([cv == ev for cv, ev in zip(column, ec)]) for ec in expected])
def test_do_not_shrink():
"""Test the option to not shrink the smaller objects"""
primary_labels = numpy.zeros((10, 10), int)
secondary_labels = numpy.zeros((10, 10), int)
primary_labels[3:6, 4:7] = 1
secondary_labels[2:7, 3:8] = 1
expected_labels = numpy.zeros((10, 10), int)
expected_labels[2:7, 3:8] = 1
expected_labels[3:6, 4:7] = 0
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
module.shrink_primary.value = False
module.run(workspace)
measurements = workspace.measurements
output_objects = workspace.object_set.get_objects(TERTIARY)
assert numpy.all(output_objects.segmented == expected_labels)
def test_do_not_shrink_identical():
"""Test a case where the primary and secondary objects are identical"""
primary_labels = numpy.zeros((20, 20), int)
secondary_labels = numpy.zeros((20, 20), int)
expected_labels = numpy.zeros((20, 20), int)
# first and third objects have different sizes
primary_labels[3:6, 4:7] = 1
secondary_labels[2:7, 3:8] = 1
expected_labels[2:7, 3:8] = 1
expected_labels[3:6, 4:7] = 0
primary_labels[13:16, 4:7] = 3
secondary_labels[12:17, 3:8] = 3
expected_labels[12:17, 3:8] = 3
expected_labels[13:16, 4:7] = 0
# second object and fourth have same size
primary_labels[3:6, 14:17] = 2
secondary_labels[3:6, 14:17] = 2
primary_labels[13:16, 14:17] = 4
secondary_labels[13:16, 14:17] = 4
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
module.shrink_primary.value = False
module.run(workspace)
output_objects = workspace.object_set.get_objects(TERTIARY)
assert numpy.all(output_objects.segmented == expected_labels)
measurements = workspace.measurements
count_feature = "Count_%s" % TERTIARY
value = measurements.get_current_measurement("Image", count_feature)
assert value == 3
child_count_feature = "Children_%s_Count" % TERTIARY
for parent_name in PRIMARY, SECONDARY:
parent_of_feature = "Parent_%s" % parent_name
parent_of = measurements.get_current_measurement(TERTIARY, parent_of_feature)
child_count = measurements.get_current_measurement(
parent_name, child_count_feature
)
for parent, expected_child_count in ((1, 1), (2, 0), (3, 1), (4, 0)):
assert child_count[parent - 1] == expected_child_count
for child in (1, 3):
assert parent_of[child - 1] == child
for location_feature in (
M_LOCATION_CENTER_X,
M_LOCATION_CENTER_Y,
):
values = measurements.get_current_measurement(TERTIARY, location_feature)
assert numpy.all(numpy.isnan(values) == [False, True, False])
def test_do_not_shrink_missing():
# Regression test of 705
for missing in range(1, 3):
for missing_primary in False, True:
primary_labels = numpy.zeros((20, 20), int)
secondary_labels = numpy.zeros((20, 20), int)
expected_labels = numpy.zeros((20, 20), int)
centers = ((5, 5), (15, 5), (5, 15))
pidx = 1
sidx = 1
for idx, (i, j) in enumerate(centers):
if (idx + 1 != missing) or not missing_primary:
primary_labels[(i - 1) : (i + 2), (j - 1) : (j + 2)] = pidx
pidx += 1
if (idx + 1 != missing) or missing_primary:
secondary_labels[(i - 2) : (i + 3), (j - 2) : (j + 3)] = sidx
sidx += 1
expected_labels = secondary_labels * (primary_labels == 0)
workspace = make_workspace(primary_labels, secondary_labels)
module = workspace.module
module.shrink_primary.value = False
module.run(workspace)
output_objects = workspace.object_set.get_objects(TERTIARY)
assert numpy.all(output_objects.segmented == expected_labels)
m = workspace.measurements
child_name = module.subregion_objects_name.value
primary_name = module.primary_objects_name.value
ftr = FF_PARENT % primary_name
pparents = m[child_name, ftr]
assert len(pparents) == (3 if missing_primary else 2)
if missing_primary:
assert pparents[missing - 1] == 0
secondary_name = module.secondary_objects_name.value
ftr = FF_PARENT % secondary_name
pparents = m[child_name, ftr]
assert len(pparents) == (3 if missing_primary else 2)
if not missing_primary:
assert all([x in pparents for x in range(1, 3)])
ftr = FF_CHILDREN_COUNT % child_name
children = m[primary_name, ftr]
assert len(children) == (2 if missing_primary else 3)
if not missing_primary:
assert children[missing - 1] == 0
assert numpy.all(numpy.delete(children, missing - 1) == 1)
else:
assert numpy.all(children == 1)
children = m[secondary_name, ftr]
assert len(children) == (3 if missing_primary else 2)
assert numpy.all(children == 1)
def test_no_relationships():
workspace = make_workspace(numpy.zeros((10, 10), int), numpy.zeros((10, 10), int))
workspace.module.run(workspace)
m = workspace.measurements
for parent, relationship in (
(PRIMARY, cellprofiler.modules.identifytertiaryobjects.R_REMOVED),
(SECONDARY, cellprofiler.modules.identifytertiaryobjects.R_PARENT),
):
result = m.get_relationships(
workspace.module.module_num, relationship, parent, TERTIARY
)
assert len(result) == 0
def test_relationships():
primary = numpy.zeros((10, 30), int)
secondary = numpy.zeros((10, 30), int)
for i in range(3):
center_j = 5 + i * 10
primary[3:6, (center_j - 1) : (center_j + 2)] = i + 1
secondary[2:7, (center_j - 2) : (center_j + 3)] = i + 1
workspace = make_workspace(primary, secondary)
workspace.module.run(workspace)
m = workspace.measurements
for parent, relationship in (
(PRIMARY, cellprofiler.modules.identifytertiaryobjects.R_REMOVED),
(SECONDARY, cellprofiler.modules.identifytertiaryobjects.R_PARENT),
):
result = m.get_relationships(
workspace.module.module_num, relationship, parent, TERTIARY
)
assert len(result) == 3
for i in range(3):
assert result[R_FIRST_IMAGE_NUMBER][i] == 1
assert result[R_SECOND_IMAGE_NUMBER][i] == 1
assert (
result[R_FIRST_OBJECT_NUMBER][i] == i + 1
)
assert (
result[R_SECOND_OBJECT_NUMBER][i] == i + 1
)
def test_load_v3():
file = tests.modules.get_test_resources_directory("identifytertiaryobjects/v3.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.loadtxt(six.moves.StringIO(data))
module = pipeline.modules()[0]
assert module.secondary_objects_name.value == "IdentifySecondaryObjects"
assert module.primary_objects_name.value == "IdentifyPrimaryObjects"
assert module.subregion_objects_name.value == "IdentifyTertiaryObjects"
assert module.shrink_primary.value
|
import cv2
import numpy as np
import pandas as pd
import imutils
'''
#data load
train_img_path = 'bone_data/train/'
train_csv_path = 'bone_data/training_dataset.csv'
# dataset setting
train_data = pd.read_csv(train_csv_path)
train_data.iloc[:, 1:3] = train_data.iloc[:, 1:3].astype(np.float)
def rotation(img, angle):
h, w = img.shape[:2]
M = cv2.getRotationMatrix2D((int(w/2), int(h/2)), angle, 1)
img = cv2.warpAffine(img, M, (w, h))
return img
for idx, data in train_data.iterrows():
imlist = []
angles = [-15, -10, -5, 0, 5, 10, 15]
id = data['id']
img = cv2.imread(train_img_path+str(int(id))+'.png', cv2.IMREAD_GRAYSCALE)
img=cv2.subtract(img, np.average(img.flatten()))
clahe = cv2.createCLAHE(clipLimit=15)
img = clahe.apply(img)
    # add padding so the image has an equal aspect ratio
sub = img.shape[0]-img.shape[1]
if sub < 0:
img = cv2.copyMakeBorder(img, int(-sub/2), int(-sub/2), 0, 0, cv2.BORDER_CONSTANT, value=[0,0,0])
else:
img = cv2.copyMakeBorder(img, 0, 0, int(sub/2), int(sub/2), cv2.BORDER_CONSTANT, value=[0,0,0])
for angle in angles:
aug1 = rotation(img, angle)
aug1 = cv2.resize(aug1, (500,500))
aug2 = imutils.translate(aug1, 50, 0)
aug3 = imutils.translate(aug1, -50, 0)
cv2.imwrite('D:/train/'+str(int(id))+'-{}-rotate{}.png'.format('original', angle), aug1)
cv2.imwrite('D:/train/'+str(int(id))+'-{}-rotate{}.png'.format('right50', angle), aug2)
cv2.imwrite('D:/train/'+str(int(id))+'-{}-rotate{}.png'.format('left50', angle), aug3)
'''
img = cv2.imread('bone_data/test/4461.png', cv2.IMREAD_GRAYSCALE)
img=cv2.subtract(img, np.average(img.flatten()))
clahe = cv2.createCLAHE(clipLimit=15)
img = clahe.apply(img)
# add padding so the image has an equal aspect ratio
sub = img.shape[0]-img.shape[1]
if sub < 0:
img = cv2.copyMakeBorder(img, int(-sub/2), int(-sub/2), 0, 0, cv2.BORDER_CONSTANT, value=[0,0,0])
else:
img = cv2.copyMakeBorder(img, 0, 0, int(sub/2), int(sub/2), cv2.BORDER_CONSTANT, value=[0,0,0])
img = cv2.resize(img, (500,500))
cv2.imshow('img',img)
cv2.waitKey(0)
''''''
'''
~57: 779 images
~114: 3500 images
~171: 7129 images
~228: 1203 images
7 = rotate(-15 -10 -5 0 5 10 15)
5 = rotate(-10 -5 0 5 10)
3 = translate 10% left right 0
779 * 7 * 3
female: 5778 images
male: 6833 images
''' |
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib_venn import venn3, venn3_circles
# 35 trucks went out carrying early peaches;
# 69 carried late peaches;
# 54 carried extra late peaches;
# 26 carried early and late;
# 31 carried late and extra late;
# 7 carried early and extra late;
# 2 carried all three;
# 8 carried only figs (no peaches at all).
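# venn3 expects the size of each *exclusive* region, so below the pairwise overlaps
# are reduced by the triple overlap and each single set by all of its overlaps
# (inclusion-exclusion).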
all = 2
late_early = 26 - all
late_extra = 31 - all
early_extra = 7 - all
early = 35 - ( late_early + early_extra + all )
late = 69 - ( late_extra + late_early + all )
extra = 54 - ( late_extra + early_extra + all )
v = venn3(subsets=( early, late, late_early, extra, early_extra, late_extra, all ), set_labels = ('E', 'L', 'X'))
v.get_label_by_id('A').set_text('Early')
v.get_label_by_id('B').set_text('Late')
v.get_label_by_id('C').set_text('Extra Late')
plt.title( "Peaches" )
plt.show()
print(all + late_early + late_extra + early_extra + early + late + extra + 8)  # total number of trucks |
from rich.table import Table
from rich.console import Console
console = Console()
table = Table(title='Filmes favoritos')
table.add_column("none", justify='left', style='red')
table.add_column('data de lançamento', style='green')
table.add_column('faturamento', style='purple')
table.add_row('Piratas do caribe', '2005', '1599999')
table.add_row('star wars', '2009', '345464')
table.add_row('avatar', '2009', '56555555')
table.add_row('vingadores', '2020', '20000')
console.print(table)
|
import uvicorn
if __name__ == '__main__':
uvicorn.run('app:app', host="127.0.0.1", port=8080, reload=True) |
# -*- test-case-name: foolscap.test -*-
"""foolscap tests"""
|
# -*- coding: utf-8 -*-
from django.db import models
from django.test import TestCase
from cms.api import add_plugin, create_page
from cms.models import CMSPlugin
from cms.models.placeholdermodel import Placeholder
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from ..helpers import get_request
from ..indexers.cms_title import TitleIndexer
from ..models import Index
class ValidSearchFieldsModel(CMSPlugin):
body = models.TextField()
additional_text = models.TextField()
search_fields = ('body', 'additional_text',
'cmsplugin_ptr__placeholder__slot')
class Meta:
app_label = 'tests'
class InvalidSearchFieldsModel(CMSPlugin):
body = models.TextField()
additional_text = models.TextField()
search_fields = ('body', 'additional_text_foo',
'cmsplugin_ptr__placeholder__slot')
class Meta:
app_label = 'tests'
class ValidSearchFieldsPlugin(CMSPluginBase):
model = ValidSearchFieldsModel
plugin_pool.register_plugin(ValidSearchFieldsPlugin)
class InvalidSearchFieldsPlugin(CMSPluginBase):
model = InvalidSearchFieldsModel
plugin_pool.register_plugin(InvalidSearchFieldsPlugin)
class SearchFieldsTestCase(TestCase):
def setUp(self):
self.index = TitleIndexer()
self.request = get_request(language='en')
def test_valid_search_fields_in_search_text_shows_as_expected(self):
page = create_page(title='page', template='test.html', language='en')
placeholder = page.placeholders.get(slot='body')
add_plugin(
placeholder, 'ValidSearchFieldsPlugin', 'en', body='Lorem ipsum',
additional_text='additional text')
page.publish('en')
indexed = Index.objects.all()[0]
self.assertEqual(
'Lorem ipsum additional text body', indexed.search_text)
def test_invalid_search_fields_in_search_text_shows_as_expected(self):
page = create_page(title='page', template='test.html', language='en')
placeholder = page.placeholders.get(slot='body')
add_plugin(
placeholder, 'InvalidSearchFieldsPlugin', 'en', body='Lorem ipsum',
additional_text='additional text')
page.publish('en')
indexed = Index.objects.all()[0]
self.assertEqual(
'Lorem ipsum body', indexed.search_text)
|
import csv
import os
from datetime import datetime
from flask.views import MethodView
from flask import make_response, current_app, abort, jsonify, json
import eligibility_eval
import requests
def error(code, message):
current_app.logger.error("code %i %s" % (code, message), stack_info=True)
return abort(make_response(jsonify(message=message), code))
def split_judgements_string(judgements):
return judgements.split(', ')
# Made this a function for ease of testing, since patching strptime is a huge hassle for some reason
def string_to_date(string, format):
return datetime.strptime(string, format).date()
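# Sketch of the testing rationale above (the module path and date are illustrative
# assumptions, not from the source): a module-level helper can be patched directly,
#   with unittest.mock.patch("demo_search.string_to_date",
#                            return_value=datetime(2001, 1, 1).date()):
#       ...
# which avoids trying to monkeypatch an attribute of the built-in datetime type.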
# This class serves to demonstrate the website without concern about displaying real eviction cases
# From an outside perspective, it functions the same as the search endpoint, but the data is made up
class DemoSearch(MethodView):
def post(self):
search_results = []
path = os.path.abspath('data/demo_search_data.csv')
# print(path)
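        # Column layout of demo_search_data.csv, inferred from the indexing below:
        # 0 case key, 1 style, 2 location, 3 violation type, 4 status,
        # 5 complaint date, 6 closed date (m/d/Y), 7 comma-separated judgements,
        # 8 eligibility flag ("True"/"False"), 9 eligibility detail, 10 case id, 11 balance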
with open(path, newline='') as demoFile:
demoData = csv.reader(demoFile, delimiter=',')
for fakeCase in demoData:
eligibility = eligibility_eval.is_eligible(fakeCase[4],
string_to_date(fakeCase[6], '%m/%d/%Y'),
split_judgements_string(fakeCase[7]))
key = fakeCase[0]
value = {'style': fakeCase[1], 'location': fakeCase[2], 'violation_type': fakeCase[3],
'status': fakeCase[4], 'complaint_date': fakeCase[5], 'closed_date': fakeCase[6], 'judgements': split_judgements_string(fakeCase[7]),
'eligibility': [True if fakeCase[8] == "True" else False, fakeCase[9]], 'case_id' : fakeCase[10], 'balance': fakeCase[11]}
search_results.append({key: value})
# To view all search results:
# for key, value in search_results.items():
# print(key, " : ", value)
return json.dumps(search_results)
def register(app):
app.add_url_rule("/demo", view_func=DemoSearch.as_view("demo"))
|
# Django Core Modules
from django.db import models
from django.conf import settings
# Apps specific
class CoreConfig(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL, default=1, on_delete=models.CASCADE)
hostname = models.CharField(max_length = 128, default = 'hostname', blank=False)
fqdn = models.CharField(max_length = 256, default = 'hostname.company.com', blank = True)
ipv4_address = models.CharField(max_length = 128, default = '1.2.3.4', blank=False)
#isssys_agent = models.ForeignKey(IssSys, default=None, blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return(self.fqdn)
|
import subprocess,os,glob
import numpy as np
import netCDF4
from bunch import Bunch
import gc,sys
g=9.8
atmvarlist=["T","Q","U","V","Z3"]
icar_atm_var=["t","qv","u","v","z"]
# from mygis, modified to work with netCDF4
def read_nc(filename,var="data",proj=None,returnNCvar=False):
'''read a netCDF file and return the specified variable
output is a structure :
data:raw data as an array
proj:string representation of the projection information
atts:data attribute dictionary (if any)
    if (returnNCvar==True) then the netCDF4 file is not closed and the netCDF4
representation of the variable is returned instead of being read into
memory immediately.
'''
d=netCDF4.Dataset(filename, mode='r',format="nc")
outputdata=None
    if var is not None:
data=d.variables[var]
attributes=d.variables[var].__dict__
if returnNCvar:
outputdata=data
else:
outputdata=data[:]
# ntimes=365*4
# if len(data.shape)>2:
# outputdata=data[:ntimes,...]
# else:
# outputdata=data[:]
outputproj=None
    if proj is not None:
projection=d.variables[proj]
outputproj=str(projection)
if returnNCvar:
return Bunch(data=outputdata,proj=outputproj,ncfile=d,atts=attributes)
d.close()
return Bunch(data=outputdata,proj=outputproj,atts=attributes)
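# Minimal usage sketch for read_nc (the file and variable names are only
# illustrative assumptions); wrapped in a function so nothing runs at import time.
def _read_nc_example(filename="sample.nc", var="T"):
    """Show the eager and lazy read modes of read_nc; not called anywhere."""
    # eager: the data is read into memory and the file is closed before returning
    eager = read_nc(filename, var)
    print(eager.data.shape, eager.atts)
    # lazy: data stays a netCDF4 variable; the caller must close the file handle
    lazy = read_nc(filename, var, returnNCvar=True)
    subset = lazy.data[:10, ...]
    lazy.ncfile.close()
    return eager.data, subset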
def find_atm_file(time,varname,info):
file_base= info.atmdir+info.atmfile
file_base= file_base.replace("_VAR_",varname)
file_base= file_base.replace("_Y_",str(info.start_year))
file_base= file_base.replace("_EXP_",info.experiment)
atm_file = file_base.replace("_ENS_",info.ensemble)
print(atm_file)
return glob.glob(atm_file)[0]
def load_atm(time,info,starttime,endtime):
"""Load atmospheric variable from a netcdf file"""
outputdata=Bunch()
for s,v in zip(icar_atm_var,atmvarlist):
atmfile=find_atm_file(time,v,info)
print(atmfile)
sys.stdout.flush()
nc_data=read_nc(atmfile,v,returnNCvar=True)
outputdata[s]=nc_data.data[starttime:endtime,:,info.ymin:info.ymax,info.xmin:info.xmax]
nc_data.ncfile.close()
atmfile=find_atm_file(time,"PS",info)
nc_data=read_nc(atmfile,"PS",returnNCvar=True)
outputdata.ps=nc_data.data[starttime:endtime,info.ymin:info.ymax,info.xmin:info.xmax]
nc_data.ncfile.close()
del nc_data
print(gc.collect())
sys.stdout.flush()
a=read_nc(atmfile,"hyam").data
b=read_nc(atmfile,"hybm").data
p0=read_nc(atmfile,"P0").data
#p_(i,j,k)= A_k * P_0 + B_k P_s(i,j) from http://www.cesm.ucar.edu/models/atm-cam/docs/usersguide/node25.html
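    # broadcasting note: a and b have shape (nlev,), ps has shape (ntime, ny, nx),
    # so the resulting pressure field p has shape (ntime, nlev, ny, nx)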
outputdata.p = a[np.newaxis,:,np.newaxis,np.newaxis]*p0+b[np.newaxis,:,np.newaxis,np.newaxis]*outputdata.ps[:,np.newaxis,:,:]
outputdata.ntimes=outputdata.p.shape[0]
return outputdata
def load_sfc(time, info,starttime,endtime):
"""docstring for load_sfc"""
outputdata=Bunch()
basefile="/glade/p/cesmdata/cseg/inputdata/atm/cam/topo/USGS-gtopo30_0.9x1.25_remap_c051027.nc"
lat = read_nc(basefile,"lat").data
lon = read_nc(basefile,"lon").data#-360
xmin=np.where(lon>=info.lon[0])[0][0]
xmax=np.where(lon<=info.lon[1])[0][-1]+1
ymin=np.where(lat>=info.lat[0])[0][0]
ymax=np.where(lat<=info.lat[1])[0][-1]+1
print("xmin,xmax,ymin,ymax")
print(xmin,xmax,ymin,ymax)
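    # PHIS is the surface geopotential (m^2 s^-2); dividing by g gives terrain height in metres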
outputdata.hgt=read_nc(basefile,"PHIS").data[ymin:ymax,xmin:xmax]/g
outputdata.land=np.zeros(outputdata.hgt.shape)
landfrac=read_nc(basefile,"LANDFRAC").data[ymin:ymax,xmin:xmax]
outputdata.land[landfrac>=0.5]=1
tsfile=find_atm_file(time, "TS", info)
outputdata.ts=read_nc(tsfile,"TS").data[starttime:endtime,info.ymin:info.ymax,info.xmin:info.xmax]
swfile=find_atm_file(time, "FSDS", info)
tmp=read_nc(swfile,"FSDS",returnNCvar=True)
# print(tmp.data.shape)
tmp.ncfile.close()
tmp=read_nc(swfile,"FSDS").data
# print(tmp.shape, starttime, endtime)
outputdata.sw=tmp[starttime:endtime,info.ymin:info.ymax,info.xmin:info.xmax]
# print(swfile, starttime, endtime, info.xmin,info.xmax, info.ymin, info.ymax)
# print(outputdata.sw.shape)
# print(outputdata.sw[0].max(),outputdata.sw[0].min())
# print(outputdata.sw.max(),outputdata.sw.min())
lwfile=find_atm_file(time, "FLDS", info)
outputdata.lw=read_nc(lwfile,"FLDS").data[starttime:endtime,info.ymin:info.ymax,info.xmin:info.xmax]
return outputdata
def load_data(time,info,starttime,endtime):
"""docstring for load_data"""
print(time,starttime,endtime)
atm=load_atm(time,info,starttime,endtime)
sfc=load_sfc(time,info,starttime,endtime)
return Bunch(sfc=sfc,atm=atm)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Illustrate how to get summary statistics for x1 and x2 where x1, x2 contains
designed noise, when training data is a pandas DataFrame in memory.
"""
import numpy as np
import pandas as pd
from svinfer.summary_statistics import SummaryStatistics
from svinfer.processor import DataFrameProcessor
def simulate_training_data(x_s2):
    # generate independent variables z1 and z2
    # (random noise is added to them below to form x1 and x2)
n = 10000
np.random.seed(0)
z1 = np.random.poisson(lam=7, size=n)
z2 = np.random.poisson(lam=9, size=n) + 2 * z1
# generate y based on z1, z2
# add noise ~ N(0, 2^2) to independent variable z1
# add noise ~ N(0, 1^2) to independent variable z2
# generate training data
data = pd.DataFrame(
{
"y": 10 + 12 * z1 - 3 * z2 + 2 * np.random.standard_normal(size=n),
"x1": z1 + np.random.standard_normal(size=n) * np.sqrt(x_s2[0]),
"x2": z2 + np.random.standard_normal(size=n) * np.sqrt(x_s2[1]),
}
)
return data
if __name__ == "__main__":
# get training data
# assume the variance of the added noise are 4 and 1 for each predictor
x_s2 = [4, 1]
data = simulate_training_data(x_s2)
# get summary statistics for x1 and x2, where x1 and x2 have added noise
df_data = DataFrameProcessor(data)
result = SummaryStatistics(
["x1", "x2"], # column names for features of interest
x_s2, # variances of the added noises to each feature
).estimate_summary_statistics(df_data)
# check result
print("summary Statistics for x1 and x2 are: \n{}".format(result.summary_statistics))
# expect results to be
# summary Statistics for x1 and x2 are:
# average standard_deviation skewness kurtosis
# x1 7.005687 2.611832 0.481705 3.449543
# x2 23.042510 6.071953 0.303291 3.174952
|
from django.urls import path
from . import views
urlpatterns = [
path("admin/", views.admin, name="admin"),
path("admin/Create_User", views.user, name="admin_create_user"),
path("admin/Create_Course", views.course_add, name="admin_create_course"),
path("admin/Create_Section", views.section_add, name="admin_create_section"),
path("home", views.index, name="index"),
path("login/", views.login_view, name="login"),
path("logout/", views.logout_view, name="logout"),
path("register/", views.register, name="register"),
path("courses/", views.courses, name="courses"),
path("sections/<int:course_id>", views.course, name="sections"),
path("courses/add", views.course_add, name="course_add"),
path("section/<str:section_id>", views.section, name="section"),
path("section/add", views.section_add, name="section_add"),
path("lecture/add", views.lecture_add, name="create_lecture"),
path("attendance/", views.attendance, name="attendance"),
path("lecturenote/add", views.lecturenote_add, name="create_lecturenote"),
path("comment/add", views.comment_add, name="create_comment"),
path("assignment/add", views.assignment_add, name="create_assignment"),
path("assignment/view", views.assignment_view, name="view_assignment"),
path("submission/add", views.submission_add, name="create_submission"),
path("submission/view", views.submission_view, name="view_submission"),
path("mark/add", views.mark_add, name="create_mark"),
path("mark/view", views.mark_view, name="view_mark"),
]
|
#!/usr/bin/python3
import requests
from discord.ext import commands
TOKEN = "Your Discord token here"
OWNER_ID = 0 # Your user ID here
RTT_USERNAME = "Realtime Trains API username"
RTT_PASSWORD = "Realtime Trains API password"
## BOT SETUP
bot = commands.Bot(command_prefix = ">")
# Comment out to respond to messages from anyone
@bot.check
async def isOwner(ctx):
    return ctx.author.id == OWNER_ID
## UTILITY FUNCTIONS
def rttDepartures(station):
rttData = requests.get("https://api.rtt.io/api/v1/json/search/"+station,
auth = (RTT_USERNAME, RTT_PASSWORD))
rttJson = rttData.json()
rttColour = 0xFF0000
try:
rttTitle = ("Departures from **%s**, powered by **Realtime Trains**"
% rttJson["location"]["name"])
rttDescription = (" ID | Time | Live | Plat | Destination\n" +
"-" * 41 + "\n")
if rttJson["services"] == None:
rttDescription = "No services at the moment."
else:
for service in rttJson["services"]:
if len(rttDescription)<1800:
try:
trainID = service["runningIdentity"]
except KeyError:
trainID = service["trainIdentity"]
depTime = service["locationDetail"]["gbttBookedDeparture"]
depTimeFormatted = depTime[:2] + ":" + depTime[2:]
try:
liveTime = service["locationDetail"]["realtimeDeparture"]
liveTimeFormatted = liveTime[:2] + ":" + liveTime[2:]
except KeyError:
liveTimeFormatted = " N/A "
try:
# | 10A* |
# | 12B |
# | 13 |
# | 6 |
MAX_PLAT_CHARS = 4
platform = service["locationDetail"]["platform"]
if service["locationDetail"]["platformChanged"]:
platform += "*"
if len(platform) < MAX_PLAT_CHARS:
platform += " " * (MAX_PLAT_CHARS - len(platform))
except KeyError:
platform = "----"
destination = service["locationDetail"]["destination"][0]["description"]
rttDescription += ("%s | %s | %s | %s | %s\n" %
(trainID, depTimeFormatted, liveTimeFormatted, platform, destination))
except KeyError:
rttTitle = "Please give me a valid NRS (3 letters) or TIPLOC (7 characters) code."
rttDescription = ("It appears that you took a wrong route back at " +
"Croxley Green Jn, as the rails to this station don't appear to exist " +
"anymore. Just in case there was meant to be a station here, we've told " +
"the permanent way team and they'll have a look into it.")
rttMessage = rttTitle + "\n```" + rttDescription + "```"
return rttMessage
## COMMANDS
@bot.command()
async def ping(ctx):
await ctx.send("pong")
@bot.command()
async def logout(ctx):
await ctx.bot.logout()
@bot.command()
async def trains(ctx, station):
await ctx.send(rttDepartures(station))
bot.run(TOKEN)
|
import copy
import numpy as np
import pytest
from sklearn.gaussian_process.kernels import WhiteKernel
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from uncoverml.krige import krige_methods, Krige, krig_dict
from uncoverml.optimise.models import kernels
from uncoverml.optimise.models import transformed_modelmaps
from uncoverml.transforms import target as transforms
def _join_dicts(dicts):
if dicts is None:
return
d = {k: v for D in dicts for k, v in D.items()}
return d
modelmaps = copy.deepcopy(_join_dicts([krig_dict, transformed_modelmaps]))
svr = modelmaps.pop('transformedsvr')
krige = modelmaps.pop('krige')
mlkrige = modelmaps.pop('mlkrige')
@pytest.fixture(params=[k for k in modelmaps.keys()])
def get_models(request):
return request.param, modelmaps[request.param]
@pytest.fixture(params=[k for k in transforms.transforms.keys()])
def get_transform(request):
return transforms.transforms[request.param]
@pytest.fixture(params=[k for k in kernels.keys()])
def get_kernel(request):
return kernels[request.param]
@pytest.fixture(params=['linear', 'poly', 'rbf', 'sigmoid'])
def get_svr_kernel(request):
return request.param
def test_pipeline(get_models, get_transform, get_kernel):
alg, model = get_models
trans = get_transform()
kernel = get_kernel() + WhiteKernel()
pipe = Pipeline(steps=[(alg, model())])
param_dict = {}
if hasattr(model(), 'n_estimators'):
param_dict[alg + '__n_estimators'] = [5]
if hasattr(model(), 'kernel'):
param_dict[alg + '__kernel'] = [kernel]
param_dict[alg + '__target_transform'] = [trans]
estimator = GridSearchCV(pipe,
param_dict,
n_jobs=1,
iid=False,
pre_dispatch=2,
verbose=True,
)
np.random.seed(10)
estimator.fit(X=1 + np.random.rand(10, 3), y=1. + np.random.rand(10))
assert estimator.cv_results_['mean_train_score'][0] > -15.0
def test_svr_pipeline(get_transform, get_svr_kernel):
trans = get_transform()
pipe = Pipeline(steps=[('svr', svr())])
param_dict = {'svr__kernel': [get_svr_kernel]}
param_dict['svr__target_transform'] = [trans]
estimator = GridSearchCV(pipe,
param_dict,
n_jobs=1,
iid=False,
pre_dispatch=2,
verbose=True,
)
np.random.seed(1)
estimator.fit(X=1 + np.random.rand(10, 5), y=1. + np.random.rand(10))
assert estimator.cv_results_['mean_train_score'][0] > -10.0
@pytest.fixture(params=list(krige_methods.keys()))
def get_krige_method(request):
return request.param
@pytest.fixture(params=['linear', 'power', 'gaussian', 'spherical',
'exponential'])
def get_variogram_model(request):
return request.param
def test_krige_pipeline(get_krige_method, get_variogram_model):
pipe = Pipeline(steps=[('krige', Krige(method=get_krige_method))])
param_dict = {'krige__variogram_model': [get_variogram_model]}
estimator = GridSearchCV(pipe,
param_dict,
n_jobs=1,
iid=False,
pre_dispatch=2,
verbose=True
)
np.random.seed(1)
X = np.random.randint(0, 400, size=(20, 2)).astype(float)
y = 5*np.random.rand(20)
estimator.fit(X=X, y=y)
assert estimator.cv_results_['mean_train_score'][0] > -1.0
|
#!/usr/bin/python3
# number of output figures = 2
from helper.figure import Figure
import helper.plot
lineNames = ["B-spl. surrogate", "Linear surrogate", "Objective function"]
markerStyles = [".", "^", "v"]
lineStyles = ["-", "--", ":"]
fig = Figure.create(figsize=(5, 2))
ax = fig.gca()
functionNames = ["Bra02", "GoP", "Sch06", "Ack", "Alp02", "Sch22"]
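# \rlap{} typesets each label in a zero-width box, so these long line-style names
# do not widen the legend columns that are laid out for the short function names.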
lines = [{
"label" : r"\rlap{{{}}}".format(lineNames[r]),
"marker" : markerStyles[r],
"ms" : (6 if r == 0 else 3),
"ls" : lineStyles[r],
"color" : "k",
} for r in range(len(lineNames))]
lines = [(lines[r//2] if r % 2 == 0 else None)
for r in range(2 * len(lines))]
helper.plot.addCustomLegend(ax, (
[{
"label" : functionNames[r],
"ls" : "-",
"color" : "C{}".format(r),
} for r in range(len(functionNames))] +
lines
), ncol=6, loc="upper center", outside=True)
ax.set_axis_off()
fig.save()
fig = Figure.create(figsize=(5, 2))
ax = fig.gca()
lines = [{
"label" : lineNames[r],
"marker" : markerStyles[r],
"ms" : (6 if r == 0 else 3),
"ls" : lineStyles[r],
"color" : "k",
} for r in range(len(lineNames))
]
helper.plot.addCustomLegend(ax, lines, ncol=3,
loc="upper center", outside=True)
ax.set_axis_off()
fig.save()
|
import alpaca_trade_api as tradeapi
import api_credentials
from alpaca_trade_api.rest import TimeFrame
import pandas as pd
import time
unique_minutes = []
def alpaca_trader(ticker, polarity):
'''
    Approximates the price of the stock, then calculates a value equivalent to 1% of the current portfolio value divided
by the current price of the stock, which is then multiplied by the polarity score to determine the quantity of
shares to buy. If the buying power is exceeded by this amount, then the quantity is decremented by one share
until the expense is affordable for the account to make the purchase or the quantity of shares to buy is zero.
Before selling shares of a stock, the bot needs to determine if it even owns any of that stock to avoid throwing
an error by trying to sell something it does not own. If it does own that stock, it then decides to sell the
quantity of shares in the same way that it determines the number of shares to buy, using a combination of polarity
score, portfolio value, and current approximate price per share. If that quantity is greater than the number of
shares currently owned, then the bot simply sells all of that stock.
With all this math and fact checking, there is still room for error because the current stock price is always
an approximation since traders are buying and selling stock at various prices within milliseconds of each other.
Thus, the order is placed within a try-except block and marked as a valid trade once complete, inspired by how
mutual exclusion locks work with parallel programming systems. If an error is thrown because the trade expense
is suddenly too expensive within milliseconds, the bot decrements the quantity of shares to buy by one and tries
again. If the quantity decreases to zero before it becomes affordable, then the transaction is marked as “skipped”,
terminates the trading process, and exits the function.
'''
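    # Worked example with illustrative numbers (not from the source): with a
    # portfolio value of $10,000 and an approximate share price of $50, 1% of the
    # portfolio is $100, so shares_per_polarity_point = 100 // 50 = 2 shares.
    # A polarity of +3 then tries to buy 3 * 2 = 6 shares; a polarity of -2 tries
    # to sell 2 * 2 = 4 shares, capped at the number of shares actually held.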
global unique_minutes
ALPACA_ENDPOINT_URL = api_credentials.ALPACA_ENDPOINT_URL
ALPACA_API_KEY = api_credentials.ALPACA_API_KEY
ALPACA_SECRET_KEY = api_credentials.ALPACA_SECRET_KEY
api = tradeapi.REST(ALPACA_API_KEY, ALPACA_SECRET_KEY,
ALPACA_ENDPOINT_URL, api_version='v2')
account = api.get_account()
if account.trading_blocked:
print('Account is currently restricted from trading.')
market_clock = api.get_clock()
minute = int(str(market_clock.timestamp)[14:16])
frequency = 10 # minutes
if minute < 2 and len(unique_minutes) == 6:
unique_minutes = []
with open('portfolio_performance.txt', 'a') as f:
# Write to file every {frequency} minutes
if minute % frequency == 0:
if minute not in unique_minutes:
unique_minutes.append(minute)
f.write(
f"Equity: {account.equity}, Time Stamp: {market_clock.timestamp} \n")
### Past Attempts to get bars working
# now = pd.Timestamp.now(tz='America/New_York')#.floor('1min')
# yesterday = (now - pd.Timedelta(days=2)).strftime('%Y-%m-%d')
# today = (now - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
# thirty_minutes_ago = (now - pd.Timedelta(minutes=30))#.strftime('%Y-%m-%d')
# fifteen_minutes_ago = (now - pd.Timedelta(minutes=15))#.strftime('%Y-%m-%d')
# print(ticker, yesterday, today, thirty_minutes_ago, fifteen_minutes_ago, now)
# barset = api.get_bars(ticker[1:], TimeFrame.Day, yesterday, today, limit=1).df
# open_price = float(str(barset.open.iloc[0]).split()[0])
# close_price = float(str(barset.close.iloc[0]).split()[0])
# barset = api.get_barset(ticker[1:], 'day', limit=1)
# open_price = barset[ticker[1:]][0].o
# close_price = barset[ticker[1:]][0].c
now = pd.Timestamp.now(tz='America/New_York')
yesterday = (now - pd.Timedelta(days=1))
bars = api.get_bars(ticker[1:], TimeFrame.Day,
start=yesterday.isoformat(),
end=None,
limit=2
).df
open_price = float(str(bars.open.iloc[0]).split()[0])
close_price = float(str(bars.close.iloc[0]).split()[0])
approximate_price_per_share = (open_price + close_price)/2
# Determine how many shares to buy based on the price of the stock.
# Currently allowing for 1% of portfolio per trade.
shares_per_polarity_point = (
float(account.portfolio_value) * 0.01) // approximate_price_per_share
with open('stock_trading_decisions.txt', 'a') as f:
msg = f"Time Stamp: {market_clock.timestamp} \n"
print(msg)
f.write(msg)
if market_clock.is_open:
if polarity > 0:
side = "buy"
qty = polarity*shares_per_polarity_point
expense = approximate_price_per_share * qty
# If buying power is limited, then decrease quantity of shares until transaction amount is lower than buying power
while expense > float(account.buying_power):
qty -= 1
expense = approximate_price_per_share * qty
else:
side = "sell"
polarity *= -1
qty = polarity*shares_per_polarity_point
# Check how many shares I currently own, if any
# try except because an error is thrown if zero shares are owned.
try:
pos_qty = float(api.get_position(ticker[1:]).qty)
except Exception as exception:
                    if exception.__str__() == 'position does not exist':
                        pos_qty = 0
                    else:
                        raise
if qty > pos_qty:
qty = pos_qty
# only perform a trade if trading more than 0 shares
if qty > 0:
# Sometimes the prices change and throw a buying power error. Decrease qty until satisfied.
invalid = True
skipped = False
while invalid:
try:
if qty == 0:
skipped = True
break
# market: buy or sell at market price, opposed to a limit order.
# time_in_force: only keep order open until end of the day
order = api.submit_order(
symbol=ticker[1:], qty=qty, side=side, type="market", time_in_force="day")
invalid = False
except Exception as exception:
                        if exception.__str__() == 'insufficient buying power':
                            qty -= 1
                        else:
                            raise
if not skipped:
if order.status == 'accepted':
msg = f"Success! Order placed to {order.side} {order.qty} shares of {ticker}. \n"
print(msg)
f.write(msg)
else:
msg = f"Trade failed. Alpaca account status: {account.status}. \n"
print(msg)
f.write(msg)
else:
msg = f"Transaction prices changed during processing. Either not enough buying power or insufficient shares to sell. Skipping. \n"
print(msg)
f.write(msg)
time.sleep(3)
else:
if side == "buy":
msg = f"You don't have enough buying power to buy {ticker}. Skipping. \n"
print(msg)
else:
msg = f"You do not own any shares of {ticker} to sell. Skipping. \n"
print(msg)
time.sleep(3)
else:
msg = f"No orders were made because the stock market is currently closed for trading. \n"
print(msg)
time.sleep(3)
return account.equity, market_clock.timestamp, msg |
from operator import add
import unittest
from jsonbender import Context, K, S, bend
from jsonbender.control_flow import If, Alternation, Switch
from jsonbender.test import BenderTestMixin
class TestIf(BenderTestMixin, unittest.TestCase):
def setUp(self):
self.na_li = {'country': 'China',
'first_name': 'Li',
'last_name': 'Na'}
self.guga = {'country': 'Brazil',
'first_name': 'Gustavo',
'last_name': 'Kuerten'}
def test_if_true(self):
if_ = If(S('country') == K('China'), S('first_name'), S('last_name'))
self.assert_bender(if_, self.na_li, 'Li')
def test_if_false(self):
if_ = If(S('country') == K('China'), S('first_name'), S('last_name'))
self.assert_bender(if_, self.guga, 'Kuerten')
def test_if_true_default(self):
if_ = If(S('country') == K('China'), when_false=S('last_name'))
self.assert_bender(if_, self.na_li, None)
def test_if_false_default(self):
if_ = If(S('country') == K('China'), S('first_name'))
self.assert_bender(if_, self.guga, None)
class TestAlternation(BenderTestMixin, unittest.TestCase):
def test_empty_benders(self):
self.assertRaises(ValueError, Alternation(), {})
def test_matches(self):
bender = Alternation(S(1), S(0), S('key1'))
self.assert_bender(bender, ['a', 'b'], 'b')
self.assert_bender(bender, ['a'], 'a')
self.assert_bender(bender, {'key1': 23}, 23)
def test_no_match(self):
self.assertRaises(IndexError, Alternation(S(1)), [])
self.assertRaises(KeyError, Alternation(S(1)), {})
class TestSwitch(BenderTestMixin, unittest.TestCase):
def test_match(self):
bender = Switch(S('service'),
{'twitter': S('handle'),
'mastodon': S('handle') + K('@') + S('server')},
default=S('email'))
self.assert_bender(bender,
{'service': 'twitter', 'handle': 'etandel'},
'etandel')
self.assert_bender(bender,
{'service': 'mastodon',
'handle': 'etandel',
'server': 'mastodon.social'},
'[email protected]')
def test__no_match_with_default(self):
bender = Switch(S('service'),
{'twitter': S('handle'),
'mastodon': S('handle') + K('@') + S('server')},
default=S('email'))
self.assert_bender(bender,
{'service': 'facebook',
'email': '[email protected]'},
'[email protected]')
def test__no_match_without_default(self):
self.assertRaises(KeyError, Switch(S('key'), {}), {'key': None})
if __name__ == '__main__':
unittest.main()
|
import six
from maya.api import OpenMaya
from mango.fields import base
__all__ = [
"IntegerField",
"EnumField",
"FloatField",
"DegreeField",
"BooleanField",
"StringField",
"MatrixField",
]
class IntegerField(base.Field):
"""
The IntegerField can be used to set and retrieve integer values. If the
    provided value is not an integer, a TypeError will be raised.
"""
mfn = OpenMaya.MFnNumericAttribute()
mfn_type = OpenMaya.MFnNumericData.kLong
default_value = 0
def __init__(self, *args, **kwargs):
super(IntegerField, self).__init__(*args, **kwargs)
self._validators.append(self.validate_integer)
# ------------------------------------------------------------------------
def get_plug_value(self, plug):
"""
:param OpenMaya.MPlug plug:
:return: int
"""
return plug.asInt()
def set_plug_value(self, modifier, plug, value):
"""
:param OpenMaya.MDGModifier modifier:
:param OpenMaya.MPlug plug:
:param int value:
"""
modifier.newPlugValueInt(plug, value)
# ------------------------------------------------------------------------
def validate_integer(self, value):
"""
        :raise TypeError: When the value is not an integer.
"""
if not isinstance(value, int):
raise TypeError(
"{} requires a 'int' value, '{}' provided.".format(
self.__class__.__name__,
type(value).__name__
)
)
class FloatField(base.Field):
"""
The FloatField can be used to set and retrieve double values. If the
provided value is not a float or integer a TypeError will be raised.
"""
mfn = OpenMaya.MFnNumericAttribute()
mfn_type = OpenMaya.MFnNumericData.kDouble
default_value = 0.0
def __init__(self, *args, **kwargs):
super(FloatField, self).__init__(*args, **kwargs)
self._validators.append(self.validate_int_or_float)
# ------------------------------------------------------------------------
def get_plug_value(self, plug):
"""
:param OpenMaya.MPlug plug:
:return: float
"""
return plug.asDouble()
def set_plug_value(self, modifier, plug, value):
"""
:param OpenMaya.MDGModifier modifier:
:param OpenMaya.MPlug plug:
:param float value:
"""
modifier.newPlugValueDouble(plug, value)
# ------------------------------------------------------------------------
def validate_int_or_float(self, value):
"""
:raise TypeError: When the value is not a float.
"""
if not isinstance(value, (float, int)):
raise TypeError(
"{} requires a 'int/float' value, '{}' provided.".format(
self.__class__.__name__,
type(value).__name__
)
)
class DegreeField(base.Field):
"""
    The DegreeField can be used to set and retrieve degree values. If the
provided value is not a float or integer a TypeError will be raised.
"""
mfn = OpenMaya.MFnUnitAttribute()
mfn_type = OpenMaya.MFnUnitAttribute.kAngle
default_value = 0.0
def __init__(self, *args, **kwargs):
super(DegreeField, self).__init__(*args, **kwargs)
self._validators.append(self.validate_int_or_float)
# ------------------------------------------------------------------------
def get_plug_value(self, plug):
"""
:param OpenMaya.MPlug plug:
:return: float
"""
angle = plug.asMAngle()
return angle.asDegrees()
def set_plug_value(self, modifier, plug, value):
"""
:param OpenMaya.MDGModifier modifier:
:param OpenMaya.MPlug plug:
:param float value:
"""
angle = OpenMaya.MAngle(value, OpenMaya.MAngle.kDegrees)
modifier.newPlugValueMAngle(plug, angle)
# ------------------------------------------------------------------------
def validate_int_or_float(self, value):
"""
:raise TypeError: When the value is not a float.
"""
if not isinstance(value, (float, int)):
raise TypeError(
"{} requires a 'int/float' value, '{}' provided.".format(
self.__class__.__name__,
type(value).__name__
)
)
class BooleanField(base.Field):
"""
The BooleanField can be used to set and retrieve bool values. If the
provided value is not a bool a TypeError will be raised. The boolean field
will never be able to support a null value. A ValueError will be raised
    when it is provided anyway.
"""
mfn = OpenMaya.MFnNumericAttribute()
mfn_type = OpenMaya.MFnNumericData.kBoolean
default_value = True
def __init__(self, *args, **kwargs):
super(BooleanField, self).__init__(*args, **kwargs)
self._validators.append(self.validate_bool)
# ------------------------------------------------------------------------
def get_plug_value(self, plug):
"""
:param OpenMaya.MPlug plug:
:return: bool
"""
return plug.asBool()
def set_plug_value(self, modifier, plug, value):
"""
:param OpenMaya.MDGModifier modifier:
:param OpenMaya.MPlug plug:
:param bool value:
"""
modifier.newPlugValueBool(plug, value)
# ------------------------------------------------------------------------
def validate_bool(self, value):
"""
:raise TypeError: When the value is not a boolean.
"""
if not isinstance(value, bool):
raise TypeError(
"{} requires a 'bool' value, '{}' provided.".format(
self.__class__.__name__,
type(value).__name__
)
)
class StringField(base.Field):
"""
The StringField can be used to set and retrieve string values. If the
provided value is not a basestring a TypeError will be raised.
"""
mfn = OpenMaya.MFnTypedAttribute()
mfn_type = OpenMaya.MFnData.kString
default_value = ""
def __init__(self, *args, **kwargs):
super(StringField, self).__init__(*args, **kwargs)
self._validators.append(self.validate_basestring)
# ------------------------------------------------------------------------
def get_plug_value(self, plug):
"""
:param OpenMaya.MPlug plug:
:return: str
"""
value = plug.asString()
return value
def set_plug_value(self, modifier, plug, value):
"""
:param OpenMaya.MDGModifier modifier:
:param OpenMaya.MPlug plug:
:param str value:
"""
modifier.newPlugValueString(plug, value)
# ------------------------------------------------------------------------
def default(self, index=None):
"""
Default values of string attributes are not stored correctly. The
default value will be ignored and set once the attribute is added to
the instance.
"""
return None
def add_attribute_to_instance(self, instance):
"""
Create the field attribute to the instance. This function will take
the fields settings into account. It is possible to create arrays,
compounds and all sorts of other attribute types. Because default
attributes on string attributes do not stick. We set it after the
attribute is added.
:param models.Model instance:
"""
# validate name
if instance.has_attribute(self.name):
return
# add attribute
attribute = self.create()
instance.add_attribute(attribute)
# set attribute default
plug = instance.get_plug(self.name)
plug.setString(self.default_value)
# ------------------------------------------------------------------------
def validate_basestring(self, value):
"""
:raise TypeError: When the value is not a basestring.
"""
if not isinstance(value, six.string_types):
raise TypeError(
"{} requires a 'basestring' value, '{}' provided.".format(
self.__class__.__name__,
type(value).__name__
)
)
class EnumField(base.Field):
"""
The EnumField can be used to set and retrieve choice values. These values
can be of any type and are provided in the choices when initializing the
field. On the node EnumFields will always be stored as integers. But it is
possible to provide dictionaries or lists too as a choice. If a list is
provided integer values will automatically be assigned to the return
value. If a dictionary is provided the integer value is used as a mapper
between the keys and the values.
"""
mfn = OpenMaya.MFnEnumAttribute()
default_value = None
def __init__(self, *args, **kwargs):
super(EnumField, self).__init__(*args, **kwargs)
self._validators.append(self.validate_choices)
# split keys and indices
        self._keys = list(self.choices.keys()) if isinstance(self.choices, dict) else self.choices[:]
        self._values = list(self.choices.values()) if isinstance(self.choices, dict) else self.choices[:]
        self._indices = self._values \
            if all([isinstance(value, int) for value in self._values]) \
            else list(range(len(self._keys)))
# get enum keys and values
self._enum_keys = {str(key): i for key, i in zip(self._keys + self._values, self._indices * 2)}
self._enum_values = {i: value for i, value in enumerate(self._values)}
self.default_value = self._keys[0] if self.default_value is None else self.default_value
# ------------------------------------------------------------------------
@property
def enum_keys(self):
"""
:return: Enum keys
:rtype: dict
"""
return self._enum_keys
@property
def enum_values(self):
"""
:return: Enum values
:rtype: dict
"""
return self._enum_values
# ------------------------------------------------------------------------
def get_plug_value(self, plug):
"""
:param OpenMaya.MPlug plug:
:return:
"""
return self.enum_values[plug.asInt()]
def set_plug_value(self, modifier, plug, value):
"""
:param OpenMaya.MDGModifier modifier:
:param OpenMaya.MPlug plug:
:param int value:
"""
index = self.enum_keys[str(value)]
modifier.newPlugValueInt(plug, index)
# ------------------------------------------------------------------------
def default(self, index=None):
"""
:param int/None index:
:return: Default value
:rtype: int/None
"""
default_value = super(EnumField, self).default(index)
if default_value is not None:
return self.enum_keys.get(default_value, 0)
def create(self):
"""
:return: Attribute
:rtype: OpenMaya.MObject
"""
attribute = super(EnumField, self).create()
for key, index in self.enum_keys.items():
self.mfn.addField(key, index)
return attribute
# ------------------------------------------------------------------------
def validate_choices(self, value):
"""
:raise RuntimeError: When the value is not part of the provided choices.
"""
try:
self.enum_keys[str(value)]
except KeyError:
raise RuntimeError(
"{} '{}' value '{}' is not a valid choice, options are: {}".format(
self.__class__.__name__,
self.name,
value,
self._keys
)
)
class MatrixField(base.Field):
"""
The Matrix can be used to set and retrieve matrix values. The matrix
can be get/set using the OpenMaya.MMatrix object.
"""
mfn = OpenMaya.MFnMatrixAttribute()
mfn_type = OpenMaya.MFnMatrixAttribute.kDouble
default_value = OpenMaya.MMatrix()
def __init__(self, *args, **kwargs):
super(MatrixField, self).__init__(*args, **kwargs)
self._validators.append(self.validate_matrix)
# ------------------------------------------------------------------------
def get_plug_value(self, plug):
"""
:param OpenMaya.MPlug plug:
:return: Matrix
:rtype: OpenMaya.MMatrix
"""
return OpenMaya.MFnMatrixData(plug.asMObject()).matrix()
def set_plug_value(self, modifier, plug, value):
"""
:param OpenMaya.MDGModifier modifier:
:param OpenMaya.MPlug plug:
:param OpenMaya.MFnMatrixData/OpenMaya.MMatrix value:
"""
if not isinstance(value, OpenMaya.MFnMatrixData):
value = OpenMaya.MFnMatrixData().create(value)
modifier.newPlugValue(plug, value)
# ------------------------------------------------------------------------
def validate_matrix(self, value):
"""
:raise TypeError:
When the value is not either a OpenMaya.MMatrix or
OpenMaya.MFnMatrixData object.
"""
if not isinstance(value, OpenMaya.MMatrix) and not isinstance(value, OpenMaya.MFnMatrixData):
raise TypeError(
"{} requires a 'OpenMaya.MMatrix/OpenMaya.MFnMatrixData' object, '{}' provided.".format(
self.__class__.__name__,
type(value).__name__
)
) |
# Copyright (c) 2020-20201, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
"""Session entity."""
import asyncio
import pickle
import typing as ty
from uuid import UUID, uuid4
from pony.orm import Optional, PrimaryKey, Required, Set
from context.base import CONTEXTS
from data.base import db, PicklableEntity
from data.decorators import lazy_property
from data.handlers import OptionHandler
import settings
# Asynchronous queue of all session output messages
OUTPUT = {
"unknown": asyncio.Queue(),
}
CMDS_TO_PORTAL = asyncio.Queue()
class Session(PicklableEntity, db.Entity):
"""
Session entity.
A session is an object identifying a live connection. Each time
a user connects, a session is created with a different identifier
(UUID). Each time this connection is broken, the session is destroyed.
Connections can store data (through the option handler and the
attributes handler).
Note: if a user connects to the portal, a session is created in the
game database. Should the connection remain live but the game be
restarted, the connection is maintained and the session information
is retrieved from the database.
Web sessions, created by the webserver to keep persistent data,
are stored in the WebSession entity (see web.session.WebSession).
"""
uuid = PrimaryKey(UUID, default=uuid4)
context_path = Required(str)
account = Optional("Account")
character = Optional("Character")
binary_options = Required(bytes, default=pickle.dumps({}))
@lazy_property
def context(self):
"""Find the context."""
Context = CONTEXTS[self.context_path]
return Context(self)
@context.setter
def context(self, context):
"""Change the session's context."""
self.context_path = context.pyname
@property
def focused_context(self):
"""Return the focused context."""
# If there's a character, return the character's active context.
if (character := self.character):
return character.context_stack.active_context
# Otherwise, return the session context
return self.context
@lazy_property
def options(self):
"""Return the session option handler."""
return OptionHandler(self)
async def msg(self, text: ty.Union[str, bytes]):
"""
Send some text to the session.
Args:
text (str or bytes): the text to send, encoded or not.
        Sending bytes bypasses the session encoding, which might
            be handy for encoding tests on the client side, for instance.
Awaiting on this method does not guarantee the message is
sent to the client. The relationship between game session
and portal session is not strongly maintained to avoid
slowing the game down if the portal is busy. Therefore,
if you await on a `session.msg`, be aware that the text
might not be sent to the client when the method returns.
Note about encoding:
If the sent text should be encoded (that is, if its type
is `str`), the session encoding is first selected, if it
exists. The session encoding is stored in the session
options `session.options["encoding"]`. The
`settings.DEFAULT_ENCODING` is used if the session
didn't specify any encoding. By default, errors
during the process are replaced, so accented letters not
supported by the specified encoding will appear as ?.
"""
if isinstance(text, str):
encoding = self.options.get("encoding", settings.DEFAULT_ENCODING)
try:
encoded = text.encode(encoding, errors="replace")
except LookupError:
# Use utf-8 as a default
encoding = "utf-8"
encoded = text.encode(encoding, errors="replace")
else:
encoded = text
try:
queue = OUTPUT[self.uuid]
except KeyError:
queue = OUTPUT["unknown"]
await queue.put(encoded)
async def msg_portal(self, cmd_name: str, args: ty.Optional[dict] = None):
"""
Send a command to the portal.
This highly-specialized method sends a command to the portal
process, through the CRUX server. Using this method should
        be reserved for small, tightly-controlled actions, unless
        you really need the power of some of these commands,
like "restart_game". Be aware that sending a command to the
portal can do a lot of things, including damage.
Args:
cmd_name (str): the command name to send.
args (dict, optional): the arguments of this command.
"""
args = args or {}
await CMDS_TO_PORTAL.put((cmd_name, args))
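# A minimal, self-contained sketch (not part of the entity itself) of the encoding
# fallback described in `Session.msg`: the session's configured encoding is tried first,
# utf-8 is used when that encoding is unknown, and unsupported characters are replaced.
def _encode_for_client(text: str, encoding: str = settings.DEFAULT_ENCODING) -> bytes:
    """Hypothetical helper mirroring the msg() encoding logic above."""
    try:
        return text.encode(encoding, errors="replace")
    except LookupError:
        # Unknown encoding name: fall back to utf-8, still replacing errors.
        return text.encode("utf-8", errors="replace")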
|
# A program that reads the weight of 5 people and reports the highest and lowest weight
for pessoa in range(1, 6):
    peso = float(input(f'What is the weight of person #{pessoa}? '))
    if pessoa == 1:
        maior = peso
        menor = peso
    else:
        if peso > maior:
            maior = peso
        if peso < menor:
            menor = peso
print(f'The lowest weight is: {menor}Kg\nThe highest weight is: {maior}Kg')
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['TeamArgs', 'Team']
@pulumi.input_type
class TeamArgs:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
notifications_criticals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_defaults: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_infos: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_majors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_minors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Team resource.
:param pulumi.Input[str] description: Description of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: List of user IDs to include in the team.
:param pulumi.Input[str] name: Name of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_criticals: Where to send notifications for critical alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_defaults: Where to send notifications for default alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_infos: Where to send notifications for info alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_majors: Where to send notifications for major alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_minors: Where to send notifications for minor alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_warnings: Where to send notifications for warning alerts
"""
if description is not None:
pulumi.set(__self__, "description", description)
if members is not None:
pulumi.set(__self__, "members", members)
if name is not None:
pulumi.set(__self__, "name", name)
if notifications_criticals is not None:
pulumi.set(__self__, "notifications_criticals", notifications_criticals)
if notifications_defaults is not None:
pulumi.set(__self__, "notifications_defaults", notifications_defaults)
if notifications_infos is not None:
pulumi.set(__self__, "notifications_infos", notifications_infos)
if notifications_majors is not None:
pulumi.set(__self__, "notifications_majors", notifications_majors)
if notifications_minors is not None:
pulumi.set(__self__, "notifications_minors", notifications_minors)
if notifications_warnings is not None:
pulumi.set(__self__, "notifications_warnings", notifications_warnings)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the team.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of user IDs to include in the team.
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the team.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notificationsCriticals")
def notifications_criticals(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for critical alerts
"""
return pulumi.get(self, "notifications_criticals")
@notifications_criticals.setter
def notifications_criticals(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_criticals", value)
@property
@pulumi.getter(name="notificationsDefaults")
def notifications_defaults(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for default alerts
"""
return pulumi.get(self, "notifications_defaults")
@notifications_defaults.setter
def notifications_defaults(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_defaults", value)
@property
@pulumi.getter(name="notificationsInfos")
def notifications_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for info alerts
"""
return pulumi.get(self, "notifications_infos")
@notifications_infos.setter
def notifications_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_infos", value)
@property
@pulumi.getter(name="notificationsMajors")
def notifications_majors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for major alerts
"""
return pulumi.get(self, "notifications_majors")
@notifications_majors.setter
def notifications_majors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_majors", value)
@property
@pulumi.getter(name="notificationsMinors")
def notifications_minors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for minor alerts
"""
return pulumi.get(self, "notifications_minors")
@notifications_minors.setter
def notifications_minors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_minors", value)
@property
@pulumi.getter(name="notificationsWarnings")
def notifications_warnings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for warning alerts
"""
return pulumi.get(self, "notifications_warnings")
@notifications_warnings.setter
def notifications_warnings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_warnings", value)
@pulumi.input_type
class _TeamState:
def __init__(__self__, *,
description: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
notifications_criticals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_defaults: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_infos: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_majors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_minors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
url: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering Team resources.
:param pulumi.Input[str] description: Description of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: List of user IDs to include in the team.
:param pulumi.Input[str] name: Name of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_criticals: Where to send notifications for critical alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_defaults: Where to send notifications for default alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_infos: Where to send notifications for info alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_majors: Where to send notifications for major alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_minors: Where to send notifications for minor alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_warnings: Where to send notifications for warning alerts
:param pulumi.Input[str] url: The URL of the team.
"""
if description is not None:
pulumi.set(__self__, "description", description)
if members is not None:
pulumi.set(__self__, "members", members)
if name is not None:
pulumi.set(__self__, "name", name)
if notifications_criticals is not None:
pulumi.set(__self__, "notifications_criticals", notifications_criticals)
if notifications_defaults is not None:
pulumi.set(__self__, "notifications_defaults", notifications_defaults)
if notifications_infos is not None:
pulumi.set(__self__, "notifications_infos", notifications_infos)
if notifications_majors is not None:
pulumi.set(__self__, "notifications_majors", notifications_majors)
if notifications_minors is not None:
pulumi.set(__self__, "notifications_minors", notifications_minors)
if notifications_warnings is not None:
pulumi.set(__self__, "notifications_warnings", notifications_warnings)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the team.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of user IDs to include in the team.
"""
return pulumi.get(self, "members")
@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "members", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the team.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="notificationsCriticals")
def notifications_criticals(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for critical alerts
"""
return pulumi.get(self, "notifications_criticals")
@notifications_criticals.setter
def notifications_criticals(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_criticals", value)
@property
@pulumi.getter(name="notificationsDefaults")
def notifications_defaults(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for default alerts
"""
return pulumi.get(self, "notifications_defaults")
@notifications_defaults.setter
def notifications_defaults(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_defaults", value)
@property
@pulumi.getter(name="notificationsInfos")
def notifications_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for info alerts
"""
return pulumi.get(self, "notifications_infos")
@notifications_infos.setter
def notifications_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_infos", value)
@property
@pulumi.getter(name="notificationsMajors")
def notifications_majors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for major alerts
"""
return pulumi.get(self, "notifications_majors")
@notifications_majors.setter
def notifications_majors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_majors", value)
@property
@pulumi.getter(name="notificationsMinors")
def notifications_minors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for minor alerts
"""
return pulumi.get(self, "notifications_minors")
@notifications_minors.setter
def notifications_minors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_minors", value)
@property
@pulumi.getter(name="notificationsWarnings")
def notifications_warnings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Where to send notifications for warning alerts
"""
return pulumi.get(self, "notifications_warnings")
@notifications_warnings.setter
def notifications_warnings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "notifications_warnings", value)
@property
@pulumi.getter
def url(self) -> Optional[pulumi.Input[str]]:
"""
The URL of the team.
"""
return pulumi.get(self, "url")
@url.setter
def url(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "url", value)
class Team(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
notifications_criticals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_defaults: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_infos: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_majors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_minors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
"""
Handles management of SignalFx teams.
You can configure [team notification policies](https://docs.signalfx.com/en/latest/managing/teams/team-notifications.html) using this resource and the various `notifications_*` properties.
## Example Usage
```python
import pulumi
import pulumi_signalfx as signalfx
myteam0 = signalfx.Team("myteam0",
description="Super great team no jerks definitely",
members=[
"userid1",
"userid2",
],
notifications_criticals=["PagerDuty,credentialId"],
notifications_infos=["Email,[email protected]"])
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: List of user IDs to include in the team.
:param pulumi.Input[str] name: Name of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_criticals: Where to send notifications for critical alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_defaults: Where to send notifications for default alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_infos: Where to send notifications for info alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_majors: Where to send notifications for major alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_minors: Where to send notifications for minor alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_warnings: Where to send notifications for warning alerts
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[TeamArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Handles management of SignalFx teams.
You can configure [team notification policies](https://docs.signalfx.com/en/latest/managing/teams/team-notifications.html) using this resource and the various `notifications_*` properties.
## Example Usage
```python
import pulumi
import pulumi_signalfx as signalfx
myteam0 = signalfx.Team("myteam0",
description="Super great team no jerks definitely",
members=[
"userid1",
"userid2",
],
notifications_criticals=["PagerDuty,credentialId"],
notifications_infos=["Email,[email protected]"])
```
:param str resource_name: The name of the resource.
:param TeamArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(TeamArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
notifications_criticals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_defaults: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_infos: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_majors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_minors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = TeamArgs.__new__(TeamArgs)
__props__.__dict__["description"] = description
__props__.__dict__["members"] = members
__props__.__dict__["name"] = name
__props__.__dict__["notifications_criticals"] = notifications_criticals
__props__.__dict__["notifications_defaults"] = notifications_defaults
__props__.__dict__["notifications_infos"] = notifications_infos
__props__.__dict__["notifications_majors"] = notifications_majors
__props__.__dict__["notifications_minors"] = notifications_minors
__props__.__dict__["notifications_warnings"] = notifications_warnings
__props__.__dict__["url"] = None
super(Team, __self__).__init__(
'signalfx:index/team:Team',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
description: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
notifications_criticals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_defaults: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_infos: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_majors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_minors: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
notifications_warnings: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
url: Optional[pulumi.Input[str]] = None) -> 'Team':
"""
Get an existing Team resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] description: Description of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] members: List of user IDs to include in the team.
:param pulumi.Input[str] name: Name of the team.
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_criticals: Where to send notifications for critical alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_defaults: Where to send notifications for default alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_infos: Where to send notifications for info alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_majors: Where to send notifications for major alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_minors: Where to send notifications for minor alerts
:param pulumi.Input[Sequence[pulumi.Input[str]]] notifications_warnings: Where to send notifications for warning alerts
:param pulumi.Input[str] url: The URL of the team.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _TeamState.__new__(_TeamState)
__props__.__dict__["description"] = description
__props__.__dict__["members"] = members
__props__.__dict__["name"] = name
__props__.__dict__["notifications_criticals"] = notifications_criticals
__props__.__dict__["notifications_defaults"] = notifications_defaults
__props__.__dict__["notifications_infos"] = notifications_infos
__props__.__dict__["notifications_majors"] = notifications_majors
__props__.__dict__["notifications_minors"] = notifications_minors
__props__.__dict__["notifications_warnings"] = notifications_warnings
__props__.__dict__["url"] = url
return Team(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the team.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def members(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of user IDs to include in the team.
"""
return pulumi.get(self, "members")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the team.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationsCriticals")
def notifications_criticals(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Where to send notifications for critical alerts
"""
return pulumi.get(self, "notifications_criticals")
@property
@pulumi.getter(name="notificationsDefaults")
def notifications_defaults(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Where to send notifications for default alerts
"""
return pulumi.get(self, "notifications_defaults")
@property
@pulumi.getter(name="notificationsInfos")
def notifications_infos(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Where to send notifications for info alerts
"""
return pulumi.get(self, "notifications_infos")
@property
@pulumi.getter(name="notificationsMajors")
def notifications_majors(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Where to send notifications for major alerts
"""
return pulumi.get(self, "notifications_majors")
@property
@pulumi.getter(name="notificationsMinors")
def notifications_minors(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Where to send notifications for minor alerts
"""
return pulumi.get(self, "notifications_minors")
@property
@pulumi.getter(name="notificationsWarnings")
def notifications_warnings(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Where to send notifications for warning alerts
"""
return pulumi.get(self, "notifications_warnings")
@property
@pulumi.getter
def url(self) -> pulumi.Output[str]:
"""
The URL of the team.
"""
return pulumi.get(self, "url")
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
from frappe import msgprint
import re
def validate(doc,method):
child_asset = frappe.db.sql("""SELECT ass.name FROM `tabAsset` ass
WHERE ass.docstatus = 1 AND
ass.asset_category = '%s'"""%(doc.name), as_list=1)
if child_asset:
frappe.throw(("Cannot Change this Asset Category as Asset {0} already \
submitted").format(child_asset[0][0]))
if len(doc.accounts) >1:
frappe.throw("Only one account allowed per Asset Category")
if len(doc.asset_short_name) != 3:
frappe.throw("Asset Short name should be EXACTLY THREE Characters long")
if not re.match("^[A-H, J-N, P-Z, 0-9]*$", doc.asset_short_name):
frappe.throw("Only numbers and letters except I and O are allowed in Asset Short Name")
other_short_names = frappe.db.sql("""SELECT name, asset_short_name AS asn FROM `tabAsset Category`
WHERE name <> '%s'"""%doc.name, as_dict = 1)
for i in other_short_names:
if i.asn == doc.asset_short_name:
frappe.throw(("Short name {0} already used in Asset Category \
{1}").format(doc.asset_short_name, i.name))
|
import inspect
import mmcv
# model:
# from .registry import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS, ROI_EXTRACTORS, SHARED_HEADS)
# build_from_cfg(cfg.model, registry=DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
# ...
# dataset:
# from .registry import DATASETS
# dataset = build_from_cfg(cfg.data.train, registry=DATASETS, default_args)
def build_from_cfg(cfg, registry, default_args=None):
"""
【Build a module from config dict.】
Args:
模型配置,模型占位符,训练/测试配置
cfg (dict): Config dict. (It should at least contain the key "type".)
registry (:obj:`Registry`): [The registry to search the type from.]
default_args (dict, optional): Default initialization arguments.
Returns:
obj: The [constructed] object. - 构建完成的包含网络7个大类的模块
注:上述7个大类,即便是DETECTORS,本质都是占位符,在传入cfg真正的参数之前都是不连接的,
顺序是先搭建DETECTORS,然后根据其配置需求依次搭建其下的前几种模块,整个地构成DETECTORS
"""
assert isinstance(cfg, dict) and 'type' in cfg
assert isinstance(default_args, dict) or default_args is None
args = cfg.copy() # cfg.model->args
# print(arg) # config info: RetinaNet->ResNeXt->FPN->FocalLoss->SmoothL1Loss...
obj_type = args.pop('type') # module name:RetinaNet/ResNeXt/FPN/FocalLoss/SmoothL1Loss/...
# print('obj_type: ', obj_type)
if mmcv.is_str(obj_type):
# print(registry)
# Registry(name=detector, items=['CascadeRCNN', 'TwoStageDetector', 'DoubleHeadRCNN', 'FastRCNN', 'FasterRCNN', 'SingleStageDetector', 'FCOS', 'GridRCNN', 'HybridTaskCascade', 'MaskRCNN', 'MaskScoringRCNN', 'RetinaNet', 'RPN'])
# Registry(name=backbone, items=['ResNet', 'HRNet', 'ResNeXt', 'SSDVGG'])
# Registry(name=neck, items=['BFP', 'FPN', 'HRFPN'])
# ...
        # registry.get() looks the type name up in the registry's _module_dict,
        # which maps each registered name (e.g. a detector) to its class.
        '''core'''
obj_type = registry.get(obj_type)
# print(obj_type)
# <class 'mmdet.models.detectors.retinanet.RetinaNet'>
# <class 'mmdet.models.backbones.resnext.ResNeXt'>
# <class 'mmdet.models.necks.fpn.FPN'>
# ...
if obj_type is None:
raise KeyError('{} is not in the {} registry'.format(
obj_type, registry.name))
elif not inspect.isclass(obj_type):
raise TypeError('type must be a str or valid type, but got {}'.format(
type(obj_type)))
if default_args is not None:
for name, value in default_args.items():
            args.setdefault(name, value)  # merge default_args into args, combining model and train/test config
    # Note: DETECTORS gets built for both training and testing.
    # **args unpacks the dict so every item is matched to the constructor's parameters.
    # print(obj_type(**args))
    # model
    '''The class (e.g. RetinaNet) is first looked up in the DETECTORS registry; up to this
    point it is still only a class reference. The components are passed in through **args,
    and only then are the module classes initialized: RetinaNet first, then (via super)
    SingleStageDetector, which in turn builds the backbone, head, etc.'''
return obj_type(**args)
'''
Purpose:
    Register module placeholders.
    Modules are registered before the program runs, so the config file can directly
    fill in the configuration for the corresponding module.
Types:
    Seven registries (datasets are later managed the same way): BACKBONES, NECKS,
    ROI_EXTRACTORS, SHARED_HEADS, HEADS, LOSSES, DETECTORS.
    Each one holds the concrete classes, e.g. BACKBONES contains 'ResNet', 'ResNeXt', 'SSDVGG', ...
Intuition:
    What does a Registry concretely look like?
    Printing the imported DETECTORS gives Registry(name=detector, items=['CascadeRCNN', 'TwoStageDetector', 'DoubleHeadRCNN', 'FastRCNN', 'FasterRCNN', 'SingleStageDetector', 'FCOS', 'GridRCNN', 'HybridTaskCascade', 'MaskRCNN', 'MaskScoringRCNN', 'RetinaNet', 'RPN'])
    and its type is <class 'mmdet.utils.registry.Registry'>.
    Each of the seven Registry instances stores its registered classes in _module_dict,
    which is what lookup and building are based on.
'''
# registry - DETECTORS = Registry('detector')
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
# print(self._module_dict) # {}
# print info
def __repr__(self):
format_str = self.__class__.__name__ + '(name={}, items={})'.format(
self._name, list(self._module_dict.keys()))
return format_str
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
# obj_type = registry.get(obj_type='RetinaNet')
def get(self, key):
        # print(self._module_dict)  # holds the lookup dict of each component type (detector, head, ...)
        # class RetinaNet(SingleStageDetector)
        return self._module_dict.get(key, None)  # e.g. returns class RetinaNet (see detectors/__init__.py)
    # Called through the decorator @DETECTORS.register_module on e.g.
    # class RetinaNet() in mmdet/models/detectors/retinanet.py
    # ->
    '''Importing the classes in __init__.py runs the decorator, which updates _module_dict.'''
def register_module(self, cls):
# print(cls)
self._register_module(cls)
return cls
# self._register_module(cls)
def _register_module(self, module_class):
"""
【Register a module.】
Args:
module (:obj:[`nn.Module`]): Module to be registered.
"""
if not inspect.isclass(module_class):
raise TypeError('module must be a class, but got {}'.format(
type(module_class)))
module_name = module_class.__name__
        # e.g. both RetinaNet and single_stage are registered here (duplicates raise)
if module_name in self._module_dict:
raise KeyError('{} is already registered in {}'.format(
module_name, self.name))
        '''module_class inherits from nn.Module and is a trainable class'''
self._module_dict[module_name] = module_class
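# A minimal usage sketch (hypothetical class name) tying the two pieces above together:
# a class registers itself through the decorator, and build_from_cfg instantiates it from
# a config dict whose 'type' key names the registered class.
if __name__ == '__main__':
    EXAMPLE = Registry('example')

    @EXAMPLE.register_module
    class TinyNet(object):
        def __init__(self, depth=18, train_cfg=None):
            self.depth = depth
            self.train_cfg = train_cfg

    net = build_from_cfg(dict(type='TinyNet', depth=50), EXAMPLE, dict(train_cfg={}))
    assert isinstance(net, TinyNet) and net.depth == 50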
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
LDAP_ENABLED = settings.LDAP_ENABLED
|
from ipaddress import ip_network
from zeph.selectors.epsilon import (
EpsilonDFGSelector,
EpsilonGreedySelector,
EpsilonNaiveSelector,
EpsilonRewardSelector,
)
def test_epsilon_dfg_selector_first_cycle(bgp_prefixes):
selector = EpsilonDFGSelector("clickhouse://localhost:8123", 0.1, bgp_prefixes)
exploitation, total = selector.select("agent_1", budget=1)
assert exploitation == []
assert len(total) == 1
def test_epsilon_reward_selector_with_discoveries(bgp_prefixes, discoveries):
selector = EpsilonRewardSelector("clickhouse://localhost:8123", 0.1, bgp_prefixes)
selector.rank_per_agent = selector.compute_rank(discoveries)
exploitation, total = selector.select("agent_1", budget=1)
assert exploitation == {ip_network("10.0.0.0/24")}
assert len(total) == 1
def test_epsilon_naive_selector_with_discoveries(bgp_prefixes, discoveries):
selector = EpsilonNaiveSelector("clickhouse://localhost:8123", 0.1, bgp_prefixes)
selector.rank_per_agent = selector.compute_rank(discoveries)
exploitation, total = selector.select("agent_1", budget=1)
assert exploitation == {ip_network("10.0.0.0/24")}
assert len(total) == 1
def test_epsilon_greedy_selector_with_discoveries(bgp_prefixes, discoveries):
selector = EpsilonGreedySelector("clickhouse://localhost:8123", 0.1, bgp_prefixes)
selector.rank_per_agent = selector.compute_rank(discoveries)
exploitation, total = selector.select("agent_1", budget=1)
assert exploitation == {ip_network("10.0.0.0/24")}
assert len(total) == 1
def test_epsilon_dfg_selector_with_discoveries(bgp_prefixes, discoveries):
selector = EpsilonDFGSelector("clickhouse://localhost:8123", 0.1, bgp_prefixes)
selector.rank_per_agent = selector.compute_rank(discoveries)
exploitation, total = selector.select("agent_1", budget=1)
assert exploitation == {ip_network("10.0.0.0/24")}
assert len(total) == 1
|
import torch
import torchvision
import numpy as np
def get_loaders(dataset='cifar10', data_path='data', train_batch_size=128, test_batch_size=1, num_workers=4):
if dataset == 'cifar10':
num_classes = 10
mean = (0.4914, 0.4822, 0.4465)
std = (0.2471, 0.2435, 0.2616)
train_transforms = torchvision.transforms.Compose(
[torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)])
test_transforms = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)])
train_dataset = torchvision.datasets.CIFAR10(
data_path, train=True, transform=train_transforms, download=True)
test_dataset = torchvision.datasets.CIFAR10(
data_path, train=False, transform=test_transforms, download=True)
else:
num_classes = 100
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
train_transforms = torchvision.transforms.Compose(
[torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)])
test_transforms = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean, std)])
train_dataset = torchvision.datasets.CIFAR100(
data_path, train=True, transform=train_transforms, download=True)
test_dataset = torchvision.datasets.CIFAR100(
data_path, train=False, transform=test_transforms, download=True)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset,
batch_size=train_batch_size,
num_workers=num_workers,
shuffle=True)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset,
batch_size=test_batch_size,
num_workers=num_workers,
shuffle=False)
return train_loader, test_loader, num_classes
def get_corrupted_loaders(dataset='cifar10', data_path='data', train_batch_size=128, test_batch_size=1, num_workers=4):
corruptions = [
"brightness",
"contrast",
"defocus_blur",
"elastic_transform",
"fog",
"frost",
"gaussian_blur",
"gaussian_noise",
"glass_blur",
"impulse_noise",
"jpeg_compression",
"motion_blur",
"pixelate",
"saturate",
"shot_noise",
"snow",
"spatter",
"speckle_noise",
"zoom_blur"
]
if dataset == "cifar10":
num_classes = 10
labels_path = "data/CIFAR-10-C/labels.npy"
mean = (0.4914, 0.4822, 0.4465)
std = (0.2471, 0.2435, 0.2616)
images = []
for c in corruptions:
data = torch.from_numpy(
np.load(f"data/CIFAR-10-C/{c}.npy")).permute(0, 3, 1, 2) / 255.0
images.append(data)
images = torch.cat(images, dim=0)
for image in images:
image[0] = (image[0] - mean[0]) / std[0]
image[1] = (image[1] - mean[1]) / std[1]
image[2] = (image[2] - mean[2]) / std[2]
else:
num_classes = 100
labels_path = "data/CIFAR-100-C/labels.npy"
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
images = []
for c in corruptions:
data = torch.from_numpy(
np.load(f"data/CIFAR-100-C/{c}.npy")).permute(0, 3, 1, 2) / 255.0
images.append(data)
images = torch.cat(images, dim=0)
for image in images:
image[0] = (image[0] - mean[0]) / std[0]
image[1] = (image[1] - mean[1]) / std[1]
image[2] = (image[2] - mean[2]) / std[2]
labels = np.load(labels_path)
labels = np.repeat([labels], len(corruptions), axis=0).flatten()
dataset = torch.utils.data.TensorDataset(images, torch.from_numpy(labels))
test_loader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=test_batch_size,
num_workers=num_workers,
shuffle=False)
return test_loader, labels, num_classes
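# A minimal usage sketch: build the clean CIFAR-10 loaders defined above and inspect one
# batch (CIFAR-10 is downloaded into `data/` on first run; batch sizes are illustrative).
if __name__ == "__main__":
    train_loader, test_loader, num_classes = get_loaders(
        dataset='cifar10', train_batch_size=8, test_batch_size=8)
    images, labels = next(iter(train_loader))
    # Expected: torch.Size([8, 3, 32, 32]) torch.Size([8]) 10
    print(images.shape, labels.shape, num_classes)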
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LogAnalyticsAssociationParameter(object):
"""
LogAnalyticsAssociationParameter
"""
#: A constant which can be used with the status property of a LogAnalyticsAssociationParameter.
#: This constant has a value of "SUCCEEDED"
STATUS_SUCCEEDED = "SUCCEEDED"
#: A constant which can be used with the status property of a LogAnalyticsAssociationParameter.
#: This constant has a value of "FAILED"
STATUS_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new LogAnalyticsAssociationParameter object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param agent_id:
The value to assign to the agent_id property of this LogAnalyticsAssociationParameter.
:type agent_id: str
:param entity_type:
The value to assign to the entity_type property of this LogAnalyticsAssociationParameter.
:type entity_type: str
:param entity_id:
The value to assign to the entity_id property of this LogAnalyticsAssociationParameter.
:type entity_id: str
:param source_id:
The value to assign to the source_id property of this LogAnalyticsAssociationParameter.
:type source_id: str
:param source_display_name:
The value to assign to the source_display_name property of this LogAnalyticsAssociationParameter.
:type source_display_name: str
:param source_type:
The value to assign to the source_type property of this LogAnalyticsAssociationParameter.
:type source_type: str
:param status:
The value to assign to the status property of this LogAnalyticsAssociationParameter.
Allowed values for this property are: "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type status: str
:param missing_properties:
The value to assign to the missing_properties property of this LogAnalyticsAssociationParameter.
:type missing_properties: list[str]
:param required_properties:
The value to assign to the required_properties property of this LogAnalyticsAssociationParameter.
:type required_properties: list[str]
"""
self.swagger_types = {
'agent_id': 'str',
'entity_type': 'str',
'entity_id': 'str',
'source_id': 'str',
'source_display_name': 'str',
'source_type': 'str',
'status': 'str',
'missing_properties': 'list[str]',
'required_properties': 'list[str]'
}
self.attribute_map = {
'agent_id': 'agentId',
'entity_type': 'entityType',
'entity_id': 'entityId',
'source_id': 'sourceId',
'source_display_name': 'sourceDisplayName',
'source_type': 'sourceType',
'status': 'status',
'missing_properties': 'missingProperties',
'required_properties': 'requiredProperties'
}
self._agent_id = None
self._entity_type = None
self._entity_id = None
self._source_id = None
self._source_display_name = None
self._source_type = None
self._status = None
self._missing_properties = None
self._required_properties = None
@property
def agent_id(self):
"""
Gets the agent_id of this LogAnalyticsAssociationParameter.
The agent unique identifier.
:return: The agent_id of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._agent_id
@agent_id.setter
def agent_id(self, agent_id):
"""
Sets the agent_id of this LogAnalyticsAssociationParameter.
The agent unique identifier.
:param agent_id: The agent_id of this LogAnalyticsAssociationParameter.
:type: str
"""
self._agent_id = agent_id
@property
def entity_type(self):
"""
Gets the entity_type of this LogAnalyticsAssociationParameter.
The entity type.
:return: The entity_type of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._entity_type
@entity_type.setter
def entity_type(self, entity_type):
"""
Sets the entity_type of this LogAnalyticsAssociationParameter.
The entity type.
:param entity_type: The entity_type of this LogAnalyticsAssociationParameter.
:type: str
"""
self._entity_type = entity_type
@property
def entity_id(self):
"""
Gets the entity_id of this LogAnalyticsAssociationParameter.
The entity unique identifier.
:return: The entity_id of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this LogAnalyticsAssociationParameter.
The entity unique identifier.
:param entity_id: The entity_id of this LogAnalyticsAssociationParameter.
:type: str
"""
self._entity_id = entity_id
@property
def source_id(self):
"""
Gets the source_id of this LogAnalyticsAssociationParameter.
The source name.
:return: The source_id of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._source_id
@source_id.setter
def source_id(self, source_id):
"""
Sets the source_id of this LogAnalyticsAssociationParameter.
The source name.
:param source_id: The source_id of this LogAnalyticsAssociationParameter.
:type: str
"""
self._source_id = source_id
@property
def source_display_name(self):
"""
Gets the source_display_name of this LogAnalyticsAssociationParameter.
The source display name.
:return: The source_display_name of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._source_display_name
@source_display_name.setter
def source_display_name(self, source_display_name):
"""
Sets the source_display_name of this LogAnalyticsAssociationParameter.
The source display name.
:param source_display_name: The source_display_name of this LogAnalyticsAssociationParameter.
:type: str
"""
self._source_display_name = source_display_name
@property
def source_type(self):
"""
Gets the source_type of this LogAnalyticsAssociationParameter.
The source type.
:return: The source_type of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._source_type
@source_type.setter
def source_type(self, source_type):
"""
Sets the source_type of this LogAnalyticsAssociationParameter.
The source type.
:param source_type: The source_type of this LogAnalyticsAssociationParameter.
:type: str
"""
self._source_type = source_type
@property
def status(self):
"""
Gets the status of this LogAnalyticsAssociationParameter.
The status. Either FAILED or SUCCEEDED.
Allowed values for this property are: "SUCCEEDED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The status of this LogAnalyticsAssociationParameter.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this LogAnalyticsAssociationParameter.
The status. Either FAILED or SUCCEEDED.
:param status: The status of this LogAnalyticsAssociationParameter.
:type: str
"""
allowed_values = ["SUCCEEDED", "FAILED"]
if not value_allowed_none_or_none_sentinel(status, allowed_values):
status = 'UNKNOWN_ENUM_VALUE'
self._status = status
@property
def missing_properties(self):
"""
Gets the missing_properties of this LogAnalyticsAssociationParameter.
A list of missing properties.
:return: The missing_properties of this LogAnalyticsAssociationParameter.
:rtype: list[str]
"""
return self._missing_properties
@missing_properties.setter
def missing_properties(self, missing_properties):
"""
Sets the missing_properties of this LogAnalyticsAssociationParameter.
A list of missing properties.
:param missing_properties: The missing_properties of this LogAnalyticsAssociationParameter.
:type: list[str]
"""
self._missing_properties = missing_properties
@property
def required_properties(self):
"""
Gets the required_properties of this LogAnalyticsAssociationParameter.
        A list of required properties.
:return: The required_properties of this LogAnalyticsAssociationParameter.
:rtype: list[str]
"""
return self._required_properties
@required_properties.setter
def required_properties(self, required_properties):
"""
Sets the required_properties of this LogAnalyticsAssociationParameter.
        A list of required properties.
:param required_properties: The required_properties of this LogAnalyticsAssociationParameter.
:type: list[str]
"""
self._required_properties = required_properties
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
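# A minimal instantiation sketch for the model above (all values are illustrative):
# keyword arguments are mapped onto the corresponding properties, and unrecognized
# status values collapse to 'UNKNOWN_ENUM_VALUE' through the status setter.
if __name__ == "__main__":
    param = LogAnalyticsAssociationParameter(
        entity_type="Host Group", source_display_name="Syslog Source", status="SUCCEEDED")
    print(param.status, param.entity_type)
    param.status = "RETRYING"   # not in the allowed values
    print(param.status)         # -> UNKNOWN_ENUM_VALUE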
|
# -*- coding: utf-8 -*-
"""
Test helpers to create and manage fixtures for our Superset app
"""
import json
import uuid
import superset
from superset.connectors.sqla.models import SqlaTable
from superset.models.core import Dashboard
from superset.models.slice import Slice
# Inspired by:
# https://github.com/apache/incubator-superset/blob/0.27/tests/import_export_tests.py
# ... but sadly these aren't packaged for our re-use.
# Also they're weird in places.
def create_table(name=None, database_name='main', schema='', tags=None):
"""Create a new test table (by default in the 'main' db)"""
if name is None:
name = "table-%s" % uuid.uuid4()
if tags is None:
tags = ['test']
table = SqlaTable(
table_name=name,
#
schema=schema,
params=json.dumps(dict(
tags=tags,
database_name=database_name,
)),
)
# Return imported obj
return superset.db.session.query(SqlaTable).filter_by(
id=SqlaTable.import_obj(table)).first()
def new_slice(name=None, table=None, tags=None):
"""Create a new test slice (and test table if none specified)"""
if name is None:
name = "slice-%s" % uuid.uuid4()
if table is None:
table = create_table(tags=tags)
if tags is None:
tags = ['test']
slyce = Slice(
slice_name=name,
datasource_type='table',
datasource_name=table.datasource_name,
viz_type='bubble',
params=json.dumps(dict(
tags=tags,
database_name=table.database_name,
datasource_name=table.datasource_name,
schema=table.schema,
metrics=[],
)),
)
# NOTE that we don't actually import the slice here - it needs to
# be attached to a dashboard for that to make sense
return slyce
def create_dashboard(title=None, slices=None, tags=None):
"""Create a new test dashboard (and slice and table if needed)"""
if tags is None:
tags = ['test']
if title is None:
title = "dashboard-%s" % uuid.uuid4()
if slices is None:
slices = [new_slice(tags=tags)]
dashboard = Dashboard(
dashboard_title=title,
slug=title.lower(),
slices=slices,
position_json=json.dumps(dict(size_y=2, size_x=2)),
json_metadata=json.dumps(dict(tags=tags)),
)
# Return imported obj
dashboard = superset.db.session.query(Dashboard).filter_by(
id=Dashboard.import_obj(dashboard)).first()
dashboard.published = True
superset.db.session.merge(dashboard)
superset.db.session.commit()
return dashboard
def cleanup_data(tags=None):
"""Cleanup Dashboard, Slice, and SqlaTable objects."""
cleanup_objs(Dashboard, tags)
cleanup_objs(Slice, tags)
cleanup_objs(SqlaTable, tags)
def cleanup_objs(model, tags=None):
"""
    Delete objects of the given model that are tagged with any of the given tags
TODO: Think about tracking with a fixture obj instead, cleaner?
"""
if tags is None:
tags = ['test']
tags = set(tags)
for obj in superset.db.session.query(model):
obj_tags = getattr(obj, 'params_dict', {}).get('tags', [])
if tags.isdisjoint(obj_tags):
continue
superset.db.session.delete(obj)
superset.db.session.commit()
def grant_db_access_to_role(role, db): # pylint: disable=invalid-name
"""Grant the role 'database_name', returns grant permission."""
return grant_obj_permission_to_role(role, db, 'database_access')
def grant_slice_access_to_role(role, slyce):
"""Grant the role 'datasource_access', returns grant permission."""
return grant_obj_permission_to_role(role, slyce, 'datasource_access')
def grant_obj_permission_to_role(role, obj, permission):
"""
Does the extremely confusing steps required to add permission for an object
to a role.
"""
superset.security_manager.add_permission_view_menu(permission, obj.perm)
superset.security_manager.get_session.commit()
# DRAGONS: This is super-confusing naming, having permission to access
# the "view menu" for an obj name === view access to that obj
grant = superset.security_manager.find_permission_view_menu(
view_menu_name=obj.perm,
permission_name=permission,
)
role.permissions.append(grant)
superset.security_manager.get_session.commit()
return grant
def grant_api_permission_to_role(role, api_clazz, api_method):
"""
Does the extremely confusing steps required to add api view permission to
a role.
"""
grant = superset.security_manager.find_permission_view_menu(
view_menu_name=api_clazz.__name__,
permission_name="can_%s" % api_method,
)
role.permissions.append(grant)
superset.security_manager.get_session.commit()
def add_owner_to_dashboard(dashboard, user):
"""Add's user as an owner to the dashboard object."""
dashboard.owners.append(user)
superset.db.session.merge(dashboard)
superset.db.session.commit()
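# A minimal usage sketch (assumes a configured Superset test app and database session):
# create a dashboard tagged for one test run, then clean up everything with that tag.
if __name__ == "__main__":
    dashboard = create_dashboard(tags=['fixture-demo'])
    print(dashboard.dashboard_title, dashboard.published)
    cleanup_data(tags=['fixture-demo'])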
|
"""
A simple python package for scraping and downloading images from Google
Usage:
$ noicesoup.py [-h] -k KEYWORD [-cd CHROMEDRIVER]
NOTE: Default webdriver is Chrome in relative path "chromedriver"
Images will be saved in "~/Downloads/noicesoup_dl/<keyword>"
This package is currently under development...
"""
import threading
import time
import urllib.request
import os
import argparse
from time import sleep
from selenium import webdriver
from selenium.webdriver.common.by import By
from bs4 import BeautifulSoup
from pathlib import Path
def get_driver():
    # use the path given via -cd/--chromedriver instead of always assuming './chromedriver'
    driver = webdriver.Chrome(executable_path=driver_path)
driver.get(f'https://www.google.com/search?q={keyword}&tbm=isch')
for i in range(0, 7):
driver.execute_script('window.scrollBy(0,document.body.scrollHeight)')
try:
# for clicking show more results button
            driver.find_element(
                By.XPATH, '//*[@id="islmp"]/div/div/div/div/div[2]/div[2]/input').click()
except Exception:
pass
time.sleep(3)
return driver
def download_images(driver):
soup = BeautifulSoup(driver.page_source, 'html.parser')
img_tags = soup.find_all('img', class_='rg_i')
length = len(img_tags)
# get pics and download
for i, v in enumerate(img_tags):
try:
loading_bar(i + 1, length)
urllib.request.urlretrieve(
v['src'], f"{downloads_path}/{keyword}/{str(i + 1)}.jpg")
except Exception:
pass
print()
def loading_bar(n, l):
print("\rDownloading : {} ({:.2f}%)".format(
"█" * round(n / l * 100 / 2), n / l * 100), end="")
def loading_spinner():
msg = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
len_msg = len(msg)
counter = 0
while True:
displayed = ""
displayed += msg[(counter + 1) % len_msg]
print(f"\r{displayed} Loading {keyword=}", end="")
sleep(0.05)
counter = (counter + 1) % len_msg
if stop_thread:
break
def create_dir():
try:
os.makedirs(f'{downloads_path}/{keyword}')
except Exception:
pass
def main():
global keyword
global driver_path
global downloads_path
global stop_thread
downloads_path = os.path.join(
str(Path.home()), 'Downloads', 'noicesoup_dl')
parser = argparse.ArgumentParser(
description='A simple python package for scraping and downloading images from Google')
parser.add_argument('-k', '--keyword',
help='Input search keyword', required=True)
parser.add_argument('-cd', '--chromedriver',
help='Input ChromeDriver path', default="chromedriver")
args = parser.parse_args()
keyword = args.keyword
driver_path = args.chromedriver
stop_thread = False
thr = threading.Thread(target=loading_spinner)
thr.start()
create_dir()
driver = get_driver()
stop_thread = True
print('\r'+'=' * os.get_terminal_size().columns)
download_images(driver)
print('=' * os.get_terminal_size().columns)
print('Done!')
if "__main__" == __name__:
main()
|
import os
from feedhq.settings import parse_email_url, parse_redis_url
from . import TestCase
class SettingsTests(TestCase):
def test_redis_url(self):
os.environ['REDIS_URL'] = 'redis://:password@domain:12/44'
self.assertEqual(parse_redis_url(), ({
'host': 'domain',
'port': 12,
'password': 'password',
'db': 44,
}, False))
os.environ['REDIS_URL'] = 'redis://domain:6379/44?eager=True'
self.assertEqual(parse_redis_url(), ({
'host': 'domain',
'port': 6379,
'password': None,
'db': 44,
}, True))
os.environ['REDIS_URL'] = (
'redis://domain:6379/44?eager=True&foo=bar&port=stuff'
)
self.assertEqual(parse_redis_url(), ({
'host': 'domain',
'port': 6379,
'password': None,
'db': 44,
}, True))
os.environ['REDIS_URL'] = (
'redis://unix/some/path/44?eager=True'
)
self.assertEqual(parse_redis_url(), ({
'unix_socket_path': '/some/path',
'password': None,
'db': 44,
}, True))
os.environ['REDIS_URL'] = (
'redis://unix/some/other/path'
)
self.assertEqual(parse_redis_url(), ({
'unix_socket_path': '/some/other/path',
'password': None,
'db': 0,
}, False))
os.environ['REDIS_URL'] = (
'redis://:123456@unix/some/path/10'
)
self.assertEqual(parse_redis_url(), ({
'unix_socket_path': '/some/path',
'password': '123456',
'db': 10,
}, False))
def test_email_url(self):
os.environ['EMAIL_URL'] = (
'smtp://bruno:[email protected]:587'
'?use_tls=True&backend=custom.backend.EmailBackend'
)
self.assertEqual(parse_email_url(), {
'BACKEND': 'custom.backend.EmailBackend',
'HOST': 'example.com',
'PORT': 587,
'USE_TLS': True,
'USER': 'bruno',
'PASSWORD': 'test1234',
'SUBJECT_PREFIX': '[FeedHQ] ',
})
|
from django_filters.rest_framework import DjangoFilterBackend
from django.shortcuts import get_object_or_404
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import viewsets
from rest_framework.filters import SearchFilter
from rest_framework.decorators import action
from business_register.filters import PepFilterSet
from business_register.models.pep_models import Pep
from business_register.permissions import PepSchemaToken
from business_register.serializers.company_and_pep_serializers import PepListSerializer, PepDetailSerializer
from data_ocean.views import CachedViewMixin
class PepViewSet(CachedViewMixin, viewsets.ReadOnlyModelViewSet):
permission_classes = [IsAuthenticated | PepSchemaToken]
queryset = Pep.objects.all()
serializer_class = PepListSerializer
filter_backends = (DjangoFilterBackend, SearchFilter)
filterset_class = PepFilterSet
search_fields = (
'fullname', 'fullname_transcriptions_eng', 'pep_type',
'last_job_title', 'last_employer',
)
def get_serializer_class(self):
if self.action == 'retrieve':
return PepDetailSerializer
return super().get_serializer_class()
@action(methods=['get'], detail=True, url_path='source-id', serializer_class=PepDetailSerializer)
def retrieve_by_source_id(self, request, pk):
pep = get_object_or_404(self.get_queryset(), source_id=pk)
serializer = self.get_serializer(pep)
return Response(serializer.data)
|
def eval_base( jct ):
jct['base'] = 1
if jct['base'] != 1:
raise Exception(' base is not == 1 :', jct['base'])
jct['base2']['sub'] = 1
if jct['base2']['sub'] != 1:
raise Exception(' base2.sub is not == 1 :', jct['base2']['sub'])
jct.init('base', 5 )
if jct['base'] != 1:
raise Exception(' init not set properly :', jct['base'])
jct.init('base3', 5 )
if jct['base3'] != 5:
raise Exception(' init not set properly :', jct['base3'])
# ---- test iad
# default dict
jct['RELES'] = [
{'id':33,'status':False },
{'id':37,'status':False }
]
jct['RELES'] += [
{ 'id':33,'status': False },
{ 'id':37,'status': False }
]
if len(jct['RELES']) != 4:
raise Exception('add not done correctly :', jct['RELES'])
jct.init('to_follow',[])
now = len(jct['to_follow'])
jct['to_follow'] += ['test']
add = len(jct['to_follow'])
if (add - 1) != now:
raise Exception('add not done correctly :', jct['to_follow'])
# jct = jict({"1":{"name":"bob","age":"20","work":"Assistant"}})
# jct2 = jict({"2":{"name":"James","age":"36","work":"Dev"}})
# jct.replace({"name":"bob","age":"25","work":"Dev"})
# jct2.replace({"name": "James", "age": "40", "work": "Assistant"})
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# File: sum.py
# Author: Chase Ruskin
# Abstract:
# Compute the checksum for a list of files found from glob matching a pattern.
# The output will resemble the following:
# '''
# 8852e7f180e9bc0821b0136a859617a9588bd636fdac9612c066550203f1e8c9 lib.rs
# 67cf113292aedfdb788e63da973c5de0d2ae4dc1c649cb3718dddbb9f6a5dd7f main.rs
# '''
# Usage:
# python sum.py <pattern>
# Args:
# <pattern> a filepath pattern to collect a common set of files
# ------------------------------------------------------------------------------
import hashlib
import unittest
import glob, os, sys
def compute_sha256(data: bytes) -> str:
'''Compute the sha256 using built-in library function.'''
return hashlib.sha256(data).hexdigest()
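# For instance (illustrative only): compute_sha256(b'abc') returns
# 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'.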
def main():
if len(sys.argv) != 2:
exit("error: enter a pattern to compute sha256")
pattern = sys.argv[1]
pkgs = glob.glob(pattern)
if len(pkgs) == 0:
exit("error: found zero matches for "+pattern)
for pkg in pkgs:
with open(pkg, 'rb') as f:
body_bytes = f.read()
sum = compute_sha256(body_bytes)
print(sum, os.path.basename(pkg))
pass
if __name__ == "__main__":
main()
class Test(unittest.TestCase):
def test_sha256(self):
# note: these test cases align with ones found in the rust implementation to ensure compatibility and correctness
self.assertEqual(compute_sha256(b'hello world'), \
'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9')
text = """\
Tyger Tyger, burning bright,
In the forests of the night;
What immortal hand or eye,
Could frame thy fearful symmetry?
In what distant deeps or skies.
Burnt the fire of thine eyes?
On what wings dare he aspire?
What the hand, dare seize the fire?
And what shoulder, & what art,
Could twist the sinews of thy heart?
And when thy heart began to beat,
What dread hand? & what dread feet?
What the hammer? what the chain,
In what furnace was thy brain?
What the anvil? what dread grasp,
Dare its deadly terrors clasp!
When the stars threw down their spears
And water'd heaven with their tears:
Did he smile his work to see?
Did he who made the Lamb make thee?
Tyger Tyger burning bright,
In the forests of the night:
What immortal hand or eye,
Dare frame thy fearful symmetry?"""
self.assertEqual(compute_sha256(bytes(text.encode())), \
'0d732bb7f24e68fb3858646ba33bc9ce3240def191cde285a3f03ad1f763f52d')
pass
    pass
|
from __future__ import unicode_literals
import uuid
from allauth.socialaccount import providers
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from helusers.models import AbstractUser
from oauth2_provider.models import AbstractApplication
from oidc_provider.models import Client
class User(AbstractUser):
primary_sid = models.CharField(max_length=100, unique=True)
def save(self, *args, **kwargs):
if not self.primary_sid:
self.primary_sid = uuid.uuid4()
return super(User, self).save(*args, **kwargs)
def get_login_methods():
yield ('saml', 'SAML')
provider_list = providers.registry.get_list()
for provider in provider_list:
yield (provider.id, provider.name)
@python_2_unicode_compatible
class LoginMethod(models.Model):
provider_id = models.CharField(
max_length=50, unique=True,
choices=sorted(providers.registry.as_choices()))
name = models.CharField(max_length=100)
background_color = models.CharField(max_length=50, null=True, blank=True)
logo_url = models.URLField(null=True, blank=True)
short_description = models.TextField(null=True, blank=True)
order = models.PositiveIntegerField(null=True)
def __str__(self):
return "{} ({})".format(self.name, self.provider_id)
class Meta:
ordering = ('order',)
class OptionsBase(models.Model):
SITE_TYPES = (
('dev', 'Development'),
('test', 'Testing'),
('production', 'Production')
)
site_type = models.CharField(max_length=20, choices=SITE_TYPES, null=True,
verbose_name='Site type')
login_methods = models.ManyToManyField(LoginMethod)
include_ad_groups = models.BooleanField(default=False)
class Meta:
abstract = True
class Application(OptionsBase, AbstractApplication):
class Meta:
ordering = ('site_type', 'name')
class OidcClientOptions(OptionsBase):
oidc_client = models.OneToOneField(Client, related_name='+', on_delete=models.CASCADE,
verbose_name=_("OIDC Client"))
def __str__(self):
return 'Options for OIDC Client "{}"'.format(self.oidc_client.name)
class Meta:
verbose_name = _("OIDC Client Options")
verbose_name_plural = _("OIDC Client Options")
|
# import packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython magic; only valid when this file is run as a notebook cell
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.metrics import confusion_matrix, average_precision_score, precision_score, recall_score, roc_auc_score
import xgboost as xgb
from sklearn.decomposition import PCA
from pre_ml_process import pre_ml_process
from plot_confusion_matrix import plot_confusion_matrix
import pickle
# model location
model_loc = input("Please input the trained model dir:")
# model_loc = "xgb_model.p"
# Import trained model
clf = pickle.load(open(model_loc, "rb"))
# ../dataset.csv
# Import Data
df_raw_loc = input("Please input the testing data dir:")
df_raw = pd.read_csv(df_raw_loc, encoding="cp1252")
# Data cleaning
df = df_raw.dropna()
df = df.loc[df["f7"] != "#"]
df["f7"] = df["f7"].astype(float)
# f9 - remove the unknown record and binary encode the remaining two classes
df = df.loc[df["f9"] != "unknown"]
le_f9 = LabelEncoder()
df["f9"] = le_f9.fit_transform(df["f9"])
# isolate the numerical columns
numerical_cols = df.dtypes[df.dtypes != object].index.tolist()
df_num = df[numerical_cols]
# drop employee id primary key
df_num = df_num.drop("employee_id", axis=1)
# label encode string columns
def fit_label_encoders(df_in):
fitted_label_encoders = {}
for col in df_in.dtypes[df_in.dtypes == object].index.tolist():
fitted_label_encoders[col] = LabelEncoder().fit(df_in[col])
return fitted_label_encoders
fitted_label_encoders = fit_label_encoders(df.drop("employee_id", axis=1))
# concat the label encoded dataframe with the baseline dataframe
def add_label_encoded(df_baseline, df_to_le, cols, fitted_label_encoders):
df_out = df_baseline.copy()
for col in cols:
df_le = fitted_label_encoders[col].transform(df_to_le[col])
df_out[col] = df_le
return df_out
df_num_allLE = add_label_encoded(df_num, df, ["f1", "f2", "f3", "f4", "f10", "f12"], fitted_label_encoders)
# Separate X and y
y_col = "has_left"
y = df_num_allLE[y_col]
X = df_num_allLE.drop(y_col, axis=1)
X = X.astype(float)
# Scale predictors
scaler = pickle.load(open("scaler.p", "rb"))
X_scaled = scaler.transform(X)
# Get predictions
y_hat = clf.predict(X_scaled)
y_proba = clf.predict_proba(X_scaled)[:,1]
# Confusion Matrix
df_cm = confusion_matrix(y, y_hat, labels=[1, 0])
plot_confusion_matrix(df_cm,
target_names=[1, 0],
title="%s Confusion Matrix" % (type(clf).__name__),
normalize=True)
# accuracy metrics
ap = average_precision_score(y, y_proba)
ps = precision_score(y, y_hat)
rs = recall_score(y, y_hat)
roc = roc_auc_score(y, y_hat)
print("average_precision_score = {:.3f}".format(ap))
print("precision_score = {:.3f}".format(ps))
print("recall_score = {:.3f}".format(rs))
print("roc_auc_score = {:.3f}".format(roc))
# Feature Importances
df_feature_importances = pd.DataFrame(clf.feature_importances_, columns=["Importance"])
col_names = df_num_allLE.columns.tolist()
col_names.remove("has_left")
df_feature_importances["Feature"] = col_names
df_feature_importances.sort_values("Importance", ascending=False, inplace=True)
df_feature_importances = df_feature_importances.round(4)
df_feature_importances = df_feature_importances.reset_index(drop=True)
print(df_feature_importances)
# concat test data with predictions; reset the index first so the filtered frame
# (which keeps its original index) aligns with the prediction Series (RangeIndex)
df_in_with_predictions = pd.concat(
    [df_num_allLE.reset_index(drop=True),
     pd.Series(y_hat, name="y_hat"),
     pd.Series(y_proba, name="y_hat_probability")],
    axis=1)
# Export predictions
df_in_with_predictions.to_csv("predictions_export.csv", index=False)
|
from pymongo import MongoClient
import pandas as pd
import csv
import json
MONGO_HOST= 'mongodb://localhost/mhaf_iot'
dbs = MongoClient().list_database_names()  # database_names() is deprecated in newer pymongo
print(dbs)
connection = MongoClient('localhost',27017)
#Generate Devices CSV
collectionname ='devices'
collection=connection['mhaf_iot'][collectionname]
ma3loumet=collection.find()
d=[]
for i in ma3loumet:
d.append([i['_id'],i['name'],i['longitude'],i['latitude'],i['description'],i['templates_id'],i['status'],i['last_time']])
data=pd.DataFrame(data=d,columns=['devices_id','devices_name','devices_longitude','devices_latitude','devices_description','devices_templates_id','devices_status','devices_last_time'])
print(data.tail())
data.to_csv("Devices.csv")
#Generate DataType CSV
collectionname ='datatypes'
collection=connection['mhaf_iot'][collectionname]
ma3loumet=collection.find()
d=[]
for i in ma3loumet:
d.append([i['_id'],i['data_type_name'],i['data_type_unit'],i['data_type_type']])
data=pd.DataFrame(data=d,columns=['datatypes_id','data_type_name','data_type_unit','data_type_type'])
print(data.tail())
data.to_csv("Datatypes.csv")
#Generate Template CSV
collectionname ='templates'
collection=connection['mhaf_iot'][collectionname]
ma3loumet=collection.find()
d=[]
for i in ma3loumet:
    d.append([i['_id'], i['name'], i['location'], i['timezone'], i['description'], i['noOfPackets'], i['timePeriod'], i['dataSource'], i['login'], i['pwd'], i['MQTTTopic'], i['unity'], i['datagroups']])
data=pd.DataFrame(data=d,columns=['templates_id','templates_name','templates_location','templates_timezone','templates_description',
'templates_noOfPackets','templates_timePeriod','templates_dataSource'
,'templates_login','templates_pwd','templates_MQTTTopic','templates_unity','templates_datagroups'])
print(data.tail())
data.to_csv("Templates.csv")
#Generate DataGroups CSV
collectionname ='data_groups'
collection=connection['mhaf_iot'][collectionname]
ma3loumet=collection.find()
d=[]
for i in ma3loumet:
    d.append([i['_id'], i['name'], i['description'], i['accuracy'], i['time_period'], i['time_unit'], i['action'], i['datatypes']])
data=pd.DataFrame(data=d,columns=['DataGroups_id','DataGroups_name','DataGroups_description','DataGroups_accuracy',
'DataGroups_time_period','DataGroups_time_unit','DataGroups_action','DataGroups_datatypes'])
print(data.tail())
data.to_csv("DataGroups.csv")
#All Data
data=pd.read_csv("C:/Users/Mariam/Desktop/4SIM4/2eme Semestre/PIM/PIM/DevicesUpolad.csv",sep=";")
data.head()
MONGO_HOST= 'mongodb://localhost/mhaf_iot'
MONGO_PORT=27017
client = MongoClient(MONGO_HOST)
db = client.mhaf_iot
test = list(json.loads(data.T.to_json()).values())
db.devices.insert_many(test)  # insert_many replaces the deprecated Collection.insert()
#db.insert(data.to_dict('devices'))
|
import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
from sensor_msgs.msg import Imu
from sensor_msgs.msg import JointState
from moveit_msgs.msg import PlanningScene
from openai_ros.openai_ros_common import ROSLauncher
class ShadowTcEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all ShadowTcEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new ShadowTcEnv environment.
        To check any topic we need to have the simulations running, so we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
        The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /imu/data
* /joint_states
Actuators Topic List:
* As actuator we will use a class SmartGrasper to interface.
We use smart_grasping_sandbox smart_grasper.py, to move and get the pose
of the ball and the tool tip.
Args:
"""
rospy.logdebug("Start ShadowTcEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="shadow_gazebo",
launch_file_name="put_shadow_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
        # Doesn't have any accessible controllers
        self.controllers_list = []
        # It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(ShadowTcEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="NO_RESET_SIM")
rospy.logdebug("ShadowTcEnv unpause...")
self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_systems_ready()
rospy.Subscriber("/imu/data", Imu, self._imu_callback)
rospy.Subscriber("/joint_states", JointState, self._joints_state_callback)
#rospy.Subscriber('/planning_scene', PlanningScene, self._planning_scene_callback)
self._setup_smart_grasper()
self.gazebo.pauseSim()
rospy.logdebug("Finished ShadowTcEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("ShadowTcEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END ShadowTcEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
self._check_imu_ready()
self._check_joint_states_ready()
#self._check_planning_scene_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_imu_ready(self):
self.imu = None
rospy.logdebug("Waiting for /imu/data to be READY...")
while self.imu is None and not rospy.is_shutdown():
try:
self.imu = rospy.wait_for_message("/imu/data", Imu, timeout=5.0)
rospy.logdebug("Current/imu/data READY=>")
except:
rospy.logerr("Current /imu/data not ready yet, retrying for getting imu")
return self.imu
def _check_joint_states_ready(self):
self.joint_states = None
rospy.logdebug("Waiting for /joint_states to be READY...")
while self.joint_states is None and not rospy.is_shutdown():
try:
self.joint_states = rospy.wait_for_message("/joint_states", JointState, timeout=1.0)
rospy.logdebug("Current /joint_states READY=>")
except:
rospy.logerr("Current /joint_states not ready yet, retrying for getting joint_states")
return self.joint_states
def _check_planning_scene_ready(self):
self.planning_scene = None
rospy.logdebug("Waiting for /planning_scene to be READY...")
while self.planning_scene is None and not rospy.is_shutdown():
try:
self.planning_scene = rospy.wait_for_message('/planning_scene', PlanningScene, timeout=1.0)
rospy.logdebug("Current /planning_scene READY=>")
except:
rospy.logerr("Current /planning_scene not ready yet, retrying for getting planning_scene")
return self.planning_scene
def _imu_callback(self, data):
self.imu = data
def _joints_state_callback(self, data):
self.joint_states = data
def _planning_scene_callback(self, data):
self.planning_scene = data
def _setup_tf_listener(self):
"""
Set ups the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_smart_grasper(self):
"""
Setup of the movement system.
:return:
"""
rospy.logdebug("START _setup_smart_grasper")
# We need to tell it to not start a node
from smart_grasping_sandbox.smart_grasper import SmartGrasper
self.sgs = SmartGrasper(init_ros_node=False)
rospy.logdebug("END _setup_smart_grasper")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def open_hand(self):
"""
        When called, it opens the robot's hand
"""
self.sgs.open_hand()
def close_hand(self):
"""
        When called, it closes the robot's hand
"""
self.sgs.close_hand()
def get_ball_pose(self):
"""
Get Ball Pose
return: Ball Pose in the World frame
        We unpause and pause the simulation because this is a service call.
        This means that if the simulation is NOT
        running it won't get the Ball's position information.
"""
rospy.logdebug("START get_ball_pose ==>")
self.gazebo.unpauseSim()
ball_pose = self.sgs.get_object_pose()
self.gazebo.pauseSim()
rospy.logdebug("ball_pose ==>"+str(ball_pose))
rospy.logdebug("STOP get_ball_pose ==>")
return ball_pose
def get_tip_pose(self):
"""
Returns the pose of the tip of the TCP
        We unpause and pause the simulation because this is a service call.
        This means that if the simulation is NOT
        running it won't get the TCP's position information.
"""
rospy.logdebug("START get_tip_pose ==>")
self.gazebo.unpauseSim()
tcp_pose = self.sgs.get_tip_pose()
self.gazebo.pauseSim()
rospy.logdebug("END get_tip_pose ==>")
return tcp_pose
def move_tcp_world_frame(self, desired_pose):
"""
Moves the Tool tip TCP to the pose given
Its relative pose to world frame
:param: desired_pose: Pose where you want the TCP to move next
"""
self.sgs.move_tip_absolute(desired_pose)
def move_tip(self, x=0., y=0., z=0., roll=0., pitch=0., yaw=0.):
"""
        Moves by the given increments in XYZ and RPY in the world frame.
        Only state the increments you want; the rest will not change
        because of the default values of 0.
"""
self.sgs.move_tip(x,y,z,roll,pitch,yaw)
def send_movement_command(self, command, duration=0.2):
"""
        Send a dictionary of joint targets to the arm and hand directly.
        To get the available joint names: rostopic echo /joint_states/name -n1
[H1_F1J1, H1_F1J2, H1_F1J3, H1_F2J1, H1_F2J2, H1_F2J3, H1_F3J1, H1_F3J2, H1_F3J3,
elbow_joint, shoulder_lift_joint, shoulder_pan_joint, wrist_1_joint, wrist_2_joint,
wrist_3_joint]
        :param command: a dictionary of joint names associated with a target:
{"H1_F1J1": -1.0, "shoulder_pan_joint": 1.0}
:param duration: the amount of time it will take to get there in seconds. Needs to be bigger than 0.0
"""
self.sgs.send_command(command, duration)
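    # Illustrative call (the values are made up, not from the original code):
    #   env.send_movement_command({"H1_F1J1": -1.0, "shoulder_pan_joint": 1.0}, duration=0.5)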
def set_fingers_colision(self, activate=False):
"""
It activates or deactivates the finger collisions.
        It will also trigger publishing the collisions into the planning_scene.
        We pause and unpause for the same exact reason as the get TCP and get ball pose calls.
        Being a service, until the simulation is unpaused it won't get a response.
"""
rospy.logdebug("START get_fingers_colision")
self.sgs.check_fingers_collisions(activate)
rospy.logdebug("END get_fingers_colision")
def get_fingers_colision(self, object_collision_name):
"""
Returns the collision of the three fingers
        object_collision_name: Here you state the name of the model to check collision
with fingers.
Objects in sim: cricket_ball__link, drill__link
"""
self.gazebo.unpauseSim()
self.set_fingers_colision(True)
planning_scene = self._check_planning_scene_ready()
self.gazebo.pauseSim()
objects_scene = planning_scene.allowed_collision_matrix.entry_names
colissions_matrix = planning_scene.allowed_collision_matrix.entry_values
        # We look for the Ball object model name in the objects scene list and get the index:
object_collision_name_index = objects_scene.index(object_collision_name)
Finger_Links_Names = [ "H1_F1_base_link",
"H1_F1_link_1",
"H1_F1_link_2",
"H1_F1_palm_link",
"H1_F1_tip",
"H1_F2_base_link",
"H1_F2_link_1",
"H1_F2_link_2",
"H1_F2_palm_link",
"H1_F2_tip",
"H1_F3_base_link",
"H1_F3_link_1",
"H1_F3_link_2",
"H1_F3_palm_link",
"H1_F3_tip"]
        # We get all the indices of the model links that are part of the fingers
        # We separate them by finger so that afterwards it is easy to detect whether there is contact on all of the fingers
finger1_indices = [i for i, var in enumerate(Finger_Links_Names) if "H1_F1" in var]
finger2_indices = [i for i, var in enumerate(Finger_Links_Names) if "H1_F2" in var]
finger3_indices = [i for i, var in enumerate(Finger_Links_Names) if "H1_F3" in var]
# Now we search in the entry_value corresponding to the object to check the collision
# With all the rest of objects.
object_collision_array = colissions_matrix[object_collision_name_index].enabled
        # Is there a collision with Finger 1
f1_collision = False
for finger_index in finger1_indices:
if object_collision_array[finger_index]:
f1_collision = True
break
        # Is there a collision with Finger 2
f2_collision = False
for finger_index in finger2_indices:
if object_collision_array[finger_index]:
f2_collision = True
break
        # Is there a collision with Finger 3
f3_collision = False
for finger_index in finger3_indices:
if object_collision_array[finger_index]:
f3_collision = True
break
finger_collision_dict = {
"f1":f1_collision,
"f2":f2_collision,
"f3":f3_collision
}
return finger_collision_dict
def reset_scene(self):
"""
Restarts the simulation and world objects
"""
self.sgs.reset_world()
def get_imu(self):
return self.imu
def get_joint_states(self):
        return self.joint_states
|
# generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:57:52+00:00
from __future__ import annotations
from typing import Annotated, List, Optional
from pydantic import BaseModel
class String(BaseModel):
__root__: str
class Boolean(BaseModel):
__root__: bool
class ReplaceableAttribute(BaseModel):
Name: String
Value: String
Replace: Optional[Boolean] = None
class Attribute(BaseModel):
Name: String
AlternateNameEncoding: Optional[String] = None
Value: String
AlternateValueEncoding: Optional[String] = None
class Float(BaseModel):
__root__: float
class AttributeList(BaseModel):
__root__: List[Attribute]
class AttributeNameList(BaseModel):
__root__: List[String]
class CreateDomainRequest(BaseModel):
DomainName: String
class UpdateCondition(BaseModel):
"""
Specifies the conditions under which data should be updated. If an update condition is specified for a request, the data will only be updated if the condition is satisfied. For example, if an attribute with a specific name and value exists, or if a specific attribute doesn't exist.
"""
Name: Optional[String] = None
Value: Optional[String] = None
Exists: Optional[Boolean] = None
class DeleteDomainRequest(BaseModel):
DomainName: String
class DomainMetadataRequest(BaseModel):
DomainName: String
class Integer(BaseModel):
__root__: int
class Long(Integer):
pass
class DomainNameList(AttributeNameList):
pass
class GetAttributesRequest(BaseModel):
DomainName: String
ItemName: String
AttributeNames: Optional[AttributeNameList] = None
ConsistentRead: Optional[Boolean] = None
class Item(BaseModel):
Name: String
AlternateNameEncoding: Optional[String] = None
Attributes: AttributeList
class ItemList(BaseModel):
__root__: List[Item]
class ListDomainsRequest(BaseModel):
MaxNumberOfDomains: Optional[Integer] = None
NextToken: Optional[String] = None
class ReplaceableAttributeList(BaseModel):
__root__: List[ReplaceableAttribute]
class PutAttributesRequest(BaseModel):
DomainName: String
ItemName: String
Attributes: ReplaceableAttributeList
Expected: Optional[UpdateCondition] = None
class SelectRequest(BaseModel):
SelectExpression: String
NextToken: Optional[String] = None
ConsistentRead: Optional[Boolean] = None
class ReplaceableItem(BaseModel):
Name: String
Attributes: ReplaceableAttributeList
class DuplicateItemName(BaseModel):
"""
The item name was specified more than once.
"""
BoxUsage: Optional[Float] = None
class InvalidParameterValue(DuplicateItemName):
"""
The value for a parameter is invalid.
"""
pass
class MissingParameter(DuplicateItemName):
"""
The request must contain the specified missing parameter.
"""
pass
class NoSuchDomain(DuplicateItemName):
"""
The specified domain does not exist.
"""
pass
class NumberItemAttributesExceeded(DuplicateItemName):
"""
Too many attributes in this item.
"""
pass
class NumberDomainAttributesExceeded(DuplicateItemName):
"""
Too many attributes in this domain.
"""
pass
class NumberDomainBytesExceeded(DuplicateItemName):
"""
Too many bytes in this domain.
"""
pass
class NumberSubmittedItemsExceeded(DuplicateItemName):
"""
Too many items exist in a single call.
"""
pass
class NumberSubmittedAttributesExceeded(DuplicateItemName):
"""
Too many attributes exist in a single call.
"""
pass
class NumberDomainsExceeded(DuplicateItemName):
"""
Too many domains exist per this account.
"""
pass
class DeletableAttribute(BaseModel):
Name: String
Value: Optional[String] = None
class AttributeDoesNotExist(DuplicateItemName):
"""
The specified attribute does not exist.
"""
pass
class DomainMetadataResult(BaseModel):
ItemCount: Optional[Integer] = None
ItemNamesSizeBytes: Optional[Long] = None
AttributeNameCount: Optional[Integer] = None
AttributeNamesSizeBytes: Optional[Long] = None
AttributeValueCount: Optional[Integer] = None
AttributeValuesSizeBytes: Optional[Long] = None
Timestamp: Optional[Integer] = None
class GetAttributesResult(BaseModel):
Attributes: Optional[AttributeList] = None
class ListDomainsResult(BaseModel):
DomainNames: Optional[DomainNameList] = None
NextToken: Optional[String] = None
class InvalidNextToken(DuplicateItemName):
"""
The specified NextToken is not valid.
"""
pass
class SelectResult(BaseModel):
Items: Optional[ItemList] = None
NextToken: Optional[String] = None
class InvalidNumberPredicates(DuplicateItemName):
"""
Too many predicates exist in the query expression.
"""
pass
class InvalidNumberValueTests(DuplicateItemName):
"""
Too many predicates exist in the query expression.
"""
pass
class InvalidQueryExpression(DuplicateItemName):
"""
The specified query expression syntax is not valid.
"""
pass
class RequestTimeout(DuplicateItemName):
"""
A timeout occurred when attempting to query the specified domain with specified query expression.
"""
pass
class TooManyRequestedAttributes(DuplicateItemName):
"""
Too many attributes requested.
"""
pass
class ReplaceableItemList(BaseModel):
__root__: List[ReplaceableItem]
class BatchPutAttributesRequest(BaseModel):
DomainName: String
Items: ReplaceableItemList
class DeletableAttributeList(BaseModel):
__root__: List[DeletableAttribute]
class DeleteAttributesRequest(BaseModel):
DomainName: String
ItemName: String
Attributes: Optional[DeletableAttributeList] = None
Expected: Optional[UpdateCondition] = None
class DeletableItem(BaseModel):
Name: String
Attributes: Optional[DeletableAttributeList] = None
class DeletableItemList(BaseModel):
__root__: List[DeletableItem]
class BatchDeleteAttributesRequest(BaseModel):
DomainName: String
Items: DeletableItemList
|
"""
<Program Name>
conflict/__init__.py
<Purpose>
Module that is used with ut_seash_moduleconflicterror.py. It should not be
enabled successfully, as it will conflict with the default 'show' command.
Other fields are simply defined so that the module importer can import the
module without problems.
"""
moduledata = {
'help_text': '',
'url': None,
'command_dict': {
'show': { 'name':'nonexistant', 'help_text': 'bad help text',
'callback': None, 'children': {}},
}
} |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.ops.spatial_transformer import SpatialTransformOp
from mo.front.caffe.collect_attributes import merge_attrs
from mo.front.extractor import FrontExtractorOp
class SpatialTransformFrontExtractor(FrontExtractorOp):
op = 'SpatialTransformer'
enabled = True
@classmethod
def extract(cls, node):
proto_layer = node.pb
param = proto_layer.st_param
update_attrs = {
'transform_type': param.transform_type,
'sampler_type': param.sampler_type,
'output_H': param.output_H,
'output_W': param.output_W,
'to_compute_dU': int(param.to_compute_dU),
'theta_1_1': param.theta_1_1,
'theta_1_2': param.theta_1_2,
'theta_1_3': param.theta_1_3,
'theta_2_1': param.theta_2_1,
'theta_2_2': param.theta_2_2,
'theta_2_3': param.theta_2_3
}
mapping_rule = merge_attrs(param, update_attrs)
# update the attributes of the node
SpatialTransformOp.update_node_stat(node, mapping_rule)
return cls.enabled
|
from pathlib import Path
import numpy as np
import pytest
from espnet2.text.token_id_converter import TokenIDConverter
def test_tokens2ids():
converter = TokenIDConverter(["a", "b", "c", "<unk>"])
assert converter.tokens2ids("abc") == [0, 1, 2]
def test_idstokens():
converter = TokenIDConverter(["a", "b", "c", "<unk>"])
assert converter.ids2tokens([0, 1, 2]) == ["a", "b", "c"]
def test_get_num_vocabulary_size():
converter = TokenIDConverter(["a", "b", "c", "<unk>"])
assert converter.get_num_vocabulary_size() == 4
def test_from_file(tmp_path: Path):
with (tmp_path / "tokens.txt").open("w") as f:
f.write("a\n")
f.write("b\n")
f.write("c\n")
f.write("<unk>\n")
converter = TokenIDConverter(tmp_path / "tokens.txt")
assert converter.tokens2ids("abc") == [0, 1, 2]
def test_duplicated():
with pytest.raises(RuntimeError):
TokenIDConverter(["a", "a", "c"])
def test_no_unk():
with pytest.raises(RuntimeError):
TokenIDConverter(["a", "b", "c"])
def test_input_2dim_array():
converter = TokenIDConverter(["a", "b", "c", "<unk>"])
with pytest.raises(ValueError):
converter.ids2tokens(np.random.randn(2, 2))
|
from __future__ import unicode_literals, absolute_import
import os
import sys
import unittest
try:
from StringIO import StringIO
except Exception as e:
from io import StringIO
from greb import meaning as greb
from . import data
class TestGreb(unittest.TestCase):
def test_read_page(self):
for each in data.READ_PAGE_DATA:
url = each.get('url')
expected_status_code = each.get('status_code')
response, status_code = greb.read_page(url)
self.assertEqual(status_code, expected_status_code)
def test_find_meaning(self):
for each_word, each_word_meaning in data.INPUT_WORDS['MEANING'].items():
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['base'].format(word=each_word))
self.assertEqual(status_code, data.COMMON['STATUS_CODE']['ok'])
meaning = greb.find_meaning(tree)
self.assertEqual(meaning, each_word_meaning)
def test_find_sentences(self):
for each_word, each_word_sentence in data.INPUT_WORDS['SENTENCE'].items():
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['base'].format(word=each_word))
self.assertEqual(status_code, data.COMMON['STATUS_CODE']['ok'])
sentence = greb.find_sentences(tree, each_word)
self.assertEqual(sentence, each_word_sentence)
def test_find_synonyms(self):
for each_word, each_word_synonym in data.INPUT_WORDS['SYNONYM'].items():
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['base'].format(word=each_word))
self.assertEqual(status_code, data.COMMON['STATUS_CODE']['ok'])
synonym = greb.find_synonyms(tree)
self.assertEqual(synonym, each_word_synonym)
def test_find_antonyms(self):
for each_word, each_word_antonym in data.INPUT_WORDS['ANTONYM'].items():
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['base'].format(word=each_word))
self.assertEqual(status_code, data.COMMON['STATUS_CODE']['ok'])
antonym = greb.find_antonyms(tree)
self.assertEqual(antonym, each_word_antonym)
def test_find_trending_words(self):
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['home'])
self.assertEqual(status_code, data.COMMON['STATUS_CODE']['ok'])
trending_words = greb.find_trending_words(tree)
self.assertEqual(len(trending_words), 5)
def test_find_trending_word_with_exception(self):
for each_word, each_word_dict in data.EXCEPTION_THROWING_WORDS['trending'].items():
expected_status_code = each_word_dict.get('status_code')
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['base'].format(word=each_word))
self.assertEqual(status_code, expected_status_code)
trending_words = greb.find_trending_words(tree)
self.assertEqual(trending_words, each_word_dict.get('console_output'))
def test_find_word_of_the_day(self):
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['home'])
self.assertEqual(status_code, data.COMMON['STATUS_CODE']['ok'])
word_of_day = greb.find_word_of_the_day(tree)
self.assertEqual(len(word_of_day), 1)
def test_find_suggestions(self):
for each_word, each_word_dict in data.MISSPELLED_WORDS.items():
expected_status_code = each_word_dict.get('status_code')
suggestion_string = each_word_dict.get('suggestion_string')
suggestion_key = each_word_dict.get('suggestion_key')
# expected_suggestions = each_word_dict.get('suggestions')
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['base'].format(word=each_word))
self.assertEqual(status_code, expected_status_code)
self.assertIn(suggestion_string, tree.get_text())
result = greb.find_suggestions(tree)
self.assertIn(suggestion_key, result)
# suggestions = result.get('suggestion')
# self.assertEqual(suggestions, expected_suggestions)
def test_find_suggestions_with_exception(self):
for each_word, each_word_dict in data.EXCEPTION_THROWING_WORDS['suggestions'].items():
expected_status_code = each_word_dict.get('status_code')
tree, status_code = greb.make_parse_tree(data.COMMON['URLS']['base'].format(word=each_word))
self.assertEqual(status_code, expected_status_code)
suggestions = greb.find_suggestions(tree)
self.assertEqual(suggestions, each_word_dict.get('console_output'))
def test_print_heading(self):
captured_output = StringIO()
sys.stdout = captured_output
greb.print_heading(data.PRINT_FUNCTION['print_heading']['input'])
sys.stdout = sys.__stdout__
self.assertEqual(captured_output.getvalue(), data.PRINT_FUNCTION['print_heading']['output'])
def test_print_word(self):
captured_output = StringIO()
sys.stdout = captured_output
greb.print_word(data.PRINT_FUNCTION['print_word']['input'])
sys.stdout = sys.__stdout__
self.assertEqual(captured_output.getvalue(), data.PRINT_FUNCTION['print_word']['output'])
def test_print_error_messages(self):
captured_output = StringIO()
sys.stdout = captured_output
greb.print_error_messages(data.PRINT_FUNCTION['print_error_messages']['input'])
sys.stdout = sys.__stdout__
self.assertEqual(captured_output.getvalue(), data.PRINT_FUNCTION['print_error_messages']['output'])
def test_print_result_for_info_msg(self):
result = {}
result['info_msg'] = data.PRINT_FUNCTION['print_result_for_info_msg']['input']
captured_output = StringIO()
sys.stdout = captured_output
greb.print_result(result)
sys.stdout = sys.__stdout__
self.assertEqual(captured_output.getvalue(), data.PRINT_FUNCTION['print_result_for_info_msg']['output'])
def test_print_result_for_error_msg(self):
result = {}
result['erroneous_key'] = data.PRINT_FUNCTION['print_result_for_error_msg']['input']
captured_output = StringIO()
sys.stdout = captured_output
greb.print_result(result)
sys.stdout = sys.__stdout__
self.assertEqual(captured_output.getvalue(), data.PRINT_FUNCTION['print_result_for_error_msg']['output'])
def test_write_meaning_to_file(self):
dummy_meaning_file = os.path.join(os.getcwd(), 'test', 'dummy_meaning.json')
options = {'word': 'awesome', 'meaning': True, 'file_path': dummy_meaning_file}
captured_output = StringIO()
sys.stdout = captured_output
greb.greb(**options)
sys.stdout = sys.__stdout__
os.remove(dummy_meaning_file)
def test_write_and_find_meaning_from_history(self):
dummy_meaning_file = os.path.join(os.getcwd(), 'test', 'dummy_meaning.json')
options = {'word': 'awesome', 'meaning': True, 'file_path': dummy_meaning_file}
captured_output = StringIO()
sys.stdout = captured_output
greb.greb(**options)
sys.stdout = sys.__stdout__
greb.find_meaning_from_history(file_path=dummy_meaning_file)
os.remove(dummy_meaning_file)
def test_greb_for_complete_word(self):
dummy_meaning_file = os.path.join(os.getcwd(), 'test', 'dummy_meaning.json')
options = {
'word': 'contribution',
'meaning': True,
'sentence': True,
'synonym': True,
'antonym': True,
'file_path': dummy_meaning_file
}
captured_output = StringIO()
sys.stdout = captured_output
greb.greb(**options)
sys.stdout = sys.__stdout__
os.remove(dummy_meaning_file)
def test_greb_for_trending_words(self):
options = {'trending_words': True}
captured_output = StringIO()
sys.stdout = captured_output
greb.greb(**options)
sys.stdout = sys.__stdout__
def test_greb_for_word_of_the_day(self):
options = {'word_of_day': True}
captured_output = StringIO()
sys.stdout = captured_output
greb.greb(**options)
sys.stdout = sys.__stdout__
def test_greb_for_word_suggestion(self):
for each_word, each_word_dict in data.MISSPELLED_WORDS.items():
options = {'word': each_word, 'meaning': True}
captured_output = StringIO()
sys.stdout = captured_output
greb.greb(**options)
sys.stdout = sys.__stdout__
def test_greb_for_find_meaning_from_history(self):
options = {'display_terminal': True}
captured_output = StringIO()
sys.stdout = captured_output
greb.greb(**options)
sys.stdout = sys.__stdout__
if __name__ == '__main__':
unittest.main()
|
'''Implementation for journal file reading and writing to serialize
glycopeptide spectrum match information to disk during processing.
'''
import csv
import json
import io
import gzip
from collections import defaultdict
from operator import attrgetter
from six import PY2
import numpy as np
from glycopeptidepy.utils import collectiontools
from ms_deisotope.output import ProcessedMzMLDeserializer
from glycan_profiling.task import TaskBase, TaskExecutionSequence, Empty
from glycan_profiling.structure.structure_loader import (
FragmentCachingGlycopeptide, DecoyFragmentCachingGlycopeptide,
PeptideProteinRelation, LazyGlycopeptide)
from glycan_profiling.structure.scan import ScanInformationLoader
from glycan_profiling.structure.lru import LRUMapping
from glycan_profiling.chromatogram_tree import MassShift, Unmodified
from glycan_profiling.tandem.ref import SpectrumReference
from glycan_profiling.tandem.spectrum_match import (
MultiScoreSpectrumMatch, MultiScoreSpectrumSolutionSet, ScoreSet, FDRSet)
from .search_space import glycopeptide_key_t, StructureClassification
def _gzopen(path, mode='w'):
handle = gzip.open(path, mode=mode.replace('t', 'b'))
if not PY2:
return io.TextIOWrapper(handle, encoding='utf8')
return handle
def _gzwrap(fh, mode='w'):
handle = gzip.GzipFile(fileobj=fh, mode=mode)
if not PY2:
return io.TextIOWrapper(handle, encoding='utf8')
return handle
class JournalFileWriter(TaskBase):
"""A task for writing glycopeptide spectrum matches to a TSV-formatted
journal file. This format is an intermediary result, and will contain many
random or non-useful matches.
"""
def __init__(self, path, include_fdr=False, include_auxiliary=False):
self.path = path
if not hasattr(path, 'write'):
self.handle = _gzopen(path, 'wb')
else:
self.handle = _gzwrap(self.path, 'w')
self.include_fdr = include_fdr
self.include_auxiliary = include_auxiliary
self.writer = csv.writer(self.handle, delimiter='\t')
self.write_header()
self.spectrum_counter = 0
self.solution_counter = 0
def _get_headers(self):
names = [
'scan_id',
'precursor_mass_accuracy',
'peptide_start',
'peptide_end',
'peptide_id',
'protein_id',
'hypothesis_id',
'glycan_combination_id',
'match_type',
'site_combination_index',
'glycopeptide_sequence',
'mass_shift',
'total_score',
'peptide_score',
'glycan_score',
'glycan_coverage',
]
if self.include_fdr:
names.extend([
"peptide_q_value",
"glycan_q_value",
"glycopeptide_q_value",
"total_q_value"
])
if self.include_auxiliary:
names.append("auxiliary")
return names
def write_header(self):
self.writer.writerow(self._get_headers())
def _prepare_fields(self, psm):
error = (psm.target.total_mass - psm.precursor_information.neutral_mass
) / psm.precursor_information.neutral_mass
fields = ([psm.scan_id, error, ] + list(psm.target.id) + [
psm.target,
psm.mass_shift.name,
psm.score,
psm.score_set.peptide_score,
psm.score_set.glycan_score,
psm.score_set.glycan_coverage,
])
if self.include_fdr:
q_value_set = psm.q_value_set
if q_value_set is None:
fdr_fields = [
1, 1, 1, 1
]
else:
fdr_fields = [
q_value_set.peptide_q_value,
q_value_set.glycan_q_value,
q_value_set.glycopeptide_q_value,
q_value_set.total_q_value
]
fields.extend(fdr_fields)
if self.include_auxiliary:
fields.append(json.dumps(psm.get_auxiliary_data(), sort_keys=True))
fields = [str(f) for f in fields]
return fields
def write(self, psm):
self.solution_counter += 1
self.writer.writerow(self._prepare_fields(psm))
def writeall(self, solution_sets):
for solution_set in solution_sets:
self.spectrum_counter += 1
for solution in solution_set:
self.write(solution)
self.flush()
def flush(self):
self.handle.flush()
def close(self):
self.handle.close()
class JournalingConsumer(TaskExecutionSequence):
def __init__(self, journal_file, in_queue, in_done_event):
self.journal_file = journal_file
self.in_queue = in_queue
self.in_done_event = in_done_event
self.done_event = self._make_event()
def run(self):
has_work = True
while has_work and not self.error_occurred():
try:
solutions = self.in_queue.get(True, 5)
self.journal_file.writeall(solutions)
# Only log if something changed.
if solutions:
self.log("... Handled %d spectra with %d solutions so far" % (
self.journal_file.spectrum_counter, self.journal_file.solution_counter))
except Empty:
if self.in_done_event.is_set():
has_work = False
break
self.done_event.set()
def close_stream(self):
self.journal_file.close()
def parse_float(value):
value = float(value)
if np.isnan(value):
return 0
return value
try:
from glycan_profiling._c.tandem.tandem_scoring_helpers import parse_float
except ImportError:
pass
class JournalFileReader(TaskBase):
def __init__(self, path, cache_size=2 ** 16, mass_shift_map=None, scan_loader=None, include_fdr=False):
if mass_shift_map is None:
mass_shift_map = {Unmodified.name: Unmodified}
else:
mass_shift_map.setdefault(Unmodified.name, Unmodified)
self.path = path
if not hasattr(path, 'read'):
self.handle = _gzopen(path, 'rt')
else:
self.handle = _gzwrap(self.path, 'r')
self.reader = csv.DictReader(self.handle, delimiter='\t')
self.glycopeptide_cache = LRUMapping(cache_size or 2 ** 12)
self.mass_shift_map = mass_shift_map
self.scan_loader = scan_loader
self.include_fdr = include_fdr
def _build_key(self, row):
glycopeptide_id_key = glycopeptide_key_t(
int(row['peptide_start']), int(row['peptide_end']), int(
row['peptide_id']), int(row['protein_id']),
int(row['hypothesis_id']), int(row['glycan_combination_id']),
StructureClassification[row['match_type']],
int(row['site_combination_index']))
return glycopeptide_id_key
def _build_protein_relation(self, glycopeptide_id_key):
return PeptideProteinRelation(
glycopeptide_id_key.start_position, glycopeptide_id_key.end_position,
glycopeptide_id_key.protein_id, glycopeptide_id_key.hypothesis_id)
def glycopeptide_from_row(self, row):
glycopeptide_id_key = self._build_key(row)
if glycopeptide_id_key in self.glycopeptide_cache:
return self.glycopeptide_cache[glycopeptide_id_key]
if glycopeptide_id_key.structure_type & StructureClassification.target_peptide_decoy_glycan.value:
glycopeptide = LazyGlycopeptide(row['glycopeptide_sequence'], glycopeptide_id_key)
else:
glycopeptide = LazyGlycopeptide(row['glycopeptide_sequence'], glycopeptide_id_key)
glycopeptide.protein_relation = self._build_protein_relation(glycopeptide_id_key)
self.glycopeptide_cache[glycopeptide_id_key] = glycopeptide
return glycopeptide
def _build_score_set(self, row):
score_set = ScoreSet(
parse_float(row['total_score']),
parse_float(row['peptide_score']),
parse_float(row['glycan_score']),
float(row['glycan_coverage']))
return score_set
def _build_fdr_set(self, row):
fdr_set = FDRSet(
float(row['total_q_value']),
float(row['peptide_q_value']),
float(row['glycan_q_value']),
float(row['glycopeptide_q_value']))
return fdr_set
def _make_mass_shift(self, row):
# mass_shift = MassShift(row['mass_shift'], MassShift.get(row['mass_shift']))
mass_shift = self.mass_shift_map[row['mass_shift']]
return mass_shift
def _make_scan(self, row):
if self.scan_loader is None:
scan = SpectrumReference(row['scan_id'])
else:
scan = self.scan_loader.get_scan_by_id(row['scan_id'])
return scan
def spectrum_match_from_row(self, row):
glycopeptide = self.glycopeptide_from_row(row)
scan = self._make_scan(row)
fdr_set = None
if self.include_fdr:
fdr_set = self._build_fdr_set(row)
score_set = self._build_score_set(row)
mass_shift = self._make_mass_shift(row)
match = MultiScoreSpectrumMatch(
scan, glycopeptide, score_set, mass_shift=mass_shift,
q_value_set=fdr_set,
match_type=str(glycopeptide.id.structure_type))
return match
def __iter__(self):
return self
def __next__(self):
return self.spectrum_match_from_row(next(self.reader))
def next(self):
return self.__next__()
def close(self):
self.handle.close()
def isclose(a, b, rtol=1e-05, atol=1e-08):
return abs(a - b) <= atol + rtol * abs(b)
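# (This mirrors the |a - b| <= atol + rtol * |b| tolerance rule used by numpy.isclose.)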
class SolutionSetGrouper(TaskBase):
"""Partition multi-score glycopeptide identificatins into groups
according to either glycopeptide target/decoy classification (:attr:`match_type_groups`)
or by best match to a given scan and target/decoy classification (:attr:`exclusive_match_groups`)
"""
def __init__(self, spectrum_matches):
self.spectrum_matches = list(spectrum_matches)
self.spectrum_ids = set()
self.match_type_groups = self._collect()
self.exclusive_match_groups = self._exclusive()
def __getitem__(self, key):
return self.exclusive_match_groups[key]
def __iter__(self):
return iter(self.exclusive_match_groups.items())
def _by_scan_id(self):
acc = []
for by_scan in collectiontools.groupby(self.spectrum_matches, lambda x: x.scan.id).values():
scan = by_scan[0].scan
self.spectrum_ids.add(scan.scan_id)
ss = MultiScoreSpectrumSolutionSet(scan, by_scan)
ss.sort()
acc.append(ss)
acc.sort(key=lambda x: x.scan.id)
return acc
def _collect(self):
match_type_getter = attrgetter('match_type')
groups = collectiontools.groupby(
self.spectrum_matches, match_type_getter)
by_scan_groups = {}
for group, members in groups.items():
acc = []
for by_scan in collectiontools.groupby(members, lambda x: x.scan.id).values():
scan = by_scan[0].scan
self.spectrum_ids.add(scan.scan_id)
ss = MultiScoreSpectrumSolutionSet(scan, by_scan)
ss.sort()
acc.append(ss)
acc.sort(key=lambda x: x.scan.id)
by_scan_groups[group] = acc
return by_scan_groups
def _exclusive(self, score_getter=None, min_value=0):
if score_getter is None:
score_getter = attrgetter('score')
groups = collectiontools.groupby(
self.spectrum_matches, lambda x: x.scan.id)
by_match_type = defaultdict(list)
for _scan_id, members in groups.items():
top_match = max(members, key=score_getter)
top_score = score_getter(top_match)
seen = set()
for match in members:
if isclose(top_score, score_getter(match)) and score_getter(match) > 0 and match.match_type not in seen:
seen.add(match.match_type)
by_match_type[match.match_type].append(match)
for _group_label, matches in by_match_type.items():
matches.sort(key=lambda x: (x.scan.id, score_getter(x)))
return by_match_type
@property
def target_matches(self):
try:
return self.match_type_groups[StructureClassification.target_peptide_target_glycan]
except KeyError:
return []
@property
def decoy_matches(self):
try:
return self.match_type_groups[StructureClassification.decoy_peptide_target_glycan]
except KeyError:
return []
def target_count(self):
return len(self.target_matches)
def decoy_count(self):
return len(self.decoy_matches)
class JournalSetLoader(TaskBase):
"""A helper class to load a list of journal file fragments
into a single cohesive result, as from a previously compiled
analysis's bundled journal file shards.
"""
@classmethod
def from_analysis(cls, analysis, scan_loader=None, stub_wrapping=True):
mass_shift_map = {
m.name: m for m in analysis.parameters['mass_shifts']}
if scan_loader is None:
scan_loader = ProcessedMzMLDeserializer(analysis.parameters['sample_path'])
if stub_wrapping:
stub_loader = ScanInformationLoader(scan_loader)
else:
stub_loader = scan_loader
return cls([f.open() for f in analysis.files], stub_loader, mass_shift_map)
def __init__(self, journal_files, scan_loader, mass_shift_map=None):
if mass_shift_map is None:
mass_shift_map = {Unmodified.name: Unmodified}
self.journal_files = journal_files
self.scan_loader = scan_loader
self.mass_shift_map = mass_shift_map
self.solutions = []
def load(self):
n = len(self.journal_files)
for i, journal_path in enumerate(self.journal_files, 1):
self.log("... Reading Journal Shard %s, %d/%d" %
(journal_path, i, n))
self._load_identifications_from_journal(journal_path, self.solutions)
self.log("Partitioning Spectrum Matches...")
return SolutionSetGrouper(self.solutions)
def __iter__(self):
if not self.solutions:
self.load()
return iter(self.solutions)
def _load_identifications_from_journal(self, journal_path, accumulator=None):
if accumulator is None:
accumulator = []
reader = enumerate(JournalFileReader(
journal_path,
scan_loader=ScanInformationLoader(self.scan_loader),
mass_shift_map=self.mass_shift_map), len(accumulator))
i = float(len(accumulator))
should_log = False
for i, sol in reader:
if i % 100000 == 0:
should_log = True
if should_log:
self.log("... %d Solutions Loaded" % (i, ))
should_log = False
accumulator.append(sol)
return accumulator
|
# coding: utf-8
# import models into model package
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from utils.checks import FileCheck
from cookies import Cookies
import json
import sys
import os
import time
import random
opts = Options()
def load_settings():
fchk = FileCheck()
fchk.check_create_folder("data")
fchk.check_copy_file("data/settings.json")
with open("data/settings.json") as f:
print("[INFO] Loaded settings.")
return json.load(f)
def parse_arguments():
"""
################### Read ######################
-h stands for headless mode
-hot stands for checking the posts in hot
"""
global opts
opts.start = 9
for arg in sys.argv:
if arg == "-h":
opts.add_argument("--headless")
elif arg == "-hot":
opts.start = 0
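# Illustrative invocations (assuming this module is saved as bot.py and run as a script):
#   python bot.py -h     -> run Chrome headless
#   python bot.py -hot   -> start at index 0 so the "hot" posts are processed as well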
class InstagramBot:
# todo: clear follow list
def __init__(self, username=None, password=None):
parse_arguments()
if os.name == "posix":
exec_path = "./chromedriver"
else:
exec_path = "chromedriver.exe"
self.settings = load_settings()
self.username, self.password = self.process_credentials(username, password)
self.comm_counter = 0
self.browser = webdriver.Chrome(options=opts, executable_path=exec_path)
self.cookies = Cookies(self.browser)
def process_credentials(self, username, password):
if not username:
username = self.settings["username"]
if not username:
username = input("Username: ")
if not password:
password = self.settings["password"]
if not password:
password = input("Password: ")
return username, password
def sign_in(self):
"""Sign in to Instagram with the provided credentials"""
self.browser.get('https://www.instagram.com/accounts/login/')
time.sleep(2)
form = self.browser.find_elements_by_css_selector("form input")
email_input = form[0]
pass_input = form[1]
email_input.send_keys(self.username)
pass_input.send_keys(self.password)
pass_input.send_keys(Keys.ENTER)
time.sleep(2)
# saving login details
save_login_btn = self.browser.find_element_by_css_selector("button")
save_login_btn.click()
self.cookies.save()
def exit(self):
"""Exit the bot process"""
time.sleep(5)
self.browser.quit()
def inject_cookies(self):
"""Insert previous existing cookies"""
self.cookies.load()
prev_cookies = self.cookies.get()
if not prev_cookies:
self.sign_in()
else:
try:
self.cookies.inject()
except Cookies.TooOldException:
self.sign_in()
def search_giveaway(self):
"""Retrieve a list of links of the latest posts tagged #giveaway"""
self.browser.get('https://www.instagram.com/explore/tags/giveaway/')
time.sleep(2)
posts = self.browser.find_elements_by_css_selector("a[href^='/p/']")
links = list()
for post in posts:
link = post.get_attribute("href")
links.append(link)
return links
def show_source(self):
"""Show page source of current tab"""
page_source = self.browser.page_source
with open("page_source.html", "w", encoding="utf-8") as fp:
fp.write(page_source)
page_source_path = os.path.abspath("page_source.html")
self.browser.execute_script(f"window.open('','_blank');")
new_tab = self.browser.window_handles[1]
self.browser.switch_to.window(new_tab)
self.browser.get(f"file://{page_source_path}")
old_tab = self.browser.window_handles[0]
self.browser.switch_to.window(old_tab)
@staticmethod
def like(buttons):
"""Like a post"""
time.sleep(2)
like_btn = buttons[2]
# h, w = like_btn.size["height"], like_btn.size["width"]
h, w = like_btn.size.values()
if not h == w == 40:
like_btn = buttons[3]
like_btn.click()
@staticmethod
def follow(buttons):
"""Folow a post's author"""
follow_btn = buttons[0]
if follow_btn.text.lower() == "follow":
follow_btn.click()
time.sleep(2)
def comment(self):
"""Comment on a post"""
comm_box = self.browser.find_elements_by_css_selector("form textarea")
if not comm_box:
return
comm_box = comm_box[0]
comm_box.click()
comm_box = self.browser.find_element_by_css_selector("form textarea")
# todo: send random number of comments
comm_choice = random.choice(self.settings["base_comments"])
comm_box.send_keys(comm_choice)
post_button = self.browser.find_element_by_css_selector("form button")
post_button.click()
self.comm_counter += 1
if self.comm_counter == 5:
self.comm_counter = 0
print("Waiting 5 seconds to avoid soft ban")
time.sleep(10)
time.sleep(2)
def process_posts(self, links):
for i in range(opts.start, len(links)):
post_url = links[i]
# todo: check if post was previously seen
print(post_url)
self.browser.get(post_url)
buttons = self.browser.find_elements_by_css_selector("button")
self.follow(buttons)
self.like(buttons)
self.comment()
def get_following(self):
"""Retrieves links to the users the bot is following"""
self.browser.get(f"https://www.instagram.com/{self.username}/")
time.sleep(1)
following_btn = self.browser.find_elements_by_css_selector("ul li a")[1]
following_btn.click()
time.sleep(1)
following_window = self.browser.find_element_by_css_selector('div[role=\'dialog\'] ul')
following_window.click()
time.sleep(2)
# todo: scroll till the end of the page
height = following_window.size["height"]
action_chain = webdriver.ActionChains(self.browser)
while True:
action_chain.send_keys(Keys.SPACE).perform()
time.sleep(5)
following_window = self.browser.find_element_by_css_selector('div[role=\'dialog\'] ul')
following_window.click()
new_height = following_window.size["height"]
if new_height == height:
break
height = new_height
following_list = following_window.find_elements_by_css_selector("li")
following_links = list()
for user in following_list:
link = user.find_element_by_css_selector('a').get_attribute('href')
following_links.append(link)
return following_links
def unfollow(self, user_link):
self.browser.get(user_link)
time.sleep(2)
unfollow_btn_path = "/html/body/div[1]/section/main/div/header/section/div[1]/div[2]/div/span/span[1]/button"
unfollow_btn = self.browser.find_elements_by_xpath(unfollow_btn_path)
if not unfollow_btn:
return
unfollow_btn[0].click()
time.sleep(1)
confirm_btn_path = "/html/body/div[4]/div/div/div/div[3]/button[1]"
confirm_btn = self.browser.find_element_by_xpath(confirm_btn_path)
confirm_btn.click()
def unfollow_many(self, count=1):
following_links = self.get_following()
following_links = enumerate(reversed(following_links))
for index, user_link in following_links:
self.unfollow(user_link)
if index == count - 1:
break
|
from setuptools import setup, find_packages
setup(
name='phasorpy',
version='0.1.0',
packages=['phasorpy'],
package_dir={'phasorpy': 'phasor'},
package_data={'phasorpy': ['data/*.m']},
author='Avinash Madavan',
author_email='[email protected]',
description='A python library for solving economic dispatch problems for MATPOWER case files.',
long_description='This library provides operations to load MATPOWER .m files and several functions that are '
'frequently used in the MATPOWER library. This allows one to leverage the existing literature '
'from MATPOWER without the need for MATLAB.',
install_requires=[
'numpy>=1.14.2',
'scipy>=1.0.0'
],
license='MIT',
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Shuailong
# @Email: [email protected]
# @Date: 2019-08-14 15:26:18
# @Last Modified by: Shuailong
# @Last Modified time: 2019-08-14 15:54:54
# Modified from https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
from typing import List, Dict
import csv
import logging
import sys
import argparse
import collections
import math
EXIT_STATUS_ANSWERS_MALFORMED = 1
EXIT_STATUS_PREDICTIONS_MALFORMED = 2
EXIT_STATUS_PREDICTIONS_EXTRA = 3
EXIT_STATUS_PREDICTION_MISSING = 4
def _get_ngrams(segment, max_order):
"""Extracts all n-grams upto a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts
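# Quick sanity check for _get_ngrams (illustrative comment, not part of the original script):
#   _get_ngrams(["the", "cat", "sat"], 2) counts each unigram and bigram once:
#   ('the',), ('cat',), ('sat',), ('the', 'cat'), ('cat', 'sat')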
def _compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
precisions and brevity penalty.
"""
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus, translation_corpus):
reference_length += min(len(r) for r in references)
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = translation_ngram_counts & merged_ref_ngram_counts
for ngram in overlap:
matches_by_order[len(ngram)-1] += overlap[ngram]
for order in range(1, max_order+1):
possible_matches = len(translation) - order + 1
if possible_matches > 0:
possible_matches_by_order[order-1] += possible_matches
precisions = [0] * max_order
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.) /
(possible_matches_by_order[i] + 1.))
else:
if possible_matches_by_order[i] > 0:
precisions[i] = (float(matches_by_order[i]) /
possible_matches_by_order[i])
else:
precisions[i] = 0.0
if min(precisions) > 0:
p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = float(translation_length) / reference_length
if ratio > 1.0:
bp = 1.
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return (bleu, precisions, bp, ratio, translation_length, reference_length)
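# Quick sanity check for _compute_bleu (illustrative comment, not part of the original script):
# a translation that exactly matches its single reference scores 1.0, because every n-gram
# precision is 1 and the brevity penalty is 1 when the lengths are equal.
#   _compute_bleu([[["the", "cat", "sat", "down"]]], [["the", "cat", "sat", "down"]])[0]  # -> 1.0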
def calculate_bleu(references: Dict[str, List[List[str]]],
predictions: Dict[str, List[str]],
max_order=4,
smooth=False) -> float:
reference_corpus = []
prediction_corpus = []
for instance_id, reference_sents in references.items():
try:
prediction_sent = predictions[instance_id]
except KeyError:
logging.error("Missing prediction for instance '%s'.", instance_id)
sys.exit(EXIT_STATUS_PREDICTION_MISSING)
del predictions[instance_id]
prediction_corpus.append(prediction_sent)
reference_corpus.append(reference_sents)
if len(predictions) > 0:
logging.error("Found %d extra predictions, for example: %s", len(predictions),
", ".join(list(predictions.keys())[:3]))
sys.exit(EXIT_STATUS_PREDICTIONS_EXTRA)
score = _compute_bleu(reference_corpus, prediction_corpus,
max_order=max_order, smooth=smooth)[0]
return score
def read_references(filename: str) -> Dict[str, List[List[str]]]:
references = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
instance_id = row[0]
references_raw1 = row[1]
references_raw2 = row[2]
references_raw3 = row[3]
except IndexError as e:
logging.error(
"Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
if instance_id in references:
logging.error("Key %s repeated in file %s on line %d",
instance_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
if instance_id == "":
logging.error(
"Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
tokens = []
for ref in [references_raw1, references_raw2, references_raw3]:
if ref:
tokens.append(ref.split())
if len(tokens) == 0:
logging.error(
"No reference sentence in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
references[instance_id] = tokens
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_ANSWERS_MALFORMED)
return references
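# Reference CSV layout expected by read_references (derived from the parsing above):
#   instance_id,reference_1,reference_2,reference_3
# All four columns must be present; empty reference columns are simply skipped.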
def read_predictions(filename: str) -> Dict[str, List[str]]:
predictions = {}
with open(filename, "rt", encoding="UTF-8", errors="replace") as f:
reader = csv.reader(f)
try:
for row in reader:
try:
instance_id = row[0]
prediction_raw = row[1]
except IndexError as e:
logging.error(
"Error reading value from CSV file %s on line %d: %s", filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if instance_id in predictions:
logging.error("Key %s repeated in file %s on line %d",
instance_id, filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if instance_id == "":
logging.error(
"Key is empty in file %s on line %d", filename, reader.line_num)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
if prediction_raw == "":
logging.warning("Key % s has empty prediction in file % s on line % d",
instance_id, filename, reader.line_num)
tokens = prediction_raw.split()
predictions[instance_id] = tokens
except csv.Error as e:
logging.error('file %s, line %d: %s', filename, reader.line_num, e)
sys.exit(EXIT_STATUS_PREDICTIONS_MALFORMED)
return predictions
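# Prediction CSV layout expected by read_predictions (derived from the parsing above):
#   instance_id,prediction
# An empty prediction column only triggers a warning, not an error.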
def main():
references = read_references(args.references)
predictions = read_predictions(args.predictions)
bleu = calculate_bleu(references, predictions,
max_order=args.max_order, smooth=args.smooth)
print(f'BLEU score: {bleu*100:.4f}.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='SemEval 2020 Task 4 subtask C official evaluation script')
parser.add_argument('--references', '-r',
help='reference file in csv format')
parser.add_argument('--predictions', '-p',
help='prediction file in csv format')
parser.add_argument(
'--max_order', default=4, type=int, help='Maximum n-gram order to use when computing BLEU score')
parser.add_argument('--smooth', action='store_true',
help='Whether or not to apply Lin et al. 2004 smoothing')
args = parser.parse_args()
main()
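# Example invocation (the script file name here is hypothetical):
#   python evaluate.py --references references.csv --predictions predictions.csv --smooth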
|
"""timeout_req_ImageSender.py -- show use of ZMQ timeout options for restarts
A Raspberry Pi test program that uses imagezmq to send image frames from the
PiCamera continuously to a receiving program on a Mac that will display the
images as a video stream. Images are jpg compressed before sending.
One of the issues with the REQ/REP messaging pattern is that the sending program
will stall if the receiving program is stopped and restarted. This example
program shows one way to use ZMQ options to restart the sender when that happens.
This image sending program uses the REQ/REP messaging pattern. It demonstrates
one way to deal with a failure to receive a REP after a REQ is sent. If the
receiving program restarts, this sending program will automatically restart.
Use with the 'with_ImageHub.py' program to receive the images on the Mac. Brief
test instructions are in that program: with_ImageHub.py. Stop the
'with_ImageHub' program and restart it. It should resume receiving images after
it is restarted.
"""
import cv2
import sys
import zmq # needed because we will be using zmq socket options & exceptions
import time
import socket
import imagezmq
import traceback
from time import sleep
from imutils.video import VideoStream
def sender_start(connect_to=None):
sender = imagezmq.ImageSender(connect_to=connect_to)
sender.zmq_socket.setsockopt(zmq.LINGER, 0) # prevents ZMQ hang on exit
# NOTE: because of the way PyZMQ and imageZMQ are implemented, the
# timeout values specified must be integer constants, not variables.
# The timeout value is in milliseconds, e.g., 2000 = 2 seconds.
sender.zmq_socket.setsockopt(zmq.RCVTIMEO, 2000) # set a receive timeout
sender.zmq_socket.setsockopt(zmq.SNDTIMEO, 2000) # set a send timeout
return sender
# use either of the formats below to specify the address of the display computer
# connect_to='tcp://jeff-macbook:5555'
# connect_to='tcp://192.168.1.190:5555'
connect_to = 'tcp://jeff-macbook:5555'
sender = sender_start(connect_to)
rpi_name = socket.gethostname() # send RPi hostname with each image
picam = VideoStream(usePiCamera=True).start()
time.sleep(3.0) # allow camera sensor to warm up
time_between_restarts = 5 # number of seconds to sleep between sender restarts
jpeg_quality = 95 # 0 to 100, higher is better quality, 95 is cv2 default
try:
while True: # send images as stream until Ctrl-C
image = picam.read()
ret_code, jpg_buffer = cv2.imencode(
".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
try:
reply_from_mac = sender.send_jpg(rpi_name, jpg_buffer)
except (zmq.ZMQError, zmq.ContextTerminated, zmq.Again):
if 'sender' in locals():
print('Closing ImageSender.')
sender.close()
sleep(time_between_restarts)
print('Restarting ImageSender.')
sender = sender_start(connect_to)
except (KeyboardInterrupt, SystemExit):
pass # Ctrl-C was pressed to end program
except Exception as ex:
print('Python error with no Exception handler:')
print('Traceback error:', ex)
traceback.print_exc()
finally:
if 'sender' in locals():
sender.close()
picam.stop() # stop the camera thread
sys.exit()
|
# Generated by Django 3.0.2 on 2020-02-04 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('budget', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='budget',
options={'get_latest_by': 'year', 'ordering': ('-year',)},
),
migrations.AlterField(
model_name='budget',
name='year',
field=models.PositiveSmallIntegerField(default=2020, help_text='Household fiscal year', unique=True),
),
]
|
import asyncio
from mesoor_recommendation_sdk import models, MesoorRecommendation
service = MesoorRecommendation('localhost:50051')
# Save a Job
async def save_demo_job():
job_detail = models.JobDetail(
name='foo name',
description='foo jd',
employer='foo company',
degree=models.Degree.本科,
years=1,
salary_high=1000,
salary_low=100,
publish_date_ts=1597738339072,
expire_date_ts=1597738339073,
industries=['foo_ind'],
location_ids=[123],
department='foo department',
categories=['foo_cat']
)
job = models.Job(
job_detail=job_detail,
status=models.Status.ARCHIVE
)
job_id = 'foo_id'
await service.save_job(
id=job_id,
job=job
)
print("saved job")
# Save a Candidate
async def save_demo_candidate():
basic = models.ResumeBasic(
location_id=310000,
birthday_ts=1597738339072
)
works = [
models.ResumeWork(
position='foo name',
company='foo company',
description='foo jd',
department='foo department',
industry='foo ind',
responsibility='foo resp',
start_date_ts=1597738339072,
end_date_ts=1597738339073,
until_now=False
)
]
projects = [
models.ResumeProject(
position='foo position',
company='foo company',
description='foo jd',
department='foo department',
name='foo name',
responsibility='foo resp',
start_date_ts=1597738339072,
end_date_ts=1597738339073,
until_now=False
)
]
interns = [
models.ResumeIntern(
position='foo name',
company='foo company',
description='foo jd',
department='foo department',
responsibility='foo resp',
start_date_ts=1597738339072,
end_date_ts=1597738339073,
until_now=False
)
]
educations = [
models.ResumeEducation(
major='foo major',
degree=models.Degree.本科,
school='foo school',
start_date_ts=1597738339072,
end_date_ts=1597738339073,
until_now=False
)
]
resume = models.Resume(
works=works,
educations=educations,
projects=projects,
interns=interns,
basic=basic,
update_date_ts=1597738339072
)
candidate = models.Candidate(
resume=resume,
status=models.Status.ACTIVE
)
candidate_id = 'foo_id_2'
await service.save_candidate(
id=candidate_id,
candidate=candidate
)
print("saved candidate")
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(save_demo_job())
loop.run_until_complete(save_demo_candidate())
job_id = 'foo_id'
result = loop.run_until_complete(service.recommend_candidates_by_job(job_id=job_id, size=10))
print([x.score for x in result])
|
# Generated by Django 2.1.2 on 2018-12-06 17:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('team', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Context',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='The date and time when this object was created.')),
('updated', models.DateTimeField(auto_now=True, help_text='The date and time when this object was last updated.')),
('name', models.CharField(help_text='The name of this Context. Must be unique within the Team.', max_length=100)),
('slug', models.SlugField(help_text='The slug for this Context. Must be unique within the Team.')),
('description', models.TextField(help_text='The description of this context. Markdown is supported.')),
('team', models.ForeignKey(help_text='The Team to which this Context belongs', on_delete=django.db.models.deletion.PROTECT, related_name='contexts', to='team.Team')),
],
options={
'ordering': ['name'],
},
),
migrations.AlterUniqueTogether(
name='context',
unique_together={('name', 'team'), ('slug', 'team')},
),
]
|
import unittest
import os
import numpy as np
from shutil import rmtree
from six import add_metaclass
from abc import ABCMeta
import sys
try:
import z5py
except ImportError:
sys.path.append('..')
import z5py
@add_metaclass(ABCMeta)
class GroupTestMixin(object):
def setUp(self):
self.shape = (100, 100, 100)
self.root_file = z5py.File('array.%s' % self.data_format)
g = self.root_file.create_group('test')
g.create_dataset('test',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
def tearDown(self):
try:
rmtree('array.%s' % self.data_format)
except OSError:
pass
def test_open_group(self):
g = self.root_file['test']
ds = g['test']
out = ds[:]
self.assertEqual(out.shape, self.shape)
self.assertTrue((out == 0).all())
def test_open_dataset(self):
ds = self.root_file['test/test']
out = ds[:]
self.assertEqual(out.shape, self.shape)
self.assertTrue((out == 0).all())
def test_group(self):
g = self.root_file.create_group('group')
ds = g.create_dataset('data',
dtype='float32',
shape=self.shape,
chunks=(10, 10, 10))
in_array = np.random.rand(*self.shape).astype('float32')
ds[:] = in_array
out_array = ds[:]
self.assertEqual(out_array.shape, in_array.shape)
self.assertTrue(np.allclose(out_array, in_array))
def test_create_nested_group(self):
self.root_file.create_group('foo/bar/baz')
g = self.root_file['foo/bar/baz']
self.assertEqual(g.path, os.path.join(self.root_file.path, 'foo/bar/baz'))
def test_require_group(self):
self.root_file.require_group('group')
g = self.root_file.require_group('group')
self.assertEqual(g.path, os.path.join(self.root_file.path, 'group'))
def test_delete(self):
self.assertTrue('test' in self.root_file)
self.assertTrue('test/test' in self.root_file)
del self.root_file['test']
self.assertFalse('test' in self.root_file)
self.assertFalse('test/test' in self.root_file)
class TestGroupZarr(GroupTestMixin, unittest.TestCase):
data_format = 'zr'
class TestGroupN5(GroupTestMixin, unittest.TestCase):
data_format = 'n5'
if __name__ == '__main__':
unittest.main()
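# The two concrete classes above run the same GroupTestMixin against the zarr ('zr') and
# N5 ('n5') formats. A single format can be tested on its own, e.g. (assuming this file
# is saved as test_group.py):
#   python -m unittest test_group.TestGroupZarr -v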
|
from abc import ABC, abstractmethod
_registered = []
class AbstractScene(ABC):
@classmethod
def __init_subclass__(cls, is_abstract=False, runtime_scene=False, **kwargs):
super().__init_subclass__(**kwargs)
if not is_abstract:
_registered.append(cls)
@abstractmethod
def draw(self, window):
raise NotImplementedError('subclasses must override draw')
@abstractmethod
def input(self, key):
raise NotImplementedError('subclasses must override input') |
from .retinaface import RetinaFace |
from xdg.IconTheme import getIconPath
from PySide.QtGui import QListWidget, QListWidgetItem, QIcon
from plustutocenter.qt.widgets.base_custom_entries_widget import \
BaseCustomEntriesWidget
class SoftwareEntry:
def __init__(self, name, iconPath, tutorials, desktopFilePath):
"""
:type: str
:type: str
:type: List[Tutorial]
:type: str
"""
self.name = name
self.iconPath = iconPath
self.tutorials = tutorials
self.desktopFilePath = desktopFilePath
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
if not isinstance(other, SoftwareEntry):
return False
else:
return \
self.name == other.name and \
self.iconPath == other.iconPath and \
self.tutorials == other.tutorials and \
self.desktopFilePath == other.desktopFilePath
def __hash__(self):
return hash((self.name,
self.iconPath,
                     tuple(self.tutorials),  # hash the tutorial contents, not a generator object
self.desktopFilePath))
class SoftwareEntriesWidget(BaseCustomEntriesWidget):
def __init__(self, delegate):
"""
:type delegate: QListWidget
"""
super().__init__(delegate)
self._font.setPointSize(14)
def createCustomEntry(self, model):
"""
:type model: SoftwareEntry
:rtype: QListWidgetItem
"""
return QListWidgetItem(
QIcon(getIconPath(model.iconPath, theme="gnome")),
model.name)
|
#Jenny Steffens
from Analysis import *
import random, time
def main():
# This list and dictionary are now the default in the Analyzer. They
# do not need to be entered in a driver. However, if the dictionary is updated,
    # either do so in Analysis.py or, when initializing, pass kD=<dictionary>,
    # where <dictionary> is the variable name of the updated dictionary.
keyword_list = ["HillaryClinton", "Hillary2016", "Hillary",
"Lessig", "Lessig2016", "Lessig2016",
"O'Malley", "OMalley2016", "MartinOMalley",
"Bernie", "FeelTheBern", "Bernie2016",
"Jeb", "JebBush", "Jeb2016",
"Carson", "BC2DC16", "RealBenCarson",
"Chris Christie", "Christie2016", "ChrisChristie",
"Cruz", "CruzCrew", "TedCruz",
"Fiorina", "Carly2016", "CarlyFiorina",
"Jim Gilmore", "JimGilmore", "gov_gilmore",
"Graham", "LindseyGraham", "LindseyGrahamSC",
"Huckabee", "ImWithHuck", "GovMikeHuckabee",
"Jindal", "BobbyJindal", "BobbyJindal",
"Kasich", "Kasich4Us", "JohnKasich",
"Rand Paul", "RandPaul2016", "RandPaul",
"Rubio", "MarcoRubio",
"Santorum", "RickSantorum",
"Trump", "DonaldTrump2016", "realDonaldTrump"]
keyword_dictionary = dict([
('clinton',["HillaryClinton", "Hillary2016", "Hillary", 'clinton', 'hilary']),
('lessig',["Lessig", "Lessig2016", "Lessig2016"]),
('o\'malley',["O'Malley", "OMalley2016", "MartinOMalley", 'omalley']),
('sanders',["Bernie", "FeelTheBern", "Bernie2016", 'sanders']),
('bush',["Jeb", "JebBush", "Jeb2016"]),
('carson',["Carson", "BC2DC16", "RealBenCarson"]),
('christie', ["Chris Christie", "Christie2016", "ChrisChristie"]),
('cruz',["Cruz", "CruzCrew", "TedCruz"]),
('cruz',["Fiorina", "Carly2016", "CarlyFiorina"]),
('gilmore',["Jim Gilmore", "JimGilmore", "gov_gilmore"]),
('graham',["Graham", "LindseyGraham", "LindseyGrahamSC"]),
('huckabee', ["Huckabee", "ImWithHuck", "GovMikeHuckabee"]),
('jindal', ["Jindal", "BobbyJindal", "BobbyJindal"]),
('kasich', ["Kasich", "Kasich4Us", "JohnKasich"]),
('paul', ["Rand Paul", "RandPaul2016", "RandPaul"]),
('rubio', ["Rubio", "MarcoRubio", "Rubio2016"]),
('santorum', ["Santorum", "RickSantorum"]),
('trump', ["Trump", "DonaldTrump2016", "realDonaldTrump"])])
start_time = time.time() #let's see the runtime of this
v = Analyzer(state='NH') #the state here is New Hampshire
filelist = v.make_file_list('02') #tweet files for the month of February
f = v.screen_and_combine(filelist, "FebTweets.txt", notify=True)
# this will return the new name, in this case, "FebTweets.txt," of the output file, a combination of all the files
# in filelist. Notify simply prints to the console when the file is screened
# Demonstrating how one would call the functions.
a = v.write_csv("FebCandidate.csv", f, county_names=False, notify=True)
b = v.write_csv("FebCandidate.tsv", f, county_names=False, labels=False)
c = v.write_csv("FebCounty.csv", f, county_names=False, labels=False, group_by="county", notify=True)
d = v.csv_convert("FebCounty.csv")
print("New files: ", a,b,c)
print("done")
start_time = time.time()
main()
print("--- completed in %s seconds ---" % (time.time() - start_time))
|
import jwt
from fastapi import Request
from fastapi.security import HTTPBearer
from . import const
ALGORITHMS = ["RS256"]
token_auth_scheme = HTTPBearer()
class AuthError(Exception):
def __init__(self, error, status_code):
self.error = error
self.status_code = status_code
class JWTBearer(HTTPBearer):
def __init__(self, required_scope=None, any_scope=[], auto_error: bool = True):
super(JWTBearer, self).__init__(auto_error=auto_error)
self.token = None
self.required_scope = required_scope
self.any_scope = any_scope
async def __call__(self, request: Request):
self.token = await super(JWTBearer, self).__call__(request=request)
await self.check_auth()
await self.check_scope(required_scope=self.required_scope, any_scope=self.any_scope)
async def check_auth(self):
jwks_url = "https://{}/.well-known/jwks.json".format(const.AUTH0_DOMAIN)
jwks_client = jwt.PyJWKClient(jwks_url)
try:
signing_key = jwks_client.get_signing_key_from_jwt(self.token.credentials).key
except jwt.exceptions.PyJWKClientError as error:
raise AuthError({"code": "jwk_client_error", "description": "bad jwk client"}, 401)
except jwt.exceptions.DecodeError as error:
raise AuthError({"code": "jwt_decode_error", "description": error.__str__()}, 401)
if signing_key:
try:
jwt.decode(
self.token.credentials,
signing_key,
algorithms=ALGORITHMS,
audience=const.AUTH0_AUDIENCE,
issuer="https://{}/".format(const.AUTH0_DOMAIN),
)
except jwt.exceptions.ExpiredSignatureError:
raise AuthError({"code": "token_expired", "description": "token is expired"}, 401)
except jwt.exceptions.MissingRequiredClaimError:
raise AuthError(
{
"code": "invalid_claims",
"description": "incorrect claims," " please check the audience and issuer",
},
401,
)
except Exception as e:
print(e)
raise AuthError(
{"code": "invalid_header", "description": "Unable to parse authentication" " token."}, 401
)
else:
raise AuthError({"code": "invalid_header", "description": "Unable to find appropriate key"}, 401)
async def check_scope(self, required_scope=None, any_scope=[]):
unverified_claims = jwt.decode(self.token.credentials, options={"verify_signature": False})
if unverified_claims.get("scope"):
token_scopes = unverified_claims["scope"].split()
for token_scope in token_scopes:
if required_scope is not None:
if token_scope == required_scope:
return
if isinstance(any_scope, list):
for scope in any_scope:
if token_scope == scope:
return
raise AuthError({"code": "Unauthorized", "description": "You don't have access to this resource"}, 403)
|
import config
import apiai, json
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
updater = Updater(token=config.TOKEN)
dispatcher = updater.dispatcher
def startCommand(bot, update):
bot.send_message(chat_id=update.message.chat_id, text='Hello, let\'s talk?')
def textMessage(bot, update):
request = apiai.ApiAI(config.TOKEN_Small_Talk).text_request() # Token API to Dialogflow
    request.lang = 'en-US'  # Language in which the request will be sent
    request.session_id = 'BatlabAIBot'  # A session ID is required (needed to train the bot later)
request.query = update.message.text # send a request to the AI with a message from the user
responseJson = json.loads(request.getresponse().read().decode('utf-8'))
response = responseJson['result']['fulfillment']['speech'] # parse JSON and pull out the answer
# If there is an answer from the bot - send it to the user, if not - the bot did not understand it
if response:
bot.send_message(chat_id=update.message.chat_id, text=response)
else:
bot.send_message(chat_id=update.message.chat_id, text='I do not quite understand you!')
# Handlers
start_command_handler = CommandHandler('start', startCommand)
text_message_handler = MessageHandler(Filters.text, textMessage)
# Add handlers to dispatcher
dispatcher.add_handler(start_command_handler)
dispatcher.add_handler(text_message_handler)
# Start polling for updates
updater.start_polling(clean=True)
# Stop the bot when Ctrl+C is pressed
updater.idle()
|
$NetBSD: patch-setup.py,v 1.1 2020/05/18 15:19:01 wiz Exp $
Don't hardcode version numbers.
--- setup.py.orig 2020-03-01 07:51:19.388557700 +0000
+++ setup.py
@@ -11,7 +11,7 @@ package_data = \
{'': ['*']}
install_requires = \
-['pygls>=0.8.1,<0.9.0', 'pyparsing>=2.4,<3.0']
+['pygls', 'pyparsing']
entry_points = \
{'console_scripts': ['cmake-format = cmake_language_server.formatter:main',
|