# AoC 2019. Day 12. The N-Body Problem
import util
class Moon():
def __init__(self, pos):
self.pos = pos.copy()
self.vel = [0,0,0]
def apply_velocity(self):
for dim in range(3):
self.pos[dim] += self.vel[dim]
def potential(self):
return sum([abs(x) for x in self.pos])
def kinetic(self):
return sum([abs(x) for x in self.vel])
def energy(self):
return self.potential() * self.kinetic()
def __str__(self):
return f"pos=<x={self.pos[0]}, y={self.pos[1]}, z={self.pos[2]}>, vel=<x={self.vel[0]}, y={self.vel[1]}, z={self.vel[2]}>"
def apply_gravity(A, B):
    for dimension in range(3):
        if A.pos[dimension] < B.pos[dimension]:
            A.vel[dimension] += 1
            B.vel[dimension] -= 1
        elif A.pos[dimension] > B.pos[dimension]:
            A.vel[dimension] -= 1
            B.vel[dimension] += 1
def test1(positions, steps: int, expected_energy: int):
moons = list()
for p in positions:
moons.append(Moon(p))
for step in range(steps):
for i in range(len(moons)):
for j in range(0,i):
apply_gravity(moons[i], moons[j])
#print(f"\nAfter {step+1} steps:")
for m in moons:
m.apply_velocity()
#print(m)
result = sum([m.energy() for m in moons])
if result != expected_energy:
print(f"Error, expected={expected_energy}, actual={result}")
else:
print("OK")
def test2(positions, expected_steps: int):
moons = list()
for p in positions:
moons.append(Moon(p))
step = 0
    while True:
for i in range(len(moons)):
for j in range(0,i):
apply_gravity(moons[i], moons[j])
#print(f"\nAfter {step+1} steps:")
for m in moons:
m.apply_velocity()
# print(m)
step += 1
if positions == [m.pos for m in moons]:
break
        if step % 1000000 == 0:
            print(f'step = {step // 1000000}M')
    # Velocities are all zero when the full state first repeats, so the final
    # position update of the cycle is a no-op: positions already match one
    # step before the true period. Hence the +1.
    result = step + 1
if result != expected_steps:
print(f"Error, expected={expected_steps}, actual={result}")
else:
print("OK")
print("Part 1.")
test1([[-1,0,2], [2,-10,-7], [4,-8,8], [3,5,-1]], 10, 179)
test1([[-8,-10,0], [5,5,10], [2,-7,3], [9,-8,-3]], 100, 1940)
test1([[-19,-4,2], [-9,8,-16], [-4,5,-11], [1,9,-13]], 1000, 8287) # my task
print("Part 2.")
test2([[-1,0,2], [2,-10,-7], [4,-8,8], [3,5,-1]], 2772)
# too long
# test2([[-8,-10,0], [5,5,10], [2,-7,3], [9,-8,-3]], 4686774924)
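# The brute-force state comparison above is far too slow for the larger
# inputs. A sketch of the standard speed-up (not part of the original
# solution): the three axes evolve independently, so find each axis's cycle
# length separately and combine them with an LCM.
from math import gcd
def find_cycle_length(positions):
    moons = [Moon(p) for p in positions]
    periods = [0, 0, 0]
    step = 0
    while not all(periods):
        for i in range(len(moons)):
            for j in range(i):
                apply_gravity(moons[i], moons[j])
        for m in moons:
            m.apply_velocity()
        step += 1
        for d in range(3):
            # an axis cycles when its positions match the start and its
            # velocities are back to zero
            if periods[d] == 0 and all(
                    m.pos[d] == p[d] and m.vel[d] == 0
                    for m, p in zip(moons, positions)):
                periods[d] = step
    lcm = 1
    for p in periods:
        lcm = lcm * p // gcd(lcm, p)
    return lcm
# e.g. find_cycle_length([[-8,-10,0], [5,5,10], [2,-7,3], [9,-8,-3]])
# should return 4686774924 in seconds rather than hours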
|
import re
import bpy
import bmesh
import logging
from mathutils import Vector
"""
split mesh into different objects
"""
def split(regex, split_z, **kwargs):
for obj in bpy.data.objects:
m = re.match(regex, obj.name)
if not m:
continue
        # Blender 2.7x selection API (2.8+ uses select_set / view_layer.objects.active)
        for ob in bpy.data.objects:
            ob.select = False
        bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
dmesh = obj.data
mesh = bmesh.from_edit_mesh(dmesh)
for f in mesh.faces:
f.select = f.calc_center_median().z > split_z
bpy.ops.mesh.separate(type='SELECTED')
bmesh.update_edit_mesh(dmesh)
dmesh.update()
bpy.ops.object.mode_set(mode='OBJECT')
"""
remove mesh (e.g. starting cube)
"""
def remove(mesh_name, **kwargs):
for ob in bpy.context.scene.objects:
ob.select = ob.type == 'MESH' and ob.name.startswith(mesh_name)
bpy.ops.object.delete()
"""
reposition by median
"""
def reposition(regex, **kwargs):
for obj in bpy.data.objects:
m = re.match(regex, obj.name)
if not m:
continue
me = obj.data
verts = [v.co for v in me.vertices]
pivot = sum(verts, Vector()) / len(verts)
for v in me.vertices:
v.co.z -= pivot.z
obj.location.z = pivot.z
|
import numpy as np
import torch
from torch.distributions import Distribution
def gaussian_processes(data=10):
    raise NotImplementedError
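# The stub above is unimplemented. A minimal sketch of what it might do,
# assuming the intent is to sample from a zero-mean GP prior with an RBF
# kernel (the helper name, lengthscale and jitter below are illustrative,
# not fixed by the original):
def gp_prior_sample(x, lengthscale=1.0, jitter=1e-6):
    # x: 1-D tensor of input locations
    diff = x.unsqueeze(0) - x.unsqueeze(1)
    cov = torch.exp(-0.5 * (diff / lengthscale) ** 2)
    cov = cov + jitter * torch.eye(len(x))  # numerical stability
    mvn = torch.distributions.MultivariateNormal(torch.zeros(len(x)), cov)
    return mvn.sample()
|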
import xarray
import numpy as np
BA_THRESH = 7.7 * 1e6 # Threshold for burned area
AGB_THRESH = 0 # Threshold for AGB dataset
def fuelload(path_agb: str, path_ba: str, path_fl: str = None) -> xarray.Dataset:
"""This function generates Fuel Load xarray dataset
Parameters
-----------
path_agb : str
path of AGB dataset
path_ba : str
path of BA dataset
path_fl : str
path to save final fuel load dataset. Defaults to None.
Returns
--------
xarray.Dataset
Returns xarray dataset of fuel load
"""
da_agb = xarray.open_dataset(path_agb)
da_ba = xarray.open_dataset(path_ba)
agb_data = da_agb["abg_avitabile_vodmean"][:, :, :]
ba_data = da_ba["burned_area"][:, :, :]
agb_data.values[agb_data.values == AGB_THRESH] = np.nan
ba_data.values[ba_data.values < BA_THRESH] = np.nan
    # AGB units are Mg/ha (megagrams per hectare);
    # BA units are m2, therefore we convert BA to hectares (1 ha = 10,000 m2).
ba_data = ba_data * 0.0001
# Now that units are consistent we calculate LOAD = AGB * BA
fuel_load_dataset = agb_data * ba_data
if path_fl is not None:
fuel_load_dataset.to_netcdf(path_fl)
return fuel_load_dataset
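# Example usage (hypothetical paths):
# fl = fuelload("agb.nc", "burned_area.nc", path_fl="fuel_load.nc")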
|
#!/usr/bin/env python3
from datetime import datetime, timedelta
import subprocess
import time
imagelist = [
{"filename": "test_images/nzflag.svg", "fuzz": None},
{"filename":"test_images/redpainting.jpg", "fuzz": None},
{"filename":"test_images/redwhiteblack-art.jpg", "fuzz": None},
{"filename":"test_images/textile.jpg", "fuzz": None},
{"filename":"test_images/stpaul.jpg", "fuzz": None},
{"filename": "test_images/banksy.jpg", "fuzz": 40},
{"filename": "test_images/art-cropped.jpg", "fuzz": None},
]
def run_cmd(cmdline):
convert = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = convert.communicate()
    if stderr:
print(stderr)
return stdout.decode("UTF-8")
while True:
    # rotate the list: move the last image to the front so images cycle
    item = imagelist.pop()
    imagelist.insert(0, item)
    cmdline = ["./display.py", "-i", item["filename"]]
    if item["fuzz"]:
        cmdline.append("-f")
        cmdline.append(str(item["fuzz"]))
print(item["filename"])
run_cmd(cmdline)
dt = datetime.now() + timedelta(hours=1)
#dt = dt.replace(minute=0)
while datetime.now() < dt:
        time.sleep(60)
|
'''
python manage.py --dest backup
'''
from django.apps import apps as django_apps
from django.db import connections, transaction
from django.db.utils import ConnectionDoesNotExist, OperationalError
from django.core.management.base import BaseCommand, CommandError
from django.core.management import call_command
class Command(BaseCommand):
help = 'Backup default database to destination database.'
def add_arguments(self, parser):
        # nargs: '?' means at most one value,
        # '*' means zero or more,
        # '+' means at least one
parser.add_argument('others', metavar='others', nargs='*')
parser.add_argument('-D', '--dest', metavar='dest database', nargs='?', default='backup')
def handle(self, *args, **options):
db = options['dest']
if db == 'default':
            raise CommandError('This is the production database.')
        # Verify that the backup database connection works
try:
connection = connections[db]
if not connection.is_usable():
raise OperationalError('Not Usable!')
except ConnectionDoesNotExist:
raise CommandError(f'Database connection of {db} does not exist, please check settings.py.')
except (OperationalError, AttributeError) as e:
raise CommandError(f'Database {db} connecting failed: {e}.')
        # Migrate, then flush the backup database
        call_command('migrate', database=db)
        confirm = input(self.style.WARNING(f'Are you sure you want to truncate the database {db}? (y/n)'))
        if confirm == 'y':
            call_command('flush', database=db, interactive=False)
            self.stdout.write(self.style.SUCCESS(f'Database {db} has been truncated!'))
        else:
            exit('Backup cancelled, bye...')
        # Copy the data from the default database into the backup
        all_models = django_apps.all_models
        # contenttypes:
        # migrate auto-populates this table; flush clears it and creates new
        # rows, so the primary keys change
        self.backup(model=all_models['contenttypes']['contenttype'], db=db)
        # auth - User
        # migrate auto-populates the permission table as well; flush clears
        # it and creates new rows, so the primary keys change
self.backup(model=all_models['auth']['permission'], db=db)
self.backup(model=all_models['auth']['group'], db=db)
self.backup(model=all_models['auth']['group_permissions'], db=db)
# self.backup(model=all_models['auth']['user'], db=db)
# account
        # order matters here
self.backup(model=all_models['account']['user'], db=db)
self.backup(model=all_models['account']['user_groups'], db=db)
self.backup(model=all_models['account']['user_user_permissions'], db=db)
self.backup(model=all_models['account']['follow'], db=db)
self.backup(model=all_models['account']['groupsetting'], db=db)
        # sessions
for model in all_models['sessions'].values():
self.backup(model=model, db=db)
# admin
for model in all_models['admin'].values():
self.backup(model=model, db=db)
# xadmin
for model in all_models['xadmin'].values():
self.backup(model=model, db=db)
# action
for model in all_models['action'].values():
self.backup(model=model, db=db)
# tft
for model in all_models['tft'].values():
self.backup(model=model, db=db)
self.stdout.write(self.style.SUCCESS('Backup Finished!'))
def backup(self, model=None, db=None):
        # automatically-set timestamps are lost, and the auto-generated
        # primary key IDs that follow are not the desired result
if db is None:
exit(self.style.ERROR('Please confirm the db parameter'))
try:
with transaction.atomic(using=db):
querysets = model.objects.using('default').all()
                model.objects.using(db).delete()  # the auto-generated rows' primary keys have changed
                # SQLite allows at most 999 variables per query by default,
                # so batch the insert accordingly
                model.objects.using(db).bulk_create(querysets, batch_size=900)
        except Exception as e:
            # transaction.atomic() has already rolled back the backup DB here
            self.stderr.write(self.style.ERROR(f'Something happened, transaction rolled back: {e}.'))
else:
self.stdout.write(self.style.SQL_TABLE(f'{model._meta.verbose_name} finished...'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-14 16:03
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.manager
class Migration(migrations.Migration):
dependencies = [
('courses', '0002_auto_20170913_1307'),
]
operations = [
migrations.AlterModelManagers(
name='course',
managers=[
('object', django.db.models.manager.Manager()),
],
),
]
|
import argparse
from overrides import overrides
from typing import Any
from typing import Dict
from allennlp.commands.subcommand import Subcommand
import optuna
def fetch_best_params(storage: str, study_name: str) -> Dict[str, Any]:
study = optuna.load_study(study_name=study_name, storage=storage)
return study.best_params
def show_best_params(args: argparse.Namespace) -> None:
best_params = fetch_best_params(args.storage, args.study_name)
print(" ".join("{}={}".format(k, v) for k, v in best_params.items()))
@Subcommand.register("best-params")
class BestParam(Subcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction) -> argparse.ArgumentParser:
description = """Export best hyperparameters in the trials."""
subparser = parser.add_parser(self.name, description=description, help="Export best hyperparameters.")
subparser.add_argument(
"--study-name", default=None, help="The name of the study to start optimization on."
)
subparser.add_argument(
"--storage",
type=str,
            help=(
                "The path to storage. "
                "allennlp-optuna supports a valid URL "
                "for sqlite3, mysql, postgresql, or redis."
            ),
default="sqlite:///allennlp_optuna.db",
)
subparser.set_defaults(func=show_best_params)
return subparser
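# Example invocation once the plugin is registered with allennlp (the study
# name below is hypothetical):
#   allennlp best-params --study-name my-study --storage sqlite:///allennlp_optuna.db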
|
'''
Exercise 65: write a program that reads several integers from the keyboard.
At the end, show the average of them all, and the highest and lowest values
read. The program should ask whether the user wants to continue.
'''
total = 0
soma = 0
maior = 0
menor = 0
resp = False
while not resp:
    num = int(input('Enter a value: '))
    soma += num
    total += 1
    if total == 1:
        maior = num
        menor = num
    else:
        if num > maior:
            maior = num
        if num < menor:
            menor = num
    continuar = str(input('Do you want to continue [Y/N]: ')).upper()
    if continuar == 'N':
        resp = True
media = soma / total
print('You entered {} numbers\n'
      'Average = {}\n'
      'Highest = {}\n'
      'Lowest = {}\n'
      .format(total, media, maior, menor))
|
import fixtures
import os
import uuid
from vnc_api.vnc_api import *
from cfgm_common.exceptions import NoIdError
from tcutils.util import get_dashed_uuid
from openstack import OpenstackAuth, OpenstackOrchestrator
from vcenter import VcenterAuth
from common import log_orig as contrail_logging
from contrailapi import ContrailVncApi
class VncLibFixture(fixtures.Fixture):
''' Wrapper for VncApi
:param domain : default is default-domain
:param project_name : default is admin
:param cfgm_ip : default is 127.0.0.1
:param api_port : default is 8082
:param connections : ContrailConnections object. default is None
:param username : default is admin
:param password : default is contrail123
:param auth_server_ip : default is 127.0.0.1
    :param project_id : default is None
:param logger : logger object
'''
def __init__(self, *args, **kwargs):
self.username = os.getenv('OS_USERNAME') or \
kwargs.get('username', 'admin')
self.password = os.getenv('OS_PASSWORD') or \
kwargs.get('password', 'contrail123')
self.project_name = kwargs.get('project_name', 'admin')
self.domain = kwargs.get('domain', 'default-domain')
self.api_server_port = kwargs.get('api_server_port', '8082')
self.cfgm_ip = kwargs.get('cfgm_ip', '127.0.0.1')
self.logger = kwargs.get('logger', None)
self.connections = kwargs.get('connections', None)
self.orchestrator = kwargs.get('orchestrator', 'openstack')
self.vnc_api_h = None
self.inputs = self.connections.inputs if self.connections \
else kwargs.get('inputs', None)
self.neutron_handle = None
self.auth_server_ip = self.inputs.auth_ip if self.inputs else \
kwargs.get('auth_server_ip', '127.0.0.1')
self.auth_url = self.inputs.auth_url if self.inputs else \
os.getenv('OS_AUTH_URL') or \
'http://%s:5000/v2.0'%self.auth_server_ip
self.project_id = kwargs.get('project_id', None)
# end __init__
def setUp(self):
super(VncLibFixture, self).setUp()
if self.connections:
self.logger = self.connections.logger
self.project_name = self.connections.project_name
self.inputs = self.connections.inputs
self.neutron_handle = self.connections.quantum_h
self.vnc_api_h = self.connections.vnc_lib
self.username = self.connections.username
self.password = self.connections.password
self.cfgm_ip = self.inputs.cfgm_ip
self.auth_server_ip = self.inputs.auth_ip
self.project_id = self.connections.project_id
self.auth_url = 'http://' + self.inputs.auth_ip + ':5000/v2.0'
else:
self.logger = self.logger or contrail_logging.getLogger(__name__)
self.vnc_api_h = VncApi(
username=self.username,
password=self.password,
tenant_name=self.project_name,
api_server_host=self.cfgm_ip,
api_server_port=self.api_server_port,
auth_host=self.auth_server_ip)
if not self.project_id:
if self.orchestrator == 'openstack':
self.auth_client = OpenstackAuth(
self.username,
self.password,
self.project_name,
auth_url=self.auth_url,
logger=self.logger)
self.project_id = self.auth_client.get_project_id()
elif self.orchestrator == 'vcenter':
self.auth_client = VcenterAuth(self.username,
self.password,
self.project_name,
self.inputs
)
self.project_id = self.auth_client.get_project_id()
self.vnc_h = ContrailVncApi(self.vnc_api_h, self.logger)
# end setUp
def cleanUp(self):
super(VncLibFixture, self).cleanUp()
def get_handle(self):
return self.vnc_api_h
# end get_handle
def get_neutron_handle(self):
if self.neutron_handle:
return self.neutron_handle
else:
self.orch = OpenstackOrchestrator(username=self.username,
password=self.password,
project_id=self.project_id,
project_name=self.project_name,
auth_server_ip=self.auth_server_ip,
vnclib=self.vnc_api_h,
logger=self.logger, inputs=self.inputs)
self.neutron_handle = self.orch.get_network_handler()
return self.neutron_handle
# end get_neutron_handle
def get_project_obj(self):
if self.connections:
project_id = self.connections.project_id
elif self.project_id:
project_id = self.project_id
else:
project_id = self.vnc_api_h.project_read(
fq_name_str='default-domain:default-project').uuid
parent_obj = self.vnc_api_h.project_read(id=project_id)
return parent_obj
# end get_parent_obj
def get_forwarding_mode(self, vn_fq_name):
vnc_lib = self.vnc_api_h
# Figure out VN
vni_list = vnc_lib.virtual_networks_list(
parent_id=self.project_id)['virtual-networks']
for vni_record in vni_list:
if (vni_record['fq_name'][0] == vn_fq_name.split(":")[0] and
vni_record['fq_name'][1] == vn_fq_name.split(":")[1] and
vni_record['fq_name'][2] == vn_fq_name.split(":")[2]):
vni_obj = vnc_lib.virtual_network_read(id=vni_record['uuid'])
vni_obj_properties = vni_obj.get_virtual_network_properties()
if vni_obj_properties:
fw_mode = vni_obj_properties.get_forwarding_mode()
else:
fw_mode = None
return fw_mode
# end get_forwarding_mode
def get_vn_subnet_dhcp_flag(self, vn_fq_name):
vnc_lib = self.vnc_api_h
# Figure out VN
vni_list = vnc_lib.virtual_networks_list(
parent_id=self.project_id)['virtual-networks']
for vni_record in vni_list:
if (vni_record['fq_name'][0] == vn_fq_name.split(":")[0] and
vni_record['fq_name'][1] == vn_fq_name.split(":")[1] and
vni_record['fq_name'][2] == vn_fq_name.split(":")[2]):
vni_obj = vnc_lib.virtual_network_read(id=vni_record['uuid'])
subnets = vni_obj.network_ipam_refs[0]['attr']
ipam = subnets.get_ipam_subnets()
enable_dhcp = ipam[0].get_enable_dhcp()
return enable_dhcp
# get_vn_subnet_dhcp_flag
def set_rpf_mode(self, vn_fq_name, mode):
vnc_lib = self.vnc_api_h
# Figure out VN
vni_list = self.vnc_api_h.virtual_networks_list(
parent_id=self.project_id)['virtual-networks']
for vni_record in vni_list:
if (vni_record['fq_name'][0] == vn_fq_name.split(":")[0] and
vni_record['fq_name'][1] == vn_fq_name.split(":")[1] and
vni_record['fq_name'][2] == vn_fq_name.split(":")[2]):
vni_obj = vnc_lib.virtual_network_read(id=vni_record['uuid'])
vni_obj_properties = vni_obj.get_virtual_network_properties() or VirtualNetworkType()
vni_obj_properties.set_rpf(mode)
vni_obj.set_virtual_network_properties(vni_obj_properties)
vnc_lib.virtual_network_update(vni_obj)
# end set_rpf_mode
def id_to_fq_name(self, id):
return self.vnc_api_h.id_to_fq_name(id)
def set_vxlan_mode(self, vxlan_mode='automatic'):
''' one of automatic or configured
'''
fq_name = [ 'default-global-system-config',
'default-global-vrouter-config']
vrouter_config = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name)
vrouter_config.set_vxlan_network_identifier_mode(vxlan_mode)
self.vnc_api_h.global_vrouter_config_update(vrouter_config)
def get_vxlan_mode(self):
fq_name = [ 'default-global-system-config',
'default-global-vrouter-config']
vrouter_config = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name)
return vrouter_config.get_vxlan_network_identifier_mode()
# end
def get_global_asn(self, gsc_id=None):
gsc_id = gsc_id or self.vnc_api_h.get_default_global_system_config_id()
gsc_obj = self.vnc_api_h.global_system_config_read(id=gsc_id)
return gsc_obj.get_autonomous_system()
# end get_global_asn
def set_global_asn(self, asn, gsc_id=None):
gsc_id = gsc_id or self.vnc_api_h.get_default_global_system_config_id()
gsc_obj = self.vnc_api_h.global_system_config_read(id=gsc_id)
gsc_obj.set_autonomous_system(int(asn))
self.vnc_api_h.global_system_config_update(gsc_obj)
# end set_global_asn
def get_global_forwarding_mode(self):
fq_name = [ 'default-global-system-config',
'default-global-vrouter-config']
gsc_obj = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name)
return gsc_obj.get_forwarding_mode()
# end get_global_forwarding_mode
def get_active_forwarding_mode(self,vn_fq_name):
        ''' Returns l2 or l3 or l2_l3
        Returns the VN's forwarding mode if set.
        If the VN forwarding mode is not set, returns the global forwarding mode.
        If the global forwarding mode is not set either, returns 'l2_l3', the default.
        '''
        # get_forwarding_mode() expects a colon-separated fq-name string
        if not isinstance(vn_fq_name, str):
            vn_fq_name = ':'.join(vn_fq_name)
gl_fw_mode = self.get_global_forwarding_mode()
vn_fw_mode = self.get_forwarding_mode(vn_fq_name)
if vn_fw_mode:
return vn_fw_mode
elif gl_fw_mode:
return gl_fw_mode
else:
return 'l2_l3'
#end get_active_forwarding_mode
def set_global_forwarding_mode(self,forwarding_mode):
fq_name = [ 'default-global-system-config',
'default-global-vrouter-config']
gsc_obj = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name)
gsc_obj.set_forwarding_mode(forwarding_mode)
self.vnc_api_h.global_vrouter_config_update(gsc_obj)
#end set_global_forwarding_mode
def get_flow_export_rate(self):
fq_name = [ 'default-global-system-config',
'default-global-vrouter-config']
gv_obj = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name)
rate = gv_obj.get_flow_export_rate()
if not rate:
            # If not set, return 100, since this is the default
return 100
else:
return rate
# end get_flow_export_rate
def set_flow_export_rate(self, value):
'''
Set flow export rate in default global vrouter config
value : Value of flow export rate to be set
'''
fq_name = [ 'default-global-system-config',
'default-global-vrouter-config']
gv_obj = self.vnc_api_h.global_vrouter_config_read(fq_name=fq_name)
gv_obj.set_flow_export_rate(int(value))
self.vnc_api_h.global_vrouter_config_update(gv_obj)
self.logger.info('Setting flow export rate: %s' % (value))
return True
# end set_flow_export_rate
# end VncLibFixture
|
#Automatic Hyperparameter Tuning Method for Local Outlier Factor, with Applications to Anomaly Detection
#https://arxiv.org/pdf/1902.00567v1.pdf
import numpy as np
import tqdm
import matplotlib
import matplotlib.pyplot as plt
from celluloid import Camera
from collections import defaultdict
from sklearn.neighbors import LocalOutlierFactor
from scipy.stats import nct
np.set_printoptions(precision=5)
"""
Algorithm : Tuning algorithm for LOF
1: training data X ∈ R
n×p
2: a grid of feasible values gridc
for contamination c
3: a grid of feasible values gridk
for neighborhood size k
4: for each c ∈ gridc do
5: for each k ∈ gridk do
6: set Mc,k,out to be mean log LOF for the bcnc outliers
7: set Mc,k,in to be mean log LOF for the bcnc inliers
8: set Vc,k,out to be variance of log LOF for the bcnc outliers
9: set Vc,k,in to be variance of log LOF for the bcnc inliers
10: set Tc,k = √
Mc,k,out−Mc,k,in
1
bcnc (Vc,k,out+Vc,k,in)
11: end for
12: set Mc,out to be mean Mc,k,out over k ∈ gridk
13: set Mc,in to be mean Mc,k,in over k ∈ gridk
14: set Vc,out to be mean Vc,k,out over k ∈ gridk
15: set Vc,in to be mean Vc,k,in over k ∈ gridk
16: set ncpc = √
Mc,out−Mc,in
1
bcnc (Vc,out+Vc,in)
17: set dfc = 2bcnc − 2
18: set kc,opt = arg maxk Tc,k
19: end for
20: set copt = arg maxc P(Z < Tc,kc,opt ; d fc
, ncpc), where the random variable Z follows a noncentral
t distribution with dfc degrees of freedom and ncpc noncentrality parameter
"""
class LOF_AutoTuner(object):
def __init__(self, n_samples = 500, data = None, c_max = 0.1, k_max = 100):
if data is None:
self.n_samples = n_samples
print("Input 'data', array-like, shape : (n_samples, n_features).")
else:
self.data = data
self.n_samples = self.data.shape[0]
self.eps = 1e-8
self.c_max = c_max
self.k_max = k_max
self.c_steps = 100
self.k_grid = np.arange(1,self.k_max + 1) #neighbors
self.c_grid = np.linspace(0.005, self.c_max, self.c_steps) #contamination
def test(self):
#sample random gaussian data
self.data = np.random.standard_normal(size=(self.n_samples,2))
#run tuner
self.run()
#visualize tuning
self.visualise()
def visualise(self):
#set inlier threshold. i.e - Any point with Log-LOF score < thresh is considered an inlier.
thresh = 0.2
fig, ax = plt.subplots(2,2,dpi= 100)
cam = Camera(fig)
c_list = [c[3] for c in self.collector]
k_list = [c[0] for c in self.collector]
z_list = [c[2] for c in self.collector]
for i, v in tqdm.tqdm(enumerate(self.collector)):
Kopt, Topt, Z, contamination = v
clf = LocalOutlierFactor(n_neighbors=Kopt,
contamination=contamination)
clf.fit_predict(self.data)
X_scores = clf.negative_outlier_factor_
log_lof = np.log(-X_scores).flatten()
#viz--->
ax[0,1].hist(log_lof, density = True, bins = 100)
ax[0,1].text(0.05, 0.85, 'Log-LOF :', transform=ax[0,1].transAxes)
c_lis = c_list[:i+1]
k_lis = k_list[:i+1]
z_lis = z_list[:i+1]
ax[0,0].scatter(c_lis, z_lis, c = 'b', s = 5.)
ax[0,0].text(0.05, 0.85, 'Z :' + str(Z), c = 'b', transform=ax[0,0].transAxes)
ax[1,0].scatter(c_lis, k_lis, c = 'r', s = 5.)
ax[1,0].text(0.05, 0.85, 'K :' + str(Kopt), c = 'r', transform=ax[1,0].transAxes)
#set axes limits
ax[1,0].set_xlim(0,self.c_max)
ax[1,0].set_ylim(0,self.k_max)
ax[0,0].set_xlim(0,self.c_max)
ax[0,0].set_ylim(min(z_list),max(z_list))
if Kopt == self.tuned_params['k'] and contamination == self.tuned_params['c']:
ax[1,1].scatter(self.data[:, 0], self.data[:, 1], facecolors = 'none', s=1000 * log_lof, edgecolors = 'darkgray')
ax[1,1].scatter(np.ma.masked_where(log_lof < thresh, self.data[:, 0]), np.ma.masked_where(log_lof < thresh, self.data[:, 1]), c = 'orange', s=5.0)
ax[1,1].scatter(np.ma.masked_where(log_lof > thresh, self.data[:, 0]), np.ma.masked_where(log_lof > thresh, self.data[:, 1]), c = 'green', s=5.0)
ax[1,1].text(0.05, 0.85, 'C :' + str(contamination), c = 'darkgray', transform=ax[1,1].transAxes)
cam.snap()
self.animation = cam.animate()
return
def run(self):
self.collector = []
#main op
for contamination in tqdm.tqdm(self.c_grid):
samps = int(contamination * self.n_samples)
if samps < 2:
continue
#init running metrics
running_metrics = defaultdict(list)
for k in self.k_grid:
clf = LocalOutlierFactor(n_neighbors=k, contamination=contamination)
clf.fit_predict(self.data)
X_scores = np.log(- clf.negative_outlier_factor_)
                t0 = X_scores.argsort()  # ascending: inliers first, outliers last
top_k = t0[-samps:]
min_k = t0[:samps]
x_out = X_scores[top_k]
x_in = X_scores[min_k]
mc_out = np.mean(x_out)
mc_in = np.mean(x_in)
vc_out = np.var(x_out)
vc_in = np.var(x_in)
Tck = (mc_out - mc_in)/np.sqrt((self.eps + ((1/samps)*(vc_out +vc_in))))
running_metrics['tck'].append(Tck)
running_metrics['mck_out'].append(mc_out)
running_metrics['mck_in'].append(mc_in)
running_metrics['vck_in'].append(vc_in)
running_metrics['vck_out'].append(vc_out)
largest_idx = np.array(running_metrics['tck']).argsort()[-1]
mean_mc_out = np.mean(running_metrics['mck_out'])
mean_mc_in = np.mean(running_metrics['mck_in'])
mean_vc_out = np.mean(running_metrics['vck_out'])
mean_vc_in = np.mean(running_metrics['vck_in'])
#ncpc - non-centrality parameter
ncpc = (mean_mc_out - mean_mc_in)/np.sqrt((self.eps + ((1/samps)*(mean_vc_out
+ mean_vc_in))))
#dfc - degrees of freedom
dfc = (2*samps) - 2
if dfc <= 0:
continue
Z = nct(dfc, ncpc) #non-central t-distribution
Kopt = self.k_grid[largest_idx]
Topt = running_metrics['tck'][largest_idx]
Z = Z.cdf(Topt)
self.collector.append([Kopt, Topt, Z, contamination])
max_cdf = 0.
self.tuned_params = {}
        for v in self.collector:
            Kopt, Topt, Z, contamination = v
            if Z > max_cdf:
                max_cdf = Z
                self.tuned_params['k'] = Kopt
                self.tuned_params['c'] = contamination
print("\nTuned LOF Parameters : {}".format(self.tuned_params))
return
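# Example usage (X is any array of shape (n_samples, n_features)):
# tuner = LOF_AutoTuner(data=X, c_max=0.1, k_max=100)
# tuner.run()
# print(tuner.tuned_params)  # {'k': ..., 'c': ...}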
|
from . import packet
class Packet68(packet.Packet):
def __init__(self, player):
super().__init__(68)
self.add_data(player.uuid, pascal_string=True)
|
import requests
import re
from typing import List, Optional, Iterator
from .codingamer import CodinGamer
from .clash_of_code import ClashOfCode
from .notification import Notification
from .endpoints import Endpoints
from .exceptions import CodinGamerNotFound, ClashOfCodeNotFound, LoginRequired
from .utils import validate_args
class Client:
"""CodinGame API client.
Attributes
-----------
logged_in: :class:`bool`
If the client is logged in as a CodinGamer.
codingamer: Optional[:class:`CodinGamer`]
The CodinGamer that is logged in through the client. ``None`` if the client isn't logged in.
"""
_CODINGAMER_HANDLE_REGEX = re.compile(r"[0-9a-f]{32}[0-9]{7}")
_CLASH_OF_CODE_HANDLE_REGEX = re.compile(r"[0-9]{7}[0-9a-f]{32}")
logged_in: bool
codingamer: Optional[CodinGamer]
def __init__(self, email=None, password=None):
self._session = requests.Session()
self.logged_in = False
self.codingamer = None
if email is not None and password is not None:
self.login(email, password)
@validate_args
def login(self, email: str, password: str):
"""Login to a CodinGamer account.
Parameters
-----------
email: :class:`str`
            Email address of the CodinGamer.
password: :class:`str`
Password of the CodinGamer.
Raises
------
:exc:`ValueError`
Error with the login (empty email, empty password, wrong email format, incorrect password, etc).
Returns
--------
:class:`CodinGamer`
The CodinGamer that is logged in.
"""
if email == "":
raise ValueError("Email is required")
if password == "":
raise ValueError("Password is required")
r = self._session.post(Endpoints.CodinGamer_login, json=[email, password, True])
json = r.json()
if "id" in json and "message" in json:
raise ValueError(f"{json['id']}: {json['message']}")
self.logged_in = True
self.codingamer = CodinGamer(client=self, **r.json()["codinGamer"])
return self.codingamer
@validate_args
def get_codingamer(self, codingamer_handle: str) -> CodinGamer:
"""Get a CodinGamer from his public handle.
Parameters
-----------
codingamer_handle: :class:`str`
The CodinGamer's public handle.
39 character long hexadecimal string (regex: ``[0-9a-f]{32}[0-9]{7}``).
Raises
------
:exc:`ValueError`
            The CodinGamer handle isn't in the correct format.
:exc:`.CodinGamerNotFound`
The CodinGamer with the given public handle isn't found.
Returns
--------
:class:`CodinGamer`
The CodinGamer.
"""
if not self._CODINGAMER_HANDLE_REGEX.match(codingamer_handle):
raise ValueError(
f"CodinGamer handle {codingamer_handle!r} isn't in the good format "
"(regex: [0-9a-f]{32}[0-9]{7})."
)
r = self._session.post(Endpoints.CodinGamer, json=[codingamer_handle])
if r.json() is None:
raise CodinGamerNotFound(f"No CodinGamer with handle {codingamer_handle!r}")
return CodinGamer(client=self, **r.json()["codingamer"])
@validate_args
def get_clash_of_code(self, clash_of_code_handle: str) -> ClashOfCode:
"""Get a Clash of Code from its public handle.
Parameters
-----------
clash_of_code_handle: :class:`str`
The Clash of Code's public handle.
39 character long hexadecimal string (regex: ``[0-9]{7}[0-9a-f]{32}``).
Raises
------
:exc:`ValueError`
            The Clash of Code handle isn't in the correct format.
:exc:`.ClashOfCodeNotFound`
The Clash of Code with the given public handle isn't found.
Returns
--------
:class:`ClashOfCode`
The ClashOfCode.
"""
if not self._CLASH_OF_CODE_HANDLE_REGEX.match(clash_of_code_handle):
raise ValueError(
f"Clash of Code handle {clash_of_code_handle!r} isn't in the good format "
"(regex: [0-9]{7}[0-9a-f]{32})."
)
r = self._session.post(Endpoints.ClashOfCode, json=[clash_of_code_handle])
json = r.json()
if "id" in json and "message" in json:
raise ClashOfCodeNotFound(f"No Clash of Code with handle {clash_of_code_handle!r}")
return ClashOfCode(client=self, **json)
def get_pending_clash_of_code(self) -> Optional[ClashOfCode]:
"""Get a pending Clash of Code.
Returns
--------
Optional[:class:`ClashOfCode`]
The pending ClashOfCode if there's one or ``None``.
"""
r = self._session.post(Endpoints.ClashOfCode_pending, json=[])
json = r.json()
if len(json) == 0:
return None
return ClashOfCode(client=self, **json[0])
@property
def language_ids(self) -> List[str]:
"""List[:class:`str`]: List of all available language ids."""
if hasattr(self, "_language_ids"):
return self._language_ids
else:
r = self._session.post(Endpoints.LanguageIds, json=[])
self._language_ids = r.json()
return self._language_ids
@property
def notifications(self) -> Iterator[Notification]:
"""Get all the unseen notifications of the Client.
You need to be logged in to get notifications or else a :exc:`LoginRequired` will be raised.
.. note::
This property is a generator.
Raises
------
:exc:`LoginRequired`
The Client needs to log in. See :meth:`login`.
Yields
-------
:class:`Notification`
The Notification.
"""
if not self.logged_in:
raise LoginRequired()
r = self._session.post(Endpoints.UnseenNotifications, json=[self.codingamer.id])
for notification in r.json():
yield Notification(notification)
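# Example usage (the handle below is only format-valid, not a real user):
# client = Client()
# gamer = client.get_codingamer("0" * 32 + "1234567")
# pending = client.get_pending_clash_of_code()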
|
import pytest
from kopf.reactor.registries import Resource
def test_no_args():
with pytest.raises(TypeError):
Resource()
def test_all_args(mocker):
group = mocker.Mock()
version = mocker.Mock()
plural = mocker.Mock()
resource = Resource(
group=group,
version=version,
plural=plural,
)
assert resource.group is group
assert resource.version is version
assert resource.plural is plural
|
from PyObjCTools.TestSupport import *
from AppKit import *
try:
unicode
except NameError:
unicode = str
class TestNSErrors (TestCase):
def testConstants(self):
self.assertIsInstance(NSTextLineTooLongException, unicode)
self.assertIsInstance(NSTextNoSelectionException, unicode)
self.assertIsInstance(NSWordTablesWriteException, unicode)
self.assertIsInstance(NSWordTablesReadException, unicode)
self.assertIsInstance(NSTextReadException, unicode)
self.assertIsInstance(NSTextWriteException, unicode)
self.assertIsInstance(NSPasteboardCommunicationException, unicode)
self.assertIsInstance(NSPrintingCommunicationException, unicode)
self.assertIsInstance(NSAbortModalException, unicode)
self.assertIsInstance(NSAbortPrintingException, unicode)
self.assertIsInstance(NSIllegalSelectorException, unicode)
self.assertIsInstance(NSAppKitVirtualMemoryException, unicode)
self.assertIsInstance(NSBadRTFDirectiveException, unicode)
self.assertIsInstance(NSBadRTFFontTableException, unicode)
self.assertIsInstance(NSBadRTFStyleSheetException, unicode)
self.assertIsInstance(NSTypedStreamVersionException, unicode)
self.assertIsInstance(NSTIFFException, unicode)
self.assertIsInstance(NSPrintPackageException, unicode)
self.assertIsInstance(NSBadRTFColorTableException, unicode)
self.assertIsInstance(NSDraggingException, unicode)
self.assertIsInstance(NSColorListIOException, unicode)
self.assertIsInstance(NSColorListNotEditableException, unicode)
self.assertIsInstance(NSBadBitmapParametersException, unicode)
self.assertIsInstance(NSWindowServerCommunicationException, unicode)
self.assertIsInstance(NSFontUnavailableException, unicode)
self.assertIsInstance(NSPPDIncludeNotFoundException, unicode)
self.assertIsInstance(NSPPDParseException, unicode)
self.assertIsInstance(NSPPDIncludeStackOverflowException, unicode)
self.assertIsInstance(NSPPDIncludeStackUnderflowException, unicode)
self.assertIsInstance(NSRTFPropertyStackOverflowException, unicode)
self.assertIsInstance(NSAppKitIgnoredException, unicode)
self.assertIsInstance(NSBadComparisonException, unicode)
self.assertIsInstance(NSImageCacheException, unicode)
self.assertIsInstance(NSNibLoadingException, unicode)
self.assertIsInstance(NSBrowserIllegalDelegateException, unicode)
self.assertIsInstance(NSAccessibilityException, unicode)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
# visigoth: A lightweight Python3 library for rendering data visualizations in SVG
# Copyright (C) 2020-2021 Visigoth Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software
# and associated documentation files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os.path
from visigoth.common.diagram_element import DiagramElement
from visigoth.common.panzoom import PanZoom
from visigoth.common.axis import ContinuousAxis
from visigoth.internal.utils.js import Js
class ContinuousHueLegend(DiagramElement):
"""
Create a legend graphic describing the hues used in a ContinuousHueManager
Arguments:
    manager(visigoth.utils.ContinuousHueManager): the hue_manager object
Keyword Arguments:
length(int): length of the hue bar
label(str): a descriptive label to display
value_formatter(visigoth.utils.ValueFormatter): control the way values are represented in the legend
orientation(str): "horizontal"|"vertical" whether to display legend horizontally or vertically (applies only to continuous hue_manager)
stroke(str): the stroke hue for lines
stroke_width(int): the stroke width lines
font_height(int): the font size for the legend
text_attributes(dict): a dict containing SVG name/value pairs
bar_thickness(int): the thickness of the bar in pixels
bar_spacing(int): the spacing between the bar and the axis/labels
"""
    def __init__(self, manager, length=512, label=None, value_formatter=None, orientation="horizontal", stroke="black", stroke_width=2, font_height=24, text_attributes=None, bar_thickness=40, bar_spacing=10):
        DiagramElement.__init__(self)
        self.hue_manager = manager
        # avoid a shared mutable default argument
        self.text_attributes = text_attributes if text_attributes is not None else {}
self.input_length = length
self.length = length
self.width = 0
self.height = 0
self.axis = None
self.label = label
self.value_formatter = value_formatter
self.stroke = stroke
self.stroke_width = stroke_width
self.font_height = font_height
self.bar_thickness = bar_thickness
self.bar_spacing = bar_spacing
self.panzoom_spacing = 20
self.orientation = orientation
        self.fill = "white"
        self.adjustable = self.hue_manager.getAdjustable()
self.bar_length = self.length - 2 * self.hue_manager.getCapSize()
self.axis = ContinuousAxis(self.bar_length, orientation=self.orientation, min_value=self.hue_manager.getMinValue(),
max_value=self.hue_manager.getMaxValue(),
label=self.label, value_formatter=self.value_formatter, font_height=self.font_height,
axis_font_height=self.font_height, text_attributes=self.text_attributes,
stroke=self.stroke, stroke_width=self.stroke_width)
self.panzoom = None
def getHeight(self):
return self.height
def getWidth(self):
return self.width
def getAxis(self):
return self.axis
def build(self, fmt):
self.hue_manager.build()
if self.adjustable:
pan_controls = ["e","w"] if self.orientation == "horizontal" else ["n","s"]
self.panzoom = PanZoom(zoom_to=16,radius=50,pan_controls=pan_controls)
self.panzoom.build(fmt)
self.hue_manager.addEventProducer(self, "hue_scale")
tickpoints = self.hue_manager.getTickPositions()
if self.hue_manager.hasIntervals():
self.axis.setMinValue(tickpoints[0])
self.axis.setMaxValue(tickpoints[-1])
self.axis.setTickPoints(tickpoints)
else:
self.axis.setMinValue(self.hue_manager.getMinValue())
self.axis.setMaxValue(self.hue_manager.getMaxValue())
self.axis.build(fmt)
if self.orientation == "horizontal":
self.height = self.bar_thickness + self.bar_spacing + self.axis.getHeight()
self.width = self.input_length
else:
self.height = self.input_length
self.width = self.bar_thickness + self.bar_spacing + self.axis.getWidth()
self.legend_width = self.width
self.legend_height = self.height
if self.adjustable:
if self.orientation == "horizontal":
self.height = max(self.height, self.panzoom.getHeight())
self.width = self.width + self.panzoom.getWidth() + self.panzoom_spacing
else:
self.width = max(self.width, self.panzoom.getWidth())
self.height = self.height + self.panzoom.getHeight() + self.panzoom_spacing
def draw(self, d, cx, cy):
d.getDiagram().addEventBroker(self.hue_manager)
ox = cx - self.getWidth() / 2
oy = cy - self.getHeight() / 2
config = {}
if self.adjustable:
if self.orientation == "horizontal":
cx = ox + self.panzoom.getWidth()/2
else:
cy = oy + self.panzoom.getHeight()/2
self.panzoom.draw(d,cx,cy)
if self.orientation == "horizontal":
ox += self.panzoom.getWidth()+self.panzoom_spacing
cx = ox + self.legend_width/2
else:
oy += self.panzoom.getHeight()+self.panzoom_spacing
cy = oy + self.legend_height/2
rect_length = self.bar_length + 2 * self.hue_manager.getCapSize()
if self.orientation == "horizontal":
self.axis.draw(d, cx, oy + self.bar_thickness + self.bar_spacing + self.axis.getHeight() / 2)
config["hue_manager"] = self.hue_manager.draw(d, cx - rect_length / 2, oy, rect_length,
self.bar_thickness,
self.orientation, stroke=self.stroke,
stroke_width=self.stroke_width)
else:
self.axis.draw(d, ox + self.axis.getWidth() / 2, cy)
config["hue_manager"] = self.hue_manager.draw(d, ox + self.axis.getWidth(), cy - rect_length / 2,
self.bar_thickness, rect_length, self.orientation,
stroke=self.stroke, stroke_width=self.stroke_width)
config["adjustable"] = self.adjustable
config["type"] = "continuous"
config["with_intervals"] = self.hue_manager.hasIntervals()
config["orientation"] = self.orientation
config["min_value"] = self.axis.getMinValue()
config["max_value"] = self.axis.getMaxValue()
if self.adjustable and d.getFormat() == "html":
config["zoom_to"] = 16
config["adjust_increment"] = self.axis.getTickSpacing()/4
d.getDiagram().connect(self.panzoom,"zoom",self,"zoom")
d.getDiagram().connect(self.panzoom, "pan", self, "pan")
if self.adjustable:
hue_manager_js_path = os.path.join(os.path.split(__file__)[0], "..", "..", "utils", "hue_manager",
"continuous_hue_manager.js")
with open(hue_manager_js_path) as f:
hue_manager_js = f.read()
d.addLibraryCode(hue_manager_js)
axisutils_js_path = os.path.join(os.path.split(__file__)[0], "..", "..", "internal", "utils", "axis",
"axisutils.js")
with open(axisutils_js_path) as f:
axisutils_js = f.read()
d.addLibraryCode(axisutils_js)
with open(os.path.join(os.path.split(__file__)[0], "continuous_hue_legend.js"), "r") as jsfile:
jscode = jsfile.read()
Js.registerJs(d, self, jscode, "continuous_hue_legend", cx, cy, config)
if not self.hue_manager.isDiscrete():
d.getDiagram().connect(self.axis,"axis_drag",self,"axis_drag");
d.getDiagram().connect(self, "axis_rescale", self.axis, "axis_rescale")
d.getDiagram().connect(self, "axis_slide", self.axis, "axis_slide")
d.getDiagram().connect(self.axis, "rescaled_axis", self, "rescaled_axis")
|
import cv2
import numpy as np
# imgpath = os.path.join(os.getcwd(),
# "images", "4.2.06.tiff")
# img = cv2.imread(imgpath, 0)
original = np.zeros((200, 200), dtype=np.uint8)
original[50:150, 50:150] = 255
original[90:110, 90:110] = 128
# ret, thresh = cv2.threshold(original, 127, 255, cv2.THRESH_BINARY)
# cv2.findContours returns (image, contours, hierarchy) in OpenCV 3.x but
# only (contours, hierarchy) in 4.x; taking the last two items works in both
contours, hierarchy = cv2.findContours(
    original, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
gray = cv2.cvtColor(original, cv2.COLOR_GRAY2BGR)
imgContour = cv2.drawContours(gray, contours, -1, (0, 255, 0), 1)
cv2.imshow('original', original)
# cv2.imshow('thresh', thresh)
cv2.imshow('gray', gray)
cv2.imshow('imgContour', imgContour)
cv2.waitKey()
cv2.destroyAllWindows()
|
"""Implements learning rate schedulers used in training loops."""
import numpy as np
# %%
class LRCosineAnnealingScheduler():
"""Implements an LRCosineAnnealingScheduler for lr adjustment."""
def __init__(self, eta_max, eta_min, Ti, Tmultiplier, num_batches_per_epoch):
"""Initialize LRCosineAnnealingScheduler Object.
Args:
eta_max (float): Max eta.
eta_min (float): Minimum eta.
Ti (float): Initial temperature
Tmultiplier (float): Temperature multiplier
num_batches_per_epoch (int): Number of batches per epoch.
"""
self.eta_min = eta_min
self.eta_max = eta_max
self.Ti = Ti
self.Tcur = 0.0
self.nbpe = num_batches_per_epoch
self.iteration_counter = 0.0
self.eta = eta_max
self.Tm = Tmultiplier
def _compute_rule(self):
self.eta = self.eta_min + 0.5 * \
(self.eta_max - self.eta_min) * \
(1 + np.cos(np.pi * self.Tcur / self.Ti))
return self.eta
def step(self):
"""Apply scheduler one step, and adjust the learning rate accordingly.
Returns:
float: Adjusted learning rate
"""
self.Tcur = self.iteration_counter / self.nbpe
self.iteration_counter = self.iteration_counter + 1.0
eta = self._compute_rule()
if eta <= self.eta_min + 1e-10:
self.Tcur = 0
self.Ti = self.Ti * self.Tm
self.iteration_counter = 0
return eta
def update_optimizer(self, optimizer):
"""Apply current scheduler learning rate to optimizer.
Args:
optimizer (torch.optim.Optimizer): Torch optimizer instance.
"""
state_dict = optimizer.state_dict()
for param_group in state_dict['param_groups']:
param_group['lr'] = self.eta
optimizer.load_state_dict(state_dict)
# %%
class FixedScheduler():
"""Implements a fixed learning rate for each epoch."""
def __init__(self, lr):
"""Initialize a FixedScheduler Object.
Args:
lr (float): Learning rate
"""
self.lr = lr
def step(self):
"""Update learning rate for scheduler.
Returns:
float: Updated learning rate
"""
return self.lr
def update_optimizer(self, optimizer):
"""Update optimizer instance with current learning rate.
Args:
optimizer (nn.optim.Optimizer): Optimizer instance to modify
"""
state_dict = optimizer.state_dict()
for param_group in state_dict['param_groups']:
param_group['lr'] = self.lr
optimizer.load_state_dict(state_dict)
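# A minimal usage sketch with a torch optimizer (the optimizer and
# train_loader names below are hypothetical):
# scheduler = LRCosineAnnealingScheduler(
#     eta_max=0.1, eta_min=1e-4, Ti=10, Tmultiplier=2,
#     num_batches_per_epoch=len(train_loader))
# for batch in train_loader:
#     scheduler.step()
#     scheduler.update_optimizer(optimizer)
#     ...  # forward / backward / optimizer.step()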
|
import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, np.void):
return None
if isinstance(o, (np.generic, np.bool_)):
return o.item()
if isinstance(o, np.ndarray):
return o.tolist()
        # fall back to the base class, which raises TypeError for unsupported types
        return super().default(o)
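# Example: numpy scalars and arrays become plain JSON values
# json.dumps({"n": np.int64(3), "xs": np.arange(3)}, cls=NumpyEncoder)
# -> '{"n": 3, "xs": [0, 1, 2]}'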
|
from unittest import TestCase
import pytest
from zanna._binding_spec import InstanceBindingSpec
class DummyClass(object):
def __init__(self, value):
self.value = value
def TEST_CALLABLE(x):
return x + 500
class TestInstanceBindingSpec(TestCase):
TEST_STRING = "testtest"
TEST_INT = 9999
TEST_DUMMY_INSTANCE = DummyClass(TEST_STRING)
def test_instance_binding_spec(self):
bspec = InstanceBindingSpec(self.TEST_STRING)
assert bspec.get_instance() == self.TEST_STRING
def test_int_instance_binding_spec(self):
bspec = InstanceBindingSpec(self.TEST_INT)
assert bspec.get_instance() == self.TEST_INT
def test_dummyclass_instance_binding_spec(self):
bspec = InstanceBindingSpec(self.TEST_DUMMY_INSTANCE)
assert bspec.get_instance() == self.TEST_DUMMY_INSTANCE
def test_class_binding_spec_raises(self):
with pytest.raises(TypeError):
InstanceBindingSpec(DummyClass)
def test_none_binding_spec_raises(self):
with pytest.raises(TypeError):
InstanceBindingSpec(None)
def test_argspec_methods_raise(self):
bspec = InstanceBindingSpec(3)
with pytest.raises(TypeError):
bspec.construct_instance({})
with pytest.raises(TypeError):
bspec.get_argument_specs()
def test_callable_binding_spec(self):
bspec = InstanceBindingSpec(TEST_CALLABLE)
print(bspec.get_instance())
assert bspec.get_instance()(500) == 1000
|
import numpy as np
def linear(x):
return x
def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))
def tanh(x):
    return np.tanh(x)
def relu(x):
    return np.maximum(x, 0)
def lrelu(x):
    # np.where keeps this elementwise, so it works on arrays as well as scalars
    return np.where(x >= 0, x, 0.01 * x)
def prelu(x, a):
    return np.where(x >= 0, x, a * x)
def selu(x, a=1):
    # with a=1 this is the ELU form; SELU proper also scales the output
    return np.where(x >= 0, x, a * (np.exp(x) - 1))
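# Quick check that the activations accept arrays (values are illustrative):
# x = np.array([-2.0, 0.0, 2.0])
# relu(x)   # -> array([0., 0., 2.])
# lrelu(x)  # -> array([-0.02,  0.  ,  2.  ])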
|
import time
import matplotlib.pyplot as plt
import numpy as np
from core import get_model, global_opt, LOOCV, normalize, plot_2d
from scipy.special import erfc
def sbostep(X_, Y, bnds, acq='EI', model_type='GP', output=False,
plotting=False, loocv=False):
acqd = {'EI': EI,
'Bayesian optimization': EI,
'FMIN': FMIN,
'SDMAX': SDMAX}
acqdk = {'EI': {'y_min': Y.min()},
'Bayesian optimization': {'y_min': Y.min()},
'FMIN': {},
'SDMAX': {}}
acqf = acqd[acq]
acqk = acqdk[acq]
ubnds = np.zeros(bnds.shape)
ubnds[:, 1] = 1.
X_ = np.array(X_)
Y = Y[:, 0]
ndims = bnds.shape[0]
model = get_model(X_, Y, bnds, model_type)
if output and model_type == 'GP':
print('GP kernel:', model.kernel_)
if loocv:
if output:
print('LOOCV:')
LOOCV(model, X_, Y, output, plotting)
def surrogate_eval(x, model):
ymean, ystd = model.predict(
np.atleast_2d(x), return_std=True)
return ymean, ystd
def acqf_opt(x, bnds=None):
ymean, ystd = model.predict(
np.atleast_2d(x), return_std=True)
res = acqf(np.squeeze(ymean), np.squeeze(ystd), **acqk)
return res
if output:
        print('find acquisition function minimum')
x_p_, y_p = global_opt(acqf_opt, ubnds, typ='DE', output=output)
x_p = np.zeros(x_p_.shape)
for ii in range(bnds.shape[0]):
x_p[ii] = x_p_[ii]*(bnds[ii, 1] - bnds[ii, 0]) + bnds[ii, 0]
if plotting and ndims == 2:
def obj_fs(x, bnds=None):
return np.squeeze(model.predict(np.atleast_2d(x),
return_std=False))
def obj_stddev(x, bnds=None):
ymean, ystd = model.predict(
np.atleast_2d(x), return_std=True)
return ystd
X__ = normalize(X_, bnds)
x_p_ = normalize(np.atleast_2d(x_p), bnds)
plot_2d(obj_fs, ubnds, pts=X__, newpts=x_p_,
resolution=25, fignum='surrogate mean')
plot_2d(obj_stddev, ubnds, pts=X__, newpts=x_p_,
resolution=25, fignum='surrogate std. dev.')
plot_2d(acqf_opt, ubnds, pts=X__, newpts=x_p_,
resolution=25, fignum='acquisition function')
return x_p
def EI(y_mean, y_std, y_min):
    # expected improvement (negated, since the optimizer minimizes);
    # alpha = (y_min - mean) / (sqrt(2) * std)
    alpha = (y_min - y_mean)/(np.sqrt(2)*y_std)
    ei = y_std*(np.exp(-alpha**2)+np.sqrt(np.pi)*alpha*erfc(-1*alpha)) \
        / np.sqrt(2*np.pi)
    return -ei
def FMIN(y_mean, y_std):
return y_mean
def soptimize(func, bnds, stset, acq='EI', model_type='GP',
niter=30, plot_freq=1, output=False,
loocv=False):
randA = np.zeros((10000, bnds.shape[0]))
for ii in range(bnds.shape[0]):
randA[:, ii] = np.random.uniform(
bnds[ii, 0], bnds[ii, 1], size=(10000))
iterL = np.arange(0, niter, 1)
XL, resL, teL, bestL = [], [], [], []
c = np.linspace(1, 0, niter)
best = 1e10
st = time.time()
for ii in iterL:
te = time.time() - st
if np.mod(ii+1, plot_freq) == 0 and output and ii >= len(stset):
plotting = True
else:
plotting = False
if acq == 'random':
x = list(randA[ii, :])
elif ii < stset.shape[0]:
x = list(stset[ii, :])
else:
x = sbostep(XL, np.array(resL), bnds, acq,
model_type, output, plotting, loocv)
res = np.atleast_1d(func(x, bnds))
teL += [te]
XL += [x]
resL += [res]
if res[0] < best:
best = res[0]
bestL += [best]
if output:
print('iteration', ii, 'complete')
if plotting:
plt.show()
return iterL, XL, resL, bestL
def SDMAX(y_mean, y_std):
return -y_std
|
import os
import uuid
import shutil
import subprocess
from avel.shared_lib import call_ffmpeg, get_duration
def _pad_integer(i):
return str(i).zfill(2)
def combine_audio(audio_list, output_path, transition_time=13, debugging=False):
"""Creates a single audio file from a list"""
temp0 = os.path.join(os.path.dirname(output_path), 'temp0.wav')
temp1 = os.path.join(os.path.dirname(output_path), 'temp1.wav')
def temp_file(i):
if i % 2 == 0:
return temp0
else:
return temp1
if len(audio_list) > 2:
print(audio_list)
call_ffmpeg(f'ffmpeg -i {audio_list[0]} -i {audio_list[1]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {temp1}', verbose=True)
for i in range(2, len(audio_list) - 1):
call_ffmpeg(f'ffmpeg -i {temp_file(i-1)} -i {audio_list[i]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {temp_file(i)}', verbose=True)
# final call to convert to mp3
call_ffmpeg(f'ffmpeg -i {temp_file(len(audio_list) - 2)} -i {audio_list[-1]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {output_path}', verbose=True)
elif len(audio_list) == 2:
call_ffmpeg(f'ffmpeg -i {audio_list[0]} -i {audio_list[1]} -vn -filter_complex acrossfade=d={transition_time}:c1=tri:c2=squ {output_path}', verbose=True)
elif len(audio_list) == 1:
shutil.copyfile(audio_list[0], output_path)
else:
raise ValueError("Empty audio list")
if not debugging:
try:
os.remove(temp0)
os.remove(temp1)
except OSError:
pass
return output_path
def extract_audio(video_file, output_file, time1=None, time2=None):
    """Creates audio file from video and timestamps"""
    # default to empty strings so the command is valid when times are omitted
    ss_str = f'-ss {time1} ' if time1 else ''
    to_str = f'-to {time2} ' if time2 else ''
    call_ffmpeg(f'ffmpeg -i {video_file} {ss_str}{to_str}-c:a libmp3lame {output_file}', verbose=True)
#f"volume=enable='between(t,t1,t2)':volume=0, volume=enable='between(t,t3,t4)':volume=0",
def merge_audio(audio_file1, audio_file2, output_file, vol1=1.0, vol2=1.0):
"""Merges two audios into one. option to adjust volumes for both audio"""
    call_ffmpeg(f'ffmpeg -i {audio_file1} -i {audio_file2} -filter_complex [0:0]volume={vol1}[a];[1:0]volume={vol2}[b];[a][b]amix=inputs=2:duration=longest -c:a libmp3lame {output_file}', verbose=True)
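# Example usage (hypothetical paths):
# combine_audio(["a.wav", "b.wav", "c.wav"], "mix.mp3", transition_time=10)
|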
#!/usr/bin/python
# ex:set fileencoding=utf-8:
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djangobmf.categories import SETTINGS
from djangobmf.contrib.accounting.models import ACCOUNTING_LIABILITY
from djangobmf.models import BMFModel
from djangobmf.settings import CONTRIB_ACCOUNT
from decimal import Decimal
@python_2_unicode_compatible
class AbstractTax(BMFModel):
name = models.CharField(max_length=255, null=False, blank=False, )
# invoice_name_long = models.CharField( max_length=255, null=False, blank=False, )
# invoice_name_short = models.CharField( max_length=255, null=False, blank=False, )
account = models.ForeignKey(
CONTRIB_ACCOUNT, null=False, blank=False, related_name="tax_liability",
limit_choices_to={'type': ACCOUNTING_LIABILITY, 'read_only': False},
on_delete=models.PROTECT,
)
rate = models.DecimalField(max_digits=8, decimal_places=5)
passive = models.BooleanField(
        _('Tax is always included in the product price and never visible to the customer'),
null=False, blank=False, default=False,
)
is_active = models.BooleanField(_("Is active"), null=False, blank=False, default=True)
def get_rate(self):
return self.rate / Decimal(100)
class Meta:
verbose_name = _('Tax')
verbose_name_plural = _('Taxes')
ordering = ['name']
abstract = True
swappable = "BMF_CONTRIB_TAX"
class BMFMeta:
category = SETTINGS
observed_fields = ['name', 'invoice_name', 'rate']
def __str__(self):
return self.name
class Tax(AbstractTax):
pass
|
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
outfile):
import shutil
shutil.copyfile(in_data.identifier, outfile)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
filename = 'signal_check_normalize_' + original_file + '.tdf'
return filename
def set_out_attributes(self, antecedents, out_attributes):
import arrayio
new_parameters = out_attributes.copy()
M = arrayio.read(antecedents.identifier)
if is_gene_normalize_variance(M):
new_parameters['gene_normalize'] = 'variance'
elif is_gene_normalize_ss(M):
new_parameters['gene_normalize'] = 'sum_of_squares'
else:
new_parameters['gene_normalize'] = 'no'
return new_parameters
def is_gene_normalize_variance(M):
import numpy
for line in M.slice():
if abs(numpy.var(line) - 1) > 0.000001:
return False
return True
def is_gene_normalize_ss(M):
import numpy
for line in M.slice():
if abs(numpy.sum([(x - numpy.mean(line)) ** 2
for x in line]) - 1) > 0.000001:
return False
return True
|
import numpy as np
class Dynamics:
"""docstring for Dynamics"""
def __init__(self, dt):
self.dt = dt
    def create_motion_model_params(self, motion_model_cov):
        raise NotImplementedError
    def create_meas_model_params(self, meas_cov):
        raise NotImplementedError
if __name__ == '__main__':
    # no main() is defined in this module; construct the class as a simple
    # smoke test (the dt value is illustrative)
    Dynamics(dt=0.01)
|
from java.lang import String
# a demo of using speech recognition to
# turn an Arduino's pin 13 off or on
# the commands are 2-stage - after the command, the system
# asks if that command was said, then expects an affirmation or negation
# e.g. - you say "on", the system asks if you said "on", you say "yes",
# and the system turns the Arduino pin 13 on
# create services
python = Runtime.createAndStart("python", "Python")
mouth = Runtime.createAndStart("mouth", "Speech")
arduino = Runtime.createAndStart("arduino", "Arduino")
ear = Runtime.createAndStart("ear", "Sphinx")
# connect mrl to the arduino - change the port on your system
arduino.connect("COM4")
# attaching the mouth to the ear
# prevents listening when speaking
# which causes an undesired feedback loop
ear.attach(mouth)
# for anything recognized we'll send that data back to python to be printed
# in the python tab
# ear.addListener("recognized", "python", "heard", String.class);
# add a "on" -> arduino.digitalWrite(13, 1) - turn's pin 13 on
ear.addCommand("on", arduino.getName(), "digitalWrite", 13, 1)
# add a "off" -> arduino.digitalWrite(13, 0) - turn's pin 13 off
ear.addCommand("off", arduino.getName(), "digitalWrite", 13, 0)
arduino.pinMode(13,1)
# add confirmations and negations - this makes any command a 2 part commit
# where first you say the command then mrl asks you if that is what you said
# after recognition
ear.addComfirmations("yes","correct","yeah","ya")
ear.addNegations("no","wrong","nope","nah")
# begin listening
ear.startListening()
def heard(data):
# print it
print "heard ", data
|
import sys
from pprint import pprint
from collections import deque
import urllib
import urlparse
import json
import random
from lxml import html
def create_node(name, children):
return { "name": name, "children": children }
def crawl(addr):
current = create_node(addr, [])
root = current
depth = random.randint(1, 5)
page_content = ""
crawled = [] # Not crawl same url twice
to_crawl = addr
for i in range(depth):
remain = depth - i - 1
print "Crawling: " + to_crawl + " - Remains: " + str(remain)
url = urlparse.urlparse(to_crawl)
try:
response = urllib.urlopen(to_crawl)
except:
print "Error opening url: " + to_crawl
break
crawled.append(to_crawl)
page_content = response.read()
raw_links = html.fromstring(page_content).xpath('//a')
full_links = []
        for link in raw_links:
if 'href' in link.attrib:
link = link.attrib['href']
else:
continue
if link.startswith('/'):
link = 'http://' + url[1] + link
elif link.startswith('#'):
link = 'http://' + url[1] + url[2] + link
elif not link.startswith('http'):
link = 'http://' + url[1] + '/' + link
if link not in crawled:
full_links.append(link)
if not full_links: # no link crawlable
break
for link in full_links:
current["children"].append(create_node(link, []))
        rand_link = random.randint(0, len(full_links) - 1)
print "selected " + str(rand_link + 1) + " of " + str(len(full_links))
current["children"][rand_link]["visited"] = True
current = current["children"][rand_link]
to_crawl = current["name"]
return { 'crawled': root, 'page_content': page_content }
def get_term(content):
try:
tree = html.fromstring(content)
paragraphs = tree.xpath('//p')
if paragraphs:
for par in paragraphs:
for word in par.text.split():
if len(word) > random.randint(4, 7):
return word
except Exception, err:
sys.stderr.write('ERROR: %s\n' % str(err))
return "_err_term_"
return "_no_term_"
def get_google_results(searchfor):
query = urllib.urlencode({'q': searchfor})
url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s' % query
search_response = urllib.urlopen(url)
j = json.load(search_response)
return [url['url'] for url in j['responseData']['results']]
def get_terms(search_content):
divterms = []
contents = []
#try:
# Get a bunch of keywords
#search_content = raw_input('Please, insert terms you would like to diverge: ')
# Search them on google and get links
urls_to_crawl = get_google_results(search_content)
#print urls_to_crawl
# go in depth following links in those pages
crawled_paths = { "name": search_content, "children": [] }
for url in urls_to_crawl:
content = crawl(url)
#print content['page_content']
new_term = get_term(content['page_content'])
crawled_paths["children"].append(content['crawled'])
# Find some words longer than 4-5 characters
if new_term not in divterms:
if new_term not in ['_no_term_', '_err_term_']:
divterms.append(new_term)
result = { 'crawled_paths': crawled_paths, 'divterms': divterms }
return result
#except Exception, err:
# sys.stderr.write('ERROR: %s\n' % str(err))
# raise err
|
#I officially GIVE UP ON THIS FUNCTION.
# In order to remove silence I have to split into mono, then resample and then clean,
# and EVEN AFTER I managed to do all that, the script I use to remove silence doesn't like
# librosa resampling; however, when I manually resample with Audacity it works just fine.
# THIS MAKES NO SENSE WHATSOEVER.
# I THOUGHT THIS WOULD BE AN EASY TASK AND NOW I'VE WASTED 3 HOURS!
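# A minimal sketch of the intended pipeline (mono mixdown -> 16 kHz -> write to disk),
# left commented out like the attempts below. The paths and the librosa >= 0.10 keyword
# signature are assumptions, not the ones used in this project.
# import librosa
# import scipy.io.wavfile
# y, sr = librosa.load("input.wav", sr=None, mono=True)    # load and mix down to mono
# y_16k = librosa.resample(y, orig_sr=sr, target_sr=16000) # resample to 16 kHz
# scipy.io.wavfile.write("output_16k.wav", 16000, y_16k)   # write 32-bit float WAV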
# import librosa
# import resampy
# import wave
# import scipy
# x, sr_orig = librosa.load("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/right.wav",sr=None)
# y_low = resampy.resample(x, sr_orig, 16000)
# scipy.io.wavfile.write("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/ugh.wav", 16000, y_low)
# # librosa.output.write_wav("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/librosad.wav",y,sr,True)
# # import os
# # import wave
# # def downsampleWav(src, dst, inrate=44100, outrate=16000, inchannels=2, outchannels=1):
# # if not os.path.exists(src):
# # print('Source not found!')
# # return False
# # if not os.path.exists(os.path.dirname(dst)):
# # os.makedirs(os.path.dirname(dst))
# # try:
# # s_read = wave.open(src, 'r')
# # s_write = wave.open(dst, 'w')
# # except:
# # print('Failed to open files!')
# # return False
# # n_frames = s_read.getnframes()
# # data = s_read.readframes(n_frames)
# # try:
# # converted = audioop.ratecv(data, 2, inchannels, inrate, outrate, None)
# # if outchannels == 1:
# # converted = audioop.tomono(converted[0], 2, 1, 0)
# # except:
# # print('Failed to downsample wav')
# # return False
# # try:
# # s_write.setparams((outchannels, 2, outrate, 0, 'NONE', 'Uncompressed'))
# # s_write.writeframes(converted)
# # except:
# # print('Failed to write wav')
# # return False
# # try:
# # s_read.close()
# # s_write.close()
# # except:
# # print('Failed to close wav files')
# # return False
# # return True
# # downsampleWav("/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/test.wav","/home/ravingmad/Desktop/Work/amazonthon/stutter_AI/silence_removal_trials/downed.wav") |
from functools import partial
import torch
from glasses.models.classification.resnest import *
def test_resnest():
x = torch.rand(1, 3, 224, 224)
with torch.no_grad():
model = ResNeSt.resnest14d().eval()
pred = model(x)
assert pred.shape[-1] == 1000
n_classes = 10
model = ResNeSt.resnest14d(n_classes=n_classes).eval()
pred = model(x)
assert pred.shape[-1] == n_classes
model = ResNeSt.resnest26d().eval()
pred = model(x)
assert pred.shape[-1] == 1000
model = ResNeSt.resnest50d().eval()
pred = model(x)
assert pred.shape[-1] == 1000
model = ResNeSt.resnest50d_1s4x24d().eval()
pred = model(x)
assert pred.shape[-1] == 1000
model = ResNeSt.resnest50d_4s2x40d().eval()
pred = model(x)
assert pred.shape[-1] == 1000
model = ResNeSt.resnest101e().eval()
pred = model(x)
assert pred.shape[-1] == 1000
model = ResNeSt.resnest50d_fast().eval()
pred = model(x)
assert pred.shape[-1] == 1000
# too big!
# model = ResNeSt.resnest200e().eval()
# pred = model(x)
# assert pred.shape[-1] == 1000
# model = ResNeSt.resnest269e().eval()
# pred = model(x)
# assert pred.shape[-1] == 1000
|
# Standard Imports
import time
import paho.mqtt.client as mqtt
from db.connection import add_weatherData
def on_message(client, userdata, message):
dataArray = str(message.payload.decode("utf-8")).split(",")
add_weatherData(dataArray[0], dataArray[1], dataArray[2], dataArray[3], dataArray[4], dataArray[5], dataArray[6], dataArray[7], dataArray[8], dataArray[9])
# Set MQTT broker and Topic
broker = "test.mosquitto.org"
pub_topic = "weatheralytics/data"
# Connect functions for MQTT
client = mqtt.Client()
# Connect to MQTT
print("Attempting to connect to broker " + broker)
client.connect(broker)
client.subscribe(pub_topic)
client.on_message = on_message
client.loop_start()
# loop_start() runs the network loop in a background thread, so keep the
# main thread alive or the script exits immediately
while True:
    time.sleep(1)
|
import json
from django.contrib import admin
from .models import LandZone
class LandZoneAdmin(admin.ModelAdmin):
"""
Creates the land zone dashboard section
:author Munir Safi
:since 2020-11-22
"""
model = LandZone
list_display = ('geo_json', 'owner')
ordering = ('owner',)
fieldsets = ()
def change_view(self, request, object_id, form_url='', extra_context=None):
land_zone = LandZone.objects.get(pk=object_id)
land_zone_json = {
'owner': str(land_zone.owner),
'geo_json': json.dumps(land_zone.geo_json)
}
extra_context = extra_context or {}
extra_context['zone_data'] = land_zone_json
response = super(LandZoneAdmin, self).change_view(request, object_id, form_url, extra_context=extra_context)
return response
admin.site.register(LandZone, LandZoneAdmin)
|
import cv2
cv2.ocl.setUseOpenCL(False)
import numpy as np
from copy import deepcopy
from utils.sth import sth
from utils.sampler import create_sampler_manager
from mlagents.mlagents_envs.environment import UnityEnvironment
from mlagents.mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel
from mlagents.mlagents_envs.side_channel.environment_parameters_channel import EnvironmentParametersChannel
class UnityWrapper(object):
def __init__(self, env_args):
self.engine_configuration_channel = EngineConfigurationChannel()
if env_args['train_mode']:
self.engine_configuration_channel.set_configuration_parameters(time_scale=env_args['train_time_scale'])
else:
self.engine_configuration_channel.set_configuration_parameters(width=env_args['width'],
height=env_args['height'],
quality_level=env_args['quality_level'],
time_scale=env_args['inference_time_scale'],
target_frame_rate=env_args['target_frame_rate'])
self.float_properties_channel = EnvironmentParametersChannel()
if env_args['file_path'] is None:
self._env = UnityEnvironment(base_port=5004,
seed=env_args['env_seed'],
side_channels=[self.engine_configuration_channel, self.float_properties_channel])
else:
self._env = UnityEnvironment(file_name=env_args['file_path'],
base_port=env_args['port'],
no_graphics=not env_args['render'],
seed=env_args['env_seed'],
side_channels=[self.engine_configuration_channel, self.float_properties_channel])
def reset(self, **kwargs):
reset_config = kwargs.get('reset_config', {})
for k, v in reset_config.items():
self.float_properties_channel.set_property(k, v)
self._env.reset()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self._env, name)
class BasicWrapper:
def __init__(self, env):
self._env = env
self._env.reset()
def __getattr__(self, name):
if name.startswith('_'):
raise AttributeError("attempted to get missing private attribute '{}'".format(name))
return getattr(self._env, name)
class InfoWrapper(BasicWrapper):
def __init__(self, env, env_args):
super().__init__(env)
self.resize = env_args['resize']
        self.brain_names = self._env.get_behavior_names()  # names of all behaviors ("brains")
        self.fixed_brain_names = list(map(lambda x: x.replace('?', '_'), self.brain_names))
        self.brain_specs = [self._env.get_behavior_spec(b) for b in self.brain_names]  # specs of all brains
        self.vector_idxs = [[i for i, b in enumerate(spec.observation_shapes) if len(b) == 1] for spec in self.brain_specs]  # per brain: indices of vector observations
        self.vector_dims = [[b[0] for b in spec.observation_shapes if len(b) == 1] for spec in self.brain_specs]  # per brain: dimensions of vector observations
        self.visual_idxs = [[i for i, b in enumerate(spec.observation_shapes) if len(b) == 3] for spec in self.brain_specs]  # per brain: indices of visual observations
self.brain_num = len(self.brain_names)
self.visual_sources = [len(v) for v in self.visual_idxs]
self.visual_resolutions = []
stack_visual_nums = env_args['stack_visual_nums'] if env_args['stack_visual_nums'] > 1 else 1
for spec in self.brain_specs:
for b in spec.observation_shapes:
if len(b) == 3:
self.visual_resolutions.append(
list(self.resize)+[list(b)[-1] * stack_visual_nums])
break
else:
self.visual_resolutions.append([])
self.s_dim = [sum(v) for v in self.vector_dims]
self.a_dim_or_list = [spec.action_shape for spec in self.brain_specs]
self.a_size = [spec.action_size for spec in self.brain_specs]
self.is_continuous = [spec.is_action_continuous() for spec in self.brain_specs]
        self.brain_agents = [len(d) for d in [self._env.get_steps(bn)[0] for bn in self.brain_names]]  # number of agents controlled by each brain
def random_action(self):
'''
choose random action for each brain and each agent.
continuous: [-1, 1]
discrete: [0-max, 0-max, ...] i.e. action dim = [2, 3] => action range from [0, 0] to [1, 2].
'''
actions = []
for i in range(self.brain_num):
if self.is_continuous[i]:
actions.append(np.random.random((self.brain_agents[i], self.a_dim_or_list[i])) * 2 - 1) # [-1, 1]
else:
actions.append(np.random.randint(self.a_dim_or_list[i], size=(self.brain_agents[i], self.a_size[i]), dtype=np.int32))
return actions
class UnityReturnWrapper(BasicWrapper):
def __init__(self, env):
super().__init__(env)
def reset(self, **kwargs):
self._env.reset(**kwargs)
return self.get_obs()
def step(self, actions):
for k, v in actions.items():
self._env.set_actions(k, v)
self._env.step()
return self.get_obs()
def get_obs(self):
        '''
        Parse the feedback from the environment into four parts:
        vector observations, visual observations, rewards, and done signals.
        '''
vector = []
visual = []
reward = []
done = []
for i, bn in enumerate(self.brain_names):
vec, vis, r, d = self.coordinate_information(i, bn)
vector.append(vec)
visual.append(vis)
reward.append(r)
done.append(d)
return zip(vector, visual, reward, done)
def coordinate_information(self, i, bn):
        '''
        Collect the decision steps for behavior `bn`, stepping the environment
        until all `n` expected agents have one, and merge rewards/observations
        from any terminal steps encountered along the way.
        '''
n = self.brain_agents[i]
d, t = self._env.get_steps(bn)
ps = [t]
if len(d) != 0 and len(d) != n:
raise ValueError('agents number error.')
while len(d) != n:
self._env.step()
d, t = self._env.get_steps(bn)
ps.append(t)
obs, reward = d.obs, d.reward
done = np.full(n, False)
        for t in ps:  # TODO: could be optimized
if len(t) != 0:
reward[t.agent_id] = t.reward
done[t.agent_id] = True
for _obs, _tobs in zip(obs, t.obs):
_obs[t.agent_id] = _tobs
return (self.deal_vector(n, [obs[vi] for vi in self.vector_idxs[i]]),
self.deal_visual(n, [obs[vi] for vi in self.visual_idxs[i]]),
np.asarray(reward),
np.asarray(done))
def deal_vector(self, n, vecs):
        '''
        Concatenate the vector observations for each agent.
        '''
if len(vecs):
return np.hstack(vecs)
else:
return np.array([]).reshape(n, -1)
def deal_visual(self, n, viss):
        '''
        viss : [camera1, camera2, camera3, ...]
        Group the visual observations per agent.
        '''
ss = []
for j in range(n):
s = []
for v in viss:
s.append(self.resize_image(v[j]))
ss.append(np.array(s)) # [agent1(camera1, camera2, camera3, ...), ...]
return np.array(ss) # [B, N, (H, W, C)]
def resize_image(self, image):
image = cv2.resize(image, tuple(self.resize), interpolation=cv2.INTER_AREA).reshape(list(self.resize)+[-1])
return image
class SamplerWrapper(BasicWrapper):
def __init__(self, env, env_args):
super().__init__(env)
self.reset_config = env_args['reset_config']
self.sampler_manager, self.resample_interval = create_sampler_manager(env_args['sampler_path'], 0)
self.episode = 0
def reset(self):
self.episode += 1
if self.episode % self.resample_interval == 0:
self.reset_config.update(self.sampler_manager.sample_all())
obs = self._env.reset(config=self.reset_config)
return obs
class ActionWrapper(BasicWrapper):
def __init__(self, env):
super().__init__(env)
def step(self, actions):
actions = deepcopy(actions)
for i, k in enumerate(actions.keys()):
if self.is_continuous[i]:
pass
else:
actions[k] = sth.int2action_index(actions[k], self.a_dim_or_list[i])
return self._env.step(actions)
|
#
# PySNMP MIB module SAMSUNG-DIAGNOSTICS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SAMSUNG-DIAGNOSTICS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:52:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
samsungCommonMIB, = mibBuilder.importSymbols("SAMSUNG-COMMON-MIB", "samsungCommonMIB")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Bits, Gauge32, IpAddress, NotificationType, TimeTicks, iso, Counter32, Integer32, Unsigned32, ModuleIdentity, Counter64, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Gauge32", "IpAddress", "NotificationType", "TimeTicks", "iso", "Counter32", "Integer32", "Unsigned32", "ModuleIdentity", "Counter64", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
scmDiagnostics = ModuleIdentity((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64))
if mibBuilder.loadTexts: scmDiagnostics.setLastUpdated('0408240000Z')
if mibBuilder.loadTexts: scmDiagnostics.setOrganization('Samsung Corporation - Samsung Common Management Interface (SCMI) Working Group')
scmDiagnosticsDevice = MibIdentifier((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1))
scmDiagnosticsDeviceTable = MibTable((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2), )
if mibBuilder.loadTexts: scmDiagnosticsDeviceTable.setStatus('current')
scmDiagnosticsDeviceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1), ).setIndexNames((0, "SAMSUNG-DIAGNOSTICS-MIB", "scmDiagnosticsDeviceIndex"))
if mibBuilder.loadTexts: scmDiagnosticsDeviceEntry.setStatus('current')
scmDiagnosticsDeviceIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmDiagnosticsDeviceIndex.setStatus('current')
scmDiagnosticsDeviceItem = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmDiagnosticsDeviceItem.setStatus('current')
scmDiagnosticsDeviceType = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26, 41, 42, 43))).clone(namedValues=NamedValues(("input", 1), ("output", 2), ("cover", 3), ("geeralPrinter", 4), ("mediaPath", 5), ("marker", 6), ("markerSupplies", 7), ("markerColorant", 8), ("fax", 21), ("scanner", 22), ("network", 23), ("usb", 24), ("parallel", 25), ("finisher", 26), ("motor", 41), ("smps", 42), ("memory", 43)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmDiagnosticsDeviceType.setStatus('current')
scmDiagnosticsDeviceDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmDiagnosticsDeviceDescr.setStatus('current')
scmDiagnosticsDeviceID = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmDiagnosticsDeviceID.setStatus('current')
scmDiagnosticsDeviceStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("unknown", 1), ("running", 2), ("warning", 3), ("testing", 4), ("down", 5), ("printing", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmDiagnosticsDeviceStatus.setStatus('current')
scmDiagnosticsDeviceErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmDiagnosticsDeviceErrors.setStatus('current')
scmDiagnosticsRequest = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("on", 1), ("off", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: scmDiagnosticsRequest.setStatus('current')
scmGenBaseDeviceImageFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 236, 11, 5, 11, 64, 1, 2, 1, 999), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: scmGenBaseDeviceImageFileName.setStatus('current')
mibBuilder.exportSymbols("SAMSUNG-DIAGNOSTICS-MIB", PYSNMP_MODULE_ID=scmDiagnostics, scmDiagnosticsDeviceItem=scmDiagnosticsDeviceItem, scmDiagnosticsDeviceEntry=scmDiagnosticsDeviceEntry, scmDiagnosticsDeviceStatus=scmDiagnosticsDeviceStatus, scmGenBaseDeviceImageFileName=scmGenBaseDeviceImageFileName, scmDiagnosticsDeviceDescr=scmDiagnosticsDeviceDescr, scmDiagnosticsDeviceTable=scmDiagnosticsDeviceTable, scmDiagnosticsDeviceID=scmDiagnosticsDeviceID, scmDiagnosticsDeviceErrors=scmDiagnosticsDeviceErrors, scmDiagnosticsRequest=scmDiagnosticsRequest, scmDiagnosticsDeviceIndex=scmDiagnosticsDeviceIndex, scmDiagnosticsDevice=scmDiagnosticsDevice, scmDiagnosticsDeviceType=scmDiagnosticsDeviceType, scmDiagnostics=scmDiagnostics)
|
import numpy as np
from mpi4py import MPI
from time import time
import sys
sys.path.append("/Users/bl/Dropbox/repos/Delight/")
from delight.utils_cy import *
comm = MPI.COMM_WORLD
threadNum = comm.Get_rank()
numThreads = comm.Get_size()
# --------------------------------------
numsamples = int(sys.argv[1])
prefix = "./sv3dhst_"
useSpec = True
usePhoto = not useSpec
importanceSampling = not useSpec
numTypes = 10
nz = 1000
z_grid_bounds = np.logspace(-2, 0.4, nz+1)
muell_range = [0.1, 100]
mulnz_range = [-1, 0.2]
varell_range = [0.1, 100]
varlnz_range = [0.001, 0.1]
# --------------------------------------
if useSpec:
specdata = np.load(prefix+'specdata.npz')
assert specdata["redshifts"].size == specdata["nobj"]
assert specdata["sedfeatures"].shape[0] == specdata["features_zgrid"].size
assert specdata["sedfeatures"].shape[1] == specdata["numFeatures"]
assert specdata["sedfeatures"].shape[2] == specdata["numBands"]
assert specdata["fluxes"].shape[1] == specdata["numBands"]
assert specdata["fluxes"].shape[0] == specdata["nobj"]
assert specdata["fluxes"].shape[1] == specdata["numBands"]
assert specdata["fluxVars"].shape[0] == specdata["nobj"]
assert specdata["fluxVars"].shape[1] == specdata["numBands"]
f_mod_features_spec = np.zeros((specdata["nobj"], specdata["numFeatures"], specdata["numBands"]))
for it in range(specdata["numFeatures"]):
for ib in range(specdata["numBands"]):
f_mod_features_spec[:, it, ib] = np.interp(specdata["redshifts"],
specdata["features_zgrid"], specdata["sedfeatures"][:, it, ib])
if usePhoto:
photdata = np.load(prefix+'photdata.npz')
if useSpec:
assert photdata["numFeatures"] == specdata["numFeatures"]
assert photdata["sedfeatures"].shape[0] == photdata["features_zgrid"].size
assert photdata["sedfeatures"].shape[1] == photdata["numFeatures"]
assert photdata["sedfeatures"].shape[2] == photdata["numBands"]
assert photdata["fluxes"].shape[0] == photdata["nobj"]
assert photdata["fluxes"].shape[1] == photdata["numBands"]
assert photdata["fluxVars"].shape[0] == photdata["nobj"]
assert photdata["fluxVars"].shape[1] == photdata["numBands"]
z_grid_centers = (z_grid_bounds[1:] + z_grid_bounds[:-1]) / 2.0
z_grid_sizes = (z_grid_bounds[1:] - z_grid_bounds[:-1])
f_mod_features_phot = np.zeros((nz, photdata["numFeatures"], photdata["numBands"]))
for it in range(photdata["numFeatures"]):
for ib in range(photdata["numBands"]):
f_mod_features_phot[:, it, ib] = np.interp(z_grid_centers,
photdata["features_zgrid"], photdata["sedfeatures"][:, it, ib])
def hypercube2simplex(zs):
fac = np.concatenate((1 - zs, np.array([1])))
zsb = np.concatenate((np.array([1]), zs))
fs = np.cumprod(zsb) * fac
return fs
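# Illustrative check (not in the original): hypercube2simplex(np.array([0.5, 0.5]))
# returns [0.5, 0.25, 0.25], which sums to 1 -- a stick-breaking map from the
# (n-1)-dimensional unit hypercube onto the n-simplex.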
if useSpec:
numFeatures = specdata["numFeatures"]
if usePhoto:
numFeatures = photdata["numFeatures"]
def logposterior(x):
if not importanceSampling:
if np.any(x < param_ranges_min) or np.any(x > param_ranges_max):
ind = x < param_ranges_min
ind |= x > param_ranges_max
print("parameters outside of allowed range: ", np.where(ind)[0])
return -np.inf
zs = x[0:numTypes-1]
fs = hypercube2simplex(zs)
order = np.argsort(fs)
alpha_zs = x[numTypes-1:numTypes-1+numTypes*(numFeatures-1)]
alpha_fs = np.vstack([hypercube2simplex(alpha_zs[i*(numFeatures-1):(i+1)*(numFeatures-1)])
for i in order]) # numTypes * numFeatures
betas = x[numTypes-1+numTypes*(numFeatures-1):]
mu_ell, mu_lnz, var_ell, var_lnz, corr = np.split(betas, 5)
mu_ell, mu_lnz, var_ell, var_lnz, corr =\
mu_ell[order], mu_lnz[order], var_ell[order], var_lnz[order], corr[order]
rho = corr * np.sqrt(var_ell * var_lnz)
logpost = 0
if useSpec:
f_mod_spec = np.dot(alpha_fs, f_mod_features_spec)
speclnevidences = np.zeros((specdata["nobj"], ))
specobj_evidences_margell(
speclnevidences, fs,
specdata["nobj"], numTypes, specdata["numBands"],
specdata["fluxes"], specdata["fluxVars"], f_mod_spec,
specdata["redshifts"],
mu_ell, mu_lnz, var_ell, var_lnz, rho)
logpost += np.sum(speclnevidences)
if usePhoto:
photolnevidences = np.zeros((photdata["nobj"], ))
f_mod_phot = np.dot(alpha_fs, f_mod_features_phot)
photoobj_evidences_marglnzell(
photolnevidences, fs,
photdata["nobj"], numTypes, nz, photdata["numBands"],
photdata["fluxes"], photdata["fluxVars"], f_mod_phot,
z_grid_centers, z_grid_sizes,
mu_ell, mu_lnz, var_ell, var_lnz, rho)
ind = np.where(np.isfinite(photolnevidences))[0]
logpost += np.sum(photolnevidences[ind])
return logpost
fname = prefix+"samples_thread"+str(threadNum+1)+"on"+str(numThreads)
if importanceSampling:
samples = np.genfromtxt(fname+".txt")
t1 = time()
for i in range(numsamples):
samples[i, 0] += logposterior(samples[i, 1:])
t2 = time()
print('Thread', threadNum+1, "on", numThreads, ': Finished sampling! Took', (t2-t1)/numsamples, 'sec per sample')
np.savetxt(fname+"_importancesampled.txt", samples[0:numsamples, :])
print("Wrote to file", fname+"_importancesampled.txt")
else:
param_ranges = \
[[0, 1]] * (numTypes-1) +\
[[0, 1]] * (numTypes*(numFeatures-1)) +\
[muell_range] * numTypes +\
[mulnz_range] * numTypes +\
[varell_range] * numTypes +\
[varlnz_range] * numTypes +\
[[-.9, .9]] * numTypes
ndim = len(param_ranges)
param_ranges_min = np.array([rr[0] for rr in param_ranges])
param_ranges_max = np.array([rr[1] for rr in param_ranges])
t1 = time()
samples = np.zeros((numsamples, ndim+1))
for i in range(numsamples):
samples[i, 1:] = param_ranges_min + (param_ranges_max - param_ranges_min) * np.random.uniform(0.1, 0.9, size=ndim)
samples[i, 0] = logposterior(samples[i, 1:])
t2 = time()
print('Thread', threadNum+1, "on", numThreads, ': Finished sampling! Took', (t2-t1)/numsamples, 'sec per sample')
t1 = time()
order = np.argsort(samples[:, 0])[::-1]
samples = samples[order, :]
t2 = time()
print('Thread', threadNum+1, "on", numThreads, ': Finished sorting samples. Took', (t2-t1), 'sec')
np.savetxt(fname+".txt", samples)
print("Wrote to file", fname+".txt")
|
name0_1_1_1_0_3_0 = None
name0_1_1_1_0_3_1 = None
name0_1_1_1_0_3_2 = None
name0_1_1_1_0_3_3 = None
name0_1_1_1_0_3_4 = None |
import datetime
import numpy as np
import matplotlib.dates as mdates
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.analysis import polyfit
from floodsystem.flood import stations_level_over_threshold
from floodsystem.stationdata import build_station_list, update_water_levels
"""Explanation
If the derivative is positive, then the water levels are increasing, hence the flood risk increases.
If the relative water level is higher than the typical high, then flooding may occur if the water levels are increasing
Severe flood risk if 1.5 relative water level and a positive derivative
High flood risk if 1.0 relative water level and a positive derivative
Moderate flood risk if 0.75 relative water level and a positive derivative
Low flood risk if any other combination.
"""
def run():
stations = build_station_list()
update_water_levels(stations)
stationsover = stations_level_over_threshold(stations, 0.75)
severe = set()
high = set()
moderate = set()
low = set()
for station in stationsover:
dates, levels = fetch_measure_levels(station[0].measure_id, dt=datetime.timedelta(days=1))
if dates == [] or levels == []:
pass
else:
poly, d0 = polyfit(dates, levels, 4)
            derivative = np.polyder(poly)  # first derivative: rate of change of the water level
            value = derivative(mdates.date2num(dates[-1]) - d0)
            if station[1] > 1.5 and value > 0:
                for i in stations:
                    if station[0].name == i.name and i.town is not None:
                        severe.add(i.town)
            elif station[1] > 1.0 and value > 0:
                for i in stations:
                    if station[0].name == i.name and i.town is not None:
                        high.add(i.town)
            elif station[1] > 0.75 and value > 0:
                for i in stations:
                    if station[0].name == i.name and i.town is not None:
                        moderate.add(i.town)
            else:
                for i in stations:
                    if station[0].name == i.name and i.town is not None:
                        low.add(i.town)
print("--TOWNS WITH SEVERE FLOOD RISK--")
print(severe)
print("--TOWNS WITH HIGH FLOOD RISK--")
print(high)
if __name__ == "__main__":
run() |
from .models import Campaign
def connect_campaign(sender, **kwargs):
reference = kwargs.get('reference')
if not reference:
return
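    # a reference is expected to look like "<campaign-ident>:<suffix>";
    # the part before the first ":" names the campaign (inferred from the split below)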
parts = reference.split(':', 1)
if len(parts) != 2:
return
namespace = parts[0]
try:
campaign = Campaign.objects.get(ident=namespace)
except Campaign.DoesNotExist:
return
sender.campaign = campaign
sender.save()
sender.user.tags.add(campaign.ident)
if not sender.user.is_active:
# First-time requester
sender.user.tags.add('%s-first' % campaign.ident)
|
import discord
import logging
import asyncio
import aiosqlite
import contextlib
from bot import Main
# source: rdanny because I'm lazy to do my own launcher
@contextlib.contextmanager
def setlogging():
    log = logging.getLogger()
    try:
        logging.getLogger('discord').setLevel(logging.INFO)
        logging.getLogger('discord.http').setLevel(logging.INFO)
handler = logging.FileHandler(filename='mod.log', encoding='utf-8', mode='w')
date_fmt = "%d-%m-%Y %H:%M:%S"
fmt = logging.Formatter('[{asctime}] [{levelname:<7}] {name}: {message}', date_fmt, style='{')
handler.setFormatter(fmt)
log.addHandler(handler)
yield
finally:
handlers = log.handlers[:]
for handler in handlers:
handler.close()
log.removeHandler(handler)
def run_bot():
loop = asyncio.get_event_loop()
log = logging.getLogger()
try:
db = loop.run_until_complete(aiosqlite.connect('mod.db'))
except Exception as e:
print(e)
return
bot = Main()
bot.db = db
bot.run()
if __name__ == '__main__':
    with setlogging():
        run_bot() |
#!/usr/bin/env python3
"""The Museum of Incredibly Dull Things.
A museum wants to get rid of some exhibitions.
Vanya, the interior architect, comes up with a plan to remove
the most boring exhibitions. She gives them a rating, and removes
the one with the lowest rating. Just as she finishes rating the
exhibitions, she's called off to an important meeting.
She asks you to write a program that tells her the ratings
of the items after the lowest one is removed.
Create a function that takes a list of integers and removes the smallest value.
Source:
https://edabit.com/challenge/cx7eFvQBzjauLgwgZ
"""
def remove_smallest(ratings: list) -> list:
"""Remove the smallest rating from list."""
if ratings:
min_, index_ = ratings[0], 0
for index, score in enumerate(ratings):
            if score < min_:
min_ = score
index_ = index
ratings.pop(index_)
return ratings
def main():
"""Run sample remove_smallest functions. Do not import."""
print(remove_smallest([1, 2, 3, 4, 5]))
print(remove_smallest([5, 3, 2, 1, 4]))
print(remove_smallest([2, 2, 1, 2, 1]))
if __name__ == "__main__":
main()
|
import os, sys
import cgi
import webapp2
from google.appengine.ext import ndb
from datetime import datetime
import json
from Cube.models import Cube, Content
import logging
#Get Functionality.
class CubeHandler(webapp2.RequestHandler):
def post(self):
# try:
cube_fields = json.loads(self.request.body)
logging.info(cube_fields)
if cube_fields:
cube = Cube()
self.createCube(cube,cube_fields)
cube.put()
else:
logging.info("No input data")
return
# except Exception as e:
# logging.info("Error posting a Content")
def get(self):
user = dict(self.request.params)["user"]
logging.info(user)
response = {"personal": [], "shared": []}
lists = []
#Do a gql Query to fetch the personal; queries
query_1 = ndb.gql("SELECT * FROM Cube WHERE created_by=:1",ndb.Key(urlsafe=str(user))).fetch()
if query_1:
response["personal"] = convert_query_to_dict(query_1)
#Another gql query shall fetch the shared.
logging.info(response["personal"])
query_2 = ndb.gql("SELECT * FROM Cube WHERE shared_list=:1", ndb.Key(urlsafe=str(user))).fetch()
#Code to convert an instance to a dictionary value
logging.info(query_2)
if query_2:
logging.info("adfad")
response["shared"] = convert_query_to_dict(query_2)
logging.info(response)
return self.response.write(json.dumps(response))
def put(self):
# """
# This shall handle the Cube Share Functionality
# """
# try:
cube_shared_fields = json.loads(self.request.body)
user_to_be_shared_with = cube_shared_fields["user"]
cube_key = cube_shared_fields["cube"]
#First Update the CUBE.
cube = ndb.Key(urlsafe=str(cube_key)).get()
list_of_contents = cube.content_list
logging.info(list_of_contents)
shared_list = [key.urlsafe() for key in cube.shared_list]
shared_list.append(user_to_be_shared_with)
cube.shared_list = [ndb.Key(urlsafe=str(key)) for key in list(set(shared_list))]
#Fetch all the contents and input the user2 key.
for content in list_of_contents:
logging.info(content)
content_instance = content.get()
shared_with_users = [user.urlsafe() for user in content_instance.content_shared_list]
shared_with_users.append(user_to_be_shared_with)
content_instance.content_shared_list = [ndb.Key(urlsafe=user) for user in list(set(shared_with_users))]
content_instance.put()
cube.put()
# except Exception as e:
# logging.info("Error while sharing a cube")
def delete(self):
""" This handles the delete functionality"""
pass
def createCube(self,cube, cube_fields):
"""
A helper Function that initializes a cube during a post request.
"""
cube.name = cube_fields["name"]
cube.created_by = ndb.Key(urlsafe=str(cube_fields["user"]))
cube.create_on = datetime.now()
return cube
class ContentHandler(webapp2.RequestHandler):
"""
This Handler handles all the requests for deleting or adding contents.
"""
def get(self):
try:
params = dict(self.request.params)
user_id = params["user"]
# Contents created by the user and the contents shared with the user.
result = {"personal": None, "shared": None}
#Query to read all the personalized contents
query = ndb.gql("SELECT * FROM Content WHERE created_by=:1", ndb.Key(urlsafe=str(user_id))).fetch()
if query:
result["personal"] = [str(query.link) for query in query]
query = ndb.gql("SELECT * FROM Content WHERE content_shared_list=:1", ndb.Key(urlsafe=str(user_id))).fetch()
if query:
result["shared"] = [str(query.link) for query in query]
return self.response.write(json.dumps(result))
except Exception as e:
logging.info("Error in Getting the List")
def post(self):
# try:
content_fields = json.loads(self.request.body)
content = Content()
cubes = content_fields["cubes"] if content_fields.has_key("cubes") else None
logging.info(cubes)
content.created_by = ndb.Key(urlsafe=content_fields["user"])
content.link = content_fields["link"]
content.created_on = datetime.now()
if cubes:
content.put()
key = content.key
for cube in cubes:
#Fetch the cube and append it to the list of contents.
cube_instance = ndb.Key(urlsafe=str(cube)).get()
logging.info(cube_instance)
cube_instance.content_list.append(key)
cube_instance.put()
else:
content.independent_content = True
content.put()
# except Exception as e:
# logging.info("Error in creating a Content")
def put(self):
# """
# Put Here amounts to Sharing a Content.
# """
# try:
content_share_fields = json.loads(self.request.body)
user_key_to_share = content_share_fields["user"]
content_key = content_share_fields["key"]
#Update the information.
fetched_content = ndb.Key(urlsafe=str(content_key)).get()
prev_content_list = fetched_content.content_shared_list
if prev_content_list:
prev_content_list.append(ndb.Key(urlsafe=str(user_key_to_share)))
else:
prev_content_list = [ndb.Key(urlsafe=str(user_key_to_share))]
fetched_content.content_shared_list = prev_content_list
fetched_content.put()
#
# except Exception as e:
# logging.info("Error in Sharing with others")
class CubeContentDeleteHandler(webapp2.RequestHandler):
""" This class handles the delete functionality
"""
def get(self):
pass
def post(self):
"""
This Handler takes a cube id and a content id.
"""
fields = json.loads(self.request.body)
logging.info(fields)
cube = fields["cube"] if fields.has_key("cube") else None
logging.info("adadads")
content = fields["content"] if fields.has_key("content") else None
logging.info(content)
if cube:
if content:
#Fetch the Cube.
cube_instance = ndb.Key(urlsafe=str(cube)).get()
content_instance = ndb.Key(urlsafe=str(content)).get()
cube_shared_list = [key.urlsafe() for key in cube_instance.shared_list]
content_list = [content.urlsafe() for content in cube_instance.content_list]
logging.info(content)
logging.info(content_list)
                if content in content_list:  # `content` is already a urlsafe key string
                    index = content_list.index(content)
                    del content_list[index]
                cube_instance.content_list = [ndb.Key(urlsafe=str(key)) for key in list(set(content_list))]
                cube_instance.put()
                #Fetch the Content now.
                logging.info(content_instance)
                created_by = content_instance.created_by.urlsafe()
                shared_content_list = [key.urlsafe() for key in content_instance.content_shared_list]
logging.info(content)
if created_by in cube_shared_list:
for key in cube_shared_list:
index = shared_content_list.index(key)
del shared_content_list[index]
if shared_content_list:
content_instance.created_by = ndb.Key(urlsafe=shared_content_list[0])
else:
#Delete the instance.
pass
else:
# Delete the Cube
ndb.Key(urlsafe=str(cube)).delete()
#Check for the
def convert_query_to_dict(query_object):
query_list = []
logging.info(query_object)
for query in query_object:
query_obj = {}
query_obj["name"] = str(query.name)
if query.content_list:
query_obj["content_list"] = [id.urlsafe() for id in query.content_list]
query_list.append(query_obj)
logging.info(query_list)
return query_list
def get_name_from_key(content):
return "www.google.com" |
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import Motor, TouchSensor, ColorSensor, GyroSensor
from pybricks.parameters import Port, Direction, Button, Stop
from pybricks.tools import wait, StopWatch
from pybricks.robotics import DriveBase
from pybricks.media.ev3dev import ImageFile, SoundFile
# Initialize the EV3 brick.
ev3 = EV3Brick()
# Configure 2 motors on Ports A and B. Set the motor directions to
# clockwise, so that positive speed values make the robot move
# forward. These will be the left and right motors of the Tank Bot.
left_motor = Motor(Port.B, Direction.CLOCKWISE)
right_motor = Motor(Port.C, Direction.CLOCKWISE)
# The wheel diameter of the Bot in mm.
WHEEL_DIAMETER = 54
# The axle track is the distance between the centers of each of the
# wheels. It is set to 125 mm for this Tank Bot.
AXLE_TRACK = 125
# The Driving Base is comprised of 2 motors. There is a wheel on each
# motor. The wheel diameter and axle track values are used to make the
# motors move at the correct speed when you give a drive command.
#robot = DriveBase(left_motor, right_motor, WHEEL_DIAMETER, AXLE_TRACK)
# Sets up the robot's drive base
def setupTank():
global robot
robot = DriveBase(left_motor, right_motor, WHEEL_DIAMETER, AXLE_TRACK)
    # Stop the robot so its drive settings can be changed
    robot.stop()
# settings(straight_speed, straight_acceleration, turn_rate, turn_acceleration)
robot.settings(800, 5000, 40000, 100000)
# Sets up the tank for the first time
setupTank()
# Initialize the steering and overshoot variables.
steering = 60
overshoot = 5
# Set up the Gyro Sensor. It is used to measure the angle of the robot.
# Keep the Gyro Sensor and EV3 steady when connecting the cable and
# during start-up of the EV3.
gyroSensor = GyroSensor(Port.S1)
#Set up the fork lift
liftMotor = Motor(Port.D, Direction.CLOCKWISE, [8, 12, 28])
# Functions are under this line
#-------------------------------------------------------------------------
def rightTurnLoop(inputAngle):
gyroSensor.reset_angle(0)
print("Sub target angle " + str(inputAngle - 10))
# Turn quickly only if the angle is bigger than 10
if (inputAngle > 10):
robot.turn(inputAngle - 10)
print(gyroSensor.angle())
print("Target angle " + str(inputAngle))
print("Loop condition " + str(gyroSensor.angle() < inputAngle))
if (gyroSensor.angle() < inputAngle):
while (gyroSensor.angle() < inputAngle):
robot.turn(1)
print(gyroSensor.angle())
if (gyroSensor.angle() > inputAngle):
while (gyroSensor.angle() > inputAngle):
robot.turn(-1)
print(gyroSensor.angle())
def leftTurnLoop(inputAngle):
gyroSensor.reset_angle(0)
print("Target angle " + str(-inputAngle))
print("Sub target angle " + str((-inputAngle) + 10))
# Turn quickly only if the angle is bigger than 10
if (inputAngle > 10):
robot.turn((-inputAngle) + 10)
print(gyroSensor.angle())
if (gyroSensor.angle() > -inputAngle):
while (gyroSensor.angle() > -inputAngle):
robot.turn(-1)
print(gyroSensor.angle())
if (gyroSensor.angle() < -inputAngle):
while (gyroSensor.angle() < -inputAngle):
robot.turn(1)
print(gyroSensor.angle())
# torque; only for run_until_stalled, limit is the torque of the motor
# height; max height to lift to
def grab(torque, height):
# Opens up the arm
liftMotor.run_target(400, -450)
# move in to grab by length of the black pins on the fork
forkLength = 30
robot.straight(forkLength)
# alternative lift function
liftMotor.run_target(250, height)
def letGo():
liftMotor.run_target(400, -450)
liftMotor.hold()
# back out by length of the black pins on the fork
    forkLength = 30
    constant = 10
    robot.straight(-forkLength - constant)
# restore fork position
liftMotor.run_target(400, 0)
# Moves the fork up
# Must start with the fork positioned like a forklift
def liftUp():
# Reset the position of the fork to 0
liftMotor.run_target(400, 0)
# lift it up
liftMotor.run_target(400, 450)
# Moves the fork down
def liftDown():
liftMotor.run_target(400, 0)
def ram():
# Record the angle while moving for better accuracy
robot.straight(500)
gyroSensor.reset_angle(0)
angleBefore = gyroSensor.angle()
robot.straight(270)
ramDistance = 20
index = 0
indexMax = 20
while (index < indexMax):
wait(100)
robot.straight(-ramDistance)
print("Back distance " + str(-ramDistance))
wait(100)
# The distance changes for each push
robot.straight(ramDistance + 30)
print("Forward distance " + str(ramDistance + 30))
index += 1
angleAfter = gyroSensor.angle()
print("Angle in " + str(angleBefore))
print("Angle after " + str(angleAfter))
# Go back a bit
robot.straight(-50)
if (0 > angleAfter):
rightTurnLoop(abs(angleAfter))
else:
restoreAngle(angleBefore, angleAfter)
# Go back to start
robot.straight(-1200)
# Restores the previous angle of the bot
def restoreAngle(angleBefore, angleAfter):
if (angleBefore != angleAfter):
if (angleAfter < angleBefore):
rightTurnLoop(abs(angleAfter - angleBefore))
if (angleAfter > angleBefore):
leftTurnLoop(abs(angleAfter - angleBefore))
# Goes under the pull up bar
def goUnder():
robot.straight(800)
leftTurnLoop(90)
robot.straight(640)
leftTurnLoop(65)
robot.straight(350)
dance()
def dance():
gyroSensor.reset_angle(0)
robot.turn(180)
robot.straight(-10)
robot.turn(360)
robot.straight(10)
robot.turn(360 * 2)
# Takes an string input
# Makes the robot say the input out loud in Norwegian
def speakNor(whatToSay):
ev3.speaker.set_speech_options(language='no', voice='f1', speed=200, pitch=70)
ev3.speaker.say(whatToSay)
# Pulls the arms down from the locked position
def releaseArms():
liftMotor.run_target(400, -1050)
liftMotor.run_target(400, 180)
liftMotor.run_target(400, 0)
def wallSmash():
robot.straight(280)
leftTurnLoop(90)
robot.straight(1000)
robot.straight(-170)
gyroSensor.reset_angle(90)
leftTurnLoop(90)
robot.straight(800)
def treadmill_complete():
robot.straight(1000)
gyroSensor.reset_angle(0)
robot.straight(650)
treadmill()
angleAfter = gyroSensor.angle()
robot.straight(-200)
restoreAngle(0, angleAfter)
rightTurnLoop(180)
robot.straight(1700)
def treadmill():
# Disable the tank
robot.stop()
index = 0
while index < 20:
right_motor.dc(100)
wait(100)
index += 1
# Set up the tank again
setupTank()
#-------------------------------------------------------------------------
# checks which button is pressed and does methods that are being called upon.
def buttonPress():
if Button.UP in ev3.buttons.pressed(): # Checks if button UP is pressed.
print("You pressed the up button")
ev3.speaker.beep()
wait(400)
ram()
speakNor('Er du sikker på at du trykket rett knapp?')
elif Button.RIGHT in ev3.buttons.pressed():
print("You pressed the right button")
ev3.speaker.beep()
wait(400)
wallSmash()
speakNor('Tror ikke denne knappen gjør noe heller...')
elif Button.DOWN in ev3.buttons.pressed(): # Checks if button right is pressed.
print("You pressed the down button")
ev3.speaker.beep()
wait(400)
goUnder()
speakNor('Ja, ja, ja, dette var gøy')
elif Button.LEFT in ev3.buttons.pressed(): # Checks if button down is pressed.
print("You pressed the left button")
ev3.speaker.beep()
wait(400)
treadmill_complete()
speakNor('Tror du mente å trykke høyre knapp.')
#-------------------------------------------------------------------------
ev3.speaker.beep()
print('I am ready!')
# Main loop: makes buttonPress() run forever!
while True:
buttonPress()
    wait(10)  # pybricks wait() takes milliseconds
|
# Copyright 2019-2022 Cambridge Quantum Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytket.circuit import Circuit, OpType # type: ignore
from pytket.architecture import Architecture # type: ignore
from pytket.passes import AASRouting, CNotSynthType, ComposePhasePolyBoxes # type: ignore
from pytket.predicates import CompilationUnit # type: ignore
def test_AAS() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.H(0).H(2)
circ.CX(0, 1).CX(1, 2).CX(3, 4)
circ.Rz(0, 1)
pass1 = AASRouting(arc, lookahead=2)
assert pass1.apply(circ)
def test_AAS_2() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.H(0).H(2)
circ.CX(0, 1).CX(1, 2).CX(3, 4)
circ.Rz(0, 1)
pass1 = AASRouting(arc)
assert pass1.apply(circ)
def test_AAS_3() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.H(0).H(2)
circ.CX(0, 1).CX(1, 2).CX(3, 4)
circ.Rz(0, 1)
pass1 = AASRouting(arc, lookahead=2)
assert pass1.apply(circ)
def test_AAS_4() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.H(0).H(2)
circ.CX(0, 1).CX(1, 2).CX(3, 4)
circ.Rz(0, 1)
pass1 = AASRouting(arc)
assert pass1.apply(circ)
def test_AAS_5() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.H(0).H(2)
circ.CX(0, 1).CX(1, 2).CX(3, 4)
circ.Rz(0, 1)
pass1 = AASRouting(arc, lookahead=2)
assert pass1.apply(circ)
def test_AAS_6() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.H(0).H(2)
circ.CX(0, 1).CX(1, 2).CX(3, 4)
circ.Rz(0, 1)
pass1 = AASRouting(arc)
assert pass1.apply(circ)
def test_AAS_7() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.H(0).H(2)
circ.CX(0, 1).CX(1, 2).CX(3, 4)
circ.Rz(0, 1)
pass1 = AASRouting(arc, lookahead=2)
assert pass1.apply(circ)
def test_AAS_8() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4]])
circ = Circuit(5)
circ.CX(0, 1)
circ.H(0)
circ.Z(1)
circ.CX(0, 3)
circ.Rx(1.5, 3)
circ.CX(2, 4)
circ.X(2)
circ.CX(1, 4)
circ.CX(0, 4)
pass1 = AASRouting(arc, lookahead=2)
assert pass1.apply(circ)
def test_AAS_9() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7], [7, 8]])
circ = Circuit(9)
circ.CX(0, 8).CX(8, 1).CX(1, 7).CX(7, 2).CX(2, 6).CX(6, 3).CX(3, 5).CX(5, 4)
circ.Rz(0.5, 4)
pass1 = AASRouting(arc, lookahead=2)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.valid_connectivity(arc, False, True)
assert out_circ.depth() < 56
def test_AAS_10() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
circ = Circuit(7)
circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3)
circ.Rz(0.5, 3)
pass1 = AASRouting(arc, lookahead=2)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.valid_connectivity(arc, False, True)
assert out_circ.depth() < 33
def test_AAS_11() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
circ = Circuit(7)
circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3)
circ.Rz(0.5, 3)
pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.SWAP)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.valid_connectivity(arc, False, True)
assert out_circ.depth() == 119
def test_AAS_12() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
circ = Circuit(7)
circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3)
circ.Rz(0.5, 3)
pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.HamPath)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.valid_connectivity(arc, False, True)
assert out_circ.depth() == 36
def test_AAS_13() -> None:
arc = Architecture([[0, 1], [1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
circ = Circuit(7)
circ.CX(0, 6).CX(6, 1).CX(1, 5).CX(5, 2).CX(2, 4).CX(4, 3)
circ.Rz(0.5, 3)
pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.valid_connectivity(arc, False, True)
assert out_circ.depth() == 28
def test_AAS_14() -> None:
arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]])
circ = Circuit(3).CZ(0, 1)
pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.valid_connectivity(arc, False, True)
assert out_circ.depth() == 3
def test_AAS_15() -> None:
arc = Architecture([[0, 1], [1, 0], [1, 2], [2, 1]])
circ = Circuit(2).CZ(0, 1)
pass1 = AASRouting(arc, lookahead=1, cnotsynthtype=CNotSynthType.Rec)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.valid_connectivity(arc, False, True)
assert out_circ.depth() == 3
def test_noncontiguous_arc_phase_poly() -> None:
# testing non-contiguous ascending named nodes
arc = Architecture([[0, 2]])
pass1 = AASRouting(arc, lookahead=1)
c = Circuit(2).H(0).H(1)
pass1.apply(c)
assert c.n_gates_of_type(OpType.H) == 2
    assert c.n_gates_of_type(OpType.CX) == 0
def test_compose_ppb() -> None:
circ = Circuit(5).CZ(0, 1).CZ(1, 2).CX(2, 3).CX(3, 4)
pass1 = ComposePhasePolyBoxes(min_size=2)
cu = CompilationUnit(circ)
assert pass1.apply(cu)
out_circ = cu.circuit
assert out_circ.depth() == 6
if __name__ == "__main__":
test_AAS()
test_AAS_2()
test_AAS_3()
test_AAS_4()
test_AAS_5()
test_AAS_6()
test_AAS_7()
test_AAS_8()
test_AAS_9()
test_AAS_10()
test_AAS_11()
test_AAS_12()
test_AAS_13()
test_AAS_14()
test_AAS_15()
test_noncontiguous_arc_phase_poly()
test_compose_ppb()
|
# -*- coding: utf-8 -*-
"""
@date: 2020/11/4 1:49 PM
@file: build.py
@author: zj
@description:
"""
import torch.nn as nn
from .. import registry
from .sgd import build_sgd
from .adam import build_adam
def build_optimizer(cfg, model):
assert isinstance(model, nn.Module)
return registry.OPTIMIZERS[cfg.OPTIMIZER.NAME](cfg, model)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-10-22 19:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('access', '0013_auto_20160608_0018'),
]
operations = [
migrations.AddField(
model_name='emailaliastype',
name='priority',
field=models.IntegerField(default=0, help_text='When determining the e-mail address of a person in relation to a specific event, the e-mail alias type with the smallest priority number wins.', verbose_name='priority'),
),
]
|
# Author : ThammeGowda Narayanaswamy
# Email : [email protected]
# Student ID : 2074669439
# Subject : CSCI 567 Fall 16 Homework 2
# Date : Oct 2, 2016
from __future__ import print_function
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
quiet = True # to disable unwanted output
def log(*args):
if not quiet:
print(args)
### Global Parameters
alpha = 0.001 # learning rate
conv_tol = 0.005
num_iter = 50000
print_interval = 500
print("The following parameters will be used for gradient descent optimizer:")
print("\t Learning rate=", alpha)
print("\t Convergence Tolerance=", conv_tol)
print("\t Max Iterations=", num_iter)
###
from sklearn.datasets import load_boston
boston = load_boston()
dfX = boston.data
dfY = np.array([boston.target]).transpose()
assert dfX.shape[0] == dfY.shape[0]
test_split_rule = np.arange(dfX.shape[0], dtype=int) % 7 == 0
testX, testY = dfX[test_split_rule], dfY[test_split_rule]
trainX, trainY = dfX[~test_split_rule], dfY[~test_split_rule]
print("##### 3.1 DATA ANALYSIS / EXPLORATION ")
print('Feature Names:\n', pd.DataFrame(boston.feature_names))
print('Dataset shape:', dfX.shape, dfY.shape)
print('Test shape:', testX.shape, testY.shape)
print('Train shape:', trainX.shape, trainY.shape)
n_attrs = trainX.shape[1]
attrs = [trainX[:, i] for i in range(n_attrs)]
stds = [a.std() for a in attrs]
corrs = np.zeros(shape=(n_attrs, n_attrs))
for i, a in enumerate(attrs):
for j in range(0, i + 1):
b = attrs[j]
res = np.cov(a, b) / (stds[i] * stds[j])
corrs[i][j] = res[0,1]
print("Correlations between the attributes:")
print(pd.DataFrame(corrs))
def pearson_cor(X, Y, names):
Y = Y[:, 0]
assert X.shape[0] == Y.shape[0]
assert X.shape[1] == len(names)
y_std = Y.std()
cor = []
    for i in range(X.shape[1]):  # each column
        attr = X[:, i]
        # Correlation between the attribute and the target value
        cor_ab = np.cov(attr, Y) / (attr.std() * y_std)
cor_ab = abs(cor_ab[0,1])
cor.append((names[i], cor_ab))
return cor
def show_plots(bins=10):
plt.imshow(corrs, cmap='hot', interpolation='nearest')
plt.title("Heatmap of correlation between attributes")
plt.show()
for i, cor_ab in enumerate(pearson_cor(trainX, trainY, boston.feature_names)):
print(cor_ab)
print("##### Generating Histograms")
plt.figure(1, figsize=(16, 16))
for i, attr in enumerate(attrs):
plt.subplot(5, 3, 1 + i)
plt.hist(attr, bins=bins)
plt.title("Histogram of '%s' with %d bins" % (boston.feature_names[i], bins))
plt.subplot(5, 3, len(attrs) + 1)
plt.hist(trainY, bins=bins)
plt.title("Histogram of Target Price with %d bins" % bins)
plt.savefig('Histograms.png')
plt.show()
show_plots()
print("\n\n###### 3.2 a LINEAR REGRESSION ALGORITHM")
## Linear Regression
def predict(X, W):
return np.matmul(X, W)
def MSECost(Y2, Y1):
# Cost = 1/N SIGMA[(XW-Y)^2]
return float(np.sum((Y2 - Y1) ** 2) / len(Y2))
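# Example (illustrative): MSECost(np.array([[1.], [2.]]), np.array([[0.], [0.]]))
# = (1 + 4) / 2 = 2.5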
def analytical_optimizer(X, Y):
'''W = [X^T X]^{-1} X^T Y '''
return np.matmul(
np.matmul(
np.linalg.pinv(np.matmul(X.transpose(), X)),
X.transpose()),
Y)
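# Illustrative check (not in the original): for X = [[1.], [2.]] and Y = [[2.], [4.]],
# analytical_optimizer(X, Y) returns [[2.]] -- the exact least-squares weight,
# since W = (X^T X)^{-1} X^T Y = (5)^{-1} * 10 = 2.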
def gradient_desc(X, Y, W, alpha,
num_iter = num_iter, conv_tol=conv_tol, print_interval = print_interval):
c = float('inf')
log("Learn Rate", alpha)
for i in range(num_iter):
# delta = 2/N SIGMA[(XW - Y)*x]
predY = predict(X, W)
diff = predY - Y
delta = np.sum(np.multiply(X, diff), axis=0) # sum top to bottom for each attribute
delta = delta * 2.0 / len(Y)
delta = np.array([delta]).transpose() # restore vector shape of (n_attr x 1)
W = (W - alpha * delta)
if i % print_interval == 0:
predY = predict(X, W)
newcost = MSECost(predY, Y)
log("#%d, cost = %.8g" % (i, newcost))
if np.isnan(newcost) or np.isinf(newcost) or np.isneginf(newcost):
raise Exception("ERROR: number overflow, please adjust learning rate")
diff = abs(newcost - c)
c = newcost
if diff < conv_tol:
log("Converged with tolerance %f " % conv_tol)
break
if i % (print_interval * 10) == 0:
log(W.flatten())
return W
# compute means and stds
class LinearRegression(object):
def __init__(self, X, Y, learn_rate=0.001, num_iter=10000, conv_tol=0.01, opt='analytical'):
self.means = X.mean(axis=0)
self.stds = X.std(axis=0)
X = self.normalize(X)
self.n_attrs = X.shape[1]
if opt == 'gradient_desc':
W = np.random.rand(self.n_attrs, 1)
self.W = gradient_desc(X, Y, W, alpha=learn_rate,
num_iter=num_iter, conv_tol=conv_tol)
elif opt == 'analytical':
self.W = analytical_optimizer(X, Y)
else:
raise Exception('Unknown Optimizer %s' % opt)
def normalize(self, X):
X = (X - self.means) / self.stds
# Bias is added as a weight to simplify the calculations
X = np.insert(X, 0, 1, axis=1)
return X
def predict(self, X, normalize=True):
if normalize:
X = self.normalize(X)
return np.matmul(X, self.W)
def find_cost(self, X, Y,normalize=True):
return MSECost(self.predict(X, normalize=normalize), Y)
for opt_val in ["analytical", "gradient_desc"]:
print("Using %s optimizer" % opt_val)
linreg = LinearRegression(trainX, trainY, alpha, num_iter, conv_tol, opt=opt_val)
print("W=", linreg.W.flatten())
train_mse_cost = linreg.find_cost(trainX, trainY)
test_mse_cost = linreg.find_cost(testX, testY)
print('Train MSE::', train_mse_cost, '\tTest MSE::', test_mse_cost)
######################
print("\n\n##### 3.2 b RIDGE REGRESSION########")
def gradient_desc_ridge(X, Y, W, alpha, lambd,
num_iter = 1000, conv_tol=0.01, check_interval = 500):
c = float('inf')
log("Learn Rate", alpha)
for i in range(num_iter):
#
        # delta = 2/N SIGMA[(XW - Y)*x] + 2 * lambda * W
diff = predict(X, W) - Y
delta = np.sum(np.multiply(X, diff), axis=0) # sum top to bottom for each attribute
delta = delta * 2.0 / len(Y)
delta = np.array([delta]).transpose() # restore vector shape of (n_attr x 1)
delta = delta + (2 * lambd * W) # Vectors addition
W = (W - alpha * delta)
if i % check_interval == 0:
predY = predict(X, W)
newcost = MSECost(predY, Y)
log("#%d, cost = %.8g" % (i, newcost))
if np.isnan(newcost) or np.isinf(newcost) or np.isneginf(newcost):
raise Exception("ERROR: number overflow, please adjust learning rate")
diff = abs(newcost - c)
c = newcost
if diff < conv_tol:
log("Converged with tolerance %f " % conv_tol)
break
if not quiet and i % (check_interval * 10) == 0:
print(W.flatten())
return W
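# For reference (illustrative, not used below): the cost minimized by
# gradient_desc_ridge, 1/N * ||XW - Y||^2 + lambd * ||W||^2, also has a
# closed form, W = (X^T X + N * lambd * I)^{-1} X^T Y, which can serve as a
# cross-check for the gradient-descent solution on small problems.
def _ridge_closed_form(X, Y, lambd):
    A = np.matmul(X.transpose(), X) + len(Y) * lambd * np.eye(X.shape[1])
    return np.matmul(np.linalg.pinv(A), np.matmul(X.transpose(), Y))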
class RidgeRegression(LinearRegression):
def __init__(self, X, Y, learn_rate=0.001, lambd=0.1, num_iter=1000, conv_tol=0.1):
self.means = X.mean(axis=0)
self.stds = X.std(axis=0)
X = self.normalize(X)
self.n_attrs = X.shape[1]
W = np.random.rand(self.n_attrs, 1)
self.lambd = lambd
self.W = gradient_desc_ridge(X, Y, W, alpha=learn_rate, lambd=lambd,
num_iter=num_iter, conv_tol=conv_tol)
lambds = [0.01, 0.1, 1.0]
results = []
print("\nLambda\t\tTrain MSE\t\tTest MSE")
for lambd in lambds:
ridreg = RidgeRegression(trainX, trainY, alpha, lambd, num_iter, conv_tol)
train_mse = ridreg.find_cost(trainX, trainY)
test_mse = ridreg.find_cost(testX, testY)
t = (lambd, train_mse, test_mse)
print("\t\t".join(map(lambda x: str(x), t)))
###########################
# 10 fold validation
print("\n\n###### 3.2 c RIDGE REGRESSION : 10-FOLD CROSS VALIDATION########")
k = 10
def k_fold_cv(X, Y, lambd, k):
n = len(Y)
# Shuffle
shuf_idx = np.random.permutation(n)
X, Y = X[shuf_idx], Y[shuf_idx]
assert k > 1 and k <= n
    ss = n // k  # split size (integer, so the slices below index cleanly)
# split
    mse_scores = []  # per-fold test MSE
for i in range(k):
start, end = i * ss, (i + 1) * ss
if i == k-1 and end < n:
# anything left over shall go to the last split (if n is not multiple of k)
end = n
# ith split is for testing
test_X, test_Y = X[start:end], Y[start:end]
# everything else for training
        train_X = np.delete(X, np.s_[start:end], axis=0)
        train_Y = np.delete(Y, np.s_[start:end], axis=0)
ridreg = RidgeRegression(train_X, train_Y, alpha, lambd, num_iter, conv_tol)
        fold_mse = ridreg.find_cost(test_X, test_Y)
        mse_scores.append(fold_mse)
    return np.array(mse_scores).mean()
lambds = [0.0001, 0.001, 0.01, 0.1, 1, 10]
print("\nLambda\t\tTrain MSE\t\tTest MSE")
for lambd in lambds:
    cv_mse = k_fold_cv(trainX, trainY, lambd, k)
    ridreg = RidgeRegression(trainX, trainY, alpha, lambd, num_iter, conv_tol)
    test_mse = ridreg.find_cost(testX, testY)
    t = (lambd, cv_mse, test_mse)
    print("\t\t".join(map(str, t)))
######################################################
print("\n\n###### 3.3 a FEATURE SELECTION: TOP 4 CORRELATIONS WITH TARGET")
# Top 4 features
top4TrainX = np.zeros(shape=(len(trainY), 0))
top4TestX = np.zeros(shape=(len(testY), 0))
tuples = pearson_cor(trainX, trainY, boston.feature_names)
target_cor = dict(tuples)
top4 = sorted(target_cor, key=target_cor.get, reverse=True)[:4]
for k in top4:
column = np.where(boston.feature_names == k)[0][0]
x = np.array([trainX[:, column]]).transpose()
top4TrainX = np.concatenate((top4TrainX, x), axis=1)
x = np.array([testX[:, column]]).transpose()
top4TestX = np.concatenate((top4TestX, x), axis=1)
print("Top4 attrs::", top4)
linreg = LinearRegression(top4TrainX, trainY, alpha)
train_mse_cost = MSECost(linreg.predict(top4TrainX), trainY)
test_mse_cost = MSECost(linreg.predict(top4TestX), testY)
print('Train MSE::', train_mse_cost, " Test MSE::", test_mse_cost)
##########
print("\n\n###### 3.3 b FEATURE SELECTION: TOP 4 CORRELATIONS WITH RESIDUAL ERROR")
# top 4 from residual errors
X_train, Y_train = trainX, trainY
X_test, Y_test = testX, testY
names = boston.feature_names
Z_train = np.zeros(shape=(X_train.shape[0], 0))
Z_test = np.zeros(shape=(X_test.shape[0], 0))
pred_Y = None
for i in range(4):
# first time we use the target, then on we use the Y - predY
_Y = Y_train if pred_Y is None else np.subtract(Y_train, pred_Y)
corrs = dict(pearson_cor(X_train, _Y, names))
    top1 = max(corrs, key=corrs.get)
print("Choosing: ", top1)
top1_col = names.tolist().index(top1)
x = np.array([X_train[:, top1_col]]).transpose()
Z_train = np.concatenate((Z_train, x), axis=1)
x = np.array([X_test[:, top1_col]]).transpose()
Z_test = np.concatenate((Z_test, x), axis=1)
linreg = LinearRegression(Z_train, Y_train, alpha)
pred_Y = linreg.predict(Z_train)
train_mse_cost = MSECost(pred_Y, Y_train)
test_mse_cost = MSECost(linreg.predict(Z_test), Y_test)
print('Train MSE::', train_mse_cost, " Test MSE::", test_mse_cost)
# for the next iteration
X_train = np.delete(X_train, top1_col, axis=1)
X_test = np.delete(X_test, top1_col, axis=1)
names = np.delete(names, top1_col)
######################################
print("\n\n###### 3.3 c FEATURE SELECTION: TOP 4 USING BRUTEFORCE")
# Brute force
import itertools
def brute_force_select():
least_test_cost = float('inf')
best_combination = None
train_cost = None
    # use trainX/testX here: X_train/X_test had columns removed by the
    # previous section, so referring to them would be misleading
    for cols in itertools.combinations(range(trainX.shape[1]), 4):
        Z_train = np.zeros(shape=(trainX.shape[0], 0))
        Z_test = np.zeros(shape=(testX.shape[0], 0))
for col in cols:
x = np.array([trainX[:, col]]).transpose()
Z_train = np.concatenate((Z_train, x), axis=1)
x = np.array([testX[:, col]]).transpose()
Z_test = np.concatenate((Z_test, x), axis=1)
        linreg = LinearRegression(Z_train, trainY, alpha)
        train_mse_cost = MSECost(linreg.predict(Z_train), trainY)
        test_mse_cost = MSECost(linreg.predict(Z_test), testY)
if test_mse_cost < least_test_cost:
best_combination = cols
least_test_cost = test_mse_cost
train_cost = train_mse_cost
# print(cols, 'Train MSE::', train_mse_cost, " Test MSE::", test_mse_cost)
return best_combination, least_test_cost, train_cost
try:
print("Performing bruteforce feature selection. Hang tight for results or press CTRL+C to skip")
best_combination, least_test_cost, train_cost = brute_force_select()
print('Best Combination:: ', best_combination, ", Least Test MSE::", least_test_cost, ", Train MSE::", train_cost)
except KeyboardInterrupt:
    print("Skipping bruteforce selection...")
###############################
print("\n\n###### 3.4 FEATURE EXPANSION")
# Feature Expansion
Z_train = trainX
Z_test = testX
for i in range(n_attrs):
xi_train = np.array([trainX[:, i]]).transpose()
xi_test = np.array([testX[:, i]]).transpose()
for j in range(0, i+1):
xj_train = np.array([trainX[:, j]]).transpose()
xj_test = np.array([testX[:, j]]).transpose()
Z_train = np.concatenate((Z_train, xi_train * xj_train), axis=1)
Z_test = np.concatenate((Z_test, xi_test * xj_test), axis=1)
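# Note (illustrative): the nested loop above appends every product x_i * x_j
# with j <= i, i.e. n_attrs * (n_attrs + 1) / 2 extra columns, squares
# included (e.g. 13 Boston features grow to 13 + 91 = 104 columns).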
print('Train Shape:', Z_train.shape)
print('Test Shape:', Z_test.shape)
linreg = LinearRegression(Z_train, trainY, alpha)
pred_Y = linreg.predict(Z_train)
train_mse_cost = MSECost(pred_Y, trainY)
test_mse_cost = MSECost(linreg.predict(Z_test), testY)
print('Train MSE::', train_mse_cost, " Test MSE::", test_mse_cost)
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss utility code."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
from typing import Optional, Text
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.typing import types
def pinball_loss(
y_true: types.Tensor,
y_pred: types.Tensor,
weights: types.Float = 1.0,
scope: Optional[Text] = None,
loss_collection: tf.compat.v1.GraphKeys = tf.compat.v1.GraphKeys.LOSSES,
reduction: tf.compat.v1.losses.Reduction = tf.compat.v1.losses.Reduction
.SUM_BY_NONZERO_WEIGHTS,
quantile: float = 0.5) -> types.Float:
"""Adds a Pinball loss for quantile regression.
```
loss = quantile * (y_true - y_pred) if y_true > y_pred
loss = (quantile - 1) * (y_true - y_pred) otherwise
```
See: https://en.wikipedia.org/wiki/Quantile_regression#Quantiles
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
quantile: A float between 0. and 1., the quantile we want to regress.
Returns:
Weighted Pinball loss float `Tensor`. If `reduction` is `NONE`, this has the
same shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if y_true is None:
raise ValueError('y_true must not be None.')
if y_pred is None:
raise ValueError('y_pred must not be None.')
with tf.compat.v1.name_scope(scope, 'pinball_loss',
(y_pred, y_true, weights)) as scope:
y_pred = tf.cast(y_pred, dtype=tf.float32)
y_true = tf.cast(y_true, dtype=tf.float32)
y_pred.get_shape().assert_is_compatible_with(y_true.get_shape())
error = tf.subtract(y_true, y_pred)
loss_tensor = tf.maximum(quantile * error, (quantile - 1) * error)
return tf.compat.v1.losses.compute_weighted_loss(
loss_tensor, weights, scope, loss_collection, reduction=reduction)
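if __name__ == '__main__':
  # Minimal usage sketch (illustrative, not part of the library). With
  # quantile=0.9, under-predictions are penalized nine times more than
  # over-predictions. Assumes TF2 eager execution, where the
  # loss_collection argument is ignored.
  y_true = tf.constant([1.0, 2.0, 3.0])
  y_pred = tf.constant([1.5, 1.5, 1.5])
  print(pinball_loss(y_true, y_pred, quantile=0.9))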
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import itertools
from typing import Optional
import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler
class InfiniteSampler(Sampler):
"""
In training, we only care about the "infinite stream" of training data.
So this sampler produces an infinite stream of indices and
all workers cooperate to correctly shuffle the indices and sample different indices.
    The sampler in each worker effectively produces `indices[worker_id::num_workers]`
where `indices` is an infinite stream of indices consisting of
`shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
or `range(size) + range(size) + ...` (if shuffle is False)
"""
def __init__(
self,
size: int,
shuffle: bool = True,
seed: Optional[int] = 0,
rank=0,
world_size=1,
):
"""
Args:
size (int): the total number of data of the underlying dataset to sample from
shuffle (bool): whether to shuffle the indices or not
seed (int): the initial seed of the shuffle. Must be the same
across all workers. If None, will use a random seed shared
among workers (require synchronization among all workers).
"""
self._size = size
assert size > 0
self._shuffle = shuffle
self._seed = int(seed)
if dist.is_available() and dist.is_initialized():
self._rank = dist.get_rank()
self._world_size = dist.get_world_size()
else:
self._rank = rank
self._world_size = world_size
def __iter__(self):
start = self._rank
yield from itertools.islice(
self._infinite_indices(), start, None, self._world_size
)
def _infinite_indices(self):
g = torch.Generator()
g.manual_seed(self._seed)
while True:
if self._shuffle:
yield from torch.randperm(self._size, generator=g)
else:
yield from torch.arange(self._size)
def __len__(self):
return self._size // self._world_size
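if __name__ == "__main__":
    # Minimal usage sketch (illustrative): on a single worker the sampler
    # yields an endless, seeded shuffle of range(size); pull a few indices
    # out of the infinite stream to inspect it.
    sampler = InfiniteSampler(size=5, shuffle=True, seed=0)
    stream = iter(sampler)
    print([int(next(stream)) for _ in range(10)])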
|
for i in range(10):
print("i", i)
    if i % 2 == 0:
continue
for j in range(10):
print("j", j) |
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
import string
from typing import (
Any,
Callable,
ContextManager,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import zipfile
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
from pandas.io.common import urlopen
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: Tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def randbool(size=(), p: float = 0.5):
return np.random.rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.
    Returns
    -------
    DataFrame
        A DataFrame with a DatetimeIndex and the following columns:
        * name : object dtype with string names
        * id : int dtype, Poisson-distributed around 1000
        * x, y : float dtype, uniform on [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
# pandas\_testing.py:1986: error: Cannot call function of unknown type
yield make_index_func(k=k) # type: ignore[operator]
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(np.random.randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(
np.random.randn(nper), index=makeDateIndex(nper, freq=freq), name=name
)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(np.random.randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. If True, will use default
        names; if False, will use no names; if a list is given, the name of
        each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level. You can specify
        just the first few; the rest will use the default ndupe_l of 1.
        len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "td" creates a timedelta index.
        If unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    assert names is None or names is False or names is True or len(names) == nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = {
"i": makeIntIndex,
"f": makeFloatIndex,
"s": makeStringIndex,
"u": makeUnicodeIndex,
"dt": makeDateIndex,
"td": makeTimedeltaIndex,
"p": makePeriodIndex,
}.get(idx_type)
if idx_func:
# pandas\_testing.py:2120: error: Cannot call function of unknown type
idx = idx_func(nentries) # type: ignore[operator]
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# pandas\_testing.py:2148: error: Need type annotation for 'cnt'
cnt = Counter() # type: ignore[var-annotated]
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
        default names, or uses the provided names for the levels of the
        corresponding index. You can provide a single string when
        c_idx_nlevels == 1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
        N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
        nrows/ncols, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
        If idx_type is not None, `idx_nlevels` must be 1.
        "i"/"f" creates an integer/float index,
        "s"/"u" creates a string/unicode index,
        "dt" creates a datetime index,
        "td" creates a timedelta index.
        If unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multindex on rows with names provided, 2-level multindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "density" [misc]
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "random_state" [misc]
i, j = _create_missing_idx( # type: ignore[misc]
*df.shape, density=density, random_state=random_state
)
df.values[i, j] = np.nan
return df
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
# pandas\_testing.py:2331: error: Incompatible types in assignment
# (expression has type "List[<nothing>]", variable has type
# "Tuple[Any, ...]")
args = [] # type: ignore[assignment]
return dec(f)
else:
return dec
return wrapper
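# Illustrative usage sketch for optional_args (hypothetical decorator names;
# not used elsewhere in this module): both the bare and the parametrized call
# styles resolve to decorator(f, *args, **kwargs).
@optional_args
def _tagged(f, label="default"):
    @wraps(f)
    def inner(*args, **kwargs):
        return label, f(*args, **kwargs)
    return inner
@_tagged
def _one():
    return 1  # _one() -> ("default", 1)
@_tagged(label="custom")
def _two():
    return 2  # _two() -> ("custom", 2)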
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
    Examples
    --------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
    Traceback (most recent call last):
    ...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if (
check_before_test
and not raise_on_error
and not can_connect(url, error_classes)
):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
# pandas\_testing.py:2521: error: "Exception" has no attribute
# "reason"
errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
filter_level="always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: Optional[str] = None,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of Exception raised. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
match : str, optional
Match warning message.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter(filter_level)
yield w
if expected_warning:
expected_warning = cast(Type[Warning], expected_warning)
_assert_caught_expected_warning(
caught_warnings=w,
expected_warning=expected_warning,
match=match,
check_stacklevel=check_stacklevel,
)
if raise_on_extra_warnings:
_assert_caught_no_extra_warnings(
caught_warnings=w,
expected_warning=expected_warning,
)
def _assert_caught_expected_warning(
*,
caught_warnings: Sequence[warnings.WarningMessage],
expected_warning: Type[Warning],
match: Optional[str],
check_stacklevel: bool,
) -> None:
"""Assert that there was the expected warning among the caught warnings."""
saw_warning = False
matched_message = False
for actual_warning in caught_warnings:
if issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
_assert_raised_with_correct_stacklevel(actual_warning)
if match is not None and re.search(match, str(actual_warning.message)):
matched_message = True
if not saw_warning:
raise AssertionError(
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
if match and not matched_message:
raise AssertionError(
f"Did not see warning {repr(expected_warning.__name__)} "
f"matching {match}"
)
def _assert_caught_no_extra_warnings(
*,
caught_warnings: Sequence[warnings.WarningMessage],
expected_warning: Optional[Union[Type[Warning], bool]],
) -> None:
"""Assert that no extra warnings apart from the expected ones are caught."""
extra_warnings = []
for actual_warning in caught_warnings:
if _is_unexpected_warning(actual_warning, expected_warning):
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if extra_warnings:
raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}")
def _is_unexpected_warning(
actual_warning: warnings.WarningMessage,
expected_warning: Optional[Union[Type[Warning], bool]],
) -> bool:
"""Check if the actual warning issued is unexpected."""
if actual_warning and not expected_warning:
return True
expected_warning = cast(Type[Warning], expected_warning)
return bool(not issubclass(actual_warning.category, expected_warning))
def _assert_raised_with_correct_stacklevel(
actual_warning: warnings.WarningMessage,
) -> None:
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[4][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
class RNGContext:
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
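# Illustrative usage sketch (hypothetical helper, defined but not invoked):
# run one function on two threads with per-thread kwargs. list.append is
# thread-safe, so the shared results list needs no lock.
def _demo_test_parallel():
    results = []
    @test_parallel(num_threads=2, kwargs_list=[{"tag": "a"}, {"tag": "b"}])
    def work(tag):
        results.append(tag)
    work()
    return sorted(results)  # -> ["a", "b"]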
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
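# Example (illustrative): convert_rows_list_to_csv_str(["a,b", "1,2"]) returns
# "a,b" + os.linesep + "1,2" + os.linesep, matching to_csv output on this OS.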
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three items (DataFrame, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
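# Illustrative (defined but not invoked): reverse ops resolve by swapping
# operands, so the function returned for "rsub" computes y - x for op(x, y).
def _demo_get_op_from_name():
    assert get_op_from_name("add")(2, 3) == 5
    assert get_op_from_name("rsub")(2, 5) == 3  # operator.sub(5, 2)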
# -----------------------------------------------------------------------------
# Indexing test helpers
def getitem(x):
return x
def setitem(x):
return x
def loc(x):
return x.loc
def iloc(x):
return x.iloc
|
import os
import matplotlib.pyplot as plt
import sys
import re
def parse_logs_to_lists(logs_path):
with open(logs_path, 'r') as f:
lines = f.readlines()
loss_list = []
psnr_noise_list = []
psnr_gt_list = []
iter_list = []
for line in lines:
if 'Iteration' not in line:
continue
        cur_iter, cur_loss, cur_psnr_noisy, cur_psnr_gt, _ = re.findall(r"[-+]?\d*\.\d+|\d+",
                                                                        line[line.find('INFO'):-1])
        loss_list.append(float(cur_loss))
        psnr_noise_list.append(float(cur_psnr_noisy))
        psnr_gt_list.append(float(cur_psnr_gt))
iter_list.append(int(cur_iter))
return loss_list, psnr_noise_list, psnr_gt_list, iter_list
if __name__ == '__main__':
log_file_path = sys.argv[1]
dst_path = sys.argv[2]
plt.figure(figsize=(7, 7))
loss_list, psnr_noise_list, psnr_gt_list, iter_list = parse_logs_to_lists(log_file_path)
    psnr_gt_handle = plt.plot(iter_list, psnr_gt_list, label='psnr_gt')
    psnr_noise_handle = plt.plot(iter_list, psnr_noise_list, label='psnr_noise_gt')
plt.legend()
plt.title('PSNR-GT vs PSNR-NOISE-GT')
plt.savefig(os.path.join(dst_path, 'psnrs.png'))
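    # Added sketch (optional): also save the loss curve, which
    # parse_logs_to_lists already returns but the original script discarded.
    plt.figure(figsize=(7, 7))
    plt.plot(iter_list, loss_list, label='loss')
    plt.legend()
    plt.title('Training loss')
    plt.savefig(os.path.join(dst_path, 'loss.png'))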
plt.show()
|
from io import BytesIO
from typing import TYPE_CHECKING
from xml import sax
if TYPE_CHECKING:
from ..environment import Environment
class XmlParser(sax.ContentHandler):
def __init__(self, environment: 'Environment'):
self.environment = environment
self.template = None
self.act_parent = None
def parse(self, source: bytes, template_name: str):
self.template = self.environment.template_class()
self.act_parent = None
parser = sax.make_parser()
#parser.setFeature(sax.handler.feature_external_pes, False)
#parser.setFeature(sax.handler.feature_external_ges, False)
parser.setFeature(sax.handler.feature_namespaces, True)
#parser.setProperty(sax.handler.property_lexical_handler, self)
parser.setContentHandler(self)
isource = sax.xmlreader.InputSource()
isource.setByteStream(BytesIO(source))
isource.setSystemId(template_name)
parser.parse(isource)
return self.template
def startElementNS(self, name, qname, attrs):
ns, name = name
if ns:
fqdn = '{' + ns + '}' + name
else:
fqdn = name
klass = self.environment.registry.get_class_by_name(fqdn)
element = klass(xml_tag=name, xml_attrs=attrs)
if self.act_parent is None:
assert self.template.root_element is None
self.template.root_element = self.act_parent = element
else:
self.act_parent.add_child(element)
self.act_parent = element
def endElementNS(self, *args, **kwargs):
self.act_parent = self.act_parent.parent
def characters(self, content):
if len(content.strip()) == 0:
return
content = sax.saxutils.escape(content).strip()
element = self.environment.registry.anonymus_element_klass(text=content)
self.act_parent.add_child(element)
|
#!/usr/bin/env python
import os
import sys, time, math, cmath
from std_msgs.msg import String
import numpy as np
import roslib
import rospy
from hlpr_object_labeling.msg import LabeledObjects
from hlpr_knowledge_retrieval.msg import ObjectKnowledge, ObjectKnowledgeArray
def get_param(name, value=None):
private = "~%s" % name
if rospy.has_param(private):
return rospy.get_param(private)
elif rospy.has_param(name):
return rospy.get_param(name)
else:
return value
class lookup:
def __init__(self):
self.subscriber = rospy.Subscriber("/beliefs/labels", LabeledObjects, self.cbLabels, queue_size = 1)
        self.knowPub = rospy.Publisher("/beliefs/knowledge", ObjectKnowledge, queue_size=1)
fileref = get_param("data_file_location")
if fileref is not None:
self.filename = os.path.expanduser(fileref)
self.readObjectKnowledge(self.filename)
print "Reading knowledge data from " + self.filename
else:
self.filename = None
topicref = get_param("data_file_rostopic")
if topicref is not None:
self.rostopic = os.path.expanduser(topicref)
self.fileSub = rospy.Subscriber(self.rostopic, String, self.cbFile, queue_size = 1)
def cbFile(self, ros_data):
        if self.filename != ros_data.data:
self.filename = ros_data.data
self.readObjectKnowledge(self.filename)
self.initialized = True
print "Reading knowledge data from " + self.filename
def readObjectKnowledge(self, filename):
lines = None
with open(filename) as f:
lines = f.readlines()
dictionaries = []
knowledgeTypes = []
for l in lines:
newDict = dict()
lineList = l[:-1].split(";")
knowledgeTypes.append(lineList[0])
for o in lineList[1:]:
ftList = o.split(":")[1]
newDict[o.split(":")[0]] = ftList
dictionaries.append(newDict)
self.knowledgeTypes = knowledgeTypes
self.dictionaries = dictionaries
def cbLabels(self, ros_data):
labels = ros_data.labels
if labels is None or self.filename is None:
return
messages = []
idx = 0
for kType in self.knowledgeTypes:
#affList = affList + self.affDictionary[label.rsplit(None,1)[-1]] + ','
message = []
for label in labels:
msg = String()
if label.data in self.dictionaries[idx]:
msg.data = self.dictionaries[idx][label.data]
message.append(msg)
else:
print "Object " + label.data + " not found in knowledge source"
idx += 1
arrMsg = ObjectKnowledgeArray()
arrMsg.data = message
messages.append(arrMsg)
kmsg = ObjectKnowledge()
kmsg.knowledge = messages
kmsg.labels = labels
self.knowPub.publish(kmsg)
def main(args):
rospy.init_node('knowledge_retrieval', anonymous=True)
lkup = lookup()
rospy.spin()
if __name__ == '__main__':
main(sys.argv)
|
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "d27457c3",
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import math\n",
"class Node:\n",
" def __init__(self, l):\n",
" self.label = l\n",
" self.branch = {}\n",
"def entropy(data):\n",
" total_ex = len(data)\n",
" p_ex = len(data.loc[data['PlayTennis']=='Yes'])\n",
" n_ex = len(data.loc[data['PlayTennis']=='No'])\n",
" en = 0\n",
" if(p_ex>0):\n",
" en = -(p_ex/float(total_ex)) * (math.log(p_ex,2)-math.log(total_ex,2))\n",
" if(n_ex>0):\n",
" en += -(n_ex/float(total_ex)) * (math.log(n_ex,2)-math.log(total_ex,2))\n",
" return en\n",
"def gain(en_s,data_s,attrib):\n",
" values = set(data_s[attrib])\n",
" gain = en_s\n",
" for value in values:\n",
" gain -= len(data_s.loc[data_s[attrib]==value])/float(len(data_s)) *\n",
"entropy(data_s.loc[data_s[attrib]==value])\n",
" return gain\n",
"def get_attr(data):\n",
" en_s = entropy(data)\n",
" attribute = \"\"\n",
" max_gain = 0\n",
" for attr in data.columns[:len(data.columns)-1]:\n",
" g = gain(en_s, data, attr)\n",
" if g > max_gain:\n",
" max_gain = g\n",
" attribute = attr\n",
" return attribute\n",
"def decision_tree(data):\n",
" root = Node(\"NULL\")\n",
" if(entropy(data)==0):\n",
" if(len(data.loc[data[data.columns[-1]]==\"Yes\"]) == len(data)):\n",
" root.label = \"Yes\"\n",
" else:\n",
" root.label = \"No\"\n",
" return root\n",
" if(len(data.columns)==1):\n",
" return\n",
" else:\n",
" attr = get_attr(data)\n",
" root.label = attr\n",
" values = set(data[attr])\n",
" for value in values:\n",
" root.branch[value] =decision_tree(data.loc[data[attr]==value].drop(attr,axis=1))\n",
" return root\n",
" \n",
"def get_rules(root, rule, rules):\n",
" if not root.branch:\n",
" rules.append(rule[:-1]+\"=>\"+root.label)\n",
" return rules\n",
" for val in root.branch:\n",
" get_rules(root.branch[val], rule+root.label+\"=\"+str(val)+\"^\", rules)\n",
" return rules\n",
"def test(tree, test_str):\n",
" if not tree.branch:\n",
" return tree.label\n",
" return test(tree.branch[str(test_str[tree.label])], test_str)\n",
"data = pd.read_csv(\"tennis.csv\")\n",
"tree = decision_tree(data)\n",
"rules = get_rules(tree,\" \",[])\n",
"for rule in rules:\n",
" print(rule)\n",
"test_str = {}\n",
"print(\"Enter the test case input: \")\n",
"for attr in data.columns[:-1]:\n",
" test_str[attr] = input(attr+\": \")\n",
"print(test_str)\n",
"print(test(tree, test_str))\n"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
|
from rest_framework.status import HTTP_400_BAD_REQUEST
ERROR_ADMIN_LISTING_INVALID_SORT_DIRECTION = (
"ERROR_ADMIN_LISTING_INVALID_SORT_DIRECTION",
HTTP_400_BAD_REQUEST,
"Attributes to sort by must be prefixed with one of '-' or '+'.",
)
ERROR_ADMIN_LISTING_INVALID_SORT_ATTRIBUTE = (
"ERROR_ADMIN_LISTING_INVALID_SORT_ATTRIBUTE",
HTTP_400_BAD_REQUEST,
"Invalid attribute name provided to sort by.",
)
|
import os

import pytest
from unittest.mock import MagicMock
from coworks.fixture import *
from tests.mockup import email_mock, smtp_mock
@pytest.fixture
def example_dir():
return os.getenv('EXAMPLE_DIR')
@pytest.fixture
def samples_docs_dir():
return os.getenv('SAMPLES_DOCS_DIR')
@pytest.fixture
def email_mock_fixture():
yield email_mock
@pytest.fixture
def smtp_mock_fixture():
yield smtp_mock
s3session_mock = MagicMock()
@pytest.fixture
def s3_session():
class S3Session:
mock = s3session_mock
def __new__(cls, *args, **kwargs):
return s3session_mock
yield S3Session
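# S3Session is a stand-in class: constructing it always returns the shared
# s3session_mock, so tests can assert on recorded calls via S3Session.mock.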
|
# Correlations from Churchill1974 paper
# see section "Functions Which Cross One Limiting Solution", pg. 41
# plot should be same as plot in Fig. 9, pg. 41
# use Python 3 print function
from __future__ import print_function
from __future__ import division
import numpy as np
import matplotlib.pyplot as py
py.close('all')
# parameters
# -----------------------------------------------------------------------------
# range of x = 0-6
x = np.linspace(np.spacing(1), 6)
# vector to store zinf values
zinf = np.zeros(len(x))
# vector to store z at n=4 values
z4 = np.zeros(len(x))
# vector to store z at n=2 values
z2 = np.zeros(len(x))
# vector to store z at n=1 values
z1 = np.zeros(len(x))
# vector to store z at n=3/4 values
z34 = np.zeros(len(x))
# functions
# -----------------------------------------------------------------------------
# where zi{x} = x, z{inf} = 1, xA = 5, alpha = 2
# zinf{x} function as seen in Fig. 9 and Eq. 8
# zinf{x} = 1 + (5/x)^2
def z_inf(x):
return 1 + (5.0 / x)**2
# z{x} function as seen in Fig. 9 and Eq. 9
# z{x} = x / ( [1 + t^n]^(1/n) ) where t = x / (1 + (5/x)^2)
def z(x, n):
t = x / (1 + (5.0/x)**2)
return x / (( 1 + t**n )**(1/n))
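# quick sanity check of the limiting behaviour: t -> 0 as x -> 0 so z{x} -> x,
# and t -> x as x -> inf so z{x} -> 1
assert abs(z(0.01, 4) - 0.01) < 1e-9
assert abs(z(1e6, 4) - 1.0) < 1e-6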
# calculations
# -----------------------------------------------------------------------------
# evaluate zi{x} = x
zi = x
# evaluate zinf{x}
k = 0
for i in x:
zinf[k] = z_inf(i)
k+=1
# evaluate z{x} for n = 4, 2, 1 and 3/4
k = 0
for i in x:
z4[k] = z(i, 4)
z2[k] = z(i, 2)
z1[k] = z(i, 1)
z34[k] = z(i, 0.75)
k+=1
# plot results
# -----------------------------------------------------------------------------
py.figure(1)
py.plot(x, zinf, 'g--', label=r'z$_\infty${x}')
py.plot(x, zi, 'b--', label=r'z$_0${x}')
py.plot(x, z4, 'y', label='z{x} n=4')
py.plot(x, z2, 'r', label='z{x} n=2')
py.plot(x, z1, 'm', label='z{x} n=1')
py.plot(x, z34, 'c', label='z{x} n=3/4')
py.axhline(y=1, color='k', label=r'z{$\infty$}=1')
py.ylim([0, 4])
py.legend(loc='best', numpoints=1)
py.xlabel('X')
py.ylabel('Z')
py.title('Churchill1974 - Figure 9\n'+r'z$_0${x}=x, z{$\infty$}=1, x$_A$=5, $\alpha$=2')
py.grid()
py.show()
|
import tensorflow as tf
from data_loader.data_generator import DataGenerator
from scripts.gan import GAN
from scripts.gan_trainer import GANTrainer
from utils.summarizer import Summarizer
from utils.dirs import create_dirs
def main(config):
    # config is an already-processed configuration object (built in __main__)
    # create the experiment dirs
create_dirs([config.summary_dir, config.checkpoint_dir, config.step_generation_dir])
# create tensorflow session
sess = tf.Session()
# create your data generator
data = DataGenerator(config)
# create an instance of the model you want
model = GAN(config)
# create tensorboard logger
logger = Summarizer(sess, config)
# create trainer and pass all the previous components to it
trainer = GANTrainer(sess, model, data, config, logger)
# load model if exists
model.load(sess)
# here you train your model
trainer.train()
if __name__ == "__main__":
    # build the config from the run arguments; process_config and get_args are
    # assumed to exist as in the project template this file follows
    from utils.config import process_config
    from utils.utils import get_args
    main(process_config(get_args().config))
|
import os
from app import create_app
app = create_app()
app.run(
host=os.environ.get("BIND_HOST", '0.0.0.0'),
port=os.environ.get("BIND_PORT", 80),
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#MIT License (MIT)
#Copyright (c) <2014> <Rapp Project EU>
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
# Authors: Manos Tsardoulias
# contact: [email protected]
import rospy
import sys
import mmap
class GreekSupport():
def __init__(self):
pass
def configureLetters(self):
self.f_base_pre = [u'π', u'τ', u'κ', u'θ', u'χ', u'σ', u'ξ', u'ψ']
self.f_base = []
for l in self.f_base_pre:
self.f_base.append(l.encode('utf-8'))
        self.v_base_pre = [u'δ', u'γ', u'ζ', u'λ', u'ρ', u'μ', u'ν', u'α', u'ά', u'ε',\
            u'έ', u'η', u'ή', u'ι', u'ί', u'ϊ', u'ΐ', u'ο', u'ό', u'υ', u'ύ', u'ϋ',\
            u'ΰ', u'ω', u'ώ']
self.v_base = []
for l in self.v_base_pre:
self.v_base.append(l.encode('utf-8'))
self.capital_letters = {}
self.capital_letters[(u'Α').encode('utf-8')] = (u'α').encode('utf-8')
self.capital_letters[(u'Ά').encode('utf-8')] = (u'ά').encode('utf-8')
self.capital_letters[(u'Β').encode('utf-8')] = (u'β').encode('utf-8')
self.capital_letters[(u'Γ').encode('utf-8')] = (u'γ').encode('utf-8')
self.capital_letters[(u'Δ').encode('utf-8')] = (u'δ').encode('utf-8')
self.capital_letters[(u'Ε').encode('utf-8')] = (u'ε').encode('utf-8')
self.capital_letters[(u'Έ').encode('utf-8')] = (u'έ').encode('utf-8')
self.capital_letters[(u'Ζ').encode('utf-8')] = (u'ζ').encode('utf-8')
self.capital_letters[(u'Η').encode('utf-8')] = (u'η').encode('utf-8')
self.capital_letters[(u'Ή').encode('utf-8')] = (u'ή').encode('utf-8')
self.capital_letters[(u'Θ').encode('utf-8')] = (u'θ').encode('utf-8')
self.capital_letters[(u'Ι').encode('utf-8')] = (u'ι').encode('utf-8')
self.capital_letters[(u'Ί').encode('utf-8')] = (u'ί').encode('utf-8')
self.capital_letters[(u'Ϊ').encode('utf-8')] = (u'ϊ').encode('utf-8')
self.capital_letters[(u'Κ').encode('utf-8')] = (u'κ').encode('utf-8')
self.capital_letters[(u'Λ').encode('utf-8')] = (u'λ').encode('utf-8')
self.capital_letters[(u'Μ').encode('utf-8')] = (u'μ').encode('utf-8')
self.capital_letters[(u'Ν').encode('utf-8')] = (u'ν').encode('utf-8')
self.capital_letters[(u'Ξ').encode('utf-8')] = (u'ξ').encode('utf-8')
self.capital_letters[(u'Ο').encode('utf-8')] = (u'ο').encode('utf-8')
self.capital_letters[(u'Ό').encode('utf-8')] = (u'ό').encode('utf-8')
self.capital_letters[(u'Π').encode('utf-8')] = (u'π').encode('utf-8')
self.capital_letters[(u'Ρ').encode('utf-8')] = (u'ρ').encode('utf-8')
self.capital_letters[(u'Σ').encode('utf-8')] = (u'σ').encode('utf-8')
self.capital_letters[(u'Τ').encode('utf-8')] = (u'τ').encode('utf-8')
        self.capital_letters[(u'Υ').encode('utf-8')] = (u'υ').encode('utf-8')
self.capital_letters[(u'Ύ').encode('utf-8')] = (u'ύ').encode('utf-8')
self.capital_letters[(u'Ϋ').encode('utf-8')] = (u'ϋ').encode('utf-8')
self.capital_letters[(u'Φ').encode('utf-8')] = (u'φ').encode('utf-8')
self.capital_letters[(u'Χ').encode('utf-8')] = (u'χ').encode('utf-8')
self.capital_letters[(u'Ψ').encode('utf-8')] = (u'ψ').encode('utf-8')
self.capital_letters[(u'Ω').encode('utf-8')] = (u'ω').encode('utf-8')
self.capital_letters[(u'Ώ').encode('utf-8')] = (u'ώ').encode('utf-8')
self.phonems = {}
self.phonems[(u'ου').encode('utf-8')] = 'UW '
self.phonems[(u'ού').encode('utf-8')] = 'UW '
self.phonems[(u'μπ').encode('utf-8')] = 'B '
self.phonems[(u'ντ').encode('utf-8')] = 'D '
self.phonems[(u'γκ').encode('utf-8')] = 'G ' #?
self.phonems[(u'γγ').encode('utf-8')] = 'G ' #?
self.phonems[(u'τσ').encode('utf-8')] = 'CH ' #?
self.phonems[(u'τζ').encode('utf-8')] = 'JH ' #?
self.phonems[(u'σσ').encode('utf-8')] = 'S ' #?
self.phonems[(u'κκ').encode('utf-8')] = 'K '
self.two_digit_letters = {}
self.two_digit_letters[(u'αι').encode('utf-8')] = 'EH '
self.two_digit_letters[(u'αί').encode('utf-8')] = 'EH '
self.two_digit_letters[(u'ει').encode('utf-8')] = 'IH '
self.two_digit_letters[(u'εί').encode('utf-8')] = 'IH '
self.two_digit_letters[(u'οι').encode('utf-8')] = 'IH '
self.two_digit_letters[(u'οί').encode('utf-8')] = 'IH '
self.two_digit_letters[(u'υι').encode('utf-8')] = 'IH '
self.two_digit_letters[(u'υί').encode('utf-8')] = 'IH '
self.special_two_digit_letters = []
self.special_two_digit_letters.append((u'αυ').encode('utf-8'))
self.special_two_digit_letters.append((u'αύ').encode('utf-8'))
self.special_two_digit_letters.append((u'ευ').encode('utf-8'))
self.special_two_digit_letters.append((u'εύ').encode('utf-8'))
self.special_two_digit_letters_v = {}
self.special_two_digit_letters_v[(u'αυ').encode('utf-8')] = (u'αβ').encode('utf-8')
self.special_two_digit_letters_v[(u'αύ').encode('utf-8')] = (u'άβ').encode('utf-8')
self.special_two_digit_letters_v[(u'ευ').encode('utf-8')] = (u'εβ').encode('utf-8')
self.special_two_digit_letters_v[(u'εύ').encode('utf-8')] = (u'έβ').encode('utf-8')
self.special_two_digit_letters_f = {}
self.special_two_digit_letters_f[(u'αυ').encode('utf-8')] = (u'αφ').encode('utf-8')
self.special_two_digit_letters_f[(u'αύ').encode('utf-8')] = (u'άφ').encode('utf-8')
self.special_two_digit_letters_f[(u'ευ').encode('utf-8')] = (u'εφ').encode('utf-8')
self.special_two_digit_letters_f[(u'εύ').encode('utf-8')] = (u'έφ').encode('utf-8')
self.all_special_two_digit_letters = {}
for tdl in self.special_two_digit_letters:
for fb in self.f_base:
self.all_special_two_digit_letters[tdl + fb] = \
self.special_two_digit_letters_f[tdl] + fb
for tdl in self.special_two_digit_letters:
for vb in self.v_base:
self.all_special_two_digit_letters[tdl + vb] = \
self.special_two_digit_letters_v[tdl] + vb
self.s_specific_rules = {}
self.s_specific_rules[(u'σγ').encode('utf-8')] = 'Z W '
self.s_specific_rules[(u'σβ').encode('utf-8')] = 'Z V '
self.s_specific_rules[(u'σδ').encode('utf-8')] = 'Z DH '
self.letters = {}
self.letters[(u'α').encode('utf-8')] = 'AA ' # when AE?
self.letters[(u'ά').encode('utf-8')] = 'AA '
self.letters[(u'β').encode('utf-8')] = 'V '
self.letters[(u'γ').encode('utf-8')] = 'W '
self.letters[(u'δ').encode('utf-8')] = 'DH '
self.letters[(u'ε').encode('utf-8')] = 'EH '
self.letters[(u'έ').encode('utf-8')] = 'EH '
self.letters[(u'ζ').encode('utf-8')] = 'Z '
self.letters[(u'η').encode('utf-8')] = 'IH '
self.letters[(u'ή').encode('utf-8')] = 'IH '
self.letters[(u'θ').encode('utf-8')] = 'TH '
self.letters[(u'ι').encode('utf-8')] = 'IH '
self.letters[(u'ί').encode('utf-8')] = 'IH '
self.letters[(u'ϊ').encode('utf-8')] = 'IH '
self.letters[(u'ΐ').encode('utf-8')] = 'IH '
self.letters[(u'κ').encode('utf-8')] = 'K '
self.letters[(u'λ').encode('utf-8')] = 'L '
self.letters[(u'μ').encode('utf-8')] = 'M '
self.letters[(u'ν').encode('utf-8')] = 'N '
self.letters[(u'ξ').encode('utf-8')] = 'K S '
self.letters[(u'ο').encode('utf-8')] = 'OW '
self.letters[(u'ό').encode('utf-8')] = 'OW '
self.letters[(u'π').encode('utf-8')] = 'P '
self.letters[(u'ρ').encode('utf-8')] = 'R '
self.letters[(u'σ').encode('utf-8')] = 'S '
self.letters[(u'τ').encode('utf-8')] = 'T '
self.letters[(u'υ').encode('utf-8')] = 'IH '
self.letters[(u'ύ').encode('utf-8')] = 'IH '
self.letters[(u'ϋ').encode('utf-8')] = 'IH '
self.letters[(u'ΰ').encode('utf-8')] = 'IH '
self.letters[(u'φ').encode('utf-8')] = 'F '
self.letters[(u'χ').encode('utf-8')] = 'HH '
self.letters[(u'ψ').encode('utf-8')] = 'P S '
self.letters[(u'ω').encode('utf-8')] = 'OW '
self.letters[(u'ώ').encode('utf-8')] = 'OW '
self.letters[(u'ς').encode('utf-8')] = 'S '
self.literal_letters = {}
self.literal_letters[(u'α').encode('utf-8')] = 'a' # when AE?
self.literal_letters[(u'ά').encode('utf-8')] = 'a\''
self.literal_letters[(u'β').encode('utf-8')] = 'b'
self.literal_letters[(u'γ').encode('utf-8')] = 'g'
self.literal_letters[(u'δ').encode('utf-8')] = 'd'
self.literal_letters[(u'ε').encode('utf-8')] = 'e'
self.literal_letters[(u'έ').encode('utf-8')] = 'e\''
self.literal_letters[(u'ζ').encode('utf-8')] = 'z'
self.literal_letters[(u'η').encode('utf-8')] = 'h'
self.literal_letters[(u'ή').encode('utf-8')] = 'h\''
self.literal_letters[(u'θ').encode('utf-8')] = 'th'
self.literal_letters[(u'ι').encode('utf-8')] = 'i'
self.literal_letters[(u'ί').encode('utf-8')] = 'i\''
self.literal_letters[(u'ϊ').encode('utf-8')] = 'i:'
self.literal_letters[(u'ΐ').encode('utf-8')] = 'i\':'
self.literal_letters[(u'κ').encode('utf-8')] = 'k'
self.literal_letters[(u'λ').encode('utf-8')] = 'l'
self.literal_letters[(u'μ').encode('utf-8')] = 'm'
self.literal_letters[(u'ν').encode('utf-8')] = 'n'
self.literal_letters[(u'ξ').encode('utf-8')] = 'ks'
self.literal_letters[(u'ο').encode('utf-8')] = 'o'
self.literal_letters[(u'ό').encode('utf-8')] = 'o\''
self.literal_letters[(u'π').encode('utf-8')] = 'p'
self.literal_letters[(u'ρ').encode('utf-8')] = 'r'
self.literal_letters[(u'σ').encode('utf-8')] = 's'
self.literal_letters[(u'ς').encode('utf-8')] = 's\''
self.literal_letters[(u'τ').encode('utf-8')] = 't'
self.literal_letters[(u'υ').encode('utf-8')] = 'u'
self.literal_letters[(u'ύ').encode('utf-8')] = 'u\''
self.literal_letters[(u'ϋ').encode('utf-8')] = 'u:'
self.literal_letters[(u'ΰ').encode('utf-8')] = 'u\':'
self.literal_letters[(u'φ').encode('utf-8')] = 'f'
self.literal_letters[(u'χ').encode('utf-8')] = 'x'
self.literal_letters[(u'ψ').encode('utf-8')] = 'ps'
self.literal_letters[(u'ω').encode('utf-8')] = 'w'
self.literal_letters[(u'ώ').encode('utf-8')] = 'w\''
self.all_greek_letters = [\
u'Α', u'Ά', u'α', u'ά',\
u'Β', u'β',\
u'Γ', u'γ',\
u'Δ', u'δ',\
u'Ε', u'Έ', u'ε', u'έ',\
u'Ζ', u'ζ',\
u'Η', u'Ή', u'η', u'ή',\
u'Θ', u'θ',\
u'I', u'Ί', u'Ϊ', u'ι', u'ί', u'ϊ', u'ΐ',\
u'Κ', u'κ',\
u'Λ', u'λ',\
u'Μ', u'μ',\
u'Ν', u'ν',\
u'Ξ', u'ξ',\
u'Ο', u'Ό', u'ο', u'ό',\
u'Π', u'π',\
u'Ρ', u'ρ',\
u'Σ', u'σ', u'ς',\
u'Τ', u'τ',\
u'Υ', u'Ύ', u'Ϋ', u'υ', u'ύ', u'ϋ', u'ΰ',\
u'Φ', u'φ',\
u'Χ', u'χ',\
u'Ψ', u'ψ',\
u'Ω', u'Ώ', u'ω', u'ώ',\
u' '\
]
#tmp = []
#for l in self.all_greek_letters:
#tmp.append(l.encode('utf-8'))
#self.all_greek_letters = tmp
def transformWords(self, words):
enhanced_words = {}
englified_words = {}
for word in words:
initial_word = word
# transform capital letters
for cap in self.capital_letters:
initial_word = initial_word.replace(cap, self.capital_letters[cap])
# fix english version of letters
eng_w = initial_word
for lit in self.literal_letters:
eng_w = eng_w.replace(lit, self.literal_letters[lit])
englified_words[eng_w] = word
# check phonems
for ph in self.phonems:
initial_word = initial_word.replace(ph, self.phonems[ph])
# check special two digit letters
for stdl in self.all_special_two_digit_letters:
initial_word = initial_word.replace(stdl, \
self.all_special_two_digit_letters[stdl])
# check two-digit letters
for let in self.two_digit_letters:
initial_word = initial_word.replace(let, self.two_digit_letters[let])
# check specific rules
for sr in self.s_specific_rules:
initial_word = initial_word.replace(sr, self.s_specific_rules[sr])
# check the rest of the letters
for l in self.letters:
initial_word = initial_word.replace(l, self.letters[l])
enhanced_words[eng_w] = []
temp = initial_word.split(' ')
if len(temp) > 0:
temp = temp[:-1]
enhanced_words[eng_w] = temp
return [enhanced_words, englified_words]
def englify_words(self, words):
englified_words = []
for word in words:
eng_w = word
for lit in self.literal_letters:
eng_w = eng_w.replace(lit, self.literal_letters[lit])
englified_words.append(eng_w)
return englified_words
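# Usage sketch (illustrative): after configureLetters(), transformWords maps
# utf-8 Greek words to phoneme lists keyed by their englified form, e.g.
#   g = GreekSupport(); g.configureLetters()
#   enh, engl = g.transformWords([u'καλημέρα'.encode('utf-8')])
# yields the key "kalhme'ra" with phonemes ['K', 'AA', 'L', 'IH', 'M', 'EH', 'R', 'AA']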
def main():
# The Greek support helper class
greek = GreekSupport()
greek.configureLetters()
# Open corpus file
corpus_file = open("corpus.txt")
corpus = corpus_file.read()
corpus_file.close()
# Split the corpus into expressions / sentences
split_chars = [".", ",", ")", "(", "\"", "[", "]", ":"]
sentences = [corpus]
for c in split_chars:
tmp = []
for s in sentences:
tmp += s.split(c)
sentences = tmp
# Erase all other words that are not Greek valid letters
to_be_erased = []
for s in sentences:
for l in s.decode('utf-8'):
if l not in greek.all_greek_letters:
if l not in to_be_erased:
to_be_erased.append(l)
tmp = []
for s in sentences:
if s == '':
continue
tmp_s = s.decode('utf-8')
for l in to_be_erased:
tmp_s = tmp_s.replace(l, "")
# Transform capital letters to lower case
tmp_w = tmp_s.encode('utf-8')
for cl in greek.capital_letters:
tmp_w = tmp_w.replace(cl, greek.capital_letters[cl])
tmp.append(tmp_w)
sentences = tmp
initial_sentences = tmp
# Sentences and initial sentences are unicode now
# Break the sentences into words
words = []
for s in sentences:
tmp_words = s.split(" ")
for w in tmp_words:
if w == "":
continue
if w not in words:
words.append(w)
# Words in utf-8 now
# Transform words in phonemes
[enh, engl] = greek.transformWords(words)
# Open custom.dict and write the words
custom_dict = open('custom.dict', 'w')
for w in enh:
custom_dict.write(w + " ")
for ph in enh[w]:
custom_dict.write(ph + " ")
custom_dict.write("\n")
custom_dict.close()
# Create the transliteration file
translit_file = open('transliteration.txt', 'w')
for i in engl:
translit_file.write(i + " " + engl[i] + "\n")
translit_file.close()
# Create the sentences file
sentences_file = open('sentences.txt', 'w')
# Create reverse translit dictionary
reverse_translit = {}
for tr in engl:
reverse_translit[engl[tr]] = tr
#print engl[tr], tr
# Create translit sentences
translit_sentences = []
for s in initial_sentences:
words = s.split(" ")
new_sentence = ""
for w in words:
if w == '':
continue
            new_sentence += reverse_translit[w] + " "
sentences_file.write("<s>" + new_sentence + "</s>\n")
sentences_file.close()
if __name__ == "__main__":
main()
|
import re, os, datetime
file_shell_rex = "data_orig/200[0-9]_[0-9][0-9]_[a-z]*/*.2[0-9][0-9]"
site_rex = re.compile("^\*SITE NUMB\s*ER: ([0-9\s]+)$")
channel_rex = re.compile("^\*CHANNEL\s*:\s+([0-9]+)\s+OF")
date_rex = re.compile("^DATE\s+([0-9]?[0-9])\s*/([0-1]?[0-9])/([0-3]?[0-9])\s[0-9\s]+AVERAGE$")
count_rex = re.compile("[12]?[0-9]\s+[0-9-]+\s+[0-9-]+\s+[0-9-]+\s+[0-9-]+\s+[0-9-]+\s+[0-9-]+\s+[0-9-]+\s+[0-9-]+")
totals_rex = re.compile("^TOTALS$")
ifilenames = [s.strip() for s in os.popen("ls %s" % file_shell_rex).readlines()]
def ilines(ifilenames):
for ifilename in ifilenames:
ifile = open(ifilename) # pypy requires explicit close
for line in ifile:
if len(line)>2 and line[-1]=="\n": yield ifilename, line.strip()
ifile.close()
yield None, 'eof'
def events(lines):
for ifilename, line in lines:
if line=="eof":
yield 'eof', None
else:
m = re.match(site_rex, line)
if m:
yield 'site', (int(re.sub("\s*", "", m.groups()[0])), ifilename)
else:
m = re.match(channel_rex, line)
if m:
yield 'channel', (int(m.groups()[0]), ifilename)
else:
m = re.match(date_rex, line)
if m:
yield 'date', m.groups()
else:
m = re.match(count_rex, line)
if m:
try:
fields = [int(i) if i != "-" else None for i in line.split()]
yield 'hourly', (fields[0], fields[1:8])
except ValueError:
yield 'error', line
else:
m = re.match(totals_rex, line)
if m:
yield 'eod', None
def data_lines(evs):
    errors = []
    # initialise state so error/eod events before the first site/date are safe
    filename, site, channel, date = None, None, None, None
    hours, hdata = set(), {}
for evt, info in evs:
if evt=="error":
errors.append("%s %s %s format %s"%(filename, site, date, info))
continue
elif evt=="eof":
date, site, hdata = None, None, {}
elif evt=="site":
site, filename = info
channel = '0'
elif evt=="channel":
channel = info[0]
elif evt=="date":
date = info
hours, hdata = set(xrange(24)), {}
elif evt=="hourly":
if date==None or site==None:
continue
else:
hour, counts = info
if hour in hours:
hours.remove(hour)
hdata[hour] = counts
else:
errors.append("%s %s %s %s duplicate hour"%(filename, site, date, hour))
elif evt=="eod":
if len(hours)==0:
for hour, counts in hdata.iteritems():
for iday, count in enumerate(counts):
yield errors, (filename, site, channel, date, iday, hour, count)
else:
errors.append("%s %s %s incomplete"%(filename, site, date))
hdata = {}
def date_vars(dtuple, iday):
year, month, day = [int(i) for i in dtuple]
date = datetime.datetime(year+2000, month, day, 0, 0) + datetime.timedelta(int(iday))
days = (date - datetime.datetime(2003, 12, 1, 0, 0)).days
return date.strftime("%Y-%m-%d"), days, date.strftime("%A"), date.timetuple().tm_yday
print "file site channel date day weekday yday hour count"
for errors, (filename, site, channel, dtuple, iday, hour, count) in data_lines(events(ilines(ifilenames))):
print "%s %s %s %s %s %s %s %s %s" % (
(filename, site, channel) + date_vars(dtuple, iday) + (hour, "NA" if count==None else count))
logfile = open("model_data/errors.log", "w")
for error in errors:
print >>logfile, error
logfile.close()
|
#!/usr/bin/env python3
import rogue.hardware.rce
import pyrogue.mesh
import surf
import rceg3
class rce(object):
def __init__(self):
# Set base
self.dpmTest = pyrogue.Root('dpmTest','DPM Test Image')
# Create the AXI interfaces
        self.rceMap = rogue.hardware.rce.MapMemory()
self.rceMap.addMap(0x80000000,0x10000)
self.rceMap.addMap(0x84000000,0x10000)
# Add Devices
self.dpmTest.add(rceg3.RceVersion(memBase=self.rceMap))
# Create mesh node
self.mNode = pyrogue.mesh.MeshNode('rce',iface='eth0',root=self.dpmTest)
self.mNode.start()
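# Usage sketch (illustrative): instantiating the class maps the RCE registers
# and serves the device tree over the mesh network, e.g. node = rce()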
|
S = input()
# AC iff S starts with 'A', exactly one 'C' occurs in S[2:-1],
# and every remaining character is lowercase
if S[0] == 'A' and S[2:-1].count('C') == 1:
    index_c = S[2:-1].index('C')
    S = S[1:2 + index_c] + S[3 + index_c:]  # drop the 'A' and that 'C'
if S.islower():
print('AC')
exit()
print('WA')
|
# NOTE: written against the Keras 1.x API (initializations, consume_less);
# the imports below follow that version's module layout
from keras import activations, initializations
from keras import backend as K
from keras.engine import InputSpec
from keras.layers import Embedding, TimeDistributed, RepeatVector, LSTM
from keras.layers.recurrent import time_distributed_dense
class AttentionLSTM(LSTM):
"""LSTM with attention mechanism
This is an LSTM incorporating an attention mechanism into its hidden states.
Currently, the context vector calculated from the attended vector is fed
into the model's internal states, closely following the model by Xu et al.
(2016, Sec. 3.1.2), using a soft attention model following
Bahdanau et al. (2014).
The layer expects two inputs instead of the usual one:
1. the "normal" layer input; and
2. a 3D vector to attend.
Args:
attn_activation: Activation function for attentional components
attn_init: Initialization function for attention weights
output_alpha (boolean): If true, outputs the alpha values, i.e.,
what parts of the attention vector the layer attends to at each
timestep.
References:
* Bahdanau, Cho & Bengio (2014), "Neural Machine Translation by Jointly
Learning to Align and Translate", <https://arxiv.org/pdf/1409.0473.pdf>
* Xu, Ba, Kiros, Cho, Courville, Salakhutdinov, Zemel & Bengio (2016),
"Show, Attend and Tell: Neural Image Caption Generation with Visual
Attention", <http://arxiv.org/pdf/1502.03044.pdf>
See Also:
`LSTM`_ in the Keras documentation.
.. _LSTM: http://keras.io/layers/recurrent/#lstm
"""
def __init__(self, *args, attn_activation='tanh', attn_init='orthogonal',
output_alpha=False, **kwargs):
self.attn_activation = activations.get(attn_activation)
self.attn_init = initializations.get(attn_init)
self.output_alpha = output_alpha
super().__init__(*args, **kwargs)
def build(self, input_shape):
if not (isinstance(input_shape, list) and len(input_shape) == 2):
raise Exception('Input to AttentionLSTM must be a list of '
'two tensors [lstm_input, attn_input].')
input_shape, attn_input_shape = input_shape
super().build(input_shape)
self.input_spec.append(InputSpec(shape=attn_input_shape))
# weights for attention model
self.U_att = self.inner_init((self.output_dim, self.output_dim),
name='{}_U_att'.format(self.name))
self.W_att = self.attn_init((attn_input_shape[-1], self.output_dim),
name='{}_W_att'.format(self.name))
self.v_att = self.init((self.output_dim, 1),
name='{}_v_att'.format(self.name))
self.b_att = K.zeros((self.output_dim,), name='{}_b_att'.format(self.name))
self.trainable_weights += [self.U_att, self.W_att, self.v_att, self.b_att]
# weights for incorporating attention into hidden states
if self.consume_less == 'gpu':
self.Z = self.init((attn_input_shape[-1], 4 * self.output_dim),
name='{}_Z'.format(self.name))
self.trainable_weights += [self.Z]
else:
self.Z_i = self.attn_init((attn_input_shape[-1], self.output_dim),
name='{}_Z_i'.format(self.name))
self.Z_f = self.attn_init((attn_input_shape[-1], self.output_dim),
name='{}_Z_f'.format(self.name))
self.Z_c = self.attn_init((attn_input_shape[-1], self.output_dim),
name='{}_Z_c'.format(self.name))
self.Z_o = self.attn_init((attn_input_shape[-1], self.output_dim),
name='{}_Z_o'.format(self.name))
self.trainable_weights += [self.Z_i, self.Z_f, self.Z_c, self.Z_o]
self.Z = K.concatenate([self.Z_i, self.Z_f, self.Z_c, self.Z_o])
# weights for initializing states based on attention vector
if not self.stateful:
self.W_init_c = self.attn_init((attn_input_shape[-1], self.output_dim),
name='{}_W_init_c'.format(self.name))
self.W_init_h = self.attn_init((attn_input_shape[-1], self.output_dim),
name='{}_W_init_h'.format(self.name))
self.b_init_c = K.zeros((self.output_dim,),
name='{}_b_init_c'.format(self.name))
self.b_init_h = K.zeros((self.output_dim,),
name='{}_b_init_h'.format(self.name))
self.trainable_weights += [self.W_init_c, self.b_init_c,
self.W_init_h, self.b_init_h]
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
def get_output_shape_for(self, input_shape):
# output shape is not affected by the attention component
return super().get_output_shape_for(input_shape[0])
def compute_mask(self, input, input_mask=None):
if input_mask is not None:
input_mask = input_mask[0]
return super().compute_mask(input, input_mask=input_mask)
def get_initial_states(self, x_input, x_attn, mask_attn):
# set initial states from mean attention vector fed through a dense
# activation
mean_attn = K.mean(x_attn * K.expand_dims(mask_attn), axis=1)
h0 = K.dot(mean_attn, self.W_init_h) + self.b_init_h
c0 = K.dot(mean_attn, self.W_init_c) + self.b_init_c
return [self.attn_activation(h0), self.attn_activation(c0)]
def call(self, x, mask=None):
assert isinstance(x, list) and len(x) == 2
x_input, x_attn = x
if mask is not None:
mask_input, mask_attn = mask
else:
mask_input, mask_attn = None, None
# input shape: (nb_samples, time (padded with zeros), input_dim)
input_shape = self.input_spec[0].shape
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of timesteps of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
if self.stateful:
initial_states = self.states
else:
initial_states = self.get_initial_states(x_input, x_attn, mask_attn)
constants = self.get_constants(x_input, x_attn, mask_attn)
preprocessed_input = self.preprocess_input(x_input)
last_output, outputs, states = K.rnn(self.step, preprocessed_input,
initial_states,
go_backwards=self.go_backwards,
mask=mask_input,
constants=constants,
unroll=self.unroll,
input_length=input_shape[1])
if self.stateful:
self.updates = []
for i in range(len(states)):
self.updates.append((self.states[i], states[i]))
if self.return_sequences:
return outputs
else:
return last_output
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
x_attn = states[4]
mask_attn = states[5]
attn_shape = self.input_spec[1].shape
#### attentional component
# alignment model
# -- keeping weight matrices for x_attn and h_s separate has the advantage
# that the feature dimensions of the vectors can be different
h_att = K.repeat(h_tm1, attn_shape[1])
att = time_distributed_dense(x_attn, self.W_att, self.b_att)
energy = self.attn_activation(K.dot(h_att, self.U_att) + att)
energy = K.squeeze(K.dot(energy, self.v_att), 2)
# make probability tensor
alpha = K.exp(energy)
if mask_attn is not None:
alpha *= mask_attn
alpha /= K.sum(alpha, axis=1, keepdims=True)
alpha_r = K.repeat(alpha, attn_shape[2])
alpha_r = K.permute_dimensions(alpha_r, (0, 2, 1))
# make context vector -- soft attention after Bahdanau et al.
z_hat = x_attn * alpha_r
z_hat = K.sum(z_hat, axis=1)
if self.consume_less == 'gpu':
z = K.dot(x * B_W[0], self.W) + K.dot(h_tm1 * B_U[0], self.U) \
+ K.dot(z_hat, self.Z) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim: 2 * self.output_dim]
z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
else:
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
elif self.consume_less == 'mem':
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
else:
raise Exception('Unknown `consume_less` mode.')
z0 = x_i + K.dot(h_tm1 * B_U[0], self.U_i) + K.dot(z_hat, self.Z_i)
z1 = x_f + K.dot(h_tm1 * B_U[1], self.U_f) + K.dot(z_hat, self.Z_f)
z2 = x_c + K.dot(h_tm1 * B_U[2], self.U_c) + K.dot(z_hat, self.Z_c)
z3 = x_o + K.dot(h_tm1 * B_U[3], self.U_o) + K.dot(z_hat, self.Z_o)
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
h = o * self.activation(c)
if self.output_alpha:
return alpha, [h, c]
else:
return h, [h, c]
def get_constants(self, x_input, x_attn, mask_attn):
constants = super().get_constants(x_input)
attn_shape = self.input_spec[1].shape
if mask_attn is not None:
if K.ndim(mask_attn) == 3:
mask_attn = K.all(mask_attn, axis=-1)
constants.append(x_attn)
constants.append(mask_attn)
return constants
def get_config(self):
cfg = super().get_config()
cfg['output_alpha'] = self.output_alpha
cfg['attn_activation'] = self.attn_activation.__name__
return cfg
@classmethod
def from_config(cls, config):
instance = super(AttentionLSTM, cls).from_config(config)
if 'output_alpha' in config:
instance.output_alpha = config['output_alpha']
if 'attn_activation' in config:
instance.attn_activation = activations.get(config['attn_activation'])
        return instance
|
import os
if not os.environ.get("AZURE_TESTING") and not is_re_worker_active():
new_user()
show_global_para()
run_pdf()
read_calib_file_new() # read file from: /nsls2/data/fxi-new/legacy/log/calib_new.csv
# check_latest_scan_id(init_guess=60000, search_size=100)
|
"""Hello World Class."""
class HelloWorld:
"""Class Hello World!"""
def __init__(self, my_name="Hello World!"):
"""Initializer for the HelloWorld class"""
self.my_name = my_name
def print_name(self):
"""Prints the value of my_name"""
        print(self.my_name)
|
__version__ = "0.1.0"
from .config import Config
from .database import Database
__all__ = ["Database", "Config"]
|
from dcache_nagios_plugins.urlopen import urlopen
try: from xml.etree import cElementTree as etree
except ImportError:
from xml.etree import ElementTree as etree
# The full XML data is available from /info. The document is wrappend in
# dCache. Subelements can also be fetched independently under a sub-URL
# composed from the element names, at least down to a certain level. The
# toplevel elements are:
#
# doors
# summary
# linkgroups
# unitgroups
# domains
# links
# nas
# pools
# units
# reservations
# poolgroups
class DCacheTags(object):
def __getattr__(self, name):
return '{http://www.dcache.org/2008/01/Info}' + name
DCACHE = DCacheTags()
class PoolInfo(object):
def __init__(self, name):
self.name = name
self.enabled = None
self.read_only = None
self.last_heartbeat = None
self.poolgrouprefs = []
self.space_total = None
self.space_break_even = None
self.space_precious = None
self.space_removable = None
self.space_gap = None
self.space_LRU_seconds = None
self.space_used = None
self.space_free = None
class PoolgroupInfo(object):
def __init__(self, name, linkrefs=None, poolrefs=None):
self.name = name
self.linkrefs = linkrefs or []
self.space_total = None
self.space_free = None
self.space_removable = None
self.space_precious = None
self.space_used = None
self.poolrefs = poolrefs or []
@property
def available_space(self):
return self.space_removable + self.space_free
@property
def nonprecious_space(self):
return self.space_total - self.space_precious
def __repr__(self):
return 'PoolgroupInfo(%r, %d, %d, %d, %d, %d, {%s}, {%s})' \
% (self.name,
self.space_total,
self.space_free,
self.space_removable,
self.space_precious,
self.space_used,
', '.join(self.linkrefs),
', '.join(self.poolrefs))
def _scan_metric(metric_elt):
t = metric_elt.get('type')
s = metric_elt.text
if t == 'boolean':
x = {'true': True, 'false': False}[s]
elif t == 'integer':
x = int(s)
elif t == 'float':
x = float(s)
else:
raise AssertionError('Unsupported type %s.'%t)
return (metric_elt.get('name'), x)
def load_pools(url, certkey=None, cert=None):
fh = urlopen(url, certkey = certkey, cert = cert)
doc = etree.parse(fh)
for e_p in doc.findall('.//' + DCACHE.pools + '/' + DCACHE.pool):
name = e_p.get('name')
metrics = dict(map(_scan_metric, e_p.findall(DCACHE.metric)))
p = PoolInfo(name)
p.enabled = metrics.get('enabled')
p.read_only = metrics.get('read-only')
p.last_heartbeat = metrics.get('last-heartbeat')
p.poolgrouprefs = [e.get('name') for e in
e_p.findall(DCACHE.poolgroups + '/' + DCACHE.poolgroupref)]
e_space = e_p.find(DCACHE.space)
        if e_space is not None:
space_metrics = dict(map(_scan_metric, e_space.findall(DCACHE.metric)))
p.space_total = space_metrics.get('total')
p.space_break_even = space_metrics.get('break-even')
p.space_precious = space_metrics.get('precious')
p.space_removable = space_metrics.get('removable')
p.space_gap = space_metrics.get('gap')
p.space_LRU_seconds = space_metrics.get('LRU-seconds')
p.space_used = space_metrics.get('used')
p.space_free = space_metrics.get('free')
yield p
fh.close()
def load_pool(url, certkey = None, cert = None):
pools = list(load_pools(url, certkey = certkey, cert = cert))
if len(pools) == 1:
return pools[0]
elif len(pools) == 0:
return None
else:
raise RuntimeError('Request for single pool gave %d results.' % len(pools))
def load_domain_poolnames(info_url, certkey=None, cert = None):
fh = urlopen(info_url + '/domains', certkey = certkey, cert = cert)
doc = etree.parse(fh)
for domain_ele in doc.findall(DCACHE.domains + '/' + DCACHE.domain):
dn = domain_ele.get('name')
pns = set()
for pool_ele in domain_ele.findall(DCACHE.cells + '/' + DCACHE.cell):
for metric_ele in pool_ele.findall(DCACHE.metric):
if metric_ele.get('name') == 'class':
if metric_ele.text == 'Pool':
pns.add(pool_ele.get('name'))
break
if len(pns) > 0:
yield dn, pns
fh.close()
def load_domain_of_pool_dict(info_url, certkey = None, cert = None):
data = load_domain_poolnames(info_url, certkey = certkey, cert = cert)
return dict((pn, dn) for dn, pns in data for pn in pns)
def load_pools_of_domain_dict(info_url, certkey = None, cert = None):
data = load_domain_poolnames(info_url, certkey = certkey, cert = cert)
return dict((dn, pns) for dn, pns in data)
def load_poolgroups(url, certkey = None, cert = None):
fh = urlopen(url, certkey = certkey, cert = cert)
doc = etree.parse(fh)
for e_g in doc.findall('.//' + DCACHE.poolgroup):
name = e_g.get('name')
linkrefs = [e.get('name') for e in
e_g.findall(DCACHE.links + '/' + DCACHE.linkref)]
poolrefs = [e.get('name') for e in
e_g.findall(DCACHE.pools + '/' + DCACHE.poolref)]
space = dict(map(_scan_metric, e_g.findall(DCACHE.space+'/'+DCACHE.metric)))
pg = PoolgroupInfo(name, linkrefs = linkrefs, poolrefs = poolrefs)
pg.space_total = space['total']
pg.space_free = space['free']
pg.space_removable = space['removable']
pg.space_precious = space['precious']
pg.space_used = space['used']
yield pg
fh.close()
def load_poolgroup(url, certkey = None, cert = None):
poolgroups = list(load_poolgroups(url, certkey = certkey, cert = cert))
if len(poolgroups) == 1:
return poolgroups[0]
elif len(poolgroups) == 0:
return None
else:
raise RuntimeError('Request for a single pool group gave %d entries.'
% len(poolgroups))
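# Usage sketch (hypothetical endpoint, for illustration):
#   for pg in load_poolgroups('https://dcache.example.org:3131/info/poolgroups'):
#       print('%s: %d bytes available' % (pg.name, pg.available_space))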
|
import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions
from werkzeug.security import check_password_hash, generate_password_hash
from time import sleep
from helpers import apology, login_required, lookup, usd
# API_KEY: FEZQRI69L2T14I0I
# export API_KEY=FEZQRI69L2T14I0I
# Ensure environment variable is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
app.jinja_env.globals.update(usd=usd)
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
uid = session.get("user_id")
# retrieves stock details from database
cash = db.execute("SELECT cash FROM users WHERE id = :uid", uid=uid)
portfolio = db.execute("SELECT * FROM portfolio WHERE id = :uid", uid=uid)
add = cash[0]['cash']
value = {}
# retrieves each record for a particular stock
for record in portfolio:
comp = record['company']
        while True:
            look = lookup(comp)
            if look is None:
                sleep(2)
                continue
            value[comp] = look['price']
            break
add += value[comp] * record['stocks']
return render_template("index.html", cash=usd(cash[0]['cash']),
portfolio=portfolio, value=value, add=usd(add))
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == "POST":
symbol = request.form.get("symbol")
        while True:
            company = lookup(symbol)
            if company is None:
                sleep(2)
                continue
            break
shares = request.form.get("shares")
# check for valid entry in form submission
        if company is None:
            return apology("Enter valid stock name")
        if not shares or not shares.isdigit() or int(shares) < 1:
            return apology("Enter valid share amount")
uid = session.get("user_id")
query = db.execute("SELECT username FROM users WHERE id = :uid", uid=uid)
uname = query[0]['username']
cash = db.execute("SELECT cash FROM users WHERE id = :userid", userid=uid)
total_price = company['price'] * float(shares)
comp = company['symbol']
price = company['price']
# checks if enough cash is there to purchase the stock
if cash[0]['cash'] > total_price:
res = db.execute("SELECT * FROM portfolio WHERE id = :uid AND company = :comp",
uid=uid, comp=comp)
if len(res) == 0:
db.execute("INSERT INTO portfolio (id, company, stocks) VALUES \
(:uid, :company, :stocks)", uid=uid, company=comp, stocks=shares)
else:
db.execute("UPDATE portfolio SET stocks = :stocks WHERE \
id = :uid AND company = :comp", stocks=int(shares) + res[0]['stocks'], uid=uid,
comp=comp)
db.execute("UPDATE users SET cash = cash - :total WHERE id = :uid",
uid=uid, total=total_price)
db.execute("INSERT INTO history (id, company, stocks, price) VALUES (:uid, :company, \
:stocks, :price)", uid=uid, company=comp, stocks=shares, price=price)
else:
return apology("Not enough cash left for your request")
sleep(1)
return redirect("/")
else:
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
uid = session.get("user_id")
# gets history from a separate table for listing
history = db.execute("SELECT * FROM history WHERE id = :uid", uid=uid)
return render_template("history.html", history=history)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
if request.method == "POST":
qte = lookup(request.form.get("symbol"))
        if qte is None:
return apology("INVALID REQUEST")
else:
return render_template("quoted.html", qte=qte)
else:
return render_template("quote.html")
@app.route("/reset", methods=["GET", "POST"])
@login_required
def reset():
"""Resets Password"""
if request.method == "POST":
oldpass = request.form.get("oldpass")
newpass = request.form.get("newpass")
uid = session.get("user_id")
if not oldpass:
return apology("Enter old password")
if not newpass:
return apology("Enter new password")
row = db.execute("SELECT * FROM users WHERE id = :uid", uid=uid)
oldhash = row[0]['hash']
newhash = generate_password_hash(newpass)
# checks if old password matches for verification
if not check_password_hash(oldhash, oldpass):
return apology("Passwords don't match")
db.execute("UPDATE users SET hash = :newhash WHERE id = :uid", newhash=newhash, uid=uid)
sleep(1)
return redirect("/")
else:
return render_template("reset.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
session.clear()
if request.method == "POST":
name = request.form.get("username")
password = request.form.get("password")
confirm = request.form.get("confirmation")
if not name:
return apology("Username Required")
if not password:
return apology("Password Required")
if not confirm:
return apology("Confirmation Required")
if password != confirm:
return apology("Passwords not matching")
passhash = generate_password_hash(password)
result = db.execute("INSERT INTO users (username, hash) VALUES (:username, :hash)", username=name, hash=passhash)
if not result:
            return apology("User already exists")
return redirect("/")
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
if request.method == "POST":
symbol = request.form.get("symbol")
        if symbol is None:
return apology("Select stock")
shares = request.form.get("shares")
        if not shares or not shares.isdigit() or int(shares) < 1:
return apology("Enter valid share amount")
        while True:
            company = lookup(symbol)
            if company is None:
                sleep(2)
                continue
            break
uid = session.get("user_id")
query = db.execute("SELECT username FROM users WHERE id = :uid", uid=uid)
uname = query[0]['username']
cash = db.execute("SELECT cash FROM users WHERE id = :userid", userid=uid)
comp = request.form.get("symbol")
res = db.execute("SELECT * FROM portfolio WHERE id = :uid AND company = :comp",
uid=uid, comp=comp)
if res[0]['stocks'] >= int(shares):
price = company['price']
total_price = company['price'] * float(shares)
db.execute("UPDATE portfolio SET stocks = :stocks WHERE \
id = :uid AND company = :comp", stocks=res[0]['stocks'] - int(shares), uid=uid,
comp=comp)
db.execute("UPDATE users SET cash = cash + :total WHERE id = :uid",
uid=uid, total=total_price)
db.execute("INSERT INTO history (id, company, stocks, price) VALUES (:uid, :company, \
:stocks, :price)", uid=uid, company=comp, stocks=int(shares) * -1, price=price)
else:
return apology("Not enough shares with you")
sleep(1)
return redirect("/")
else:
uid = session.get("user_id")
query = db.execute("SELECT * FROM portfolio WHERE id = :uid", uid=uid)
companies = []
for company in query:
companies.append(company['company'])
return render_template("sell.html", companies=companies)
def errorhandler(e):
"""Handle error"""
return apology(e.name, e.code)
# listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-\
import os
import unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorlayerx
import tensorlayerx as tlx
import numpy as np
from tests.utils import CustomTestCase
class Layer_nested(CustomTestCase):
@classmethod
def setUpClass(cls):
print("##### begin testing nested layer #####")
@classmethod
def tearDownClass(cls):
pass
def test_nested_layer_with_inchannels(cls):
class MyLayer(tensorlayerx.nn.Module):
def __init__(self, name=None):
super(MyLayer, self).__init__(name=name)
self.input_layer = tlx.nn.Linear(in_features=50, out_features=20)
self.build(None)
self._built = True
def build(self, inputs_shape=None):
self.W = self._get_weights('weights', shape=(20, 10))
def forward(self, inputs):
inputs = self.input_layer(inputs)
output = tlx.ops.matmul(inputs, self.W)
return output
class model(tensorlayerx.nn.Module):
def __init__(self, name=None):
super(model, self).__init__(name=name)
self.layer = MyLayer()
def forward(self, inputs):
return self.layer(inputs)
input = tlx.nn.Input(shape=(100, 50))
model_dynamic = model()
model_dynamic.set_train()
cls.assertEqual(model_dynamic(input).shape, (100, 10))
cls.assertEqual(len(model_dynamic.all_weights), 3)
cls.assertEqual(len(model_dynamic.trainable_weights), 3)
model_dynamic.layer.input_layer.b.assign_add(tlx.ones((20, )))
cls.assertEqual(np.sum(model_dynamic.all_weights[-1].numpy() - tlx.ones(20, ).numpy()), 0)
if __name__ == '__main__':
tlx.logging.set_verbosity(tlx.logging.DEBUG)
unittest.main()
|
"""
lokalise.models.translation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Module containing translation model.
"""
from .base_model import BaseModel
class TranslationModel(BaseModel):
"""Describes translation model.
"""
DATA_KEY = 'translation'
ATTRS = [
'translation_id',
'key_id',
'language_iso',
'modified_at',
'modified_at_timestamp',
'modified_by',
'modified_by_email',
'translation',
'is_fuzzy',
'is_reviewed',
'reviewed_by',
'words',
'custom_translation_statuses',
'task_id'
]
|
from django.contrib.auth.models import User
from django.forms import model_to_dict
from django.urls import include, path, reverse
from rest_framework import status
from rest_framework.test import APITestCase, URLPatternsTestCase, APIClient
from oldp.apps.annotations.models import AnnotationLabel, CaseAnnotation
class AnnotationsAPITestCase(APITestCase, URLPatternsTestCase):
fixtures = [
'users/with_password_unittest.json', # password=unittest
'locations/countries.json',
'locations/states.json',
'locations/cities.json',
'courts/courts.json',
'cases/cases.json',
'annotations/labels.json',
]
urlpatterns = [
path('api/', include('oldp.api.urls')),
]
username = 'test'
password = 'test'
dummy_label = AnnotationLabel(
name='Some label',
slug='some-label',
trusted=False,
private=True,
)
dummy_annotation = CaseAnnotation(
belongs_to_id=1,
label_id=2,
value_str='Some annotation value'
)
def setUp(self):
self.user = User.objects.create_user(self.username, '[email protected]', self.password)
self.admin_client = APIClient()
self.admin_client.force_authenticate(user=User.objects.get(pk=1))
self.owner_client = APIClient()
self.owner_client.force_authenticate(user=User.objects.get(pk=2))
super().setUp()
def tearDown(self):
self.user.delete()
super().tearDown()
def test_create_case_annotation(self):
dummy_data = model_to_dict(self.dummy_annotation)
res = self.owner_client.post(reverse('caseannotation-list'), data=dummy_data, format='json')
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
created_id = res.data['id']
# second time -> expect error: duplicated annotation
res = self.owner_client.post(reverse('caseannotation-list'), data=dummy_data, format='json')
# print(res.data['label'])
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
self.assertTrue('label' in res.data, 'Error should be for `label` field')
def test_create_case_annotation_as_guest(self):
dummy_data = model_to_dict(self.dummy_annotation)
res = self.client.post(reverse('caseannotation-list'), data=dummy_data, format='json')
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_read_as_guest(self):
# GET list
res = self.client.get(reverse('annotationlabel-list'), format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data['results']), 1)
# print(res.data['results'])
# GET public
res = self.client.get(reverse('annotationlabel-detail', args=(1, )), format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
# GET private
res = self.client.get(reverse('annotationlabel-detail', args=(2,)), format='json')
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
def test_read_as_owner(self):
res = self.owner_client.get(reverse('annotationlabel-list'), format='json')
# print(res.data)
self.assertEqual(len(res.data['results']), 3)
# GET private
res = self.owner_client.get(reverse('annotationlabel-detail', args=(2,)), format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
def test_write_as_guest(self):
# Create
res = self.client.post(reverse('annotationlabel-list'), data=model_to_dict(self.dummy_label), format='json')
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
# Update
res = self.client.put(reverse('annotationlabel-detail', args=(2,)))
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
# Delete
res = self.client.delete(reverse('annotationlabel-detail', args=(2,)))
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
def test_write_as_owner(self):
dummy_data = model_to_dict(self.dummy_label)
# Create
res = self.owner_client.post(reverse('annotationlabel-list'), data=dummy_data, format='json')
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
created_id = res.data['id']
# Partial update
updated_name = 'Updated name'
res = self.owner_client.patch(reverse('annotationlabel-detail', args=(created_id,)), data={'name': updated_name}, format='json')
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data['name'], updated_name)
# Delete
res = self.owner_client.delete(reverse('annotationlabel-detail', args=(created_id,)))
self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)
# Get to deleted item
res = self.owner_client.get(reverse('annotationlabel-detail', args=(created_id,)))
self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)
|
from . import expedia
from . import scraper
import datetime
import requests
import json
def timeScorer(s):
    # parse a clock string like "7:45am" or "12:30pm" into minutes past midnight
    if len(s) == 7:
        hours = int(s[0:2])
        minutes = int(s[3:5])
    else:
        hours = int(s[0:1])
        minutes = int(s[2:4])
    hours = hours % 12  # 12:xx am/pm wraps to 0 before any pm offset
    score = hours * 60 + minutes
    if "pm" in s:
        score += 720
    return score
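# e.g. timeScorer("7:45am") == 465 and timeScorer("12:30pm") == 750,
# counting minutes from midnight with the 12-hour wrap handled above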
def getTime(s):
return s.get('flight_hour') * 60
def getDistance(org, dest):
URL = "https://www.distance24.org/route.json?stops="
URL = URL + org + "|" + dest
r = requests.get(url = URL)
data = r.json()
return data["distance"]
def mergeSort(arr):
if len(arr) >1:
mid = len(arr)//2 #Finding the mid of the array
L = arr[:mid] # Dividing the array elements
R = arr[mid:] # into 2 halves
mergeSort(L) # Sorting the first half
mergeSort(R) # Sorting the second half
i = j = k = 0
# Copy data to temp arrays L[] and R[]
while i < len(L) and j < len(R):
if L[i]["rank"] < R[j]["rank"]:
arr[k] = L[i]
i+=1
else:
arr[k] = R[j]
j+=1
k+=1
# Checking if any element was left
while i < len(L):
arr[k] = L[i]
i+=1
k+=1
while j < len(R):
arr[k] = R[j]
j+=1
k+=1
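# mergeSort orders the pairing dicts in place, ascending by "rank";
# functionally equivalent to pairings.sort(key=lambda p: p["rank"])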
def runner(source, destination, date, weight_time, weight_price):
formatDate = datetime.datetime.strptime(date, "%d/%m/%Y").strftime("%Y-%m-%d")
scraper.callPeoria(formatDate)
    busses = scraper.peoria(formatDate)
    flights = expedia.parse(source, destination, date)
pairings = []
fuel = getDistance(source, destination)
for bus in busses:
busTime = timeScorer(bus)
for flight in flights:
flightTime = timeScorer(flight.get('departure_time'))
if (flightTime - busTime) > 420 and (flightTime - busTime) < 600:
pairingCurrent = {
'departure_airport':flight.get('departure_airport'),
'departure_time':flight.get('departure_time'),
'price': flight.get('price'),
'aircraft':flight.get('aircraft'),
'flight_number': flight.get('flight_number'),
'airline': flight.get('airline'),
'arrival_airport': flight.get('arrival_airport'),
'arrival_time':flight.get('arrival_time'),
'bus_time': bus,
'flight_hour': flight.get('flight_hour'),
'flight_minute': flight.get('flight_minute'),
'rank': 0.0,
'fuel': 0.0
}
                with open("projectFly/assets/FlightFuelData.json", "r") as f:
                    flightData = json.loads(f.read())
pairingCurrent["fuel"] = float(fuel) / 100.0
for x in range(0,len(flightData)):
if flightData[x]["model"] == pairingCurrent["aircraft"]:
pairingCurrent["fuel"] = float(fuel) * float(flightData[x]["fuel"]) / 100.0
break
pairings.append(pairingCurrent)
    if not pairings:
        return pairings  # nothing matched; avoids ZeroDivisionError below
    sum_price = 0
    sum_time = 0
for x in range(0,len(pairings)):
sum_time = sum_time + float(pairings[x]["flight_hour"])
sum_price = sum_price + float(pairings[x]["price"])
average_time = float(sum_time)/float(len(pairings))
average_price = float(sum_price)/float(len(pairings))
print(average_price)
print(average_time)
for x in range(0,len(pairings)):
pairings[x]["rank"] = ((float(pairings[x]["flight_hour"])) * weight_time / average_time) + ((float(pairings[x]["price"])) * weight_price / average_price)
mergeSort(pairings)
for x in range(0,len(pairings)):
print(pairings[x]["rank"])
    return pairings
|
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-n', type=int, help='number of iterations')
parser.add_argument('-x', type=float, help='X0')
parser.add_argument('-s', type=float, help='step')
parser.add_argument('-a', type=str, help='calculate automaticaly')
parser.add_argument('-m', type=float, help='maximum x value')
parser.add_argument('-e', type=float, help='epsilon')
args = parser.parse_args()
auto = False
A = np.array([[-500.005, 499.995], [499.995, -500.005]])
N = 10000
x = 0
xmax = 800
h = 0.001
eps = 0.00001
if args.n:
N = args.n
if args.x:
x = args.x
if args.s:
h = args.s
if args.a:
    auto = args.a != 'False'
if args.m:
xmax = args.m
if args.e:
eps = args.e
p = 2 # order of the method
def U(x):
    a1 = np.array([[1], [1]]) # vector (1, 1)
    a2 = np.array([[1], [-1]]) # vector (1, -1)
    temp1 = np.multiply(10.0, a1) # multiply 10 by the vector (1, 1)
    temp1 = np.multiply(temp1, np.exp(-0.01 * x)) # multiply the result by e to the power -0.01 * x
    temp2 = np.multiply(3.0, a2) # multiply 3 by the vector (1, -1)
    temp2 = np.multiply(temp2, np.exp(-1000 * x)) # multiply the result by e to the power -1000 * x
    return np.subtract(temp1, temp2) # subtract the second vector from the first
def rk(v, h):
    E = np.array([[1, 0], [0, 1]]) # 2x2 identity matrix
    temp = np.multiply(h / 2, A) # multiply half the step by the matrix A
    temp = np.subtract(E, temp) # subtract that matrix from the identity
    temp = np.linalg.inv(temp) # invert the matrix
    temp = np.multiply(h, temp) # multiply the step by the inverse matrix
    temp = temp.dot(A.dot(v)) # multiply by the product A v
    return np.add(v, temp) # add v and the resulting vector temp
v = U(x)
print('i: ' + str(0) + ', ' \
'x: ' + str(x) + ', ' \
'h: ' + str(h) + ', ' \
'v:' + '[' + str(float(v[0])) + ', ' + str(float(v[1])) + ']\n')
Hprev = h
Vstep = v.copy()
Vhalf = v.copy()
Vprev = v.copy()
i = 1
c1 = 0
c2 = 0
max_dif = 0
while i <= N:
Hprev = h
Vstep = rk(Vprev, h)
Vhalf = rk(Vprev, h * 0.5)
Vhalf = rk(Vhalf, h * 0.5)
S1 = float((Vhalf[0] - Vstep[0]) / (np.power(2, p) - 1))
S2 = float((Vhalf[1] - Vstep[1]) / (np.power(2, p) - 1))
S = S1 if np.abs(S1) > np.abs(S2) else S2
e = U(x + h)
x += h
if np.abs(S) > eps:
x -= h
h *= 0.5
c1 += 1
continue
elif np.abs(S) < (eps / np.power(2, p + 1)):
c2 += 1
h *= 2.0
    print('\n\ni: ' + str(i) + ', x: ' + str(x) + ', h: ' + str(Hprev) + ':\n')
    print('Solution computed by the implicit 2nd-order method:\n')
    print('\t' + '[' + str(float(Vstep[0])) + ', ' + str(float(Vstep[1])) + ']\n')
    print('Exact solution at point x:\n')
    print('\t[' + str(float(e[0])) + ', ' + str(float(e[1])) + ']\n')
    print('|V(x) - Vexact(x)|:\n')
    print('\t[' + str(abs(float(e[0]) - float(Vstep[0]))) + ', ' + str(abs(float(e[1]) - float(Vstep[1]))) + ']\n')
    print('||V(x) - Vexact(x)||:\n')
    print('\t' + str(np.power(np.linalg.norm(np.subtract(e, Vstep)), 2)), '\n')
    print('S at point x:\n')
    print('\t' + str(S), '\n')
if not auto:
string = input()
if 'auto' == string:
auto = True
if x >= xmax:
break
Vprev = Vstep
i += 1
if np.power(np.linalg.norm(np.subtract(e, Vstep)), 2) > max_dif:
max_dif = np.power(np.linalg.norm(np.subtract(e, Vstep)), 2)
print('\n\nmax||V(x) - Vexact(x)||:\n')
print('\t' + str(max_dif), '\n')
print('Step decreases:', c1)
print('Step increases:', c2)
print()
exit(0)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
colors = vtk.vtkNamedColors()
# Set the background color.
colors.SetColor("BkgColor", [26, 51, 102, 255])
surface = vtk.vtkParametricSuperEllipsoid()
source = vtk.vtkParametricFunctionSource()
renderer = vtk.vtkRenderer()
mapper = vtk.vtkPolyDataMapper()
actor = vtk.vtkActor()
backProperty = vtk.vtkProperty()
backProperty.SetColor(colors.GetColor3d("Tomato"))
# Create a parametric function source, renderer, mapper, and actor
source.SetParametricFunction(surface)
mapper.SetInputConnection(source.GetOutputPort())
actor.SetMapper(mapper)
actor.SetBackfaceProperty(backProperty)
actor.GetProperty().SetDiffuseColor(colors.GetColor3d("Banana"))
actor.GetProperty().SetSpecular(.5)
actor.GetProperty().SetSpecularPower(20)
renderWindow = vtk.vtkRenderWindow()
renderWindow.SetWindowName("Parametric Objects Super Ellipsoid Demo")
renderWindow.AddRenderer(renderer)
renderWindow.SetSize(640, 480)
renderer.AddActor(actor)
renderer.SetBackground(colors.GetColor3d("BkgColor"))
renderer.ResetCamera()
renderer.GetActiveCamera().Azimuth(30)
renderer.GetActiveCamera().Elevation(-30)
renderer.GetActiveCamera().Zoom(0.9)
renderer.ResetCameraClippingRange()
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetRenderWindow(renderWindow)
# Setup a slider widget for each varying parameter
tubeWidth = 0.008
sliderLength = 0.008
titleHeight = 0.04
labelHeight = 0.04
sliderRepN1 = vtk.vtkSliderRepresentation2D()
sliderRepN1.SetMinimumValue(0.0)
sliderRepN1.SetMaximumValue(4.0)
sliderRepN1.SetValue(1.0)
sliderRepN1.SetTitleText("Z squareness")
sliderRepN1.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRepN1.GetPoint1Coordinate().SetValue(.1, .1)
sliderRepN1.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRepN1.GetPoint2Coordinate().SetValue(.9, .1)
sliderRepN1.SetTubeWidth(tubeWidth)
sliderRepN1.SetSliderLength(sliderLength)
sliderRepN1.SetTitleHeight(titleHeight)
sliderRepN1.SetLabelHeight(labelHeight)
sliderWidgetN1 = vtk.vtkSliderWidget()
sliderWidgetN1.SetInteractor(interactor)
sliderWidgetN1.SetRepresentation(sliderRepN1)
sliderWidgetN1.SetAnimationModeToAnimate()
sliderWidgetN1.EnabledOn()
sliderWidgetN1.AddObserver(vtk.vtkCommand.InteractionEvent, SliderCallbackN1(surface))
sliderRepN2 = vtk.vtkSliderRepresentation2D()
sliderRepN2.SetMinimumValue(0.0001)
sliderRepN2.SetMaximumValue(4.0)
sliderRepN2.SetValue(1.0)
sliderRepN2.SetTitleText("XY squareness")
sliderRepN2.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRepN2.GetPoint1Coordinate().SetValue(.1, .9)
sliderRepN2.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRepN2.GetPoint2Coordinate().SetValue(.9, .9)
sliderRepN2.SetTubeWidth(tubeWidth)
sliderRepN2.SetSliderLength(sliderLength)
sliderRepN2.SetTitleHeight(titleHeight)
sliderRepN2.SetLabelHeight(labelHeight)
sliderWidgetN2 = vtk.vtkSliderWidget()
sliderWidgetN2.SetInteractor(interactor)
sliderWidgetN2.SetRepresentation(sliderRepN2)
sliderWidgetN2.SetAnimationModeToAnimate()
sliderWidgetN2.EnabledOn()
sliderWidgetN2.AddObserver(vtk.vtkCommand.InteractionEvent, SliderCallbackN2(surface))
sliderRepMinimumV = vtk.vtkSliderRepresentation2D()
    sliderRepMinimumV.SetMinimumValue(.0001)
sliderRepMinimumV.SetMaximumValue(.9999 * vtk.vtkMath.Pi())
sliderRepMinimumV.SetValue(.0001)
sliderRepMinimumV.SetTitleText("V min")
sliderRepMinimumV.GetPoint1Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRepMinimumV.GetPoint1Coordinate().SetValue(.1, .1)
sliderRepMinimumV.GetPoint2Coordinate().SetCoordinateSystemToNormalizedDisplay()
sliderRepMinimumV.GetPoint2Coordinate().SetValue(.1, .9)
sliderRepMinimumV.SetTubeWidth(tubeWidth)
sliderRepMinimumV.SetSliderLength(sliderLength)
sliderRepMinimumV.SetTitleHeight(titleHeight)
sliderRepMinimumV.SetLabelHeight(labelHeight)
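    # A minimal sketch attaching the configured representation to a widget,
    # following the same pattern as the two sliders above. The widget name and
    # the SliderCallbackMinimumV class (defined below) are assumptions, not
    # part of the original file.
    sliderWidgetMinimumV = vtk.vtkSliderWidget()
    sliderWidgetMinimumV.SetInteractor(interactor)
    sliderWidgetMinimumV.SetRepresentation(sliderRepMinimumV)
    sliderWidgetMinimumV.SetAnimationModeToAnimate()
    sliderWidgetMinimumV.EnabledOn()
    sliderWidgetMinimumV.AddObserver(vtk.vtkCommand.InteractionEvent, SliderCallbackMinimumV(surface))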
surface.SetN1(1.0)
surface.SetN2(1.0)
renderer.ResetCamera()
renderer.GetActiveCamera().Azimuth(30)
renderer.GetActiveCamera().Elevation(-30)
renderer.GetActiveCamera().Zoom(0.9)
renderer.ResetCameraClippingRange()
renderWindow.Render()
interactor.Initialize()
interactor.Start()
# These callbacks do the actual work.
# Callbacks for the interactions
class SliderCallbackN1():
def __init__(self, superEllipsoid):
self.superEllipsoid = superEllipsoid
def __call__(self, caller, ev):
sliderWidget = caller
value = sliderWidget.GetRepresentation().GetValue()
self.superEllipsoid.SetN1(value)
class SliderCallbackN2():
def __init__(self, superEllipsoid):
self.superEllipsoid = superEllipsoid
def __call__(self, caller, ev):
sliderWidget = caller
value = sliderWidget.GetRepresentation().GetValue()
self.superEllipsoid.SetN2(value)
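class SliderCallbackMinimumV():
    # Assumed callback mirroring the two above; vtkParametricFunction (the base
    # class of vtkParametricSuperEllipsoid) exposes SetMinimumV, so this wires
    # the "V min" slider to the surface.
    def __init__(self, superEllipsoid):
        self.superEllipsoid = superEllipsoid
    def __call__(self, caller, ev):
        sliderWidget = caller
        value = sliderWidget.GetRepresentation().GetValue()
        self.superEllipsoid.SetMinimumV(value)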
if __name__ == '__main__':
main()
|
from flask import Flask
from flask_limiter import Limiter
from flask_limiter.util import get_ipaddr as get_remote_address
from core import config
limiter = Limiter(key_func=get_remote_address)
def init_limiter(app: Flask):
app.config["RATELIMIT_DEFAULT"] = config.RATELIMIT_DEFAULT
app.config["RATELIMIT_STORAGE_URL"] = config.RATELIMIT_STORAGE_URL
app.config["RATELIMIT_HEADERS_ENABLED"] = config.RATELIMIT_HEADERS_ENABLED
app.config["RATELIMIT_IN_MEMORY_FALLBACK"] = config.RATELIMIT_IN_MEMORY_FALLBACK
app.config["RATELIMIT_KEY_PREFIX"] = config.RATELIMIT_KEY_PREFIX
app.config["RATELIMIT_SWALLOW_ERRORS"] = config.RATELIMIT_SWALLOW_ERRORS
limiter.init_app(app)
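# Illustrative usage (the app and route names are assumptions, not part of
# this module): after init_limiter(app), routes can opt into per-route limits.
#
#   app = Flask(__name__)
#   init_limiter(app)
#
#   @app.route("/ping")
#   @limiter.limit("10/minute")
#   def ping():
#       return "pong"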
|
class Silly:
def __setattr__(self, attr, value):
if attr == "silly" and value == 7:
raise AttributeError("you shall not set 7 for silly")
super().__setattr__(attr, value)
def __getattribute__(self, attr):
if attr == "silly":
return "Just Try and Change Me!"
return super().__getattribute__(attr)
@property
def silly(self):
"This is a silly property"
print("You are getting silly")
return self._silly
@silly.setter
def silly(self, value):
print("You are making silly {}".format(value))
self._silly = value
@silly.deleter
def silly(self):
print("Whoa, you killed silly!")
del self._silly
s = Silly()
s.silly = 4 # setter
print(s.silly) # getter
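# Note on the interplay above: __getattribute__ intercepts every lookup of
# "silly", so the property getter is bypassed and the print shows
# "Just Try and Change Me!" rather than 4. On assignment, __setattr__ runs
# first (so s.silly = 7 would raise AttributeError), then the property setter.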
|
#!/usr/bin/python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
# The unittest framework doesn't play nice with pylint:
# pylint: disable-msg=C0103
from __future__ import absolute_import
import unittest
from svtplay_dl.service.tests import HandlesURLsTestMixin
from svtplay_dl.service.oppetarkiv import OppetArkiv
class handlesTest(unittest.TestCase, HandlesURLsTestMixin):
service = OppetArkiv
urls = {
'ok': [
"http://www.oppetarkiv.se/video/1129844/jacobs-stege-avsnitt-1-av-1"
],
'bad': [
"http://www.svtplay.se/video/1090393/del-9"
]
}
|
# Concrete kd-tree implementation, including construction and search
import random
import math
import numpy as np
from result_set import KNNResultSet, RadiusNNResultSet
# Node class: the basic building block of the tree
class Node:
def __init__(self, axis, value, left, right, point_indices):
self.axis = axis
self.value = value
self.left = left
self.right = right
self.point_indices = point_indices
def is_leaf(self):
if self.value is None:
return True
else:
return False
def __str__(self):
output = ''
output += 'axis %d, ' % self.axis
if self.value is None:
output += 'split value: leaf, '
else:
output += 'split value: %.2f, ' % self.value
output += 'point_indices: '
output += str(self.point_indices.tolist())
return output
# Function: before building the tree, sort by value and reorder the keys accordingly
# Inputs:
#     key: keys
#     value: values
# Outputs:
#     key_sorted: sorted keys
#     value_sorted: sorted values
def sort_key_by_value(key, value):
assert key.shape == value.shape
assert len(key.shape) == 1
sorted_idx = np.argsort(value)
key_sorted = key[sorted_idx]
value_sorted = value[sorted_idx]
return key_sorted, value_sorted
def axis_round_robin(axis, dim):
if axis == dim-1:
return 0
else:
return axis + 1
def axis_by_variance(data):
vars = np.var(data, axis=0)
return np.argmax(vars)
# Function: build the tree recursively
# Inputs:
#     root: root node of the tree
#     db: point cloud data
#     point_indices: sorted keys
#     axis: scalar
#     leaf_size: scalar
# Output:
#     root: the fully built tree
def kdtree_recursive_build(root, db, point_indices, axis, leaf_size, axis_method):
if root is None:
root = Node(axis, None, None, None, point_indices)
# determine whether to split into left and right
if len(point_indices) > leaf_size:
# --- get the split position ---
point_indices_sorted, _ = sort_key_by_value(point_indices, db[point_indices, axis]) # M
        # Assignment 1
        # --- begin solution ---
        middle_left_idx = math.ceil(point_indices_sorted.shape[0] / 2) - 1  # ceil keeps the left idx >= 0; the minus 1 keeps the right idx < n
middle_left_point_idx = point_indices_sorted[middle_left_idx]
middle_left_point_value = db[middle_left_point_idx, axis] # get middle value in target axis
middle_right_idx = middle_left_idx + 1
middle_right_point_idx = point_indices_sorted[middle_right_idx]
middle_right_point_value = db[middle_right_point_idx, axis]
root.value = (middle_left_point_value + middle_right_point_value) * 0.5
next_axis_l = axis_round_robin(axis, db.shape[1])
next_axis_r = next_axis_l
if axis_method == 'by_variance':
            if middle_right_idx > 0:
                next_axis_l = axis_by_variance(db[point_indices_sorted[0:middle_right_idx]])
            if middle_right_idx < len(point_indices_sorted):
                next_axis_r = axis_by_variance(db[point_indices_sorted[middle_right_idx:]])
# --- get the split position --- #
root.left = kdtree_recursive_build(root.left,
db,
point_indices_sorted[0:middle_right_idx], # [0: middle_right_idx) equal to [0:middle_left_idx]
next_axis_l,
leaf_size, axis_method=axis_method)
root.right = kdtree_recursive_build(root.right,
db,
point_indices_sorted[middle_right_idx:],
next_axis_r,
leaf_size, axis_method=axis_method)
        # --- end solution ---
return root
# Function: traverse a kd-tree, printing leaves and tracking depth
# Inputs:
#     root: kd-tree
#     depth: current depth
#     max_depth: maximum depth
def traverse_kdtree(root: Node, depth, max_depth):
depth[0] += 1
if max_depth[0] < depth[0]:
max_depth[0] = depth[0]
if root.is_leaf():
print(root)
else:
traverse_kdtree(root.left, depth, max_depth)
traverse_kdtree(root.right, depth, max_depth)
depth[0] -= 1
# Function: build a kd-tree (public interface built on kdtree_recursive_build)
# Inputs:
#     db_np: raw data
#     leaf_size: scalar
# Output:
#     root: the fully built kd-tree
def kdtree_construction(db_np, leaf_size, axis_method='round_robin'):
N, dim = db_np.shape[0], db_np.shape[1]
if axis_method == 'round_robin':
axis = 0
elif axis_method == 'by_variance':
axis = axis_by_variance(db_np)
else:
raise NotImplementedError
# build kd_tree recursively
root = None
root = kdtree_recursive_build(root,
db_np,
np.arange(N),
axis,
leaf_size=leaf_size, axis_method=axis_method)
return root
# Function: kNN search via the kd-tree, i.e. find the k nearest neighbors
# Inputs:
#     root: kd-tree
#     db: raw data
#     result_set: search results
#     query: query point
# Output:
#     returns False when the search fails
def kdtree_knn_search(root: Node, db: np.ndarray, result_set: KNNResultSet, query: np.ndarray):
if root is None:
return False
if root.is_leaf():
# compare the contents of a leaf
leaf_points = db[root.point_indices, :]
diff = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
for i in range(diff.shape[0]):
result_set.add_point(diff[i], root.point_indices[i])
return False
    # Assignment 2
    # Hint: implement the search recursively as well
    # --- begin solution ---
if query[root.axis] <= root.value:
# search space where query inside
kdtree_knn_search(root.left, db, result_set, query)
if math.fabs(query[root.axis] - root.value) < result_set.worstDist():
# search other space if worstDist is still large
kdtree_knn_search(root.right, db, result_set, query)
else:
kdtree_knn_search(root.right, db, result_set, query)
if math.fabs(query[root.axis] - root.value) < result_set.worstDist():
kdtree_knn_search(root.left, db, result_set, query)
    # --- end solution ---
return False
# Function: radius search via the kd-tree, i.e. find all neighbors within radius
# Inputs:
#     root: kd-tree
#     db: raw data
#     result_set: search results
#     query: query point
# Output:
#     returns False when the search fails
def kdtree_radius_search(root: Node, db: np.ndarray, result_set: RadiusNNResultSet, query: np.ndarray):
if root is None:
return False
if root.is_leaf():
# compare the contents of a leaf
leaf_points = db[root.point_indices, :]
diff = np.linalg.norm(np.expand_dims(query, 0) - leaf_points, axis=1)
for i in range(diff.shape[0]):
result_set.add_point(diff[i], root.point_indices[i])
return False
    # Assignment 3
    # Hint: implement the search recursively
    # --- begin solution ---
    # same as the knn search
if query[root.axis] <= root.value:
kdtree_radius_search(root.left, db, result_set, query)
if math.fabs(query[root.axis] - root.value) < result_set.worstDist():
kdtree_radius_search(root.right, db, result_set, query)
else:
kdtree_radius_search(root.right, db, result_set, query)
if math.fabs(query[root.axis] - root.value) < result_set.worstDist():
kdtree_radius_search(root.left, db, result_set, query)
    # --- end solution ---
return False
def main():
# configuration
db_size = 64
dim = 3
leaf_size = 4
k = 1
db_np = np.random.rand(db_size, dim)
root = kdtree_construction(db_np, leaf_size=leaf_size)
depth = [0]
max_depth = [0]
traverse_kdtree(root, depth, max_depth)
print("tree max depth: %d" % max_depth[0])
query = np.asarray([0, 0, 0])
result_set = KNNResultSet(capacity=k)
kdtree_knn_search(root, db_np, result_set, query)
print(result_set)
diff = np.linalg.norm(np.expand_dims(query, 0) - db_np, axis=1)
nn_idx = np.argsort(diff)
nn_dist = diff[nn_idx]
print(nn_idx[0:k])
print(nn_dist[0:k])
print("Radius search:")
query = np.asarray([0, 0, 0])
result_set = RadiusNNResultSet(radius = 0.5)
kdtree_radius_search(root, db_np, result_set, query)
print(result_set)
if __name__ == '__main__':
main()
|
students = []
references = []
benefits = []
MODE_SPECIFIC = '1. SPECIFIC'
MODE_HOSTEL = '2. HOSTEL'
MODE_MCDM = '3. MCDM'
MODE_REMAIN = '4. REMAIN'
|
from discord.ext import commands
import cogs.inactive.lists.listconf as lc
class Lists(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.lists = {}
@commands.Cog.listener("on_ready")
async def on_ready(self):
"""Load JSON lists when ready"""
lc.backup_data()
self.lists = lc.get_data()
@commands.command(name="listnew", aliases=["newlist"])
@commands.cooldown(1, 5)
async def list_new(self, ctx, *, list_name: str):
"""
Create a new list.
"""
        list_name = list_name.lower()
        if not str(ctx.author.id) in self.lists:
            self.lists.update({str(ctx.author.id): {}})
        if list_name in self.lists[str(ctx.author.id)]:
            await ctx.send("That list already exists!")
            return
        self.lists[str(ctx.author.id)].update({list_name: []})
lc.update_users(self.lists)
lc.save_data()
await ctx.send(f"Created new list `{list_name}`.")
@commands.command(name="deletelist", aliases=["dellist", "listdelete"])
@commands.cooldown(1, 5)
async def list_delete(self, ctx, *, list_name: str):
"""
Delete a list.
"""
try:
list_name = list_name.lower()
self.lists[str(ctx.author.id)].pop(list_name)
lc.update_users(self.lists)
lc.save_data()
await ctx.send(f"Deleted list `{list_name}`.")
except KeyError:
await ctx.send("Invalid list!")
@commands.command(name="listslist", aliases=["listlists", "lists"])
@commands.cooldown(1, 3)
async def lists_list(self, ctx):
"""
List current user lists.
"""
try:
user_lists = self.lists[str(ctx.author.id)]
lists = f"{ctx.author.name}'s Lists\n\n"
for t in sorted(user_lists):
lists += f"- {t}\n"
parts = [(lists[i:i + 750]) for i in range(0, len(lists), 750)]
for part in parts:
await ctx.send(f"```{part}```")
except KeyError:
await ctx.send("No lists available!")
@commands.command(name="listview", aliases=["viewlist", "list"])
@commands.cooldown(1, 3)
async def list_view(self, ctx, *, list_name: str):
"""
View a list.
"""
try:
list_name = list_name.lower()
list_content = self.lists[str(ctx.author.id)][list_name]
content = f"{ctx.author.name}'s List - {list_name.capitalize()}\n\n"
i = 0
for item in list_content:
i += 1
content += f"{i} - {item}\n"
parts = [(content[i:i + 1500]) for i in range(0, len(content), 1500)]
for part in parts:
await ctx.send(f"```{part}```")
except KeyError:
await ctx.send("No lists available!")
@commands.command(name="listitemadd", aliases=["additem"])
@commands.cooldown(1, 3)
async def list_item_add(self, ctx, list_name: str, *, item: str):
"""
Add an item to a list.
"""
list_name = list_name.lower()
if not str(ctx.author.id) in self.lists:
await ctx.send("No lists available!")
return
if list_name not in self.lists[str(ctx.author.id)]:
await ctx.send("That list does not exist!")
return
current_content = self.lists[str(ctx.author.id)][list_name]
current_content.append(item)
self.lists[str(ctx.author.id)].update({list_name: current_content})
lc.update_users(self.lists)
lc.save_data()
await ctx.send(f"Added `{item}` to list `{list_name}`.")
@commands.command(name="listitemdelete", aliases=["delitem"])
@commands.cooldown(1, 3)
async def list_item_delete(self, ctx, list_name: str, item_num: int):
"""
Remove an item from a list.
"""
list_name = list_name.lower()
if not str(ctx.author.id) in self.lists:
await ctx.send("No lists available!")
return
if list_name not in self.lists[str(ctx.author.id)]:
await ctx.send("That list does not exist!")
return
current_content = self.lists[str(ctx.author.id)][list_name]
del current_content[item_num-1]
self.lists[str(ctx.author.id)].update({list_name: current_content})
lc.update_users(self.lists)
lc.save_data()
await ctx.send(f"Removed item from list `{list_name}`.")
def setup(bot):
bot.add_cog(Lists(bot))
|
import uuid
from src.model.SqliteObject import SqliteObject
from src.server import databaseHandler
class Session(SqliteObject):
properties = [
"id",
"name",
"description"
]
table = "session"
def __init__(self,
name=None,
description=None,
id=None):
super(Session, self).__init__(id=id)
self.name = name or "A Whiteboard Story"
self.description = description or "Snowboard and the 7 stickyNotes"
def get_latest_canvas(self):
from src.model.Canvas import Canvas
if self.database:
c = self.database.cursor()
else:
c = databaseHandler().get_database().cursor()
        query = 'SELECT id FROM canvases WHERE session=? ORDER BY datetime(derivedAt) DESC LIMIT 1;'
        c.execute(query, (str(self.id),))
data = c.fetchone()
if data is None:
return None
return Canvas.get(data[0])
def create_new_canvas(self):
from src.model.Canvas import Canvas
canvas = self.get_latest_canvas()
if canvas is None:
return Canvas(session=self.id)
return canvas.clone()
|
"""Created by sgoswami on 3/23/17 as part of leetcode"""
"""The string "PAYPALISHIRING" is written in a zigzag pattern on a given number of rows like this: (you may want to
display this pattern in a fixed font for better legibility).
P A H N
A P L S I I G
Y I R
And then read line by line: 'PAHNAPLSIIGYIR'
Write the code that will take a string and make this conversion given a number of rows:
string convert(string text, int nRows)"""
class Solution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
""" |
from __future__ import print_function, absolute_import, unicode_literals, division
import six
from six.moves import (zip, filter, map, reduce, input, range)
from IPython.core.display import Image as display_image
from .dot import (
render_nx_as_dot, clear_formatting,
format_graph_for_lifespan,
format_graph_for_worm_counts,
format_graph_for_true_counts,
format_graph_for_moved
)
from ..subgraph import nearby, neartime
def look(graph, target, jumps, ref=False, ctype='lifespan'):
"""
In *graph*, a waldo.network.Graph, around *target*, show the network out to
an (undirected) distance of *jumps*. Optionally show a colored reference.
"""
subgraph = nearby(graph, target, jumps)
if ctype == 'lifespan':
format_graph_for_lifespan(subgraph, ref=ref, focus=graph.where_is(target))
elif ctype == 'worm_count':
format_graph_for_worm_counts(subgraph, ref=ref)
elif ctype == 'true_count':
format_graph_for_true_counts(subgraph, ref=ref)
elif ctype == 'moved_bool':
format_graph_for_moved(subgraph, ref=ref)
temp_file = render_nx_as_dot(subgraph)
return display_image(temp_file)
def save_graphs(ex_id, graph, target, jumps, ref=False):
"""
In *graph*, a waldo.network.Graph, around *target*, show the network out to
an (undirected) distance of *jumps*. Optionally show a colored reference.
"""
subgraph = nearby(graph, target, jumps)
format_graph_for_lifespan(subgraph, focus=graph.where_is(target))
of = '{eid}_lifespan.gv'.format(eid=ex_id)
print(of)
temp_file = render_nx_as_dot(subgraph, output_file=of)
clear_formatting(subgraph)
format_graph_for_worm_counts(subgraph)
of = '{eid}_worm_counts.gv'.format(eid=ex_id)
temp_file = render_nx_as_dot(subgraph, output_file=of)
clear_formatting(subgraph)
format_graph_for_true_counts(subgraph)
of = '{eid}_true_counts.gv'.format(eid=ex_id)
temp_file = render_nx_as_dot(subgraph, output_file=of)
clear_formatting(subgraph)
format_graph_for_moved(subgraph)
of = '{eid}_seed_counts.gv'.format(eid=ex_id)
temp_file = render_nx_as_dot(subgraph, output_file=of)
clear_formatting(subgraph)
return display_image(temp_file)
def look2(graph, target, jumps, ref=False):
"""
In *graph*, a waldo.network.Graph, around *target*, show the network out to
an (undirected) distance of *jumps*. Optionally show a colored reference.
"""
subgraph = nearby(graph, target, jumps)
format_graph_for_worm_counts(subgraph, ref=ref)
temp_file = render_nx_as_dot(subgraph)
return display_image(temp_file)
def look_time(graph, fstart, fend, ref=False):
subgraph = neartime(graph, fstart, fend)
temp_file = render_nx_as_dot(subgraph, ref=ref)
return display_image(temp_file)
|
import sys
import json
import argparse
from clickandcollectnz.countdown import Countdown
from clickandcollectnz.foodstuffs import NewWorld, PakNSave
classes = ['Countdown', 'NewWorld', 'PakNSave']
chain_classes = {'Countdown': Countdown, 'NewWorld': NewWorld, 'PakNSave': PakNSave}
def print_usage():
print("Usage: python -m clickandcollectnz [chain] [store_id]")
print()
print(" chain: Countdown | PakNSave | NewWorld")
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='python -m clickandcollectnz', description='NZ Click and Collect time slots.')
parser.add_argument('chain', nargs="?", choices=classes)
parser.add_argument('store_id', nargs="?")
parser.add_argument('--json', dest='json', action='store_const',
const=True, default=False,
help='output in JSON format')
args = parser.parse_args()
if not args.chain:
parser.print_help()
sys.exit(0)
if args.chain and not args.store_id:
        cls = chain_classes[args.chain]
stores = cls.get_store_list()
if args.json:
print(json.dumps(stores, default=lambda o: o.to_json()))
else:
print("ID - Store Name")
for store in stores:
print(store)
sys.exit(0)
if args.chain and args.store_id:
        cls = chain_classes[args.chain]
        stores = cls.get_store_list()
        store = next((x for x in stores if x.id == args.store_id), None)
        if store is None:
            print("Unknown store_id: %s" % args.store_id)
            sys.exit(1)
        store.get_slots()
if args.json:
print(json.dumps(store, default=lambda o: o.to_json()))
else:
print("Not Implemented")
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import usuario_acesso, recursos
#admin.site.register(Usuario, UserAdmin)
class usuario_acesso_adm (admin.ModelAdmin):
    list_display = ['user','escrita'] # list of fields shown on the admin screen
admin.site.register(usuario_acesso,usuario_acesso_adm) # register the model with the admin site
class recursos_adm (admin.ModelAdmin):
    list_display = ['nome','nome_google','tipo','email_responsavel'] # list of fields shown on the admin screen
admin.site.register(recursos,recursos_adm) # register the model with the admin site |
import re
import torch
from torch import nn
import logging
from typing import Dict
logger = logging.getLogger(__name__)
class DefaultModelVocabResizer:
@classmethod
def set_embeddings(cls, model, token_ids):
# self.model.get_input_embeddings()
old_word_embeddings = model.embeddings.word_embeddings
old_word_embeddings_weight = old_word_embeddings.weight
pruned_word_embeddings_weight = torch.index_select(
old_word_embeddings_weight, 0, index=torch.LongTensor(token_ids).to(old_word_embeddings_weight.device))
pruned_num_tokens, embedding_dim = pruned_word_embeddings_weight.shape
pruned_word_embeddings = nn.Embedding(
pruned_num_tokens, embedding_dim).to(old_word_embeddings_weight.device)
pruned_word_embeddings.weight.data[:] = pruned_word_embeddings_weight[:]
model.embeddings.word_embeddings = pruned_word_embeddings
@classmethod
def set_lm_head(cls, model, token_ids) -> bool:
try:
output_embedding_layer = model.get_output_embeddings()
except AttributeError:
return False
if output_embedding_layer is None:
return False
output_embedding_layer.weight = model.get_input_embeddings().weight
output_embedding_layer.bias.data = torch.index_select(
output_embedding_layer.bias.data, 0, index=torch.LongTensor(token_ids).to(output_embedding_layer.weight.device))
return True
#bert, roberta, xlmr, ...
def get_word_embeddings(model):
state_dict = model.state_dict()
layer_template = "embeddings.word_embeddings"
layer_names = []
for key in state_dict:
if layer_template in key:
layer_names.append(key)
assert len(
layer_names) == 1, f"Invalid model structure with ambiguous word embeddings: {layer_names}"
word_embedding_weight = state_dict[layer_names[0]]
return word_embedding_weight
#bert, roberta, xlmr, ...
def get_num_of_trms(model):
layer_template_regex = "encoder.layer\.(\d+)\."
layer_template = "encoder.layer.LAYER_INDEX."
layer_indices = set()
layer_names = set()
state_dict = model.state_dict()
for key in state_dict:
matched = re.findall(layer_template_regex, key)
if len(matched) > 0:
assert len(
matched) == 1, f"Invalid model structure. Cannot parse {key}"
layer_index = int(matched[0])
layer_indices.add(layer_index)
layer_name = layer_template.replace("LAYER_INDEX", matched[0])
layer_name = key[:key.find(layer_name)]+layer_name
layer_names.add(layer_name)
print("Found transfomr layers:", layer_indices)
print("Layer name prefixes:", layer_names)
return len(layer_indices), layer_names
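# Illustrative usage (the model name and token ids are assumptions, not part
# of this module): load a BERT-style encoder and inspect its structure before
# pruning the vocabulary.
#
#   from transformers import AutoModel
#   model = AutoModel.from_pretrained("bert-base-uncased")
#   weight = get_word_embeddings(model)
#   num_layers, prefixes = get_num_of_trms(model)
#   DefaultModelVocabResizer.set_embeddings(model, token_ids=[0, 1, 2])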
|
import sys
import os
import logging
import time
import json
# os.environ['PYTHON_EGG_CACHE'] = '/tmp'
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vendored/'))
logger = logging.getLogger()
logger.setLevel(logging.INFO)
import Adafruit_DHT
import RPi.GPIO as GPIO
import greengrasssdk
logger.info('Initializing moistureHandler')
#setup moisture
channel_moisture = 17
GPIO.setmode(GPIO.BCM)
GPIO.setup(channel_moisture, GPIO.IN)
#setup temperature
channel_temperature = 4
sensor_temperature = Adafruit_DHT.DHT11
#setup greengrasssdk
iotData = greengrasssdk.client('iot-data')
payload = {"water_level": 0, "temperature": 0, "humidity":0, "deviceId": "" }
serial = ""
def init_serial():
    global serial
    serial = getserial()
def getserial():
logger.info('getserial called')
# Extract serial from cpuinfo file
cpuserial = "0000000000000000"
    try:
        with open('/proc/cpuinfo', 'r') as f:
            for line in f:
                if line[0:6] == 'Serial':
                    cpuserial = line[10:26]
    except Exception:
        cpuserial = "ERROR000000000"
logger.info('serial {}'.format(cpuserial))
return cpuserial
def publish_metrics():
shallow = payload.copy()
shallow['water_level'] = collect_moisture()
shallow['deviceId'] = getserial()
shallow['humidity'], shallow['temperature'] = collect_temperature()
iotData.publish(topic='SmartGarden/MoistureLevel', payload=json.dumps(shallow))
def collect_moisture():
if GPIO.input(channel_moisture):
logger.info('no water detected on channel {}'.format(channel_moisture))
return 0
else:
logger.info('water detected on channel {}'.format(channel_moisture))
return 1
def collect_temperature():
humidity, temperature = Adafruit_DHT.read_retry(sensor_temperature, channel_temperature)
logger.info("humidity: {} temperature: {}".format(humidity, temperature))
if humidity is not None and temperature is not None:
return humidity, temperature
logger.error("Falha ao ler dados do DHT11")
return 0, 0
#GPIO.add_event_detect(channel, GPIO.BOTH)
#GPIO.add_event_callback(channel, collect_moisture)
def pinned_handler(event, context):
"""
Mock function for pinned/long-lived Lambda
"""
pass
while True:
publish_metrics()
time.sleep(20)
|
from flask import Flask, url_for, redirect, Blueprint,render_template,session
from werkzeug.security import generate_password_hash, check_password_hash
from prol import db
from prol.user.forms import RegisterForm, LoginForm
from prol.user.models import User
user_app = Blueprint('User',__name__)
@user_app.route('/register', methods=('GET','POST'))
def register():
form = RegisterForm()
if form.validate_on_submit():
hashed_password = generate_password_hash(form.password.data)
user = User(
form.first_name.data,
form.last_name.data,
form.email.data,
hashed_password
)
db.session.add(user)
db.session.commit()
        # the committed user already has its id populated
        session['id'] = user.id
        session['first_name'] = user.first_name
return redirect(url_for('User.home'))
return render_template("register.html",form=form)
@user_app.route('/',methods=('GET','POST'))
def login():
form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        # guard against unknown emails and verify the password hash
        # (assumes the User model stores the hash in a `password` attribute)
        if user and check_password_hash(user.password, form.password.data):
            session['id'] = user.id
            session['first_name'] = user.first_name
            return redirect(url_for('User.home'))
return render_template('login.html',form=form)
@user_app.route('/home')
def home():
#first_name = session['first_name']
return render_template("index.html",first_name=session['first_name'])
@user_app.route('/logout')
def logout():
session.pop('id')
session.pop('first_name')
return redirect(url_for('User.login'))
|
import json
import requests
from webContent.constants import URL_AUTH_V2, TIMEOUT
from exception import Unauthorized
def keystone_auth(username, password, tenant='admin'):
    # Authentication json (note: the tenant argument is overridden with the username, so the 'admin' default is unused)
    tenant = username
datajs = {"auth": {"tenantName": tenant, "passwordCredentials": {"username": username, "password": password}}}
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
# Authentication request
resp = requests.post(URL_AUTH_V2, data=json.dumps(datajs), headers=headers, timeout=TIMEOUT)
if resp.status_code == 401:
raise Unauthorized('Keystone returns 401 Unauthorized')
resp.raise_for_status()
js_response = json.loads(resp.text)
return js_response["access"]["token"]["id"], js_response["access"]["user"]["id"]
def user_logout(request):
request.session.flush()
|
import numpy as np
import cv2
def calibrate_images(images, nx, ny):
    objpoints = [] # 3d points from real world
    imgpoints = [] # 2d points on image
    # prepare object points - points of the chessboard in undistorted space
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
size = (0, 0)
for imageFile in images:
image = cv2.imread(imageFile)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
size = gray.shape[::-1]
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
if ret:
objpoints.append(objp)
imgpoints.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, size, None, None)
return ret, mtx, dist, rvecs, tvecs
def undistort(image, mtx, dist):
dst = cv2.undistort(image, mtx, dist, None, mtx)
return dst
def warp(image, src, dst):
size = image.shape
w = size[1]
h = size[0]
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(image, M, (w, h))
return warped
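# Illustrative usage (the file paths are assumptions, not part of this module):
# calibrate from a folder of 9x6 chessboard images, then undistort one frame.
if __name__ == '__main__':
    import glob
    files = glob.glob('camera_cal/calibration*.jpg')
    ret, mtx, dist, rvecs, tvecs = calibrate_images(files, 9, 6)
    img = cv2.imread(files[0])
    cv2.imwrite('undistorted.jpg', undistort(img, mtx, dist))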
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-06-25 16:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blacklist',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256, verbose_name='Title')),
('blacklist_type', models.IntegerField(choices=[(0, 'Haritrex blacklist'), (1, 'Wayback blacklist')])),
('url_list', models.TextField(verbose_name='List of urls to block')),
],
options={
'verbose_name_plural': 'Blacklists',
'ordering': ('id',),
'verbose_name': 'Blacklist',
},
),
]
|
import pytest
from l5kit.configs import load_config_data
from l5kit.data import ChunkedDataset, LocalDataManager
@pytest.fixture(scope="session")
def dmg() -> LocalDataManager:
"""
Get a data manager for the artefacts folder.
Note: the scope of this fixture is "session"-> only one is created regardless the number of the tests
Returns:
LocalDataManager: the data manager object
"""
return LocalDataManager("./l5kit/tests/artefacts/")
@pytest.fixture(scope="function")
def cfg() -> dict:
"""
Get a config file from artefacts
Note: the scope of this fixture is "function"-> one per test function
Returns:
dict: the config python dict
"""
return load_config_data("./l5kit/tests/artefacts/config.yaml")
@pytest.fixture(scope="session")
def zarr_dataset() -> ChunkedDataset:
zarr_dataset = ChunkedDataset(path="./l5kit/tests/artefacts/single_scene.zarr")
zarr_dataset.open()
return zarr_dataset
|
import numpy as np

def romberg(n0,steps,order,quadrature,f):
res = np.zeros((steps,steps))
for i in range(0,steps):
n = n0 * 1<<i
res[i,0] = quadrature(n,f)
for j in range(1,i+1):
res[i,j] = res[i,j-1] + \
(res[i,j-1]-res[i-1,j-1])/(2**(order*j)-1.)
return res
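# Example usage (the quadrature signature quadrature(n, f) matches the call
# above; the integrand and interval [0, 1] are assumptions): integrate
# f(x) = x^2 with a composite trapezoid rule (order 2); the bottom-right entry
# of the Romberg table converges to 1/3.
def trapezoid(n, f):
    xs = np.linspace(0.0, 1.0, n + 1)
    return np.trapz(f(xs), xs)

if __name__ == '__main__':
    print(romberg(2, 4, 2, trapezoid, lambda x: x ** 2))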
|
import time
from machine import Pin
pin0 = Pin(0, Pin.IN, Pin.PULL_UP)
def callback(pin):
# disable callback
pin.irq(trigger=False)
print("PUSH")
time.sleep_ms(200)
#re-enable callback
irq(pin)
def irq(pin):
pin.irq(trigger=Pin.IRQ_FALLING, handler=callback)
# Initial irq setup
irq(pin0)
print("Push the button!")
|
# AUTOGENERATED! DO NOT EDIT! File to edit: 10_matrix_multiply.ipynb (unless otherwise specified).
__all__ = ['dotp', 'mmult']
# Cell
def dotp(v1, v2):
"Get dot product of 2 vectors"
sum = 0
for i in range(0, len(v1)):
sum += v1[i] * v2[i]
return sum
# Cell
def mmult(m1, m2):
"Get product of 2 matrices using [dotp](/mmult#dotp) "
import numpy as np
assert m1.shape[1] == m2.shape[0]
vsize = m1.shape[1]
pmatrix = np.zeros((m1.shape[0],m2.shape[1]))
for i in range(0,m1.shape[0]):
for j in range(0,m2.shape[1]):
nv = dotp(m1[i,:], m2[:,j])
pmatrix[i,j] = nv
return pmatrix |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gen_collective_ops
from tensorflow.python.platform import test
class CollectiveOpsTest(test.TestCase):
def _setup_context(self, num_cpus=2):
context._reset_context()
cpus = config.list_physical_devices('CPU')
self.assertEqual(len(cpus), 1)
config.set_logical_device_configuration(cpus[0], [
context.LogicalDeviceConfiguration(),
context.LogicalDeviceConfiguration()
])
context.ensure_initialized()
@test_util.run_v2_only
def testReduceV2(self):
self._setup_context()
@def_function.function
def single_all_reduce(in_value, group_size, group_key, instance_key):
return gen_collective_ops.collective_reduce_v2(
in_value, group_size, group_key, instance_key, merge_op='Add',
final_op='Id', communication_hint='auto')
@def_function.function
def run_all_reduce_1cpu():
with ops.device('/device:CPU:0'):
in_value = constant_op.constant([1.])
group_size = constant_op.constant(1)
group_key = constant_op.constant(1)
instance_key = constant_op.constant(1)
return single_all_reduce(in_value, group_size, group_key, instance_key)
@def_function.function
def run_all_reduce_2cpus():
in_value = constant_op.constant([1.])
group_size = constant_op.constant(2)
group_key = constant_op.constant(2)
instance_key = constant_op.constant(2)
collectives = []
with ops.device('/device:CPU:0'):
collectives.append(single_all_reduce(in_value, group_size, group_key,
instance_key))
with ops.device('/device:CPU:1'):
collectives.append(single_all_reduce(in_value, group_size, group_key,
instance_key))
return collectives
self.assertAllClose(run_all_reduce_1cpu(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2cpus():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
test.main()
|
import math
import os
import random
import re
import sys
def factorial(x):
if x == 1 or x == 0:
return 1
else:
return x * factorial(x-1)
def bino(n, x, p):
comb = factorial(n) / (factorial(n-x) * factorial(x))
return comb * pow(p, x) * pow((1-p), n-x)
def bd(prob, n):
ans = 0
for i in range(3):
ans += bino(n, i, prob/100)
print(round(ans, 3))
ans = 0
for i in range(2):
ans += bino(n, i, prob/100)
print(round(1-ans, 3))
if __name__ == '__main__':
a = list(map(str, input().rstrip().split()))
prob, n = float(a[0]), float(a[1])
bd(prob, n)
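# Quick check (input format "p n", e.g. "12 10"): for p = 12% and n = 10 the
# program prints P(X <= 2) ~ 0.891 and then P(X >= 2) ~ 0.342.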
|
#!/usr/bin/env python3
"""
Created on Sat May 8 19:17:36 2021
@author: Gruppo Carboni - Ongaro
"""
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import threading
from Game.game import Game
from Game.gameStatus import GameStatus
import time as tm
""" La funzione che segue accetta le connessioni dei client in entrata."""
def accept_in_connections():
while True:
client, client_address = SERVER.accept()
print("%s:%s si è collegato." % client_address)
#Se il gioco è gia iniziato non accetto la connessione
if game.get_status() != GameStatus.NOT_STARTED:
print("Gioco già iniziato. Anche se inserito un nome non potrà giocare")
# ci serviamo di un dizionario per registrare i client
indirizzi[client] = client_address
# diamo inizio all'attività del Thread - uno per ciascun client
Thread(target=handle_client_request, args=(client,)).start()
"""La funzione seguente gestisce la connessione di un singolo client."""
def handle_client_request(client):  # Takes the client socket as the function argument.
    global matchIndex
    global currentPlayer
    global game
    global gameStatus
    nome = client.recv(BUFSIZ).decode("utf8") # --------------------------------NAME CHOICE
    while game.check_name_player(nome):
        # if the name is already taken, append an _ to keep names distinct
        nome = "{}{}".format(nome, "_")
    # CHECK GAME STATE
    if game.get_status() != GameStatus.NOT_STARTED:
        client.send(bytes("The game is currently running", "utf8"))
        client.send(bytes("\nYou are not allowed to play, but you can wait for the game to end\n", "utf8"))
        game.add_player_to_queue(nome)
    else:
        game.add_player_to_game_list(nome)
    # WELCOME
    if len(game.get_players()) > 1:
        client.send(bytes("The following players are already present: \n", "utf8"))
        for player in game.get_players():
            if player.get_name() != nome:
                client.send(bytes("{}: {}\n".format(player.get_name(), player.get_role()), "utf8"))
    client.send(bytes('Welcome %s! If you want to leave the Chat' % nome, "utf8"))
    client.send(bytes('\nClick the Ready button to declare yourself ready', "utf8"))
    client.send(bytes('\n{quit} to leave the game', "utf8"))
    client.send(bytes('\nYour role is %s' % game.get_player(nome).get_role(), "utf8"))
    # If the player is in the waiting room, tell them a few extra things
    if game.check_queue_players(nome):
        client.send(bytes('\nYou are in the waiting room, do nothing\n', "utf8"))
        broadcast("%s is in the waiting room" % nome)
    else:
        broadcast("%s joined the chat!" % nome)
        broadcast("The role of {} is {}.".format(nome, game.get_player(nome).get_role()))
    clients[client] = nome
    # listen on the single client's thread and handle message sending or leaving the Chat
    while True:
        msg = client.recv(BUFSIZ).decode("utf8")
        gameStatus = game.get_status()
        # --------COMMANDS--------
        # QUIT
        if msg == QUIT:
            broadcast("%s left the Chat." % nome)
            game.remove_player(game.get_player(nome))
            client.close()
            del clients[client]
            if (gameStatus != GameStatus.NOT_STARTED):
                print(gameStatus)
                broadcast("The match ends.")
                end_function()
            break
        # START when the game has already begun
        elif msg == START and gameStatus != GameStatus.NOT_STARTED and not game.check_queue_players(nome):
            client.send("Game already started!".encode())
        # START when the player is already ready
        elif msg == START and game.check_player_ready(nome) and not game.check_queue_players(nome):
            client.send('You already declared yourself ready'.encode())
        # START
        elif msg == START and gameStatus != GameStatus.STARTED and not game.check_queue_players(nome):
            game.set_player_ready(nome)
            broadcast("Player %s is ready" % nome)
            # If all players are ready, the game starts
            if game.get_status() == GameStatus.STARTED:
                broadcast("\nEveryone is ready, here we go!")
                matchIndex += 1
                # the game begins
                game.start_game()
                # start the main game timer
                start_countdown(GAME_TIME, GameStatus.ENDED, True, None, end_function)
        # BROADCAST THE MESSAGE (if it is not a command)
        else:
            if game.check_queue_players(nome):
                client.send(bytes("YOU MUST DO NOTHING", "utf8"))
            else:
                broadcast("{}: {}".format(nome, msg))
        currentPlayer = ""
        gameStatus = game.get_status()
        if gameStatus != GameStatus.NOT_STARTED and gameStatus != GameStatus.ENDED:  # if the game has started
            currentPlayer = game.get_current_player().get_name()
            # GAME STARTED, print the text for the door choice
            if gameStatus == GameStatus.STARTED:
                broadcast("\nTurn of: %s. " % currentPlayer)
                broadcast("\nChoose a door among 1, 2 and 3.")
                game.set_status(GameStatus.MENU_PHASE)
            else:
                # if the writer is the one who has to give an answer
                if currentPlayer == nome:
                    # a door has been chosen
                    if gameStatus == GameStatus.MENU_PHASE:
                        # check whether the message sent is a number among 1, 2 and 3
                        print(msg)
                        if msg.isnumeric():
                            if int(msg) == 1 or int(msg) == 2 or int(msg) == 3:
                                if game.answer_menu(msg):
                                    # print the question
                                    broadcast(game.get_question())
                                    game.set_status(GameStatus.QUESTION_PHASE)
                                    gameStatus = GameStatus.QUESTION_PHASE
                                    # 5-second timer to answer the question
                                    start_countdown(5, GameStatus.MENU_PHASE, False, GameStatus.QUESTION_PHASE,
                                                    stop_time_answer)
                                # the player walked through the door with the bomb
                                else:
                                    broadcast("%s went through the wrong door!." % nome)
                                    game.next_player()
                                    game.kill_player(game.get_player(currentPlayer))
                                    # if only one player is left, the game ends
                                    if game.check_end():
                                        game.set_status(GameStatus.ENDED)
                                        gameStatus = GameStatus.ENDED
                                        end_function()
                                    # if more than one player is left, continue the game
                                    else:
                                        game.set_status(GameStatus.STARTED)
                                        broadcast("%s leaves!." % nome)
                                        currentPlayer = game.get_current_player().get_name()
                                        broadcast("\nTurn of: %s. " % currentPlayer)
                                        broadcast("\nChoose a door among 1, 2 and 3.")
                                        game.set_status(GameStatus.MENU_PHASE)
                                        gameStatus = GameStatus.MENU_PHASE
                            else:
                                client.send(bytes("Invalid input, choose a door among 1, 2, 3", "utf8"))
                        # if the door-choice message is not a number among 1, 2 and 3
                        else:
                            client.send(bytes("Invalid input, choose a door among 1, 2, 3", "utf8"))
                    # if an answer to the question was given
                    elif gameStatus == GameStatus.QUESTION_PHASE:
                        # if the answer is correct
                        if game.answer_question(msg):
                            game.add_points()
                            broadcast("Correct answer, score increased!")
                        # if the answer is wrong
                        else:
                            game.remove_points()
                            broadcast("Wrong answer!")
                        # move on to the next player
                        game.next_player()
                        currentPlayer = game.get_current_player().get_name()
                        broadcast("\nTurn of: %s. " % currentPlayer)
                        broadcast("Choose a door among 1, 2 and 3.")
                        game.set_status(GameStatus.MENU_PHASE)
                # if the writer is not the one who has to answer, print an error
                else:
                    client.send(bytes("It is not your turn", "utf8"))
""" La funzione, che segue, invia un messaggio in broadcast a tutti i client."""
def broadcast(msg, prefisso=""): # il prefisso è usato per l'identificazione del nome.
for utente in clients:
utente.send(bytes(prefisso + msg, "utf8"))
""" La funzione countdown crea un timer di specificata durata, argomenti:
quitStatus: specifica lo stato nel quale il timer deve fermarsi
alwaysDo: specifica se va sempre fatto o vanno fatti controlli aggiuntivi
doStatus: specifica quando bisogna eseguire la funzione specificata
"""
def countdown(duration, quitStatus, alwaysDo, doStatus, function):
    thisCountdownMatchIndex = matchIndex
    thisPlayer = currentPlayer
    # the countdown stops when it runs out of seconds or when the game reaches the given state
    while duration > 0 and gameStatus != quitStatus:
        tm.sleep(1)
        duration -= 1
    # timer ended - run the given function when the alwaysDo flag is true,
    # or when we are in doStatus and the player is still the same
    if alwaysDo and thisCountdownMatchIndex == matchIndex and game.get_status() != GameStatus.ENDED and game.get_status() != GameStatus.NOT_STARTED:
        function()
    else:
        actualGameStatus = game.get_status()
        if actualGameStatus == doStatus and thisCountdownMatchIndex == matchIndex and thisPlayer == currentPlayer:
            function()
""" Avvia un thread per il countdown """
def start_countdown(duration, quitStatus, alwaysDo, doStatus, function):
countdown_thread = threading.Thread(target=countdown, args=(duration, quitStatus, alwaysDo, doStatus, function,))
countdown_thread.daemon = True # rendo il thread deamon, alla chiusura del server morirà
countdown_thread.start()
""" Funzione da eseguire quando termina il tempo per rispondere alla domanda """
def stop_time_answer():
broadcast("Tempo Scaduto per rispondere")
game.remove_points()
game.next_player()
currentPlayer = game.get_current_player().get_name()
broadcast("\nTurno di: %s. " % currentPlayer)
broadcast("\nScegli una porta tra 1, 2 e 3.")
game.set_status(GameStatus.MENU_PHASE)
""" Funzione che fa terminare il gioco e stampare la classifica """
def end_function():
rank = game.get_rank()
winner = rank[0]
broadcast("%s Ha vinto." % winner.get_name())
tm.sleep(1)
game.set_status(GameStatus.ENDED)
# stampa classifica
broadcast("\nGioco Terminato. Classifica:", "utf8")
i = 0
for player in rank:
i += 1
broadcast("\n{}°: {}, {}, {}\n".format(i, player.get_name(), player.get_score(), player.get_status()))
tm.sleep(1)
game.active_queue_players()
# reset gioco
game.reset_all()
broadcast("Premi Pronto per giocare ad una nuova partita!")
clients = {}
indirizzi = {}
HOST = ''
PORT = 53000
BUFSIZ = 1024
ADDR = (HOST, PORT)
game = Game()
START = "{start}"
QUIT = "{quit}"
GAME_TIME = 180 # Change this value to increase or decrease the game time
timerGame = ""
timerQuestion = ""
gameStatus = ""
currentPlayer = ""
matchIndex = 0
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(ADDR)
if __name__ == "__main__":
SERVER.listen(5)
print("In attesa di connessioni...")
ACCEPT_THREAD = Thread(target=accept_in_connections)
ACCEPT_THREAD.start()
ACCEPT_THREAD.join()
SERVER.close()
|
# -*- coding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import io
import os
import attr
import click
click.disable_unicode_literals_warning = True
from license_expression import Licensing
import saneyaml
from licensedcode import cache
from licensedcode import models
from licensedcode import match_hash
@attr.attrs(slots=True)
class LicenseRule(object):
data = attr.ib()
text = attr.ib()
raw_data = attr.ib(default=None)
def __attrs_post_init__(self, *args, **kwargs):
self.raw_data = rdat = '\n'.join(self.data).strip()
self.text = '\n'.join(self.text).strip()
# validate YAML syntax
try:
self.data = saneyaml.load(rdat)
except:
print('########################################################')
print('Invalid YAML:')
print(rdat)
print('########################################################')
raise
def load_data(location='00-new-licenses.txt'):
with io.open(location, encoding='utf-8') as o:
lines = o.read().splitlines(False)
rules = []
data = []
text = []
in_data = False
in_text = False
last_lines = []
for ln, line in enumerate(lines, 1):
last_lines.append(': '.join([str(ln), line]))
if line == '----------------------------------------':
if not (ln == 1 or in_text):
raise Exception('Invalid structure: #{ln}: {line}\n'.format(**locals()) +
'\n'.join(last_lines[-10:]))
in_data = True
in_text = True
if data and ''.join(text).strip():
rules.append(LicenseRule(data, text))
data = []
text = []
continue
if line == '---':
if not in_data:
raise Exception('Invalid structure: #{ln}: {line}\n'.format(**locals()) +
'\n'.join(last_lines[-10:]))
in_data = False
in_text = True
continue
if in_data:
data.append(line)
continue
if in_text:
text.append(line)
continue
return rules
def rule_exists(text):
"""
Return the matched rule identifier if the text is an existing rule matched
exactly, False otherwise.
"""
idx = cache.get_index()
matches = idx.match(query_string=text)
if not matches:
return False
if len(matches) > 1:
return False
match = matches[0]
if match.matcher == match_hash.MATCH_HASH:
return match.rule.identifier
def all_rule_tokens():
"""
Return a set of tuples of tokens, one corresponding to every existing and
added rules. Used to avoid duplicates.
"""
rule_tokens = set()
for rule in models.get_rules():
rule_tokens.add(tuple(rule.tokens()))
return rule_tokens
def find_rule_base_loc(license_expression):
"""
Return a new, unique and non-existing base name location suitable to create a new
rule.
"""
template = (license_expression
.lower()
.strip()
.replace(' ', '_')
.replace('(', '')
.replace(')', '')
+'_{}')
idx = 1
while True:
base_name = template.format(idx)
base_loc = os.path.join(models.rules_data_dir, base_name)
if not os.path.exists(base_loc + '.RULE'):
return base_loc
idx += 1
@click.command()
@click.argument('licenses_file', type=click.Path(), metavar='FILE')
@click.help_option('-h', '--help')
def cli(licenses_file):
"""
Create rules from a structured text file
For instance:
----------------------------------------
license_expression: lgpl-2.1
relevance: 100
is_license_notice: yes
---
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License
version 2.1 as published by the Free Software Foundation;
----------------------------------------
"""
rules_data = load_data(licenses_file)
rules_tokens = all_rule_tokens()
licenses = cache.get_licenses_db()
licensing = Licensing(licenses.values())
print()
for rule in rules_data:
existing = rule_exists(rule.text)
if existing:
print('Skipping existing rule:', existing, 'with text:\n', rule.text[:50].strip(), '...')
continue
if rule.data.get('is_negative'):
base_name = 'not-a-license'
else:
license_expression = rule.data.get('license_expression')
if not license_expression:
raise Exception('Missing license_expression for text:', rule)
licensing.parse(license_expression, validate=True, simple=True)
base_name = license_expression
base_loc = find_rule_base_loc(base_name)
data_file = base_loc + '.yml'
with io.open(data_file, 'w', encoding='utf-8') as o:
o.write(rule.raw_data)
text_file = base_loc + '.RULE'
with io.open(text_file, 'w', encoding='utf-8') as o:
o.write(rule.text)
rulerec = models.Rule(data_file=data_file, text_file=text_file)
rule_tokens = tuple(rulerec.tokens())
if rule_tokens in rules_tokens:
# cleanup
os.remove(text_file)
os.remove(data_file)
print('Skipping already added rule with text for:', base_name)
else:
rules_tokens.add(rule_tokens)
rulerec.dump()
models.update_ignorables(rulerec, verbose=True)
print('Rule added:', rulerec.identifier)
if __name__ == '__main__':
cli()
|