import oneflow as flow
from otrans.data import EOS, PAD
class Recognizer:
def __init__(self, model, idx2unit=None, lm=None, lm_weight=None, ngpu=1):
self.ngpu = ngpu
self.model = model
self.model.eval()
if self.ngpu > 0:
self.model.cuda()
self.lm = lm
if self.lm is not None:
self.lm.eval()
if self.ngpu > 0:
self.lm.cuda()
self.idx2unit = idx2unit
self.lm_weight = lm_weight
def recognize(self, inputs, inputs_length):
raise NotImplementedError
def lm_decode(self, preds, hidden=None):
"""
Args:
preds: [batch_size, lens]
hidden: [time_step, batch_size, hidden_size] or ([time_step, batch_size, hidden_size], [time_step, batch_size, hidden_size])
"""
if self.lm.model_type == "transformer_lm":
log_probs = self.lm.predict(preds, last_frame=True)
else:
preds = preds[:, -1].unsqueeze(-1)
log_probs, hidden = self.lm.predict(preds, hidden)
return log_probs, hidden
def lm_decode_with_index(self, preds, index, hidden=None):
"""
Args:
preds: [batch_size, lens]
hidden: [time_step, batch_size, hidden_size] or ([time_step, batch_size, hidden_size], [time_step, batch_size, hidden_size])
"""
if self.lm.model_type == "transformer_lm":
log_probs = self.lm.predict(preds, last_frame=False)
log_probs = select_tensor_based_index(log_probs, index)
else:
preds = select_tensor_based_index(preds, index).unsqueeze(-1)
log_probs, hidden = self.lm.predict(preds, hidden)
return log_probs, hidden
def lm_rescoring(self, preds, pred_lens):
# preds [beam_size, lens]
# preds_len [beam_size]
if self.lm.model_type == "transformer_lm":
log_probs = self.lm.predict(preds, last_frame=False)
else:
log_probs = []
hidden = None
for t in range(preds.size(1)):
log_prob, hidden = self.lm.predict(preds[:, t].unsqueeze(-1), hidden)
log_probs.append(log_prob)
log_probs = flow.cat(log_probs, dim=1)
rescores = []
max_length = log_probs.size(1)
vocab_size = log_probs.size(-1)
for b in range(preds.size(0)):
base_index = flow.arange(max_length, device=preds.device)
bias_index = preds[b].reshape(-1)
index = base_index * vocab_size + bias_index
score = flow.index_select(log_probs[b].reshape(-1), dim=-1, index=index)
label_len = min(int(pred_lens[b]), score.size(0))
score[label_len - 1 :] = 0
rescores.append(flow.sum(score) / label_len)
rescores = flow.tensor(rescores, dtype=flow.float32)
_, indices = flow.sort(rescores, dim=-1, descending=True)
sorted_preds = preds[indices]
sorted_length = pred_lens[indices]
return sorted_preds, sorted_length
def translate(self, seqs):
results = []
for seq in seqs:
pred = []
for i in seq:
if int(i) == EOS:
break
if int(i) == PAD:
continue
pred.append(self.idx2unit[int(i)])
results.append(" ".join(pred))
return results
def nbest_translate(self, nbest_preds):
assert nbest_preds.dim() == 3
batch_size, nbest, lens = nbest_preds.size()
results = []
for b in range(batch_size):
nbest_list = []
for n in range(nbest):
pred = []
for i in range(lens):
token = int(nbest_preds[b, n, i].numpy())
if token == EOS:
break
pred.append(self.idx2unit[token])
nbest_list.append(" ".join(pred))
results.append(nbest_list)
return results
def select_tensor_based_index(tensor, index):
# tensor: [b, t] or [b, t, v]
# index: [b]
# return: [b] or [b, v]
assert tensor.dim() >= 2
assert index.dim() == 1
batch_size = tensor.size(0)
tensor_len = tensor.size(1)
base_index = flow.arange(batch_size, device=tensor.device) * tensor_len
indices = base_index + index
if tensor.dim() == 2:
select_tensor = flow.index_select(
tensor.reshape(batch_size * tensor_len), 0, indices.long()
)
else:
assert tensor.dim() == 3
select_tensor = flow.index_select(
tensor.reshape(batch_size * tensor_len, tensor.size(-1)), 0, indices.long()
)
return select_tensor
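# A minimal usage sketch of select_tensor_based_index (shapes and values are
# illustrative, not taken from the original code): for each batch element b,
# pick row index[b] from a [batch, time, vocab] tensor of per-step log-probabilities.
if __name__ == "__main__":
    log_probs = flow.randn(2, 5, 7)      # [batch=2, time=5, vocab=7]
    last_frame = flow.tensor([4, 2])     # per-utterance frame to select
    picked = select_tensor_based_index(log_probs, last_frame)
    print(picked.shape)                  # expected: (2, 7)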
|
import pandas as pd
if __name__ == '__main__':
website = 'https://www.peakbagger.com/'
links = pd.read_csv('raw_data/links.csv')
links['full_link'] = website + links['link']
full_links = links[['Mountain', 'full_link']]
full_links = full_links.rename(columns={'full_link': 'link'})
full_links.to_csv('cleaned_data/full_links.csv', index=False)
|
""" Simulation configuration for non-rl centralized experiments.
A guiding tool for preparing these configurations can be found in:
https://github.com/flow-project/flow/tree/master/tutorials
or in the directory:
flow/tutorials.
A heavy emphasis on:
1. tutorial 1 (https://github.com/flow-project/flow/blob/master/tutorials/tutorial01_sumo.ipynb)
2. tutorial 10 (https://github.com/flow-project/flow/blob/master/tutorials/tutorial10_traffic_lights.ipynb)
To run this file (while in the flow/examples directory):
python simulate.py --exp_config grid_simulation_grid_non_rl
"""
from flow.core.params import SumoParams, EnvParams
from flow.core.params import TrafficLightParams
from flow.core.params import SumoCarFollowingParams, VehicleParams
from flow.envs.centralized_env import CentralizedGridEnv
from flow.networks import TrafficLightGridNetwork
from flow.controllers import SimCarFollowingController, GridRouter
from flow.core.traffic_light_utils import get_non_flow_params, get_flow_params
# Set up the number of vehicles to be inserted in the NS and EW directions
arterial = 1400
side_street = 420
# use inflows specified above
USE_INFLOWS = True
# set up road network parameters
v_enter = 5
inner_length = 240
long_length = 240
short_length = 240
n_rows = 2
n_columns = 2
# number of vehicles per approach (only used when USE_INFLOWS = False)
num_cars_left = 0 # up
num_cars_right = 0 # bottom
num_cars_top = 0 # right
num_cars_bot = 0 # left
tot_cars = (num_cars_left + num_cars_right) * n_columns \
+ (num_cars_top + num_cars_bot) * n_rows
grid_array = {
"short_length": short_length,
"inner_length": inner_length,
"long_length": long_length,
"row_num": n_rows,
"col_num": n_columns,
"cars_left": num_cars_left,
"cars_right": num_cars_right,
"cars_top": num_cars_top,
"cars_bot": num_cars_bot
}
# specify vehicle parameters to be added
vehicles = VehicleParams()
vehicles.add(
veh_id="human",
acceleration_controller=(SimCarFollowingController, {}),
car_following_params=SumoCarFollowingParams(
min_gap=2.5,
decel=7.5, # avoid collisions at emergency stops
speed_mode="right_of_way",
),
routing_controller=(GridRouter, {}),
num_vehicles=0)
# Set up traffic light parameters
tl_logic = TrafficLightParams(baseline=False)
phases = [{
"duration": "31",
"minDur": "8",
"maxDur": "45",
"state": "GrGr"
}, {
"duration": "4",
"minDur": "3",
"maxDur": "6",
"state": "yryr"
}, {
"duration": "31",
"minDur": "8",
"maxDur": "45",
"state": "rGrG"
}, {
"duration": "4",
"minDur": "3",
"maxDur": "6",
"state": "ryry"
}]
# add the specified phases and traffic lights: the number of traffic lights should match n_rows * n_columns
# NOTE: if tls_type="actuated", SUMO activates the actuated phase timing plan
tl_logic.add("center0", phases=phases, programID=1, tls_type="actuated")
tl_logic.add("center1", phases=phases, programID=1, tls_type="actuated")
tl_logic.add("center2", phases=phases, programID=1, tls_type="actuated")
tl_logic.add("center3", phases=phases, programID=1, tls_type="actuated")
# specify network parameters
additional_net_params = {
"grid_array": grid_array,
"speed_limit": 11,
"horizontal_lanes": 1,
"vertical_lanes": 1
}
# add inflows specified above
if USE_INFLOWS:
initial_config, net_params = get_flow_params(
col_num=n_columns,
row_num=n_rows,
horizon=3600,
num_veh_per_row=arterial,
num_veh_per_column=side_street,
additional_net_params=additional_net_params)
else:
initial_config, net_params = get_non_flow_params(
enter_speed=v_enter,
add_net_params=additional_net_params)
# set up flow_params
flow_params = dict(
# name of the experiment
exp_tag='test',
# name of the flow environment the experiment is running on
env_name=CentralizedGridEnv,
# name of the network class the experiment is running on
network=TrafficLightGridNetwork,
# simulator that is used by the experiment
simulator='traci',
# sumo-related parameters (see flow.core.params.SumoParams)
sim=SumoParams(restart_instance=True,
sim_step=1,
render=False,
emission_path='~/flow/data',
),
# environment related parameters (see flow.core.params.EnvParams)
env=EnvParams(
horizon=5400,
additional_params={
"target_velocity": 11,
"switch_time": 4,
"yellow_phase_duration": 4,
"num_observed": 2,
"discrete": True,
"tl_type": "actuated",
"num_local_edges": 4,
"num_local_lights": 4,
"benchmark": "PressureLightGridEnv", # This should be the string name of the benchmark class
"benchmark_params": "BenchmarkParams"
}
),
# network-related parameters (see flow.core.params.NetParams and the
# network's documentation or ADDITIONAL_NET_PARAMS component)
net=net_params,
# vehicles to be placed in the network at the start of a rollout (see
# flow.core.params.VehicleParams)
veh=vehicles,
# parameters specifying the positioning of vehicles upon initialization/
# reset (see flow.core.params.InitialConfig)
initial=initial_config,
# traffic lights to be introduced to specific nodes (see
# flow.core.params.TrafficLightParams)
tls=tl_logic,
)
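# A hedged sketch of consuming flow_params directly instead of going through
# simulate.py; the Experiment constructor signature differs across Flow versions,
# so treat the call below as an assumption rather than the documented entry point.
if __name__ == "__main__":
    from flow.core.experiment import Experiment
    exp = Experiment(flow_params)  # newer Flow versions accept the flow_params dict
    exp.run(num_runs=1)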
|
from random import randint

class CurrentAcc:
    """An existing (current) account with a running balance."""
    def __init__(self, name, accnumber, balance=0):
        self.name = name
        self.accnumber = accnumber
        self.balance = balance
    def withdraw(self, amount):
        if amount > self.balance:
            raise ValueError("Insufficient funds")
        self.balance -= amount
        return self.balance
    def deposit(self, amount):
        self.balance += amount
        return self.balance

class CreateNewAcc:
    """Collects the details needed to open a new account."""
    def __init__(self):
        self.name = None
        self.deposit = None
        self.number = None
    def set_name(self, name):
        self.name = name
    def initial_deposit(self, deposit):
        self.deposit = deposit
    def account_number(self):
        self.number = randint(10000, 99999)
        return self.number

newacc = CreateNewAcc()
print("Do you have an account with bank? Y/N:")
uc = input()
if uc == "Y":
    name = input("Enter the account holder name: ")
    accnumber = int(input("Enter the account number: "))
    curracc = CurrentAcc(name, accnumber)
else:
    newacc.set_name(input("Enter your name: "))
    newacc.initial_deposit(float(input("Enter the initial deposit: ")))
    saving_accnumber = newacc.account_number()
    print("Your new account number is:", saving_accnumber)
|
"""Provided Cluster class for simulation"""
import json
import cerberus
import numpy as np
from bson import json_util
from aries.core import utils
from aries.simulation import simulation_utils
# Schema for cluster validation
cluster_schema = {
'cluster_agents': {'type': 'list', 'schema': {'type': 'string'}},
'controller': {'type': 'string', 'required': True},
'priority': {'type': 'integer', 'required': False},
'delay': {'type': 'integer', 'required': False},
}
cluster_validator = cerberus.Validator(cluster_schema)
cluster_validator.ignore_none_values = True
class Cluster(object):
"""Representation of cluster in simulation"""
name = None
# List of agents name belonging to the cluster
cluster_agents = None
priority = None
controller = None
delay = None
def __init__(self, params_dict):
"""Initialization from dictionary"""
self.__dict__ = params_dict
@classmethod
def from_properties(cls, name, cluster_agents, controller, priority, delay):
"""Initialization from parameters"""
return cls({
"name": name,
"cluster_agents": cluster_agents,
"controller": controller,
"priority": priority,
"delay": delay
})
def dump(self):
"""Dump object to json string"""
return json_util.dumps(self, cls=ClusterEncoder)
@staticmethod
def validate(data):
"""Validate object according to agent_schema"""
return utils.validate(data, 'Cluster', cluster_validator)
def run(self, agents, lines, nodes, paths):
print('Cluster')
pass
class ClusterEncoder(json.JSONEncoder):
"""Cluster Encoder for JSON serialization"""
def default(self, o):
cluster_dict = dict(o.__dict__)
del cluster_dict['name']
return cluster_dict
class LoadSharingCluster(Cluster):
"""LoadSharingCluster ..."""
def run(self, agents, lines, nodes, paths):
total_active_power = 0
total_reactive_power = 0
# states_sum = 0
# params = {}
agents_names = self.cluster_agents
agents_number = len(agents_names)
passive_agents_number = int(agents_number / 2)
# passive_agents_list = list(np.random.choice(agents_names, passive_agents_number, replace=False))
passive_agents_list = agents_names[:passive_agents_number]
for agent_name in self.cluster_agents:
agent = agents[agent_name]
battery = agent.battery
if (agent_name in passive_agents_list) and (battery.active == 1):
tmp_ap = agent.power_rating
tmp_rp = simulation_utils.reactive_power(power_rating=agent.power_rating,
power_factor=agent.power_factor)
desired_power = np.abs(np.complex(tmp_ap, 1e-2 * tmp_rp))
required_amps = (desired_power / battery.voltage / battery.inverter_efficiency)
if battery.status - required_amps >= 0:
total_active_power += tmp_ap - tmp_ap * battery.contribution_active
total_reactive_power += tmp_rp - tmp_rp * battery.contribution_reactive
else:
total_active_power += tmp_ap
total_reactive_power += tmp_rp
else:
total_active_power += agent.power_rating
total_reactive_power += simulation_utils.reactive_power(power_rating=agent.power_rating,
power_factor=agent.power_factor)
# tmp_state = agent.battery.status / agent.battery.capacity
# params[agent_name] = tmp_state
# states_sum += tmp_state
# print('total_a_p: ', total_active__power)
for agent_name in self.cluster_agents:
# alpha = params[agent_name] / states_sum
# active_power_share = total_active__power * alpha
# reactive_power_share = total_reactive_power * alpha
# agents[agent_name].request_inject_power = active_power_share
# agents[agent_name].request_power_factor = active_power_share / np.abs(
# np.complex(active_power_share, reactive_power_share))
if agent_name not in passive_agents_list:
active_power = total_active_power / (agents_number - passive_agents_number)
# reactive_power = simulation_utils.reactive_power(power_rating=agent.power_rating,
# power_factor=agent.power_factor)
reactive_power = total_reactive_power / (agents_number - passive_agents_number)
# print(active_power, reactive_power)
agents[agent_name].request_inject_power = active_power
agents[agent_name].request_power_factor = active_power / np.abs(
np.complex(active_power, reactive_power))
agents[agent_name].battery.contribution_active = 0
agents[agent_name].battery.contribution_reactive = 0
class StayingAliveCluster(Cluster):
"""StayingAliveCluster ..."""
def run(self, agents, lines, nodes, paths):
print('StayingAliveCluster')
pass
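# A minimal usage sketch (agent names and numbers are illustrative): build a
# load-sharing cluster from explicit properties and serialize it with the
# ClusterEncoder defined above.
if __name__ == "__main__":
    cluster = LoadSharingCluster.from_properties(
        name="feeder_1",
        cluster_agents=["agent_a", "agent_b", "agent_c", "agent_d"],
        controller="load_sharing",
        priority=1,
        delay=0,
    )
    print(cluster.dump())  # JSON form; ClusterEncoder drops the 'name' field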
|
# import modules
from bot.bot import Bot
from bot import constants
# import discord.py api wrapper
import discord
from discord.ext import commands, tasks
# import python utility libraries
import os
import sys
from datetime import datetime
# Import the configuration
try:
from config import bot_token, bot_prefix, bot_description, shutdown_admins, bot_user_id
except Exception as e:
print(e)
print("Turt bot is not configured. In order to run the bot, Turt must be configured in the config.py.template file.")
exit(-1)
# Determine if the bot has been setup
if not os.path.isfile(constants.db_file):
print("Turt bot has not been setup. Setup turt bot by running `python3 setup.py`")
sys.exit(-2)
# Turt instance
constants.bot = Bot(command_prefix=bot_prefix,
description=bot_description,
status=discord.Status.idle,
activity=discord.Game(name='Starting...'))
# Setup the Cogs
constants.bot.load_extension("bot.cogs.permissions")
constants.bot.load_extension("bot.cogs.elections")
constants.bot.load_extension("bot.cogs.channels")
constants.bot.load_extension("bot.cogs.bothosting")
constants.bot.load_extension("bot.cogs.database")
constants.bot.load_extension("bot.cogs.discipline")
@constants.bot.event
async def on_ready():
print("Discord.py " + discord.__version__)
print(f"{constants.bot.user.name}: {constants.bot.user.id}")
print("Bot started at " + datetime.now().strftime("%H:%M:%S"))
await constants.bot.change_presence(status=discord.Status.online, activity=discord.Game(name='Moderating'))
print("Putting all users in database...")
constants.bot.sql.setup_database_with_all_users(constants.bot)
print("Deleting unwanted reactions from elections...")
await (constants.bot.get_cog("Elections")).delete_unwanted_election_reactions()
print("Ready!")
@constants.bot.event
async def on_command_error(ctx, error):
# CheckFailure and NoPrivateMessage are expected; don't report them
if isinstance(error, (commands.errors.CheckFailure, commands.errors.NoPrivateMessage)): return
print(error)
await ctx.send_help(ctx.command)
#Run the bot
constants.bot.run(bot_token)
|
import numpy as np
import pandas as pd
from pydantic import BaseModel, Field, validator
from scipy import stats
from typing import Union, Callable, List
class ProbVar(BaseModel):
name: str
dist: str = Field('norm')
kw : dict = Field({'loc':0,'scale':1})
factor: float = Field(1.0)
constant: float = Field(None)
seed : int = Field(None)
class Config:
validate_assignment = True
extra = 'forbid'
@validator('kw')
def check_dist_build(cls,v,values):
if isinstance(getattr(stats,values['dist'])(**v),stats._distn_infrastructure.rv_frozen):
return v
else:
raise ValueError(f"{v} are not allowed")
def get_instance(self):
return getattr(stats,self.dist)(**self.kw)
def get_sample(self, size:Union[int,tuple]=None, ppf:float=None, seed=None):
if seed is None:
seed = self.seed
if self.constant is not None:
return self.constant
elif size:
return getattr(stats,self.dist)(**self.kw).rvs(size=size,random_state=seed)*self.factor
elif ppf is not None:
return getattr(stats,self.dist)(**self.kw).ppf(ppf)*self.factor
else:
return getattr(stats,self.dist)(**self.kw).mean()*self.factor
class MonteCarlo(BaseModel):
name: str
func: Callable[..., np.ndarray]
args: List[ProbVar]
class Config:
arbitrary_types_allowed = True
json_encoders = {np.ndarray: lambda x: x.tolist()}
def get_sample(self, size:Union[int,tuple]=None, ppf:float=None, seed=None):
list_vars = []
for arg in self.args:
var_values = arg.get_sample(size=size, ppf=ppf, seed=seed)
list_vars.append(var_values)
return self.func(*list_vars)
def get_sample_df(self, size:Union[int,tuple]=None, ppf:float=None, seed=None):
vars_df = pd.DataFrame()
list_vars = []
for arg in self.args:
var_values = arg.get_sample(size=size, ppf=ppf, seed=seed)
vars_df[arg.name] = var_values
list_vars.append(var_values)
vars_df[self.name] = self.func(*list_vars)
if ppf is not None:
vars_df.index = ppf
return vars_df
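# A minimal usage sketch (distributions and numbers are illustrative): model
# revenue = price * volume with two normal variables, then draw random samples
# and a P10/P50/P90 table through the classes defined above.
if __name__ == "__main__":
    price = ProbVar(name="price", dist="norm", kw={"loc": 10, "scale": 1})
    volume = ProbVar(name="volume", dist="norm", kw={"loc": 1000, "scale": 100})
    revenue = MonteCarlo(name="revenue", func=lambda p, v: p * v, args=[price, volume])
    print(revenue.get_sample(size=5, seed=21))         # five random draws
    print(revenue.get_sample_df(ppf=[0.1, 0.5, 0.9]))  # percentile table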
|
# Copyright 2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import parameterized
import unittest
from kfp.dsl import _pipeline_param
from kfp.dsl import importer_node
from kfp.pipeline_spec import pipeline_spec_pb2 as pb
from google.protobuf import json_format
class ImporterNodeTest(parameterized.TestCase):
@parameterized.parameters(
{
# artifact_uri is a constant value
'input_uri':
'gs://artifact',
'artifact_type_schema':
pb.ArtifactTypeSchema(schema_title='system.Dataset'),
'expected_result': {
'artifactUri': {
'constantValue': {
'stringValue': 'gs://artifact'
}
},
'typeSchema': {
'schemaTitle': 'system.Dataset'
}
}
},
{
# artifact_uri is from PipelineParam
'input_uri':
_pipeline_param.PipelineParam(name='uri_to_import'),
'artifact_type_schema':
pb.ArtifactTypeSchema(schema_title='system.Model'),
'expected_result': {
'artifactUri': {
'runtimeParameter': 'uri'
},
'typeSchema': {
'schemaTitle': 'system.Model'
}
},
})
def test_build_importer_spec(self, input_uri, artifact_type_schema,
expected_result):
expected_importer_spec = pb.PipelineDeploymentConfig.ImporterSpec()
json_format.ParseDict(expected_result, expected_importer_spec)
importer_spec = importer_node._build_importer_spec(
artifact_uri=input_uri, artifact_type_schema=artifact_type_schema)
self.maxDiff = None
self.assertEqual(expected_importer_spec, importer_spec)
@parameterized.parameters(
{
# artifact_uri is a constant value
'importer_name': 'importer-1',
'input_uri': 'gs://artifact',
'expected_result': {
'taskInfo': {
'name': 'importer-1'
},
'inputs': {
'parameters': {
'uri': {
'runtimeValue': {
'constantValue': {
'stringValue': 'gs://artifact'
}
}
}
}
},
'componentRef': {
'name': 'comp-importer-1'
},
}
},
{
# artifact_uri is from PipelineParam
'importer_name': 'importer-2',
'input_uri': _pipeline_param.PipelineParam(name='uri_to_import'),
'expected_result': {
'taskInfo': {
'name': 'importer-2'
},
'inputs': {
'parameters': {
'uri': {
'componentInputParameter': 'uri_to_import'
}
}
},
'componentRef': {
'name': 'comp-importer-2'
},
},
})
def test_build_importer_task_spec(self, importer_name, input_uri,
expected_result):
expected_task_spec = pb.PipelineTaskSpec()
json_format.ParseDict(expected_result, expected_task_spec)
task_spec = importer_node._build_importer_task_spec(
importer_base_name=importer_name, artifact_uri=input_uri)
self.maxDiff = None
self.assertEqual(expected_task_spec, task_spec)
def test_build_importer_component_spec(self):
expected_importer_component = {
'inputDefinitions': {
'parameters': {
'uri': {
'type': 'STRING'
}
}
},
'outputDefinitions': {
'artifacts': {
'artifact': {
'artifactType': {
'schemaTitle': 'system.Artifact'
}
}
}
},
'executorLabel': 'exec-importer-1'
}
expected_importer_comp_spec = pb.ComponentSpec()
json_format.ParseDict(expected_importer_component,
expected_importer_comp_spec)
importer_comp_spec = importer_node._build_importer_component_spec(
importer_base_name='importer-1',
artifact_type_schema=pb.ArtifactTypeSchema(
schema_title='system.Artifact'))
self.maxDiff = None
self.assertEqual(expected_importer_comp_spec, importer_comp_spec)
def test_import_with_invalid_artifact_uri_value_should_fail(self):
from kfp.dsl.io_types import Dataset
with self.assertRaisesRegex(
ValueError,
"Importer got unexpected artifact_uri: 123 of type: <class 'int'>."):
importer_node.importer(artifact_uri=123, artifact_class=Dataset)
if __name__ == '__main__':
unittest.main()
|
"""
Module to set up run time parameters for Clawpack.
The values set in the function setrun are then written out to data files
that will be read in by the Fortran code.
"""
import os
from pyclaw import data
from math import pi
def setrun(claw_pkg='classic'):
"""
Define the parameters used for running Clawpack.
INPUT:
claw_pkg expected to be "classic" for this setrun.
OUTPUT:
rundata - object of class ClawRunData
"""
assert claw_pkg.lower() == 'classic', "Expected claw_pkg = 'classic'"
ndim = 2
rundata = data.ClawRunData(claw_pkg, ndim)
# Problem-specific parameters to be written to setprob.data:
probdata = rundata.new_UserData(name='probdata',fname='setprob.data')
probdata.add_param('implicit_integration_scheme', 'Crank-Nicolson')
probdata.add_param('newton_max_iter', 10)
probdata.add_param('newton_tolerance', 1e-8)
probdata.add_param('newton_verbosity', 0)
probdata.add_param('linear_solver_tolerance', 1e-8)
probdata.add_param('linear_solver_verbosity', 0)
probdata.add_param('num_threads', 1)
probdata.add_param('bc_options', ['0', '1', '1', '1'])
clawdata = rundata.clawdata
clawdata.ndim = 2
clawdata.meqn = 1
clawdata.xlower = -1.
clawdata.xupper = 1
clawdata.ylower = -1.
clawdata.yupper = 1
clawdata.mx = 50
clawdata.my = 50
clawdata.tfinal = 1.0
clawdata.dt_initial = 0.1
clawdata.nout = 10
clawdata.verbosity = 1
clawdata.dt_variable = 0
clawdata.src_split = 1
clawdata.mbc = 2
return rundata
if __name__ == '__main__':
# Set up run-time parameters and write all data files.
import sys
if len(sys.argv) == 2:
rundata = setrun(sys.argv[1])
else:
rundata = setrun()
rundata.write()
|
class Quest:
def __init__(self, dbRow):
self.id = dbRow[0]
self.name = dbRow[1]
self.description = dbRow[2]
self.objective = dbRow[3]
self.questType = dbRow[4]
self.category = dbRow[5]
self.location = dbRow[6]
self.stars = dbRow[7]
self.zenny = dbRow[8]
def __repr__(self):
return f"{self.__dict__!r}"
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Matthias Urlichs <[email protected]>
# Based on work Copyright (c) 2018 Steven P. Goldsmith
"""
libgpiod CFFI interface
-------------
This is a stripped-down version which doesn't do "bulk" access (no point IMHO)
and doesn't implement an event loop (that's Trio's job).
"""
from cffi import FFI
__all__ = [
"DIRECTION_INPUT",
"DIRECTION_OUTPUT",
"ACTIVE_STATE_HIGH",
"ACTIVE_STATE_LOW",
"REQUEST_DIRECTION_AS_IS",
"REQUEST_DIRECTION_INPUT",
"REQUEST_DIRECTION_OUTPUT",
"REQUEST_EVENT_FALLING_EDGE",
"REQUEST_EVENT_RISING_EDGE",
"REQUEST_EVENT_BOTH_EDGES",
"REQUEST_FLAG_OPEN_DRAIN",
"REQUEST_FLAG_OPEN_SOURCE",
"REQUEST_FLAG_ACTIVE_LOW",
"EVENT_RISING_EDGE",
"EVENT_FALLING_EDGE",
"ffi",
"lib",
]
ffi = FFI()
ffi.cdef(
"""
enum {
GPIOD_CTXLESS_EVENT_CB_TIMEOUT = 1,
GPIOD_CTXLESS_EVENT_CB_RISING_EDGE,
GPIOD_CTXLESS_EVENT_CB_FALLING_EDGE,
};
enum {
GPIOD_CTXLESS_EVENT_CB_RET_ERR = -1,
GPIOD_CTXLESS_EVENT_CB_RET_OK = 0,
GPIOD_CTXLESS_EVENT_CB_RET_STOP = 1,
};
enum {
GPIOD_CTXLESS_EVENT_POLL_RET_STOP = -2,
GPIOD_CTXLESS_EVENT_POLL_RET_ERR = -1,
GPIOD_CTXLESS_EVENT_POLL_RET_TIMEOUT = 0,
};
enum {
GPIOD_LINE_DIRECTION_INPUT = 1,
GPIOD_LINE_DIRECTION_OUTPUT,
};
enum {
GPIOD_LINE_ACTIVE_STATE_HIGH = 1,
GPIOD_LINE_ACTIVE_STATE_LOW,
};
enum {
GPIOD_LINE_REQUEST_DIRECTION_AS_IS = 1,
GPIOD_LINE_REQUEST_DIRECTION_INPUT,
GPIOD_LINE_REQUEST_DIRECTION_OUTPUT,
GPIOD_LINE_REQUEST_EVENT_FALLING_EDGE,
GPIOD_LINE_REQUEST_EVENT_RISING_EDGE,
GPIOD_LINE_REQUEST_EVENT_BOTH_EDGES,
};
enum {
GPIOD_LINE_REQUEST_FLAG_OPEN_DRAIN = 1,
GPIOD_LINE_REQUEST_FLAG_OPEN_SOURCE = 2,
GPIOD_LINE_REQUEST_FLAG_ACTIVE_LOW = 4,
};
enum {
GPIOD_LINE_EVENT_RISING_EDGE = 1,
GPIOD_LINE_EVENT_FALLING_EDGE,
};
struct timespec {
long tv_sec;
long tv_nsec;
};
struct gpiod_line {
unsigned int offset;
int direction;
int active_state;
bool used;
bool open_source;
bool open_drain;
int state;
bool up_to_date;
struct gpiod_chip *chip;
int fd;
char name[32];
char consumer[32];
};
struct gpiod_chip {
struct gpiod_line **lines;
unsigned int num_lines;
int fd;
char name[32];
char label[32];
};
struct gpiod_ctxless_event_poll_fd {
int fd;
/**< File descriptor number. */
bool event;
/**< Indicates whether an event occurred on this file descriptor. */
};
struct gpiod_line_request_config {
const char *consumer;
int request_type;
int flags;
};
struct gpiod_line_event {
struct timespec ts;
int event_type;
};
struct gpiod_chip;
struct gpiod_line;
struct gpiod_chip_iter;
struct gpiod_line_iter;
struct gpiod_line_bulk;
typedef void (*gpiod_ctxless_set_value_cb)(void *);
typedef int (*gpiod_ctxless_event_handle_cb)(int, unsigned int,
const struct timespec *, void *);
typedef int (*gpiod_ctxless_event_poll_cb)(unsigned int,
struct gpiod_ctxless_event_poll_fd *,
const struct timespec *, void *);
int gpiod_ctxless_set_value(const char *device, unsigned int offset, int value,
bool active_low, const char *consumer,
gpiod_ctxless_set_value_cb cb,
void *data);
int gpiod_ctxless_set_value_multiple(const char *device,
const unsigned int *offsets,
const int *values, unsigned int num_lines,
bool active_low, const char *consumer,
gpiod_ctxless_set_value_cb cb,
void *data);
int gpiod_ctxless_find_line(const char *name, char *chipname,
size_t chipname_size,
unsigned int *offset);
int gpiod_chip_find_lines(struct gpiod_chip *chip, const char **names,
struct gpiod_line_bulk *bulk);
struct gpiod_chip *gpiod_chip_open(const char *path);
struct gpiod_chip *gpiod_chip_open_by_name(const char *name);
struct gpiod_chip *gpiod_chip_open_by_number(unsigned int num);
struct gpiod_chip *gpiod_chip_open_by_label(const char *label);
struct gpiod_chip *gpiod_chip_open_lookup(const char *descr);
void gpiod_chip_close(struct gpiod_chip *chip);
const char *gpiod_chip_name(struct gpiod_chip *chip);
const char *gpiod_chip_label(struct gpiod_chip *chip);
unsigned int gpiod_chip_num_lines(struct gpiod_chip *chip);
struct gpiod_line *
gpiod_chip_get_line(struct gpiod_chip *chip, unsigned int offset);
int gpiod_chip_get_lines(struct gpiod_chip *chip,
unsigned int *offsets, unsigned int num_offsets,
struct gpiod_line_bulk *bulk);
int gpiod_chip_get_all_lines(struct gpiod_chip *chip,
struct gpiod_line_bulk *bulk);
struct gpiod_line *
gpiod_chip_find_line(struct gpiod_chip *chip, const char *name);
unsigned int gpiod_line_offset(struct gpiod_line *line);
const char *gpiod_line_name(struct gpiod_line *line);
const char *gpiod_line_consumer(struct gpiod_line *line);
int gpiod_line_direction(struct gpiod_line *line);
int gpiod_line_active_state(struct gpiod_line *line);
bool gpiod_line_is_used(struct gpiod_line *line);
bool gpiod_line_is_open_drain(struct gpiod_line *line);
bool gpiod_line_is_open_source(struct gpiod_line *line);
int gpiod_line_update(struct gpiod_line *line);
bool gpiod_line_needs_update(struct gpiod_line *line);
int gpiod_line_request(struct gpiod_line *line,
const struct gpiod_line_request_config *config,
int default_val);
int gpiod_line_request_input(struct gpiod_line *line,
const char *consumer);
int gpiod_line_request_output(struct gpiod_line *line,
const char *consumer, int default_val);
int gpiod_line_request_rising_edge_events(struct gpiod_line *line,
const char *consumer);
int gpiod_line_request_falling_edge_events(struct gpiod_line *line,
const char *consumer);
int gpiod_line_request_both_edges_events(struct gpiod_line *line,
const char *consumer);
int gpiod_line_request_input_flags(struct gpiod_line *line,
const char *consumer, int flags);
int gpiod_line_request_output_flags(struct gpiod_line *line,
const char *consumer, int flags,
int default_val);
int gpiod_line_request_rising_edge_events_flags(struct gpiod_line *line,
const char *consumer,
int flags);
int gpiod_line_request_falling_edge_events_flags(struct gpiod_line *line,
const char *consumer,
int flags);
int gpiod_line_request_both_edges_events_flags(struct gpiod_line *line,
const char *consumer,
int flags);
void gpiod_line_release(struct gpiod_line *line);
bool gpiod_line_is_requested(struct gpiod_line *line);
bool gpiod_line_is_free(struct gpiod_line *line);
int gpiod_line_get_value(struct gpiod_line *line);
int gpiod_line_set_value(struct gpiod_line *line, int value);
int gpiod_line_set_value_bulk(struct gpiod_line_bulk *bulk,
const int *values);
int gpiod_line_event_wait(struct gpiod_line *line,
const struct timespec *timeout);
int gpiod_line_event_read(struct gpiod_line *line,
struct gpiod_line_event *event);
int gpiod_line_event_get_fd(struct gpiod_line *line);
int gpiod_line_event_read_fd(int fd, struct gpiod_line_event *event);
struct gpiod_line *
gpiod_line_get(const char *device, unsigned int offset);
struct gpiod_line *gpiod_line_find(const char *name);
void gpiod_line_close_chip(struct gpiod_line *line);
struct gpiod_chip *gpiod_line_get_chip(struct gpiod_line *line);
struct gpiod_chip_iter *gpiod_chip_iter_new(void);
void gpiod_chip_iter_free(struct gpiod_chip_iter *iter);
void gpiod_chip_iter_free_noclose(struct gpiod_chip_iter *iter);
struct gpiod_chip *
gpiod_chip_iter_next(struct gpiod_chip_iter *iter);
struct gpiod_chip *
gpiod_chip_iter_next_noclose(struct gpiod_chip_iter *iter);
struct gpiod_line_iter *
gpiod_line_iter_new(struct gpiod_chip *chip);
void gpiod_line_iter_free(struct gpiod_line_iter *iter);
struct gpiod_line *
gpiod_line_iter_next(struct gpiod_line_iter *iter);
const char *gpiod_version_string(void);
"""
)
try:
lib = ffi.dlopen("libgpiod.so.2")
except OSError:
lib = ffi.dlopen("c") # workaround if we're only building docs
DIRECTION_INPUT = lib.GPIOD_LINE_REQUEST_DIRECTION_INPUT
DIRECTION_OUTPUT = lib.GPIOD_LINE_REQUEST_DIRECTION_OUTPUT
ACTIVE_STATE_HIGH = lib.GPIOD_LINE_ACTIVE_STATE_HIGH
ACTIVE_STATE_LOW = lib.GPIOD_LINE_ACTIVE_STATE_LOW
REQUEST_DIRECTION_AS_IS = lib.GPIOD_LINE_REQUEST_DIRECTION_AS_IS
REQUEST_DIRECTION_INPUT = lib.GPIOD_LINE_REQUEST_DIRECTION_INPUT
REQUEST_DIRECTION_OUTPUT = lib.GPIOD_LINE_REQUEST_DIRECTION_OUTPUT
REQUEST_EVENT_FALLING_EDGE = lib.GPIOD_LINE_REQUEST_EVENT_FALLING_EDGE
REQUEST_EVENT_RISING_EDGE = lib.GPIOD_LINE_REQUEST_EVENT_RISING_EDGE
REQUEST_EVENT_BOTH_EDGES = lib.GPIOD_LINE_REQUEST_EVENT_BOTH_EDGES
REQUEST_FLAG_OPEN_DRAIN = lib.GPIOD_LINE_REQUEST_FLAG_OPEN_DRAIN
REQUEST_FLAG_OPEN_SOURCE = lib.GPIOD_LINE_REQUEST_FLAG_OPEN_SOURCE
REQUEST_FLAG_ACTIVE_LOW = lib.GPIOD_LINE_REQUEST_FLAG_ACTIVE_LOW
EVENT_RISING_EDGE = lib.GPIOD_LINE_EVENT_RISING_EDGE
EVENT_FALLING_EDGE = lib.GPIOD_LINE_EVENT_FALLING_EDGE
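# A minimal usage sketch of the bindings above; "gpiochip0", line offset 17 and
# the consumer string are placeholders, not values used elsewhere in this module.
def _example_set_line(chip_name=b"gpiochip0", offset=17, consumer=b"example"):
    chip = lib.gpiod_chip_open_by_name(chip_name)
    if chip == ffi.NULL:
        raise OSError("cannot open GPIO chip")
    try:
        line = lib.gpiod_chip_get_line(chip, offset)
        if line == ffi.NULL:
            raise OSError("cannot get GPIO line")
        if lib.gpiod_line_request_output(line, consumer, 0) < 0:
            raise OSError("cannot request line as output")
        try:
            lib.gpiod_line_set_value(line, 1)   # drive the line high
        finally:
            lib.gpiod_line_release(line)
    finally:
        lib.gpiod_chip_close(chip)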
|
from streamlink.plugin.api.validate._exception import ValidationError # noqa: F401
# noinspection PyPep8Naming,PyShadowingBuiltins
from streamlink.plugin.api.validate._schemas import ( # noqa: I101, F401
SchemaContainer,
AllSchema as all,
AnySchema as any,
TransformSchema as transform,
OptionalSchema as optional,
GetItemSchema as get,
AttrSchema as attr,
UnionSchema as union,
UnionGetSchema as union_get,
XmlElementSchema as xml_element,
)
from streamlink.plugin.api.validate._validate import ( # noqa: F401
Schema,
validate,
)
# noinspection PyShadowingBuiltins
from streamlink.plugin.api.validate._validators import ( # noqa: I101, F401
validator_length as length,
validator_startswith as startswith,
validator_endswith as endswith,
validator_contains as contains,
validator_url as url,
validator_getattr as getattr,
validator_hasattr as hasattr,
validator_filter as filter,
validator_map as map,
validator_xml_find as xml_find,
validator_xml_findall as xml_findall,
validator_xml_findtext as xml_findtext,
validator_xml_xpath as xml_xpath,
validator_xml_xpath_string as xml_xpath_string,
validator_parse_json as parse_json,
validator_parse_html as parse_html,
validator_parse_xml as parse_xml,
validator_parse_qsd as parse_qsd,
)
text = str
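# A small usage sketch of the re-exported helpers above (payload and keys are
# illustrative): parse a JSON string, check its shape, and pull out one field.
if __name__ == "__main__":
    _schema = Schema(
        parse_json(),
        {"title": text, "views": int},
        get("title"),
    )
    print(_schema.validate('{"title": "clip", "views": 3}'))  # -> clip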
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
estimate_gradient_norm.py
A multithreaded gradient norm sampler
Copyright (C) 2017-2018, IBM Corp.
Copyright (C) 2017, Lily Weng <[email protected]>
and Huan Zhang <[email protected]>
This program is licenced under the Apache 2.0 licence,
contained in the LICENCE file in this directory.
"""
from __future__ import division
import numpy as np
import random
import ctypes
import time
import sys
import os
import tensorflow as tf
from multiprocessing import Pool, current_process, cpu_count
from shmemarray import ShmemRawArray, NpShmemArray
from functools import partial
from randsphere import randsphere
from tensorflow.python.ops import gradients_impl
class EstimateLipschitz(object):
def __init__(self, sess, seed = 1215, nthreads = 0):
"""
sess: tensorflow session
Nsamp: number of samples to take per iteration
Niters: number of iterations, each iteration we return a max L
"""
self.sess = sess
self.seed = seed
# create a pool of workers to compute samples in advance
if nthreads == 0:
self.n_processes = max(cpu_count() // 2, 1)
else:
self.n_processes = nthreads
# set up random seed during initialization
def initializer(s):
np.random.seed(s + current_process()._identity[0])
# using only 1 OpenMP thread
os.environ['OMP_NUM_THREADS'] = "1"
self.pool = Pool(processes = self.n_processes, initializer = initializer, initargs=(self.seed,))
def load_model(self, dataset = "cifar", model_name = "vgg11", activation = "relu", model = None, batch_size = 0, compute_slope = False, order = 1, de=False, attack='fgsm', epoch=49):
"""
model: if set to None, then load dataset with model_name. Otherwise use the model directly.
dataset: mnist, cifar or imagenet. We recommend using mnist or cifar as a starting point.
model_name: possible options are 2-layer, distilled, and normal
"""
from setup_cifar import CIFAR, CIFARModel, TwoLayerCIFARModel
from setup_mnist import MNIST, MNISTModel, TwoLayerMNISTModel
from nlayer_model import NLayerModel
from setup_imagenet import ImageNet, ImageNetModel
# if set this to true, we will use the logit layer output instead of probability
# the logit layer's gradients are usually larger and more stable
output_logits = True
self.dataset = dataset
self.model_name = model_name
if model is None:
print('Loading model...')
if dataset == "mnist":
self.batch_size = 1024
model = MNISTModel(model_name, self.sess, not output_logits, de=de, attack=attack, epoch=epoch)
elif dataset == "cifar":
self.batch_size = 1024
model = CIFARModel(model_name, self.sess, not output_logits, de=de, attack=attack, epoch=epoch)
elif dataset == "imagenet":
self.batch_size = 32
model = ImageNetModel(self.sess, use_softmax = not output_logits, model_name = model_name, create_prediction = False)
else:
raise(RuntimeError("dataset unknown"))
#print("*** Loaded model successfully")
self.model = model
self.compute_slope = compute_slope
if batch_size != 0:
self.batch_size = batch_size
## placeholders: self.img, self.true_label, self.target_label
# img is the placeholder for image input
self.img = tf.placeholder(shape = [None, model.image_size, model.image_size, model.num_channels], dtype = tf.float32)
# output is the output tensor of the entire network
self.output = model.predict(self.img)
# create the graph to compute gradient
# get the desired true label and target label
self.true_label = tf.placeholder(dtype = tf.int32, shape = [])
self.target_label = tf.placeholder(dtype = tf.int32, shape = [])
true_output = self.output[:, self.true_label]
target_output = self.output[:, self.target_label]
# get the difference
self.objective = true_output - target_output
# get the gradient(deprecated arguments)
self.grad_op = tf.gradients(self.objective, self.img)[0]
# compute gradient norm: (in computation graph, so is faster)
grad_op_rs = tf.reshape(self.grad_op, (tf.shape(self.grad_op)[0], -1))
self.grad_2_norm_op = tf.norm(grad_op_rs, axis = 1)
self.grad_1_norm_op = tf.norm(grad_op_rs, ord=1, axis = 1)
self.grad_inf_norm_op = tf.norm(grad_op_rs, ord=np.inf, axis = 1)
### Lily: added Hessian-vector product calculation here for 2nd order bound:
if order == 2:
## _hessian_vector_product(ys, xs, v): return a list of tensors containing the product between the Hessian and v
## ys: a scalar value or a tensor or a list of tensors to be summed to yield a scalar
## xs: a list of tensors that we should construct the Hessian over
## v: a list of tensors with the same shape as xs that we want to multiply by the Hessian
# self.randv: shape = (Nimg,28,28,1) (the v in _hessian_vector_product)
self.randv = tf.placeholder(shape = [None, model.image_size, model.image_size, model.num_channels], dtype = tf.float32)
# hv_op_tmp: shape = (Nimg,28,28,1) for mnist, same as self.img (the xs in _hessian_vector_product)
hv_op_tmp = gradients_impl._hessian_vector_product(self.objective, [self.img], [self.randv])[0]
# hv_op_rs: reshape hv_op_tmp to hv_op_rs whose shape = (Nimg, 784) for mnist
hv_op_rs = tf.reshape(hv_op_tmp, (tf.shape(hv_op_tmp)[0],-1))
# self.hv_norm_op: norm of hessian vector product, keep shape = (Nimg,1) using keepdims
self.hv_norm_op = tf.norm(hv_op_rs, axis = 1, keepdims=True)
# hv_op_rs_normalize: normalize Hv to Hv/||Hv||, shape = (Nimg, 784)
hv_op_rs_normalize = hv_op_rs/self.hv_norm_op
# self.hv_op: reshape hv_op_rs_normalize to shape = (Nimg,28,28,1)
self.hv_op = tf.reshape(hv_op_rs_normalize, tf.shape(hv_op_tmp))
## reshape randv and compute its norm
# shape: (Nimg, 784)
randv_rs = tf.reshape(self.randv, (tf.shape(self.randv)[0],-1))
# shape: (Nimg,)
self.randv_norm_op = tf.norm(randv_rs, axis = 1)
## compute v'Hv: use un-normalized Hv (hv_op_tmp, hv_op_rs)
# element-wise multiplication and then sum over axis = 1 (now shape: (Nimg,))
self.vhv_op = tf.reduce_sum(tf.multiply(randv_rs,hv_op_rs),axis=1)
## compute Rayleigh quotient: v'Hv/v'v (estimated largest eigenvalue), shape: (Nimg,)
# note: self.vhv_op and self.randv_norm_op has to be in the same dimension (either (Nimg,) or (Nimg,1))
self.eig_est = self.vhv_op/tf.square(self.randv_norm_op)
## Lily added the tf.while to compute the eigenvalue in computational graph later
# cond for computing largest abs/neg eigen-value
def cond(it, randv, eig_est, eig_est_prev, tfconst):
norm_diff = tf.norm(eig_est-eig_est_prev,axis=0)
return tf.logical_and(it < 500, norm_diff > 0.001)
# compute largest abs eigenvalue: tfconst = 0
# compute largest neg eigenvalue: tfconst = 10
def body(it, randv, eig_est, eig_est_prev, tfconst):
#hv_op_tmp = gradients_impl._hessian_vector_product(self.objective, [self.img], [randv])[0]-10*randv
hv_op_tmp = gradients_impl._hessian_vector_product(self.objective, [self.img], [randv])[0]-tf.multiply(tfconst,randv)
hv_op_rs = tf.reshape(hv_op_tmp, (tf.shape(hv_op_tmp)[0],-1))
hv_norm_op = tf.norm(hv_op_rs, axis = 1, keepdims=True)
hv_op_rs_normalize = hv_op_rs/hv_norm_op
hv_op = tf.reshape(hv_op_rs_normalize, tf.shape(hv_op_tmp))
randv_rs = tf.reshape(randv, (tf.shape(randv)[0],-1))
randv_norm_op = tf.norm(randv_rs, axis = 1)
vhv_op = tf.reduce_sum(tf.multiply(randv_rs,hv_op_rs),axis=1)
eig_est_prev = eig_est
eig_est = vhv_op/tf.square(randv_norm_op)
return (it+1, hv_op, eig_est, eig_est_prev, tfconst)
it = tf.constant(0)
# compute largest abs eigenvalue
result = tf.while_loop(cond, body, [it, self.randv, self.vhv_op, self.eig_est, tf.constant(0.0)])
# compute largest neg eigenvalue
self.shiftconst = tf.placeholder(shape = (), dtype = tf.float32)
result_1 = tf.while_loop(cond, body, [it, self.randv, self.vhv_op, self.eig_est, self.shiftconst])
# computing largest abs eig value and save result
self.it = result[0]
self.while_hv_op = result[1]
self.while_eig = result[2]
# computing largest neg eig value and save result
self.it_1 = result_1[0]
#self.while_eig_1 = tf.add(result_1[2], tfconst)
self.while_eig_1 = tf.add(result_1[2], result_1[4])
show_tensor_op = False
if show_tensor_op:
print("====================")
print("Define hessian_vector_product operator: ")
print("hv_op_tmp = {}".format(hv_op_tmp))
print("hv_op_rs = {}".format(hv_op_rs))
print("self.hv_norm_op = {}".format(self.hv_norm_op))
print("hv_op_rs_normalize = {}".format(hv_op_rs_normalize))
print("self.hv_op = {}".format(self.hv_op))
print("self.grad_op = {}".format(self.grad_op))
print("randv_rs = {}".format(randv_rs))
print("self.randv_norm_op = {}".format(self.randv_norm_op))
print("self.vhv_op = {}".format(self.vhv_op))
print("self.eig_est = {}".format(self.eig_est))
print("====================")
return self.img, self.output
def _estimate_Lipschitz_multiplerun(self, num, niters, input_image, target_label, true_label, sample_norm = "l2", transform=None, order = 1):
"""
num: number of samples per iteration
niters: number of iterations
input_image: original image (h*w*c)
"""
batch_size = self.batch_size
shape = (batch_size, self.model.image_size, self.model.image_size, self.model.num_channels)
dimension = self.model.image_size * self.model.image_size * self.model.num_channels
if num < batch_size:
print("Increasing num to", batch_size)
num = batch_size
"""
1. Compute input_image related quantities:
"""
# get the original prediction and gradient, gradient norms values on input image:
pred, grad_val, grad_2_norm_val, grad_1_norm_val, grad_inf_norm_val = self.sess.run(
[self.output, self.grad_op, self.grad_2_norm_op, self.grad_1_norm_op, self.grad_inf_norm_op],
feed_dict = {self.img: [input_image], self.true_label: true_label, self.target_label: target_label})
pred = np.squeeze(pred)
# print(pred)
# print(grad_val)
# class c and class j in Hein's paper. c is original class
c = true_label
j = target_label
# get g_x0 = f_c(x_0) - f_j(x_0)
g_x0 = pred[c] - pred[j]
# grad_z_norm should be scalar
g_x0_grad_2_norm = np.squeeze(grad_2_norm_val)
g_x0_grad_1_norm = np.squeeze(grad_1_norm_val)
g_x0_grad_inf_norm = np.squeeze(grad_inf_norm_val)
print("** Evaluating g_x0, grad_2_norm_val on the input image x0: ")
print("shape of input_image = {}".format(input_image.shape))
print("g_x0 = {:.3f}, grad_2_norm_val = {:3f}, grad_1_norm_val = {:.3f}, grad_inf_norm_val = {:3f}".format(g_x0, g_x0_grad_2_norm, g_x0_grad_1_norm, g_x0_grad_inf_norm))
##### Lily #####
if order == 2: # evaluate the hv and hv norm on input_image
# set randv as a random matrix with the same shape as input_image
print("** Evaluating hv and hv_norm on the input image x0:")
randv = np.random.randn(*input_image.shape)
hv, hv_norm = self.sess.run([self.hv_op, self.hv_norm_op],
feed_dict = {self.img: [input_image], self.randv:[randv], self.true_label: true_label, self.target_label: target_label})
print("hv shape = {}, hv_norm = {}".format(hv.shape, hv_norm))
"""
2. Prepare for sampling:
"""
def div_work_to_cores(njobs, nprocs):
process_item_list = []
while njobs > 0:
process_item_list.append(int(np.ceil(njobs / float(nprocs))))
njobs -= process_item_list[-1]
nprocs -= 1
return process_item_list
# n is the dimension
if self.dataset == "imagenet":
# for imagenet, generate random samples for this batch only
# array in shared memory storing results of all threads
total_item_size = batch_size
else:
# for cifar and mnist, generate random samples for this entire iteration
total_item_size = num
# divide the jobs evenly to all available threads
process_item_list = div_work_to_cores(total_item_size, self.n_processes)
self.n_processes = len(process_item_list)
# select random sample generation function
if sample_norm == "l2":
# the scaling constant in [a,b]: scale the L2 norm of each sample (has originally norm ~1)
a = 0; b = 3;
elif sample_norm == "li":
# for Linf we don't need the scaling
a = 0.1; b = 0.1;
elif sample_norm == "l1":
# TODO: make the sample ball radius adjustable
a = 0; b = 30;
else:
raise RuntimeError("Unknown sample_norm " + sample_norm)
print('Using sphere', sample_norm)
## create necessary shared array structures (saved in /dev/shm) and will be used (and written) in randsphere.py:
# result_arr, scale, input_example, all_inputs
# note: need to use scale[:] = ... not scale = ..., o.w. the contents will not be saved to the shared array
# inputs_0 is the image x_0
inputs_0 = np.array(input_image)
tag_prefix = str(os.getpid()) + "_"
result_arr = NpShmemArray(np.float32, (total_item_size, dimension), tag_prefix + "randsphere")
# we have an extra batch_size to avoid overflow
scale = NpShmemArray(np.float32, (num+batch_size), tag_prefix + "scale")
scale[:] = (b-a)*np.random.rand(num+batch_size)+a;
input_example = NpShmemArray(np.float32, inputs_0.shape, tag_prefix + "input_example")
# this is a read-only array
input_example[:] = inputs_0
# all_inputs is a shared memory array and will be written in randsphere to save the samples
# all_inputs holds the perturbations for one batch or all samples
all_inputs = NpShmemArray(np.float32, (total_item_size,) + inputs_0.shape, tag_prefix + "all_inputs")
# holds the results copied from all_inputs
clipped_all_inputs = np.empty(dtype=np.float32, shape = (total_item_size,) + inputs_0.shape)
# prepare the argument list
offset_list = [0]
for item in process_item_list[:-1]:
offset_list.append(offset_list[-1] + item)
print(self.n_processes, "threads launched with parameter", process_item_list, offset_list)
## create multiple process to generate samples
# randsphere: generate samples (see randsphere.py); partial is a function similar to lambda, now worker_func is a function of idx only
worker_func = partial(randsphere, n = dimension, input_shape = inputs_0.shape, total_size = total_item_size, scale_size = num+batch_size, tag_prefix = tag_prefix, r = 1.0, norm = sample_norm, transform = transform)
worker_args = list(zip(process_item_list, offset_list, [0] * self.n_processes))
# sample_results is an object to monitor if the process has ended (meaning finish generating samples in randsphere.py)
# this line of code will initiate the worker_func to start working (like initiate the job)
sample_results = self.pool.map_async(worker_func, worker_args)
# num: # of samples to be run, \leq samples.shape[0]
# number of iterations
Niters = niters;
if order == 1:
# store the max L in each iteration
L2_max = np.zeros(Niters)
L1_max = np.zeros(Niters)
Li_max = np.zeros(Niters)
# store the max G in each iteration
G2_max = np.zeros(Niters)
G1_max = np.zeros(Niters)
Gi_max = np.zeros(Niters)
# store computed Lipschitz constants in each iteration
L2 = np.zeros(num)
L1 = np.zeros(num)
Li = np.zeros(num)
# store computed gradient norm in each iteration
G2 = np.zeros(num)
G1 = np.zeros(num)
Gi = np.zeros(num)
elif order == 2:
# store the max H in each iteration
H2_max = np.zeros(Niters)
# store computed 2 norm of H in each iteration
H2 = np.zeros(num)
H2_neg = np.zeros(num)
# how many batches we have
Nbatches = num // batch_size
# timer
search_begin_time = time.time()
"""
3. Start performing sampling:
"""
## Start
# multiple runs: generating the samples
## use worker_func to generate x samples, and then use sess.run to evaluate the gradient norm operator
for iters in range(Niters):
iter_begin_time = time.time()
# shuffled index
# idx_shuffle = np.random.permutation(num);
# the scaling constant in [a,b]: scale the L2 norm of each sample (has originally norm ~1)
scale[:] = (b-a)*np.random.rand(num+batch_size)+a;
# number of L's we have computed
L_counter = 0
G_counter = 0
H_counter = 0
overhead_time = 0.0
overhead_start = time.time()
# for cifar and mnist, generate all the random input samples (x in the paper) at once
# for imagenet, generate one batch of input samples (x in the paper) for each iteration
if self.dataset != "imagenet":
# get samples for this iteration: make sure randsphere finished computing samples and stored in all_inputs
# if the samples have not yet done generating, then this line will block the codes until the processes are done, then it will return
sample_results.get()
# copy the results to a buffer and do clipping
np.clip(all_inputs, -0.5, 0.5, out = clipped_all_inputs)
# create multiple process again to generate samples for next batch (initiate a new job) because in below we will need to do sess.run in GPU which might be slow. So we want to generate samples on CPU while running sess.run on GPU to save time
sample_results = self.pool.map_async(worker_func, worker_args)
overhead_time += time.time() - overhead_start
## generate input samples "batch_inputs" and compute corresponding gradient norms samples "perturbed_grad_x_norm"
for i in range(Nbatches):
overhead_start = time.time()
# for imagenet, generate random samples for this batch only
if self.dataset == "imagenet":
# get samples for this batch
sample_results.get()
# copy the results to a buffer and do clipping
np.clip(all_inputs, -0.5, 0.5, out = clipped_all_inputs)
# create multiple threads to generate samples for next batch
worker_args = zip(process_item_list, offset_list, [(i + 1) * batch_size] * self.n_processes)
sample_results = self.pool.map_async(worker_func, worker_args)
if self.dataset == "imagenet":
# we generate samples for each batch at a time
batch_inputs = clipped_all_inputs
else:
# we generate samples for all batches
batch_inputs = clipped_all_inputs[i * batch_size: (i + 1) * batch_size]
# print(result_arr.shape, result_arr)
# print('------------------------')
# print(batch_inputs.shape, batch_inputs.reshape(result_arr.shape))
# print('------------------------')
overhead_time += time.time() - overhead_start
if order == 1:
# run inference and get the gradient
perturbed_predicts, perturbed_grad_2_norm, perturbed_grad_1_norm, perturbed_grad_inf_norm = self.sess.run(
[self.output, self.grad_2_norm_op, self.grad_1_norm_op, self.grad_inf_norm_op],
feed_dict = {self.img: batch_inputs, self.target_label: target_label, self.true_label: true_label})
if self.compute_slope:
# compute the distance between paired samples (s[2k], s[2k+1]) rather than sequential ones
s12_2_norm = np.linalg.norm(s[0:batch_size-1:2] - s[1:batch_size:2], axis = 1)
s12_1_norm = np.linalg.norm(s[0:batch_size-1:2] - s[1:batch_size:2], ord=1, axis = 1)
s12_i_norm = np.linalg.norm(s[0:batch_size-1:2] - s[1:batch_size:2], ord=np.inf, axis = 1)
# compute the function value differences for the same sample pairs
g_x1 = perturbed_predicts[0:batch_size-1:2, c] - perturbed_predicts[0:batch_size-1:2, j]
g_x2 = perturbed_predicts[1:batch_size:2, c] - perturbed_predicts[1:batch_size:2, j]
# estimated Lipschitz constants for this batch
# for slope estimate, we need the DUAL norm
batch_L2 = np.abs(g_x1 - g_x2) / s12_2_norm
batch_L1 = np.abs(g_x1 - g_x2) / s12_i_norm
batch_Li = np.abs(g_x1 - g_x2) / s12_1_norm
L2[L_counter : L_counter + batch_size//2] = batch_L2
L1[L_counter : L_counter + batch_size//2] = batch_L1
Li[L_counter : L_counter + batch_size//2] = batch_Li
G2[G_counter : G_counter + batch_size] = perturbed_grad_2_norm
G1[G_counter : G_counter + batch_size] = perturbed_grad_1_norm
Gi[G_counter : G_counter + batch_size] = perturbed_grad_inf_norm
L_counter += (batch_size//2)
G_counter += batch_size
elif order == 2:
##### Lily #####
randv_batch = np.random.randn(*batch_inputs.shape)
perturbed_hv, perturbed_hv_norm = self.sess.run([self.hv_op, self.hv_norm_op],
feed_dict = {self.img: batch_inputs, self.randv: randv_batch,
self.true_label: true_label, self.target_label: target_label})
show_tensor_dim = False
if show_tensor_dim:
print("====================")
print("** Evaluating perturbed_hv and perturbed_hv_norm in batch {}: ".format(iters))
print("pertubed_hv_prod shape = {}".format(perturbed_hv.shape))
print("randv_batch shape = {}".format(randv_batch.shape))
print("perturbed_hv_norm = {}".format(perturbed_hv_norm[:,0])) # size: (Nimg, 1)
print("perturbed_hv_norm shape = {}".format(perturbed_hv_norm.shape))
#print("perturbed_grad_2_norm= {}".format(perturbed_grad_2_norm))
#print("perturbed_grad_2_norm shape = {}".format(perturbed_grad_2_norm.shape))
pt_hvs = []
pt_hvs.append(perturbed_hv+0*randv_batch)
#print("************** Using tf.while_loop:********************")
# compute max eigenvalue
temp_hv, temp_eig, niter_eig = self.sess.run([self.while_hv_op, self.while_eig, self.it], feed_dict = {self.img: batch_inputs, self.randv: randv_batch, self.true_label: true_label, self.target_label: target_label})
##print("converge in {} steps, temp_eig = {}".format(niter_eig, temp_eig))
# if max eigenvalue is positive, compute the max neg eigenvalue by using the shiftconst
if max(temp_eig) > 0:
shiftconst = max(temp_eig)
temp_eig_1, niter_eig_1 = self.sess.run([self.while_eig_1, self.it_1], feed_dict = {self.img: batch_inputs, self.randv: randv_batch, self.true_label: true_label, self.target_label: target_label, self.shiftconst: shiftconst})
##print("converge in {} steps, temp_eig_1 = {}".format(niter_eig_1, temp_eig_1))
else:
temp_eig_1 = temp_eig
niter_eig_1 = -1
print("temp_eig (abs) converge in {} steps, temp_eig_1 (neg) converge in {} steps".format(niter_eig, niter_eig_1))
## use outer while_loop
#max_eig_iters = 10
#print_flag = True
#final_est_eig_1 = self._compute_max_abseig(pt_hvs, batch_inputs, true_label, target_label, max_eig_iters, print_flag)
#print("************** Using outer while_loop:********************")
#print("outer loop final_est_eig_1 = {}".format(final_est_eig_1))
## use tf while_loop
final_est_eig = temp_eig
final_est_eig_neg = temp_eig_1
H2[H_counter : H_counter + batch_size] = final_est_eig
H2_neg[H_counter : H_counter + batch_size] = final_est_eig_neg
H_counter += batch_size
if order == 1:
# at the end of each iteration: get the per-iteration max gradient norm
if self.compute_slope:
L2_max[iters] = np.max(L2)
L1_max[iters] = np.max(L1)
Li_max[iters] = np.max(Li)
G2_max[iters] = np.max(G2)
G1_max[iters] = np.max(G1)
Gi_max[iters] = np.max(Gi)
if self.compute_slope:
print('[STATS][L2] loop = {}, time = {:.5g}, iter_time = {:.5g}, overhead = {:.5g}, L2 = {:.5g}, L1 = {:.5g}, Linf = {:.5g}, G2 = {:.5g}, G1 = {:.5g}, Ginf = {:.5g}'.format(iters, time.time() - search_begin_time, time.time() - iter_begin_time, overhead_time, L2_max[iters], L1_max[iters], Li_max[iters], G2_max[iters], G1_max[iters], Gi_max[iters]))
else:
print('[STATS][L2] loop = {}, time = {:.5g}, iter_time = {:.5g}, overhead = {:.5g}, G2 = {:.5g}, G1 = {:.5g}, Ginf = {:.5g}'.format(iters, time.time() - search_begin_time, time.time() - iter_begin_time, overhead_time, G2_max[iters], G1_max[iters], Gi_max[iters]))
sys.stdout.flush()
# reset per iteration L and G by filling 0
if self.compute_slope:
L2.fill(0)
L1.fill(0)
Li.fill(0)
G2.fill(0)
G1.fill(0)
Gi.fill(0)
elif order == 2:
## consider -lambda_min
idx = H2 > 0
H2[idx] = H2_neg[idx]
idx_max = np.argmax(abs(H2))
H2_max[iters] = H2[idx_max]
print('[STATS][L2] loop = {}, time = {:.5g}, iter_time = {:.5g}, overhead = {:.5g}, H2 = {:.5g}'.format(iters, time.time() - search_begin_time, time.time() - iter_begin_time, overhead_time, H2_max[iters]))
if order == 1:
print('[STATS][L1] g_x0 = {:.5g}, L2_max = {:.5g}, L1_max = {:.5g}, Linf_max = {:.5g}, G2_max = {:.5g}, G1_max = {:.5g}, Ginf_max = {:.5g}'.format(
g_x0, np.max(L2_max), np.max(L1_max), np.max(Li_max), np.max(G2_max), np.max(G1_max), np.max(Gi_max)))
# when compute the bound we need the DUAL norm
if self.compute_slope:
print('[STATS][L1] bnd_L2_max = {:.5g}, bnd_L1_max = {:.5g}, bnd_Linf_max = {:.5g}, bnd_G2_max = {:.5g}, bnd_G1_max = {:.5g}, bnd_Ginf_max = {:.5g}'.format(g_x0/np.max(L2_max), g_x0/np.max(Li_max), g_x0/np.max(L1_max), g_x0/np.max(G2_max), g_x0/np.max(Gi_max), g_x0/np.max(G1_max)))
else:
print('[STATS][L1] bnd_G2_max = {:.5g}, bnd_G1_max = {:.5g}, bnd_Ginf_max = {:.5g}'.format(g_x0/np.max(G2_max), g_x0/np.max(Gi_max), g_x0/np.max(G1_max)))
sys.stdout.flush()
# discard the last batch of samples
sample_results.get()
return [L2_max,L1_max,Li_max,G2_max,G1_max,Gi_max,g_x0,pred]
elif order == 2:
            # replace each positive eigenvalue with its corresponding negative eigenvalue, so we only need to sort once
#print("H2_max = {}".format(H2_max))
# find max abs(H2_max)
H2_max_val = max(abs(H2_max))
print('[STATS][L1] g_x0 = {:.5g}, g_x0_grad_2_norm = {:.5g}, g_x0_grad_1_norm = {:.5g}, g_x0_grad_inf_norm = {:.5g}, H2_max = {:.5g}'.format(g_x0, g_x0_grad_2_norm, g_x0_grad_1_norm, g_x0_grad_inf_norm, H2_max_val))
bnd = (-g_x0_grad_2_norm + np.sqrt(g_x0_grad_2_norm**2+2*g_x0*H2_max_val))/H2_max_val
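            # bnd is the positive root of the quadratic 0.5*H2_max_val*r**2 + g_x0_grad_2_norm*r - g_x0 = 0,
            # i.e. the largest radius r for which the second-order expansion
            # g(x0) - ||grad g(x0)||_2 * r - 0.5 * H2_max_val * r**2 remains non-negative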
print('[STATS][L1] bnd_H2_max = {:.5g}'.format(bnd))
sys.stdout.flush()
sample_results.get()
return [H2_max, g_x0, g_x0_grad_2_norm, g_x0_grad_1_norm, g_x0_grad_inf_norm, pred]
def _compute_max_abseig(self, pt_hvs, batch_inputs, true_label, target_label, max_eig_iters, print_flag):
## compute hv and est_eig:
i = 0
cond = False
pt_eigs = []
print("pt_hvs[0] shape = {}".format(pt_hvs[0].shape))
# perform power iteration loop outside tensorflow
        while i < max_eig_iters and not cond:
tmp_hv, tmp_hv_norm, tmp_vhv, tmp_vnorm, tmp_est_eig = self.sess.run([self.hv_op, self.hv_norm_op, self.vhv_op, self.randv_norm_op, self.eig_est], feed_dict = {self.img: batch_inputs, self.randv: pt_hvs[i], self.true_label: true_label, self.target_label: target_label})
tmp_vhv = np.squeeze(tmp_vhv)
tmp_vnorm = np.squeeze(tmp_vnorm)
tmp_est_eig = np.squeeze(tmp_est_eig)
if print_flag:
#print("current step = {}, norm = {}".format(i, tmp_hv_norm[:,0]))
#print("current step = {}, pt_hv_prod.shape = {}, pt_hvs_norm.shape = {}".format(i,tmp_hv.shape, tmp_hv_norm.shape))
print("current step = {}, est_eig = {}".format(i,tmp_est_eig-0))
#print("current step = {}, vhv = {}".format(i,tmp_vhv))
#print("current step = {}, vnorm (check: should be 1) = {}".format(i,tmp_vnorm))
pt_hvs.append(tmp_hv+0*pt_hvs[i])
pt_eigs.append(tmp_est_eig)
# conditions
if i > 0:
cond_element = abs(tmp_est_eig-pt_eigs[i-1]) < 1e-3
if print_flag:
print("cond = {}".format(cond_element))
cond = cond_element.all()
i+=1
if i == max_eig_iters:
print("==== Reach max iterations!!! ====")
return pt_eigs[-1]
def __del__(self):
# terminate the pool
self.pool.terminate()
def estimate(self, x_0, true_label, target_label, Nsamp, Niters, sample_norm, transform, order):
result = self._estimate_Lipschitz_multiplerun(Nsamp,Niters,x_0,target_label,true_label,sample_norm, transform, order)
return result
|
import dash
import dash_html_components as html
import dash_core_components as dcc
import plotly.express as px
from dash.dependencies import Input, Output
from eda import calculate_basic_statistics
def show_dashboard(data):
app = dash.Dash('Product Reviews')
nr_of_all_reviews = calculate_basic_statistics(data)
text_style = dict(color='#444', fontFamily='sans-serif', fontWeight=300)
plotly_figure = px.histogram(data, x="Age", title='Number of reviews per Age:')
rating_pie = px.pie(data, values=data['Rating'].value_counts().values, names=data['Rating'].value_counts().index,
title='Rating')
class_name_nr_reviews = px.histogram(data, x="Class Name", title='Number of reviews per Class Name:')
app.layout = html.Div([
html.H2('Product Reviews', style=text_style),
html.P('Number of analyzed reviews: ' + str(nr_of_all_reviews), style=text_style),
dcc.Graph(id='plot1', figure=plotly_figure),
dcc.Dropdown(
id='names',
            value=data['Class Name'].value_counts().index[0],  # default to a single class (the most common one)
options=[{'value': x, 'label': x}
for x in set(data['Class Name'])],
clearable=False
),
dcc.Graph(id='plot2', figure=rating_pie),
dcc.Graph(id='plot3', figure=class_name_nr_reviews)
])
@app.callback(
Output("plot2", "figure"),
Input("names", "value"))
def generate_chart(names):
fig = px.pie(data, values=data[data['Class Name'] == names]['Rating'].value_counts().values,
names=data[data['Class Name'] == names]['Rating'].value_counts().index,
title='Rating')
return fig
app.server.run(debug=True)
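# Usage sketch (illustrative): the DataFrame passed to show_dashboard() is expected
# to contain at least the 'Age', 'Rating' and 'Class Name' columns used above, e.g.
#   import pandas as pd
#   show_dashboard(pd.read_csv('reviews.csv'))  # 'reviews.csv' is a hypothetical file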
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Pytorch modules
some classes are modified from HuggingFace
(https://github.com/huggingface/transformers)
"""
import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from transformers import AutoTokenizer
from .layer import BertLayer, BertPooler, BertOnlyMLMHead
logger = logging.getLogger(__name__)
def mixup(batch, mix_indices, lamb=0.5):
mix_input = batch.clone()
mix_input = lamb*mix_input + (1-lamb)*torch.index_select(batch, 0, mix_indices)
return torch.cat((batch, mix_input))
def concat(batch, mix_indices):
mix_input = batch.clone()
mix_input = torch.cat((mix_input, torch.index_select(batch, 0, mix_indices)), dim=1)
original_input = torch.cat((batch.clone(), batch.clone()), dim=1)
return original_input, mix_input
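# Illustrative example (shapes only): for a toy batch of two sequence embeddings
# and mix_indices = [1, 0],
#   batch = torch.randn(2, 5, 8)            # (batch, seq_len, dim)
#   idx = torch.tensor([1, 0])
#   mixup(batch, idx, lamb=0.5).shape       # torch.Size([4, 5, 8]) -- doubles the batch
#   concat(batch, idx)[0].shape             # torch.Size([2, 10, 8]) -- doubles the sequence length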
class UniterConfig(object):
"""Configuration class to store the configuration of a `UniterModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
"""Constructs UniterConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
`UniterModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer
encoder.
num_attention_heads: Number of attention heads for each attention
layer in the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e.
feed-forward) layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string)
in the encoder and pooler. If string, "gelu", "relu" and
"swish" are supported.
            hidden_dropout_prob: The dropout probability for all fully
connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this
model might ever be used with. Typically set this to something
large just in case (e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed
into `UniterModel`.
            initializer_range: The stddev of the truncated_normal_initializer
for initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file,
"r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size "
"(int) or the path to a pretrained model config "
"file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `UniterConfig` from a
Python dictionary of parameters."""
config = UniterConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `UniterConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
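# Usage sketch (illustrative values): a config can be built from an int vocabulary
# size, a JSON file path, or a plain dict, e.g.
#   config = UniterConfig(vocab_size_or_config_json_file=28996, hidden_size=768)
#   config = UniterConfig.from_json_file('uniter-base.json')   # hypothetical file name
#   config = UniterConfig.from_dict({'vocab_size': 28996, 'hidden_size': 768})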
class UniterPreTrainedModel(nn.Module):
""" An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, UniterConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of "
"class `UniterConfig`. To create a model from a Google "
"pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses
# truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0,
std=self.config.initializer_range)
elif isinstance(module, FusedLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
"""
Instantiate a UniterPreTrainedModel from a pre-trained model file or a
pytorch state dict.
Params:
config_file: config json file
            state_dict: a state dictionary
*inputs, **kwargs: additional input for the specific Uniter class
"""
# Load config
config = UniterConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
# Instantiate model.
model = cls(config, *inputs, **kwargs)
# Load from a PyTorch state_dict
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = ({} if metadata is None
else metadata.get(prefix[:-1], {}))
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys,
unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.')
for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from "
"pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in "
"{}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for '
'{}:\n\t{}'.format(
model.__class__.__name__,
"\n\t".join(error_msgs)))
return model
class UniterTextEmbeddings(nn.Module):
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size,
config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings,
config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size,
config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model
# variable name and be able to load any TensorFlow checkpoint file
self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, position_ids, token_type_ids=None):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (words_embeddings
+ position_embeddings
+ token_type_embeddings)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class UniterImageEmbeddings(nn.Module):
def __init__(self, config, img_dim):
super().__init__()
self.img_linear = nn.Linear(img_dim, config.hidden_size)
self.img_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_linear = nn.Linear(7, config.hidden_size)
self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
# tf naming convention for layer norm
self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
if img_masks is not None:
self.mask_embedding.weight.data[0, :].fill_(0)
mask = self.mask_embedding(img_masks.long())
img_feat = img_feat + mask
transformed_im = self.img_layer_norm(self.img_linear(img_feat))
transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
embeddings = transformed_im + transformed_pos + type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class UniterEncoder(nn.Module):
def __init__(self, config):
super().__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, input_, attention_mask,
output_all_encoded_layers=True):
all_encoder_layers = []
hidden_states = input_
for layer_module in self.layer:
hidden_states = layer_module(hidden_states, attention_mask)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class UniterModel(UniterPreTrainedModel):
""" Modification for Joint Vision-Language Encoding
"""
def __init__(self, config, img_dim):
super().__init__(config)
self.embeddings = UniterTextEmbeddings(config)
self.img_embeddings = UniterImageEmbeddings(config, img_dim)
self.encoder = UniterEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _compute_txt_embeddings(self, input_ids, position_ids,
txt_type_ids=None):
output = self.embeddings(input_ids, position_ids, txt_type_ids)
return output
def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
img_type_ids=None):
if img_type_ids is None:
img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
img_type_embeddings = self.embeddings.token_type_embeddings(
img_type_ids)
output = self.img_embeddings(img_feat, img_pos_feat,
img_type_embeddings, img_masks)
return output
def _compute_img_txt_embeddings(self, input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks=None,
txt_type_ids=None, img_type_ids=None,
mix_indices=None, lamb=0.5, da_type=None):
txt_emb = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
img_emb = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
        if mix_indices is not None:
if da_type == 'mixup':
txt_emb = mixup(txt_emb, mix_indices, lamb)
img_emb = mixup(img_emb, mix_indices, lamb)
mix_gather_index = torch.max(gather_index, torch.index_select(gather_index, 0, mix_indices))
gather_index = torch.cat((gather_index, mix_gather_index))
elif da_type == 'cat':
original_txt_emb, mix_txt_emb = concat(txt_emb, mix_indices)
original_img_emb, mix_img_emb = concat(img_emb, mix_indices)
original_emb = torch.cat((original_txt_emb, original_img_emb), dim=1)
mix_emb = torch.cat((mix_txt_emb, mix_img_emb), dim=1)
max_len = gather_index.shape[1]
original_gather_index = torch.cat((gather_index, gather_index + max_len), dim=1)
mix_gather_index = torch.cat((gather_index, torch.index_select(gather_index, 0, mix_indices)), dim=1)
gather_index = torch.cat((original_gather_index, mix_gather_index))
# align back to most compact input
gather_index = gather_index.unsqueeze(-1).expand(
-1, -1, self.config.hidden_size)
        if da_type == 'cat' and mix_indices is not None:
embedding_output = torch.gather(torch.cat((original_emb, mix_emb)),
dim=1, index=gather_index)
else:
embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
dim=1, index=gather_index)
# import ipdb; ipdb.set_trace()
return embedding_output
def forward(self, input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index=None, img_masks=None,
output_all_encoded_layers=True, mix_indices=None, lamb=0.5,
txt_type_ids=None, img_type_ids=None, da_type=None):
# input_ids: b x max_tl
# position_ids: [[0, 1, ..., max_tl-1]]
# img_feat: b x max_il x 2048
# img_pos_feat: b x max_il x 7
# attention_mask: b x max_l with all ones
# gather_index: b x max_l, img_idxs + txt_idxs
# import ipdb; ipdb.set_trace()
# compute self-attention mask
        if mix_indices is not None:
if da_type == 'mixup':
attention_mask = torch.cat((attention_mask, attention_mask))
#TODO: position
else:
original_mask, mix_mask = concat(attention_mask, mix_indices)
attention_mask = torch.cat((original_mask, mix_mask))
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # b x 1 x 1 x max_l
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# embedding layer
if input_ids is None:
# image only
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
elif img_feat is None:
# text only
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks, txt_type_ids, img_type_ids,
mix_indices, lamb, da_type) # b x max_l x d
encoded_layers = self.encoder(
embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers
class UniterSoftPromptModel(UniterPreTrainedModel):
""" Modification for Soft-Prompt
"""
def __init__(self, config, img_dim, num_answer = 3129,
prompt_len=20,
prompt_type=None,
label_mapping=None,
pretrain_param_fixed=True,
prompt_param_fixed=False):
super().__init__(config)
self.uniter = UniterModel(config, img_dim)
# self.uniter.requires_grad_(True)
self.uniter.requires_grad_(not pretrain_param_fixed)
self.cls = BertOnlyMLMHead(
config, self.uniter.embeddings.word_embeddings.weight)
self.cls.requires_grad_(not pretrain_param_fixed)
self.apply(self.init_weights)
# add soft prompts
self.prompt_len = prompt_len
self.prompt_type = prompt_type
self.label_mapping = label_mapping
start_id = 1103 # id of 'the' in the vocabulary
prompt_emb = self.uniter.embeddings.word_embeddings.weight[start_id:start_id+prompt_len].clone().detach()
self.soft_prompt_embeddings = nn.parameter.Parameter(prompt_emb)
self.soft_prompt_embeddings.requires_grad_(not prompt_param_fixed)
self.prediction_pos = 0
self.tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
def set_hard_prompt(self, hard_prompt):
# hard_prompt = '[MASK] It is just .'
start_id = 1103
self.soft_prompt_embeddings.data = self.uniter.embeddings.word_embeddings.weight[start_id:start_id+self.prompt_len].clone().detach()
prompt_ids = self.tokenizer(hard_prompt, return_tensors="pt")['input_ids']
prompt_ids = prompt_ids[:, 1:][:, :-1][0]
prompt_emb = self.uniter.embeddings.word_embeddings(prompt_ids).clone().detach()
mask_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)
min_len = min(self.soft_prompt_embeddings.shape[0], prompt_emb.shape[0])
self.soft_prompt_embeddings.data[-min_len:] = prompt_emb.data[-min_len:]
mask_pos = (prompt_ids[-min_len:] == mask_id).nonzero()
if mask_pos.shape[0] != 0:
self.prediction_pos = self.prompt_len - min_len + mask_pos[0].item() + 1
def forward(self, input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index=None, img_masks=None,
output_all_encoded_layers=True,
txt_type_ids=None, img_type_ids=None):
prompt_mask = torch.ones(attention_mask.shape[0], self.prompt_len).to(attention_mask)
# attention_mask = torch.cat([attention_mask, prompt_mask], dim=-1)
attention_mask = torch.cat([prompt_mask, attention_mask], dim=-1)
# compute self-attention mask
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) # b x 1 x 1 x max_l
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# embedding layer
if input_ids is None:
# image only
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
elif img_feat is None:
# text only
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks, txt_type_ids, img_type_ids) # b x max_l x d
encoded_layers = self.uniter.encoder(
embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers
def _compute_img_txt_embeddings(self, input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks=None,
txt_type_ids=None, img_type_ids=None,
prompt_type='soft-prefix-first'):
if txt_type_ids is not None:
bos_type_ids = txt_type_ids[:, :1]
txt_type_ids = txt_type_ids[:, 1:]
else:
bos_type_ids = None
        # separate [CLS] from raw text
txt_emb = self.uniter._compute_txt_embeddings(
input_ids[:, 1:], position_ids[:, 1:] + self.prompt_len, txt_type_ids)
bos_emb = self.uniter._compute_txt_embeddings(
input_ids[:, :1], position_ids[:, :1], bos_type_ids)
gather_index = gather_index[:, 1:] - 1 #
img_emb = self.uniter._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
# align back to most compact input
gather_index = gather_index.unsqueeze(-1).expand(
-1, -1, self.config.hidden_size)
embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
dim=1, index=gather_index)
prompt_pos_ids = (torch.arange(self.prompt_len) + 1).to(position_ids).unsqueeze(0)
prompt_pos_emb = self.uniter.embeddings.position_embeddings(prompt_pos_ids)
prompt_type_ids = torch.zeros_like(prompt_pos_ids)
prompt_type_emb = self.uniter.embeddings.token_type_embeddings(prompt_type_ids)
prompt_emb = (self.soft_prompt_embeddings.unsqueeze(0)
+ prompt_pos_emb
+ prompt_type_emb) # 1 x prompt_len x d
# prompt_emb = prompt_emb.expand(embedding_output.shape[0], -1, -1)
prompt_emb = prompt_emb.repeat(embedding_output.shape[0], 1, 1)
embedding_output = torch.cat([bos_emb, prompt_emb, embedding_output], dim=1)
return embedding_output |
class Solution:
# @param A : list of integers
# @param B : list of integers
# @return an integer
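    # Greedy argument: sort both lists and pair them in order; swapping any two
    # assignments in a sorted pairing can only increase the larger of the two
    # distances, so the in-order pairing minimizes the maximum |mouse - hole| time.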
def mice(self, A, B):
mice = sorted(A)
holes = sorted(B)
return max([abs(a-b) for a, b in zip(mice, holes)]) |
#
# Copyright 2017 Luma Pictures
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from __future__ import absolute_import
from treemodel.itemtree import ItemTree, TreeItem
from pxr.UsdQt._Qt import QtCore, QtGui
if False:
from typing import *
NULL_INDEX = QtCore.QModelIndex()
class AbstractTreeModelMixin(object):
"""Mixin class that implements the necessary methods for Qt model to reflect
the structure of an ``ItemTree`` instance.
"""
def __init__(self, itemTree=None, parent=None):
"""
Parameters
----------
itemTree : Optional[ItemTree]
parent
"""
super(AbstractTreeModelMixin, self).__init__(parent=parent)
self.itemTree = None # type: ItemTree
self.SetItemTree(itemTree or ItemTree())
# Qt methods ---------------------------------------------------------------
def hasChildren(self, parentIndex):
"""
Parameters
----------
parentIndex : QtCore.QModelIndex
Returns
-------
bool
"""
return bool(self.rowCount(parentIndex))
def index(self, row, column, parentIndex):
"""
Parameters
----------
row : int
column : int
parentIndex : QtCore.QModelIndex
Returns
-------
QtCore.QModelIndex
"""
if parentIndex.isValid():
parentItem = parentIndex.internalPointer()
else:
parentItem = self.itemTree.root
return self.ItemIndex(row, column, parentItem)
def parent(self, modelIndex):
"""
Parameters
----------
modelIndex : QtCore.QModelIndex
Returns
-------
QtCore.QModelIndex
"""
if modelIndex.isValid():
parent = self.itemTree.Parent(modelIndex.internalPointer())
if parent is not self.itemTree.root:
return self.createIndex(self.itemTree.RowIndex(parent), 0, parent)
return NULL_INDEX
def rowCount(self, parentIndex):
"""
Parameters
----------
parentIndex : QtCore.QModelIndex
Returns
-------
int
"""
if parentIndex.column() > 0:
return 0
if parentIndex.isValid():
parent = parentIndex.internalPointer()
else:
parent = self.itemTree.root
return self.itemTree.ChildCount(parent=parent)
# Custom methods -----------------------------------------------------------
def SetItemTree(self, itemTree):
"""
Parameters
----------
itemTree : ItemTree
"""
assert isinstance(itemTree, ItemTree)
self.beginResetModel()
self.itemTree = itemTree
self.endResetModel()
def ItemIndex(self, row, column, parentItem):
"""
Parameters
----------
row : int
column : int
parentItem: TreeItem
Returns
-------
QtCore.QModelIndex
"""
try:
childItem = self.itemTree.ChildAtRow(parentItem, row)
except (KeyError, IndexError):
return NULL_INDEX
else:
return self.createIndex(row, column, childItem)
def GetItemIndex(self, item, column=0):
"""
Parameters
----------
item : TreeItem
column : int
Returns
-------
QtCore.QModelIndex
"""
return self.ItemIndex(self.itemTree.RowIndex(item), column,
self.itemTree.Parent(item))
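    # Usage sketch (illustrative): this mixin provides only the tree-structure half
    # of a Qt item model; a concrete model combines it with a QAbstractItemModel
    # subclass and supplies the remaining methods, e.g.
    #   class MyTreeModel(AbstractTreeModelMixin, QtCore.QAbstractItemModel):
    #       def columnCount(self, parentIndex):
    #           return 1
    #       def data(self, modelIndex, role=QtCore.Qt.DisplayRole):
    #           ...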
|
# File: Bowling.py
# Description: Calculates the score of a bowling match
# Student's Name: Minh-Tri Ho
# Student's UT EID: mh47723
# Course Name: CS 313E
# Unique Number: 50940
#
# Date Created: 01/29/16
# Date Last Modified: 02/02/16
#Shows the upper part of the scoring
def printHeader():
print(" 1 2 3 4 5 6 7 8 9 10")
print("+---+---+---+---+---+---+---+---+---+-----+")
#Shows the middle part of the scoring with the detailed scores
def printScore(score):
#Initialize the string that will contain the detailed scores
res = "|"
#Rewrite score to add a " " list cell after X
i = 0
while(i < len(score)):
if(score[i] == "X" and i < 18):
score.insert(i + 1, " ")
i += 1
i += 1
if(len(score) == 20):
score += " "
for i in range(len(score)):
res += str(score[i])
if(i%2 == 0 and i <= 19):
res += " "
elif(i%2 == 1 and i <= 18):
res += "|"
elif(i%2 == 1 and i == 19):
res += " "
res += "|"
print(res)
#Shows the middle part of the scoring with the accumulated scores
def printScoreTotal(score):
#Initialize the string that will contain the accumulated scores
res = "|"
#Make a copy of the score we imported
scoreNum = score[:]
#Transform symbols in numbers
for i in range(len(score)):
if(score[i] == "X"):
scoreNum[i] = 10
elif(score[i] == "-"):
scoreNum[i] = 0
elif(score[i] == " "):
scoreNum[i] = 0
elif(score[i] == "/"):
scoreNum[i] = 10 - (int)(scoreNum[i-1])
else:
scoreNum[i] = (int)(scoreNum[i])
#Calculate the score per frame
scoreTt = []
scoreTtCum = []
for i in range(0, len(score)-2, 2):
if(score[i] == "X"):
if(i <= 14 and score[i+2] == "X"):
scoreTt.append(scoreNum[i]+scoreNum[i+2]+scoreNum[i+4])
elif(i == 16 and score[i+2] == "X"):
scoreTt.append(scoreNum[i]+scoreNum[i+2]+scoreNum[i+3])
elif(i == 18):
scoreTt.append(scoreNum[i]+scoreNum[i+1]+scoreNum[i+2])
else:
scoreTt.append(scoreNum[i]+scoreNum[i+2]+scoreNum[i+3])
elif(score[i+1] == "/"):
scoreTt.append(scoreNum[i]+scoreNum[i+1]+scoreNum[i+2])
else:
scoreTt.append(scoreNum[i]+scoreNum[i+1])
#Add that score to the last accumulated score, if it exists
if(i == 0):
scoreTtCum.append(scoreTt[0])
else:
scoreTtCum.append((scoreTtCum[(int)(i/2)-1]) + scoreTt[(int)(i/2)])
    #Add the aggregated scores to the result (with correct formatting)
for i in range(len(scoreTtCum)):
if(scoreTtCum[i] < 10):
if(i == 9):
res += " " + str(scoreTtCum[i]) + " |"
else:
res += " " + str(scoreTtCum[i]) + " |"
elif(scoreTtCum[i] < 100):
if(i == 9):
res += " " + str(scoreTtCum[i]) + " |"
else:
res += " " + str(scoreTtCum[i]) + "|"
else:
if(i == 9):
res += " " + str(scoreTtCum[i]) + " |"
else:
res += str(scoreTtCum[i]) + "|"
#Display the result
print(res)
#Shows the lower part of the scoring
def printFooter():
print("+---+---+---+---+---+---+---+---+---+-----+\n")
def main():
file_name = "scores.txt"
in_file = open(file_name, 'r')
for line in in_file:
printHeader()
score_raw = line.rstrip("\n")
score = score_raw.split()
printScore(score)
printScoreTotal(score)
printFooter()
main()
|
import sys
from PyQt5.QtWidgets import *
class MyWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("PyStock")
self.setGeometry(300, 300, 300, 400)
if __name__ == "__main__":
app = QApplication(sys.argv)
mywindow = MyWindow()
mywindow.show()
app.exec_() |
import re
from django import template
register = template.Library()
template.base.tag_re = re.compile(template.base.tag_re.pattern, re.DOTALL)
'''
Usage:
------
{% stash 'contact_link' %}
<a href='/v2/contacts/{{ contact_id }}'>{{ contact_name }}</a>
{% endstash %}
{% stash_apply 'contact_link'
contact_name='Ian Jabour'
contact_id=1
orgrole=user.orgtype
%}
{% stash_apply 'contact_link'
contact_name='Joe Schmoe'
contact_id=2
orgrole=user.orgtype
%}
{% stash 'header' %}
<header>SimpleLegal</header>
{% endstash %}
{% stash_apply 'header' %}
'''
@register.tag
def stash(parser, token):
nodelist = parser.parse(('endstash',))
parser.delete_first_token()
stash_name = with_prefix(token.split_contents()[1])
return StashSetNode(stash_name, nodelist)
class StashSetNode(template.Node):
def __init__(self, stash_name, nodelist):
self.stash_name = stash_name
self.nodelist = nodelist
def render(self, context):
context.push({self.stash_name: self.nodelist})
return ''
@register.tag
def stash_apply(parser, token):
token_contents = token.split_contents()
stash_name = with_prefix(token.split_contents()[1])
kwargs = {}
if len(token_contents) > 2:
raw_kwargs = [raw_kwarg.split('=')
for raw_kwarg in token_contents[2:]]
kwargs = {raw_kwarg[0]: raw_kwarg[1] for raw_kwarg in raw_kwargs}
return StashGetNode(stash_name, **kwargs)
class StashGetNode(template.Node):
def __init__(self, stash_name, **kwargs):
self.stash_name = stash_name
self.kwargs = kwargs
def render(self, context):
nodelist = context.get(self.stash_name)
with_vars = {
key: template.base.Variable(value).resolve(context)
for key, value in self.kwargs.items()
}
context.push(with_vars)
return nodelist.render(context)
def with_prefix(stash_name):
return '_stash__{0}'.format(stash_name)
|
import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from .BatchHardTripletLoss import BatchHardTripletLoss
class BatchHardSoftMarginTripletLoss(BatchHardTripletLoss):
def __init__(self, sentence_embedder):
super(BatchHardSoftMarginTripletLoss, self).__init__(sentence_embedder)
self.sentence_embedder = sentence_embedder
def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):
reps = [self.sentence_embedder(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]
return BatchHardSoftMarginTripletLoss.batch_hard_triplet_soft_margin_loss(labels, reps[0])
# Hard Triplet Loss with Soft Margin
# Paper: In Defense of the Triplet Loss for Person Re-Identification, https://arxiv.org/abs/1703.07737
@staticmethod
def batch_hard_triplet_soft_margin_loss(labels: Tensor, embeddings: Tensor, squared: bool = False) -> Tensor:
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
Label_Sentence_Triplet: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = BatchHardTripletLoss._pairwise_distances(embeddings, squared=squared)
#pairwise_dist = BatchHardTripletLoss._cosine_distance(embeddings)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = BatchHardTripletLoss._get_anchor_positive_triplet_mask(labels).float()
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = mask_anchor_positive * pairwise_dist
# shape (batch_size, 1)
hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True)
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = BatchHardTripletLoss._get_anchor_negative_triplet_mask(labels).float()
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True)
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True)
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss with soft margin
#tl = hardest_positive_dist - hardest_negative_dist + margin
#tl[tl < 0] = 0
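        # log1p(exp(x)) is the softplus function, a smooth upper bound on max(x, 0):
        # it replaces the hard hinge [d(a,p) - d(a,n) + margin]_+ with a "soft margin",
        # removing the margin hyperparameter (see the paper referenced above)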
tl = torch.log1p(torch.exp(hardest_positive_dist - hardest_negative_dist))
triplet_loss = tl.mean()
return triplet_loss |
import tensorflow as tf
import numpy as np
from transformer import Transformer
from preprocess import DataPreprocesser
from vqa import VQA
from vqa_iter import VQAIter
from tokenizer import MyTokenizer
from encoder import Encoder
from decoder import Decoder
import tqdm
from loss import loss_cosine_similarity
from custom_schedule import get_cumtom_adam
import hyperparameters as hp
import pickle
class Trainer:
def __init__(self,train_iter,model,batch_size,max_qst_len,max_ans_len,checkpoint_path):
self.train_iter = train_iter
        self.max_len = max_qst_len  # train_data/val_data are not in scope here; use the max question length passed in
self.preprocesser = DataPreprocesser(MyTokenizer(),self.max_len)
self.model = model
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
self.optimizer = get_cumtom_adam()
self.checkpoint_path = checkpoint_path
self.ckpt = tf.train.Checkpoint(model=self.model,
optimizer=self.optimizer)
self.ckpt_manager = tf.train.CheckpointManager(self.ckpt, checkpoint_path, max_to_keep=5)
# if a checkpoint exists, restore the latest checkpoint.
if self.ckpt_manager.latest_checkpoint:
            self.ckpt.restore(self.ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
def train_step(self,batch):
batch_qst_tokens,batch_tr_img_ids,batch_te_img_ids,batch_tr_ans_tokens,batch_te_ans_tokens = self.preprocesser(batch)
#predictions:[batch_size,768]
#test_ans_end:[batch_size,768]
with tf.GradientTape() as tape:
predictions,test_ans_enc = self.model(batch_qst_tokens,
batch_tr_img_ids,
batch_te_img_ids,
batch_tr_ans_tokens,
batch_te_ans_tokens)
            loss = loss_cosine_similarity(test_ans_enc, predictions)  # compare predictions against the encoded test answers
gradients = tape.gradient(loss,self.model.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients,self.model.trainable_variables))
self.train_loss(loss)
def train(self,steps,steps_per_save,steps_per_chunk,steps_per_report):
for s in range(steps):
if s%steps_per_save == 0:
self.ckpt_manager.save()
print('model saved')
            if s > 0 and s % steps_per_chunk == 0:  # advance to the next data chunk periodically
self.train_iter.next_chunk()
if s%steps_per_report == 0:
                print('Steps {} Loss {:.4f}'.format(s,self.train_loss.result()))
self.train_step(self.train_iter.next())
        print('Steps {} Loss {:.4f}'.format(steps,self.train_loss.result()))
self.model.save()
print('model saved')
print('training finished')
if __name__ == "__main__":
#train_data = VQA(r'D:\documents\coding\Data\coco\v2_mscoco_train2014_annotations.json',
#r'D:\documents\coding\Data\coco\v2_OpenEnded_mscoco_train2014_questions.json',
#r'D:\documents\coding\Data\coco\train2014\COCO_train2014_{0}.jpg',
#r'D:\documents\coding\Data\coco\v2_mscoco_train2014_complementary_pairs.json')
train_data = VQA(r'D:\lgy\Document\Python\Data\coco\v2_mscoco_train2014_annotations.json',
r'D:\lgy\Document\Python\Data\coco\v2_OpenEnded_mscoco_train2014_questions.json',
r'D:\lgy\Document\Python\Data\coco\train2014\COCO_train2014_{0}.jpg')
train_iter = VQAIter(train_data,train_data.getQuesIds(ansTypes = ['other','yes/no']),hp.batch_size,hp.num_chunks)
max_qst_len = hp.max_qst_len
max_ans_len = hp.max_ans_len
model = Transformer(hp.num_layers,hp.d_model,hp.num_heads,hp.dff,max_qst_len+3,hp.dropout_rate)
    trainer = Trainer(train_iter,model,16,max_qst_len,max_ans_len,'./checkpoints')  # './checkpoints' is a placeholder checkpoint_path
trainer.train(hp.steps,hp.steps_per_save,hp.steps_per_chunk,hp.steps_per_report)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-10-14 21:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('logistics', '0012_shipment_unique_id'),
]
operations = [
migrations.AlterField(
model_name='shipment',
name='carrier',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='logistics.Carrier'),
),
migrations.AlterField(
model_name='shipment',
name='recipient_address',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='recipient_address', to='logistics.Address'),
),
migrations.AlterField(
model_name='shipment',
name='recipient_contact',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='recipient_contact', to='logistics.Contact'),
),
migrations.AlterField(
model_name='shipment',
name='shipper_address',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipper_address', to='logistics.Address'),
),
migrations.AlterField(
model_name='shipment',
name='shipper_contact',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='shipper_contact', to='logistics.Contact'),
),
migrations.AlterField(
model_name='shipment',
name='waybill_link',
field=models.URLField(blank=True, max_length=500),
),
]
|
import os
import sys
import csv
import pysftp
from constants import *
def downloadeFile(fileId, contentType):
FORMAT = ""
if contentType == "image":
FORMAT = IMAGE_FORMAT
elif contentType == "video":
FORMAT = VIDEO_FORMAT
try:
s = pysftp.Connection(SERVER, username=MY_USER, password=MY_PASSWORD)
remotepath = REMOTE_PATH+fileId+FORMAT
localpath = "./"+fileId+FORMAT
s.get(remotepath, localpath)
s.close()
except Exception:
print("Content downloading failed")
|
# Provision users/groups via direct invocation of the Databricks SCIM API
# given user/group config (may be exported from AAD)
import requests, json, logging
from requests.exceptions import HTTPError
log = logging.getLogger()
USERS_ENDPOINT = '/api/2.0/preview/scim/v2/Users'
GROUPS_ENDPOINT = '/api/2.0/preview/scim/v2/Groups'
USER_SCHEMA = 'urn:ietf:params:scim:schemas:core:2.0:User'
GROUP_SCHEMA = 'urn:ietf:params:scim:schemas:core:2.0:Group'
GROUP_OP_SCHEMA = 'urn:ietf:params:scim:api:messages:2.0:PatchOp'
def get_user_id(username, all_users):
return next(user for user in all_users if user['userName'] == username)['id']
def add_groups(groups, groups_uri, headers):
    # Add each group (separate requests)
    for group in groups:
        log.info('Adding group: %s', group['name'])
res = requests.post(groups_uri, headers=headers, json={
'schemas':[
GROUP_SCHEMA
],
'displayName':group['name']
})
try:
res.raise_for_status()
except HTTPError:
log.error('Failed to add group {} - Reason: \n\n{}\n\n'.format(group['name'], res.content))
else:
            log.info('Added group: %s', group['name'])
def add_users(users, users_uri, headers):
    # Add each user (separate requests)
    for user in users:
        log.info('Adding user: %s', user['name'])
entitlements_arg = [{'value': e} for e in user['entitlements']]
res = requests.post(users_uri, headers=headers, json={
'schemas':[
USER_SCHEMA
],
'userName':user['name'],
'entitlements':entitlements_arg
})
try:
res.raise_for_status()
except HTTPError:
log.error('Failed to add user {} - Reason: \n\n{}\n\n'.format(user['name'], res.content))
else:
            log.info('Added user: %s', user['name'])
def apply_group_memberships(users, groups, users_uri, groups_uri, headers):
# Get lists of all existing users and groups:
try:
log.info('Retrieving list of users')
all_users_res_raw = requests.get(users_uri, headers=headers)
all_users = json.loads(all_users_res_raw.text)['Resources']
except Exception as err:
log.error('Failed to get list of users id lookups. Unable to link users to groups - Reason: \n\n{}\n\n'.format(err))
return 1
# Apply group memberships
for group in groups:
        log.info('Adding memberships to group %s', group['name'])
member_ids = [get_user_id(member, all_users) for member in group['members']]
res = requests.post(groups_uri, headers=headers, json={
'schemas': [
GROUP_OP_SCHEMA
],
'Operations':[{
'op':'add',
'value':{
'members': [{'value': member_id} for member_id in member_ids]
}
}]
})
try:
res.raise_for_status()
except HTTPError:
            log.error('Failed to add all memberships for group {} - Reason: \n\n{}\n\n'.format(group['name'], res.content))
else:
log.info('Added memberships for group: {}'.format(group['name']))
def run(params):
headers = {
'Authorization': 'Bearer ' + params['db_pat'],
'Content-Type': 'application/scim+json',
'Accept': 'application/scim+json'
}
groups_uri = params['db_host'] + GROUPS_ENDPOINT
users_uri = params['db_host'] + USERS_ENDPOINT
with open(params['users_groups_path']) as users_groups_file:
users_groups_json = users_groups_file.read()
users_and_groups = json.loads(users_groups_json)
users = users_and_groups['users']
log.info('Read ' + str(len(users)) +' users')
groups = users_and_groups['groups']
    log.info('Read ' + str(len(groups)) + ' groups')
add_groups(groups, groups_uri, headers)
add_users(users, users_uri, headers)
apply_group_memberships(users, groups, users_uri, groups_uri, headers)
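# Expected input shapes (inferred from run() above; values are illustrative):
#   params = {
#       'db_pat': '<personal access token>',
#       'db_host': 'https://<workspace-host>',
#       'users_groups_path': 'users_groups.json',
#   }
# and the users/groups file is of the form
#   {
#       "users":  [{"name": "user@example.com", "entitlements": ["allow-cluster-create"]}],
#       "groups": [{"name": "engineers", "members": ["user@example.com"]}]
#   }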
|
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, CompressedImage
class ImageReceiverROS:
def __init__(self, topic_name):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber(topic_name, Image, self.callback, queue_size=1)
self.cv_image = None
self.skip_cntr = 0
def callback(self, data):
try:
self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logwarn(e)
def getImage(self):
if self.cv_image is None:
self.skip_cntr += 1
if self.skip_cntr > 3:
self.skip_cntr = 0
rospy.logwarn('No image for 3 times...')
return self.cv_image
def yolo_bbox_2_ros_bbox(yolo_bbox, labels):
from wr8_ai.msg import BoundingBox
# print("Input: {}",format(yolo_bbox.get_str()))
ros_box = BoundingBox()
ros_box.Class = labels[yolo_bbox.get_label()]
ros_box.probability = yolo_bbox.c
ros_box.xmin = yolo_bbox.xmin
ros_box.ymin = yolo_bbox.ymin
ros_box.xmax = yolo_bbox.xmax
ros_box.ymax = yolo_bbox.ymax
# print("Output: {}",format(ros_box))
return ros_box
class ImageReceiverROS:
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image, CompressedImage
def __init__(self, topic_name):
self.bridge = CvBridge()
self.image_sub = rospy.Subscriber(topic_name, Image, self.callback_img, queue_size=1)
# self.image_sub_compres = rospy.Subscriber("camera_compr", CompressedImage, self.callback_img_compressed, queue_size=1)
self.cv_image = None
self.cv_image_comp = None
def callback_img(self, data):
try:
self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
rospy.logwarn(e)
def callback_img_compressed(self, data):
        np_arr = np.frombuffer(data.data, np.uint8)  # np.fromstring is deprecated
self.cv_image_comp = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
def get_image(self):
return self.cv_image
def get_image_compressed(self):
return self.cv_image_comp
import cv2
import numpy as np
class ImagePublisherROS:
def __init__(self, topic_name):
self.bridge = CvBridge()
self.image_pub = rospy.Publisher(topic_name + '/compressed', CompressedImage, queue_size=10)
def publish(self, cv_image):
msg = CompressedImage()
msg.header.stamp = rospy.Time.now()
msg.format = "png"
        msg.data = np.array(cv2.imencode('.png', cv_image)[1]).tobytes()
self.image_pub.publish(msg)
|
import uuid
import os
from flask_pymongo import PyMongo, wrappers
from database import mongo
from util import sanitize_input
from models.user import User
if os.path.exists(".installed"):
raise "webdock is installed"
print("Webdock installation")
username = sanitize_input(input("username: "))
password = sanitize_input(input("password: "))
uid = uuid.uuid4()
level = User.ADMIN
user = User(username, password, level, uid)
db: wrappers.Collection = mongo.db.users
db.insert_one(user.__dict__)
open(".installed", "wb").close() |
"""
Copyright 2020 ETH Zurich, Secure, Reliable, and Intelligent Systems Lab
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from config import config, Device
if config.device == Device.CPU:
from fppoly import *
else:
from fppoly_gpu import *
from elina_interval import *
from elina_abstract0 import *
from elina_manager import *
from ai_milp import *
from functools import reduce
from refine_activation import *
def calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer = False, destroy=True, use_krelu = False):
layerno = nn.calc_layerno()
bounds = box_for_layer(man, element, layerno)
num_neurons = get_num_neurons_in_layer(man, element, layerno)
itv = [bounds[i] for i in range(num_neurons)]
lbi = [x.contents.inf.contents.val.dbl for x in itv]
ubi = [x.contents.sup.contents.val.dbl for x in itv]
if is_refine_layer:
nlb.append(lbi)
nub.append(ubi)
if destroy:
elina_interval_array_free(bounds,num_neurons)
return lbi, ubi
return layerno, bounds, num_neurons, lbi, ubi
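# Note: calc_bounds returns (lbi, ubi) when destroy=True (the default); with
# destroy=False it returns (layerno, bounds, num_neurons, lbi, ubi) and leaves the
# interval array `bounds` to be freed by the caller via elina_interval_array_free.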
def add_input_output_information_deeppoly(self, input_names, output_name, output_shape):
"""
sets for an object the three fields:
- self.output_length
- self.input_names
- self.output_name
which will mainly be used by the Optimizer, but can also be used by the Nodes itself
Arguments
---------
self : Object
will be a DeepzonoNode, but could be any object
input_names : iterable
iterable of strings, each one being the name of another Deepzono-Node
output_name : str
name of self
output_shape : iterable
iterable of ints with the shape of the output of this node
Return
------
None
"""
if len(output_shape)==4:
self.output_length = reduce((lambda x, y: x*y), output_shape[1:len(output_shape)])
else:
self.output_length = reduce((lambda x, y: x*y), output_shape[0:len(output_shape)])
self.input_names = input_names
self.output_name = output_name
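    # Example: for a 4D output_shape such as (1, 3, 32, 32) the batch dimension is
    # dropped and self.output_length = 3 * 32 * 32 = 3072; for a flat shape such as
    # (1024,) the product over all entries is used, giving 1024.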
class DeeppolyInput:
def __init__(self, specLB, specUB, input_names, output_name, output_shape,
lexpr_weights=None, lexpr_cst=None, lexpr_dim=None,
uexpr_weights=None, uexpr_cst=None, uexpr_dim=None,
expr_size=0, spatial_constraints=None):
"""
Arguments
---------
specLB : numpy.ndarray
1D array with the lower bound of the input spec
specUB : numpy.ndarray
1D array with the upper bound of the input spec
lexpr_weights: numpy.ndarray
ndarray of doubles with coefficients of lower polyhedral expressions
lexpr_cst: numpy.ndarray
ndarray of doubles with the constants of lower polyhedral expressions
lexpr_dim: numpy.ndarray
ndarray of unsigned int with the indexes of pixels from the original image for the lower polyhedral expressions
uexpr_weights: numpy.ndarray
ndarray of doubles with coefficients of upper polyhedral expressions
uexpr_cst: numpy.ndarray
ndarray of doubles with the constants of upper polyhedral expressions
uexpr_dim: numpy.ndarray
ndarray of unsigned int with the indexes of pixels from the original image for the upper polyhedral expressions
        expr_size: int
            unsigned int with the size of the polyhedral expressions
"""
self.specLB = np.ascontiguousarray(specLB, dtype=np.double)
self.specUB = np.ascontiguousarray(specUB, dtype=np.double)
if lexpr_weights is not None:
self.lexpr_weights = np.ascontiguousarray(lexpr_weights, dtype=np.double)
else:
self.lexpr_weights = None
if lexpr_cst is not None:
self.lexpr_cst = np.ascontiguousarray(lexpr_cst, dtype=np.double)
else:
self.lexpr_cst = None
if lexpr_dim is not None:
self.lexpr_dim = np.ascontiguousarray(lexpr_dim, dtype=np.uintp)
else:
self.lexpr_dim = None
if uexpr_weights is not None:
self.uexpr_weights = np.ascontiguousarray(uexpr_weights, dtype=np.double)
else:
self.uexpr_weights = None
if uexpr_cst is not None:
self.uexpr_cst = np.ascontiguousarray(uexpr_cst, dtype=np.double)
else:
self.uexpr_cst = None
if uexpr_dim is not None:
            self.uexpr_dim = np.ascontiguousarray(uexpr_dim, dtype=np.uintp)
else:
self.uexpr_dim = None
self.expr_size = expr_size
self.spatial_gamma = -1
self.spatial_indices = np.ascontiguousarray([], np.uint64)
self.spatial_neighbors = np.ascontiguousarray([], np.uint64)
if spatial_constraints is not None:
self.spatial_gamma = spatial_constraints['gamma']
self.spatial_indices = np.ascontiguousarray(
spatial_constraints['indices'], np.uint64
)
self.spatial_neighbors = np.ascontiguousarray(
spatial_constraints['neighbors'], np.uint64
)
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def transformer(self, man):
"""
creates an abstract element from the input spec
Arguments
---------
man : ElinaManagerPtr
inside this manager the abstract element will be created
Return
------
output : ElinaAbstract0Ptr
new abstract element representing the element specified by self.specLB and self.specUB
"""
if self.expr_size == 0:
return fppoly_from_network_input(man, 0, len(self.specLB), self.specLB, self.specUB)
else:
return fppoly_from_network_input_poly(
man, 0, len(self.specLB), self.specLB, self.specUB,
self.lexpr_weights, self.lexpr_cst, self.lexpr_dim,
self.uexpr_weights, self.uexpr_cst, self.uexpr_dim,
self.expr_size, self.spatial_indices, self.spatial_neighbors,
len(self.spatial_indices), self.spatial_gamma
)
class DeeppolyNode:
"""
Parent class for all the classes that implement fully connected layers
"""
def __init__(self, weights, bias, input_names, output_name, output_shape):
"""
Arguments
---------
weights : numpy.ndarray
matrix of the fully connected layer (must be 2D)
bias : numpy.ndarray
bias of the fully connected layer
"""
self.weights = np.ascontiguousarray(weights, dtype=np.double)
self.bias = np.ascontiguousarray(bias, dtype=np.double)
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def get_arguments(self):
"""
facilitates putting together all the arguments for the transformers in the child classes
Return
------
output : tuple
            the entries are pointers to the rows of the matrix, the bias, the length of the output, the length of the input, the predecessors, and the number of predecessors
"""
xpp = self.get_xpp()
return xpp, self.bias, self.weights.shape[0], self.weights.shape[1], self.predecessors, len(self.predecessors)
def get_xpp(self):
"""
helper function to get pointers to the rows of self.weights.
Return
------
output : numpy.ndarray
pointers to the rows of the matrix
"""
return (self.weights.__array_interface__['data'][0]+ np.arange(self.weights.shape[0])*self.weights.strides[0]).astype(np.uintp)
class DeeppolyFCNode(DeeppolyNode):
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
"""
transformer for the first layer of a neural network, if that first layer is fully connected with relu
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
handle_fully_connected_layer(man, element, *self.get_arguments())
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=refine)
nn.ffn_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyNonlinearity:
def __init__(self, input_names, output_name, output_shape):
"""
Arguments
---------
input_names : iterable
iterable with the name of the vector you want to apply the non-linearity to
output_name : str
name of this node's output
output_shape : iterable
iterable of ints with the shape of the output of this node
"""
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def get_arguments(self, man, element):
"""
used by the children of this class to easily get the inputs for their transformers
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : tuple
arguments for the non-linearity transformers like Relu or Sigmoid
"""
length = self.output_length
return man, element, length, self.predecessors, len(self.predecessors)
class DeeppolyReluNode(DeeppolyNonlinearity):
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing, K=3, s=-2, use_milp=False, approx=True):
"""
transforms element with handle_relu_layer
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
length = self.output_length
if refine:
refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp,
timeout_milp, use_default_heuristic, 'deeppoly',
K=K, s=s, use_milp=use_milp, approx=approx)
else:
handle_relu_layer(*self.get_arguments(man, element), use_default_heuristic)
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=False)
nn.activation_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolySignNode(DeeppolyNonlinearity):
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp,
use_default_heuristic, testing, K=3, s=-2, approx=True):
"""
transforms element with handle_sign_layer
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
#if refine:
# refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp, timeout_milp, use_default_heuristic, 'deeppoly')
#else:
handle_sign_layer(*self.get_arguments(man, element))
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=False)
nn.activation_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolySigmoidNode(DeeppolyNonlinearity):
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing, K=3, s=-2, use_milp=False, approx=True):
"""
transforms element with handle_sigmoid_layer
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
length = self.output_length
if refine:
refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp, timeout_milp, use_default_heuristic, 'deeppoly', K=K, s=s, use_milp=use_milp)
else:
handle_sigmoid_layer(*self.get_arguments(man, element), use_default_heuristic)
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=refine)
nn.activation_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyTanhNode(DeeppolyNonlinearity):
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing, K=3, s=-2, use_milp=False, approx=True):
"""
transforms element with handle_tanh_layer
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
length = self.output_length
if refine:
refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp, timeout_milp, use_default_heuristic, 'deeppoly',K=K, s=s, use_milp=use_milp)
else:
handle_tanh_layer(*self.get_arguments(man, element), use_default_heuristic)
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=refine)
nn.activation_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyLeakyReluNode(DeeppolyNonlinearity):
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing, alpha=0.01):
"""
transforms element with handle_leakyrelu_layer
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
length = self.output_length
if False:
refine_activation_with_solver_bounds(nn, self, man, element, nlb, nub, relu_groups, timeout_lp, timeout_milp, use_default_heuristic, 'deeppoly')
else:
handle_leakyrelu_layer(*self.get_arguments(man, element), alpha, use_default_heuristic)
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, use_krelu=refine)
nn.activation_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyConv2dNode:
def __init__(self, filters, strides, pad_top, pad_left, pad_bottom, pad_right, bias, image_shape, input_names, output_name, output_shape):
"""
collects the information needed for the handle_convolutional_layer transformer and brings it into the required shape
Arguments
---------
filters : numpy.ndarray
the actual 4D filter of the convolutional layer
strides : numpy.ndarray
1D array with two elements, the stride in the height and width directions
bias : numpy.ndarray
the bias of the layer
image_shape : numpy.ndarray
1D array of ints with 3 entries [height, width, channels] representing the shape of the image that is passed to the conv-layer
"""
self.image_shape = np.ascontiguousarray(image_shape, dtype=np.uintp)
self.filters = np.ascontiguousarray(filters, dtype=np.double)
self.strides = np.ascontiguousarray(strides, dtype=np.uintp)
self.bias = np.ascontiguousarray(bias, dtype=np.double)
self.out_size = (c_size_t * 3)(output_shape[1], output_shape[2], output_shape[3])
self.pad_top = pad_top
self.pad_left = pad_left
self.pad_bottom = pad_bottom
self.pad_right = pad_right
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def get_arguments(self):
"""
facilitates putting together all the arguments for the transformers in the child classes
Return
------
output : tuple
the arguments for the convolution transformer: the filters, the bias,
the image_shape, the kernel size, the number of filters, the strides,
the output size, the paddings (top, left, bottom, right), a has-bias flag,
and the predecessors with their count
"""
filter_size = (c_size_t * 2) (self.filters.shape[0], self.filters.shape[1])
numfilters = self.filters.shape[3]
strides = (c_size_t * 2)(self.strides[0], self.strides[1])
return self.filters, self.bias, self.image_shape, filter_size, numfilters, strides, self.out_size, self.pad_top, self.pad_left, self.pad_bottom, self.pad_right, True, self.predecessors, len(self.predecessors)
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
"""
transformer for a convolutional layer, if that layer is an intermediate of the network
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
handle_convolutional_layer(man, element, *self.get_arguments())
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True)
nn.conv_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyPaddingNode:
def __init__(self, pad_top, pad_left, pad_bottom, pad_right, image_shape, input_names,
output_name, output_shape):
"""
collects the information needed for the handle_padding_layer transformer and brings it into the required shape
Arguments
---------
pad_top, pad_left, pad_bottom, pad_right : int
the number of padded pixels on each side of the image
image_shape : numpy.ndarray
1D array of ints with 3 entries [height, width, channels] representing the shape of the image that is passed to this layer
"""
self.image_shape = np.ascontiguousarray(image_shape, dtype=np.uintp)
self.out_size = (c_size_t * 3)(output_shape[1], output_shape[2], output_shape[3])
self.pad_top = pad_top
self.pad_left = pad_left
self.pad_bottom = pad_bottom
self.pad_right = pad_right
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def get_arguments(self):
"""
facilitates putting together all the arguments for the transformers in the child classes
Return
------
output : tuple
the image_shape, the output size, the paddings (top, left, bottom, right),
and the predecessors with their count
"""
return self.image_shape, self.out_size, self.pad_top, self.pad_left, self.pad_bottom, self.pad_right, \
self.predecessors, len(self.predecessors)
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp,
use_default_heuristic, testing):
"""
transformer for a padding layer, if that layer is an intermediate of the network
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
handle_padding_layer(man, element, *self.get_arguments())
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True)
nn.pad_counter += 1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyPoolNode:
def __init__(self, input_shape, window_size, strides, pad_top, pad_left, pad_bottom, pad_right, input_names, output_name, output_shape, is_maxpool):
"""
collects the information needed for the handle_pool_layer transformer and brings it into the required shape
Arguments
---------
input_shape : numpy.ndarray
1D array of ints with 3 entries [height, width, channels] representing the shape of the image that is passed to the pooling layer
window_size : numpy.ndarray
1D array of ints with 2 entries [height, width] representing the window's size in these directions
strides : numpy.ndarray
1D array of ints with 2 entries [height, width] representing the stride in these directions
"""
self.input_shape = np.ascontiguousarray(input_shape, dtype=np.uintp)
self.window_size = np.ascontiguousarray(window_size, dtype=np.uintp)
self.strides = np.ascontiguousarray(strides, dtype=np.uintp)
self.pad_top = pad_top
self.pad_left = pad_left
self.pad_bottom = pad_bottom
self.pad_right = pad_right
self.output_shape = (c_size_t * 3)(output_shape[1],output_shape[2],output_shape[3])
self.is_maxpool = is_maxpool
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
"""
transformer for a maxpool/averagepool layer, this can't be the first layer of a network
Arguments
---------
man : ElinaManagerPtr
man to which element belongs
element : ElinaAbstract0Ptr
abstract element onto which the transformer gets applied
Return
------
output : ElinaAbstract0Ptr
abstract element after the transformer
"""
h, w = self.window_size
H, W, C = self.input_shape
#assert self.pad_top==self.pad_bottom==self.pad_right==self.pad_left==0, "Padded pooling not implemented"
handle_pool_layer(man, element, (c_size_t *3)(h,w,1), (c_size_t *3)(H, W, C), (c_size_t *2)(self.strides[0], self.strides[1]), self.pad_top, self.pad_left, self.pad_bottom, self.pad_right, self.output_shape, self.predecessors, len(self.predecessors), self.is_maxpool)
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, destroy=False)
nn.pool_counter += 1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyResidualNode:
def __init__(self, input_names, output_name, output_shape):
"""
Arguments
---------
input_names : iterable
iterable with the names of the two nodes you want to add
output_name : str
name of this node's output
output_shape : iterable
iterable of ints with the shape of the output of this node
"""
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
handle_residual_layer(man,element,self.output_length,self.predecessors, len(self.predecessors))
calc_bounds(man, element, nn, nlb, nub, relu_groups, use_krelu=refine, is_refine_layer=True)
# print("Residual ", nn.layertypes[layerno],layerno)
nn.residual_counter += 1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyGather:
def __init__(self, indexes, input_names, output_name, output_shape):
"""
collects the information needed for the handle_gather_layer transformer and brings it into the required shape
Arguments
---------
indexes : numpy.ndarray
1D array of ints with the indices of the input elements to gather
"""
self.indexes = np.ascontiguousarray(indexes, dtype=np.uintp)
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
handle_gather_layer(man, element, self.indexes)
return element
class DeeppolyConcat:
def __init__(self, width, height, channels, input_names, output_name, output_shape):
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
self.width = width
self.height = height
self.channels = (c_size_t * len(channels))()
for i, channel in enumerate(channels):
self.channels[i] = channel
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
handle_concatenation_layer(man, element, self.predecessors, len(self.predecessors), self.channels)
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, destroy=False)
nn.concat_counter += 1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyTile:
def __init__(self, repeats, input_names, output_name, output_shape):
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
self.repeats = repeats
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
handle_tiling_layer(man, element, self.predecessors, len(self.predecessors), self.repeats)
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True, destroy=False)
nn.tile_counter += 1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolySubNode:
def __init__(self, bias, is_minuend, input_names, output_name, output_shape):
"""
collects the information needed for the handle_sub_layer transformer and brings it into the required shape
Arguments
---------
bias : numpy.ndarray
the constant term of the subtraction
is_minuend : bool
whether the constant is the minuend (constant - input) or the subtrahend (input - constant)
"""
self.bias = np.ascontiguousarray(bias.reshape(-1), dtype=np.float64)
self.is_minuend = is_minuend
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
layerno = nn.calc_layerno()
num_neurons = get_num_neurons_in_layer(man, element, layerno)
handle_sub_layer(man, element, self.bias, self.is_minuend, num_neurons, self.predecessors, len(self.predecessors))
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True)
nn.ffn_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
class DeeppolyMulNode:
def __init__(self, bias, input_names, output_name, output_shape):
"""
collects the information needed for the handle_mul_layer transformer and brings it into the required shape
Arguments
---------
bias : numpy.ndarray
the constant that the input is multiplied with elementwise
"""
self.bias = np.ascontiguousarray(bias.reshape(-1), dtype=np.float64)
add_input_output_information_deeppoly(self, input_names, output_name, output_shape)
def transformer(self, nn, man, element, nlb, nub, relu_groups, refine, timeout_lp, timeout_milp, use_default_heuristic, testing):
handle_mul_layer(man, element, self.bias, len(self.bias.reshape(-1)), self.predecessors, len(self.predecessors))
calc_bounds(man, element, nn, nlb, nub, relu_groups, is_refine_layer=True)
nn.ffn_counter+=1
if testing:
return element, nlb[-1], nub[-1]
return element
|
"""
Simple cubic packing(scp) simulator
scp is the simplest packing structure. Each sphere is
coordinated by max. 6 neighbouring spheres.
Volume of a unit cell = (2 * radius) ^ 3 = 8 * (radius ^ 3)
No. of spheres in one unit cell = 1
This script/module calculates the maximum no. of spheres that can be added to a cuboid with
the scp structure.
Input parameters:
{
"radius":
"length":
"width":
"height":
}
Output:
1. eval(theory): theoretical value of capacity
Volume of container
----------------------- * no. of spheres in single unit cell
Volume of unit cell
The theoretical calculation suits fast and large-scale
estimation, though it does not consider whether the spheres
actually fit inside the container.
2. eval(model): simulation value of capacity
Rows of spheres are added to a virtual container.
Each row holds a certain no. of spheres depending on the length of the
container and the packing structure. These rows fill an
entire layer, and further layers are filled until the max. no.
of layers is reached.
"""
import math
class scp:
def __init__(self,param):
self.l=param['length']
self.w=param['width']
self.h=param['height']
self.r=param['radius']
self.welcome()
def welcome(self):
print("Entering scp estimation -")
print()
print("Radius :",self.r)
print("Length :",self.l)
print("Width :",self.w)
print("Height :",self.h)
print()
def eval_theory(self):
print("Theoretical scp calculation :")
volume_container = self.l * self.w * self.h
volume_unitcell = 8 * math.pow(self.r,3)
print(" Volume of container:",volume_container)
print(" Volume of single unit cell:",volume_unitcell)
print(" No. of spheres in one unit cell is 1.")
count = int(volume_container / volume_unitcell) *1
print(" No. of spheres in the container is ",count,"by SCP theory.")
print()
return count
def eval_model(self):
print("Building scp models:")
# Constant: each row/layer are separated by 2 * radius
sep = 2 * self.r
print(" Separation between layers:",sep)
# For height n, max. no. of layers = height/separation
# max_layers should be an integer
max_layers = int(self.h/sep)
lastword = "layer" if max_layers ==1 else "layers"
print(" This container can store a max of",max_layers,
lastword+".")
print(" Separation between rows of spheres within a layer:", sep)
# Accumulate the total numbers of spheres
count = 0
for i in range(0, max_layers):
max_rows = int(self.w/sep)
# Accumulate the total numbers of spheres in a layer
layer_count = 0
for j in range(0, max_rows):
num = int(self.l/(2* self.r))
layer_count = layer_count + num
lastword = "sphere" if layer_count ==1 else "spheres"
print(" Layer",i+1,"has",max_rows,"rows and a total of ",
layer_count, lastword+".")
count = count + layer_count
print(" No. of sphere in the container is ",count,"by SCP model.")
print()
print("---------------------------------------------------------------------------")
return count
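# A minimal usage sketch (assumed, not part of the original module): estimate how many
# spheres of radius 1 fit into a 10 x 10 x 10 container, both by theory and by the model.
if __name__ == "__main__":
    packer = scp({"radius": 1, "length": 10, "width": 10, "height": 10})
    count_theory = packer.eval_theory()   # volume-ratio estimate: 1000 / 8 = 125
    count_model = packer.eval_model()     # row-by-row, layer-by-layer count: 5 * 5 * 5 = 125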
|
import math
import numpy as np
from astropy.coordinates import SkyCoord
from ..constant import ALPHA_NGP, DELTA_NGP, L_NCP, AU, tropical_year
def parse_pairwise(arg):
"""Parse value with error"""
if (isinstance(arg, list) or isinstance(arg, tuple)) and \
len(arg)==2:
return arg
else:
raise ValueError
def parse_value_err(arg):
if isinstance(arg, float) or isinstance(arg, int):
return arg, None
elif (isinstance(arg, list) or isinstance(arg, tuple)) and \
len(arg)==2:
return arg
else:
raise ValueError
def compute_UVW(**kwargs):
# ra, dec, rv, parallax, pm_ra, pm_dec,
# rv_err=None, parallax_err=None, pm_ra_err=None, pm_dec_err=None,
# hand='left'):
"""Compute Galactic velocity components (*U*, *V*, *W*).
Args:
ra (float): Right Ascension in degree at epoch J2000.0.
dec (float): Declination in degree at epoch J2000.0.
eqcoord (:py:class:`astropy.coordinates.SkyCoord`): Sky coordinate of
object. Either (`ra`, `dec`) or `eqcoord` is necessary
pm (list or tuple): Proper motion in mas/yr. Either (`pm_RA`, `pm_Dec`)
or ((`pm_RA`, `pm_RA_err`), (`pm_Dec`, `pm_Dec_err`))
rv (float, list or tuple): Radial velocity in km/s. Either `rv` as a
float or (`rv`, `rv_err`)
parallax (float, list, or tuple): Parallax in mas. Either `parallax` as
a float or (`parallax`, `parallax_err`).
U_plus (str): Positive direction (towards Galactic center or
anti-center) of *U* component. Default is `center`.
[`center`\ \|\ `anticenter`]
Returns:
UVW: *tuple*
(*U*, *V*, *W*) velocities or ((*U*, *U_err*), (*V*, *V_err*),
(*W*, *W_err*)) if all the uncertainties to parallax, proper motion
and radial velocity are given.
Notes:
.. |kms| replace:: km s\ :sup:`−1`
Calculate the Galactic space velocity components (*U*, *V*, *W*) using
the formula given by `Johnson & Soderblom 1987
<http://adsabs.harvard.edu/abs/1987AJ.....93..864J>`_.
The coordinate, parallax, and proper motion are required. The resulting
velocities are relative to the Sun. The positive direction of *U* is
defined as towards Galactic center in right-handed system, and towards
Galactic anticenter in left-handed system. To correct to the local
standard of rest (LSR), the solar motion (*U*:sub:`LSR`, *V*:sub:`LSR`,
*W*:sub:`LSR`) are needed, e.g. (9.6 ± 3.9, 14.6 ± 5.0, 9.3 ± 1.0) |kms|
(`Reid et al. 2014
<http://adsabs.harvard.edu/abs/2014ApJ...783..130R>`_).
Examples
---------
Calculate (*U*, *V*, *W*) velocities relative to the sun for HD 9562 (HIP
7276). The heliocentric radial velocity is −13.3 |kms| (`Bensby et al. 2003
<http://adsabs.harvard.edu/abs/2003A&A...410..527B>`_, Table 2).
.. code-block:: python
from stella.catalog.find_catalog import find_HIP
from stella.kinetics.orbit import compute_UVW
hip = 7276
item = find_HIP(hip)
u, v, w = compute_UVW(ra=item['RAdeg'], dec=item['DEdeg'], parallax=item['Plx'],
rv=-13.3, pm=(item['pmRA'], item['pmDE']))
print('%+6.2f %+6.2f %+6.2f'%(u, v, w))
# output: -8.86 -26.35 +12.39
References
-----------
* `Bensby et al., 2003, A&A, 410, 527 <http://adsabs.harvard.edu/abs/2003A&A...410..527B>`_
* `Johnson & Soderblom, 1987, AJ, 93, 864 <http://adsabs.harvard.edu/abs/1987AJ.....93..864J>`_
* `Reid et al. 2014, ApJ, 783, 130 <http://adsabs.harvard.edu/abs/2014ApJ...783..130R>`_
"""
sin = math.sin
cos = math.cos
pi = math.pi
alpha = ALPHA_NGP/180.*pi
delta = DELTA_NGP/180.*pi
theta = L_NCP/180.*pi
# parse RA and Dec
if 'eqcoord' in kwargs:
eqcoord = kwargs.pop('eqcoord')
if isinstance(eqcoord, SkyCoord):
icrs = eqcoord.icrs
ra = icrs.ra.degree
dec = icrs.dec.degree
else:
ra, dec = parse_pairwise(eqcoord)
elif 'ra' in kwargs and 'dec' in kwargs:
ra = kwargs.pop('ra')
dec = kwargs.pop('dec')
else:
raise ValueError
ra = ra/180.*pi
dec = dec/180.*pi
# parse RV
if 'rv' in kwargs:
rv, rv_err = parse_value_err(kwargs.pop('rv'))
# parse distance
if 'distance' in kwargs:
d, d_err = parse_value_err(kwargs.pop('distance'))
elif 'parallax' in kwargs:
para, para_err = parse_value_err(kwargs.pop('parallax'))
d = 1000./para
if para_err is None:
d_err = None
else:
d_err = d*para_err/para
else:
raise ValueError
# parse proper motion
if 'pm' in kwargs:
input_pm_ra, input_pm_dec = parse_pairwise(kwargs.pop('pm'))
pm_ra, pm_ra_err = parse_value_err(input_pm_ra)
pm_dec, pm_dec_err = parse_value_err(input_pm_dec)
pm_ra *= 1e-3
pm_dec *= 1e-3
if pm_ra_err is not None:
pm_ra_err *= 1e-3
if pm_dec_err is not None:
pm_dec_err *= 1e-3
else:
raise ValueError
T1 = np.mat([[ cos(theta), sin(theta), 0],
[ sin(theta), -cos(theta), 0],
[ 0, 0, 1]])
T2 = np.mat([[-sin(delta), 0, cos(delta)],
[ 0, -1, 0],
[ cos(delta), 0, +sin(delta)]])
T3 = np.mat([[ cos(alpha), sin(alpha), 0],
[ sin(alpha), -cos(alpha), 0],
[ 0, 0, 1]])
T = T1*T2*T3
U_plus = kwargs.pop('U_plus', 'center')
if U_plus == 'center':
pass
elif U_plus == 'anticenter':
T[0][:] = -T[0][:]
else:
raise ValueError
A1 = np.mat([[ cos(ra), sin(ra), 0],
[ sin(ra), -cos(ra), 0],
[ 0, 0, -1]])
A2 = np.mat([[ cos(dec), 0, -sin(dec)],
[ 0, -1, 0],
[-sin(dec), 0, -cos(dec)]])
A = A1*A2
B = T*A
k = AU*1e-3/tropical_year/86400 # 1 AU/year in unit of km/s
x = np.mat([[rv],
[k*pm_ra*d],
[k*pm_dec*d]])
U, V, W = np.array(B*x).flatten()
if None in [pm_ra_err, pm_dec_err, rv_err, d_err]:
return (U, V, W)
else:
C = np.mat(np.array(B)**2)
e11 = rv_err**2  # squared so that all three entries are variances, consistent with e12 and e13
e12 = (k*d)**2*(pm_ra_err**2 + (pm_ra*d_err/d)**2)
e13 = (k*d)**2*(pm_dec_err**2 + (pm_dec*d_err/d)**2)
e1 = np.mat([[e11],[e12],[e13]])
e2c = 2.*pm_ra*pm_dec*k**2*d_err**2/d**4
e = C*e1
U_err = math.sqrt(e[0,0] + e2c*B[0,1]*B[0,2])
V_err = math.sqrt(e[1,0] + e2c*B[1,1]*B[1,2])
W_err = math.sqrt(e[2,0] + e2c*B[2,1]*B[2,2])
return ((U, U_err), (V, V_err), (W, W_err))
def compute_GalXYZ(**kwargs):
"""Compute Galactic position (*X*, *Y*, *Z*) in unit of kpc.
Args:
ra (float): Right Ascension in degree at epoch J2000.0
dec (float): Declination in degree at epoch J2000.0
eqcoord (:py:class:`astropy.coordinates.SkyCoord`, optional): Sky
coordinate of object
galactic (list or tuple, optional): Galactic coordinate (`l`, `b`)
l (float, optional): Galactic longitude in degree
b (float, optional): Galactic latitude in degree
distance (float, list, or tuple): Distance in pc. Either `distance` as a
float or (`distance`, `distance_err`)
parallax (float, list, or tuple): Parallax in mas. Either `parallax` as
a float or (`parallax`, `parallax_err`)
R0 (float): Solar distance to the Galactic center in kpc
Returns:
tuple: Galactic position (*x*, *y*, *z*) in unit of kpc
"""
# parse RA and Dec
if 'eqcoord' in kwargs:
eqcoord = kwargs.pop('eqcoord')
if not isinstance(eqcoord, SkyCoord):
ra, dec = parse_pairwise(eqcoord)
frame = kwargs.pop('frame', 'icrs')
eqcoord = SkyCoord(ra, dec, frame=frame, unit='deg')
gal = eqcoord.galactic
l, b = gal.l.degree, gal.b.degree
elif 'ra' in kwargs and 'dec' in kwargs:
ra = kwargs.pop('ra')
dec = kwargs.pop('dec')
frame = kwargs.pop('frame', 'icrs')
eqcoord = SkyCoord(ra, dec, frame=frame, unit='deg')
gal = eqcoord.galactic
l, b = gal.l.degree, gal.b.degree
elif 'galactic' in kwargs:
l, b = parse_pairwise(kwargs.pop('galactic'))
elif 'l' in kwargs and 'b' in kwargs:
l = kwargs.pop('l')
b = kwargs.pop('b')
else:
raise ValueError
l = l/180.*math.pi
b = b/180.*math.pi
# parse distance
if 'distance' in kwargs:
d, d_err = parse_value_err(kwargs.pop('distance'))
elif 'parallax' in kwargs:
para, para_err = parse_value_err(kwargs.pop('parallax'))
d = 1000./para
if para_err is None:
d_err = None
else:
d_err = d*para_err/para
else:
raise ValueError
R0 = kwargs.pop('R0', 8.5)
d *= 1e-3
x = R0 - d*math.cos(b)*math.cos(l)
y = d*math.cos(b)*math.sin(l)
z = d*math.sin(b)
return (x, y, z)
def compute_Galorbit(**kwargs):
"""Calculate the stellar orbit in the Milky Way.
Args:
potential (list): List of Galactic potentials.
xyz (tuple or list): Galactic positions
uvw (tuple or list): Galactic space velocity
solar_uvw (tuple or list): Solar space velocity
t (list): List of integration time
Returns:
A tuple containing:
* x_lst (:class:`numpy.ndarray`)
* y_lst (:class:`numpy.ndarray`)
* z_lst (:class:`numpy.ndarray`)
Examples:
Calculate the orbit of the Sun
.. code-block:: python
solar_uvw = (9.6, 255.2, 9.3) # from Reid et al. 2014
t_lst = np.arange(0, 0.4, 0.0001) # in Gyr
x_lst, y_lst, z_lst = orbit.compute_Galorbit(
potential = potential_lst,
xyz=(R0,0.,0.),
uvw=(0.,0.,0.),
solar_uvw=solar_uvw,
t=t_lst)
Calculate the orbit of `HD 122563
<http://simbad.u-strasbg.fr/simbad/sim-id?Ident=HD+122563>`_ (HIP 68594)
.. code-block:: python
from stella.catalog import find_catalog
hip = 68594
item = find_catalog.find_HIP2(hip)
ra, dec = item['RAdeg'], item['DEdeg']
rv = (-26.58, 0.15) # from SIMBAD
parallax = (item['Plx'], item['e_Plx'])
pm = ((item['pmRA'], item['e_pmRA']),(item['pmDE'], item['e_pmDE']))
uvw = orbit.compute_UVW(ra=ra,dec=dec,rv=rv,parallax=parallax,pm=pm,U_plus='center')
xyz = orbit.compute_GalXYZ(ra=ra,dec=dec,parallax=parallax,R0=R0)
x1_lst, y1_lst, z1_lst = orbit.compute_Galorbit(
potential = potential_lst,
xyz=xyz,
uvw=uvw,
solar_uvw=solar_uvw,
t=t_lst)
"""
from scipy.integrate import odeint
from ..constant import pc
potential_lst = kwargs.pop('potential')
if 'xyz' in kwargs:
x, y, z = kwargs.pop('xyz')
if isinstance(x, list) or isinstance(x, tuple):
x, y, z = x[0], y[0], z[0]
elif isinstance(x, float):
pass
else:
raise ValueError
if 'uvw' in kwargs:
uvw = kwargs.pop('uvw')
if isinstance(uvw, tuple) or isinstance(uvw, list):
u, _ = parse_value_err(uvw[0])
v, _ = parse_value_err(uvw[1])
w, _ = parse_value_err(uvw[2])
else:
raise ValueError
solar_uvw = kwargs.pop('solar_uvw')
if isinstance(solar_uvw, tuple) or isinstance(solar_uvw, list):
solar_u, _ = parse_value_err(solar_uvw[0])
solar_v, _ = parse_value_err(solar_uvw[1])
solar_w, _ = parse_value_err(solar_uvw[2])
else:
raise ValueError
target_u = u + solar_u
target_v = v + solar_v
target_w = w + solar_w
t = kwargs.pop('t')
t_lst = t*1e9*365.2422*86400 # convert Gyr to second
def derive(var, t, potential_lst):
x, y, z, vx, vy, vz = var
acce_lst = np.array([potential.get_acce_cartesian(x, y, z)
for potential in potential_lst])
ax = acce_lst[:,0].sum()
ay = acce_lst[:,1].sum()
az = acce_lst[:,2].sum()
return [vx/pc, vy/pc, vz/pc, ax, ay, az]
vx, vy, vz = -target_u, target_v, target_w
var0 = x, y, z, vx, vy, vz
sol = odeint(derive, var0, t_lst, args=(potential_lst,))
x_lst = sol[:,0]
y_lst = sol[:,1]
z_lst = sol[:,2]
r_lst = np.sqrt(x_lst**2 + y_lst**2 + z_lst**2)
return x_lst, y_lst, z_lst
|
from typing import List
class Solution:
"""
Input: nums1 = [1,3], nums2 = [2]
Output: 2.00000
Explanation: merged array = [1,2,3] and median is 2.
"""
@staticmethod
def find_median_sorted_arrays(nums1_list: List[int], nums2_list: List[int]) -> float:
nums1_list.extend(nums2_list)
nums1_list.sort()
length: int = len(nums1_list)
if length % 2 == 0:
i_1 = length // 2 - 1
i = length // 2
num = float((nums1_list[i_1] + nums1_list[i]) / 2)
else:
i = length // 2
num = float(nums1_list[i])
return num
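# A minimal usage sketch (assumed, not part of the original solution): merging [1, 3] and [2]
# gives [1, 2, 3] with median 2.0; an even-length merge averages the two middle elements.
if __name__ == "__main__":
    assert Solution.find_median_sorted_arrays([1, 3], [2]) == 2.0
    assert Solution.find_median_sorted_arrays([1, 2], [3, 4]) == 2.5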
|
import gdb, socket, cPickle, os, sys, time
CWD = os.path.abspath(os.path.dirname(__file__))
sys.path.append(CWD)
from python_gdb_common import *
#import gdb_printers
class GdbDriver:
def __init__(self):
# We register some handlers for events
gdb.events.stop.connect (self.stopEvent)
# gdb.events.exited.connect(self.exitEvent)
self.gdbEvent = None
self.abortStatus = None
self.gdbFrames = []
self.waiting = False
# When this event gets set, we need to signal back to the driver immediately to process the event data
def setGdbEvent(self, eventType, eventData):
assert isinstance(eventData, dict), "error gdb event data must be contained by a dict"
if self.gdbEvent is not None:
self.abortStatus = "GdbDriver error, gdb event structure is being set multiple times without reset"
raise Exception(self.abortStatus)
self.gdbEvent = PyGdbObject()
self.gdbEvent.type = eventType
self.gdbEvent.payload = eventData
def connect(self):
global handlerPort
self.socket = socket.socket()
self.socket.connect(("127.0.0.1", handlerPort))
def stopEvent(self, event):
eventData = {}
bp = event.breakpoint
bpAttributes = ["number", "location"]
eventData ["type"] = type(bp).__name__
for attr in bpAttributes:
if hasattr(bp, attr):
eventData [attr] = getattr(bp, attr)
#
if event.inferior_thread is not None:
eventData ["threadNumber"] = event.inferior_thread.num
else:
eventData ["threadNumber"] = None
#
self.setGdbEvent("stopEvent", eventData)
#
if self.waiting:
self.waiting = False
self.socket.send(cPickle.dumps(self.gdbEvent))
self.session()
return True
def exitEvent(self, event):
return True
def runInferior(self):
runCommand="run"
while True:
gdb.execute(runCommand)
runCommand = "continue"
print "** continue **"
def sendAck(self, payload={}):
assert isinstance(payload, dict), "ack payloads must be dictionaries"
self.socket.send(cPickle.dumps(PyGdbObject({"type":"ack", "payload":payload})))
def sendError(self, payload):
assert isinstance(payload, str), "error payloads must be strings"
self.socket.send(cPickle.dumps(PyGdbObject({"type":"error", "payload":payload})))
def execute(self, command):
self.gdbEvent = None
self.abortStatus = None
try:
print command
gdb.execute(command, True)
except gdb.error as e:
self.sendError("%s" % (e))
return
#print self.abortStatus, self.gdbEvent
if self.abortStatus is not None:
self.sendError("%s" % (self.abortStatus))
elif self.gdbEvent is not None:
#import rpdb2; rpdb2.start_embedded_debugger("stepaside")
self.socket.send(cPickle.dumps(self.gdbEvent))
else:
self.sendAck()
def callstack(self):
self.gdbFrames = []
frame = gdb.selected_frame()
#import rpdb2; rpdb2.start_embedded_debugger("stepaside")
pyGdbFrames = []
while frame is not None:
fullName = frame.function().symtab.fullname()
lineNumber = frame.find_sal().line
functionName = frame.function().name
pyGdbFrames.append(PyGdbObject({
"fullName":fullName,
"lineNumber":lineNumber,
"functionName":functionName
}))
self.gdbFrames.append(frame)
frame = frame.older()
self.sendAck({"frames":pyGdbFrames})
def session(self):
while True:
cmdObj = cPickle.loads(self.socket.recv(TransferBufferSize))
if cmdObj.type == "execute":
self.execute(cmdObj.payload)
elif cmdObj.type == "callstack":
self.callstack()
elif cmdObj.type == "waitforbreak":
self.waiting = True
return
else:
print "Unknown session command: %s" % (cmdObj.type)
driver = GdbDriver()
driver.connect()
driver.session()
|
import json # we need to use the JSON package to load the data, since the data is stored in JSON format
from data_process import generate_data
def read_data():
'''Open the original json file with the data'''
# popularity_score : a popularity score for this comment (based on the number of upvotes) (type: float)
# children : the number of replies to this comment (type: int)
# text : the text of this comment (type: string)
# controversiality : a score for how "controversial" this comment is (automatically computed by Reddit)
# is_root : if True, then this comment is a direct reply to a post; if False, this is a direct reply to another comment
with open("../data/reddit.json") as fp:
data = json.load(fp)
return data
if __name__ == '__main__':
stopwords = open('../data/stopwords.txt').read().splitlines()
data = read_data()
# Generate data according to sections 3.1/3.2
generate_data(data, ['text', 'is_root', 'controversiality', 'children'], "default_top160", top_words=160)
generate_data(data, ['text', 'is_root', 'controversiality', 'children'], "default_top60", top_words=60)
generate_data(data, ['is_root', 'controversiality', 'children'], "default_notext")
# Generate new features
generate_data(data, ['is_root', 'controversiality', 'children', 'text'], "default_tfidf", dictType='tfidf')
generate_data(data, ['is_root', 'controversiality', 'children', 'text'], "default_feeling", dictType='feelings')
generate_data(data, ['is_root', 'children', 'text'], "default_noroot")
generate_data(data, ['children', 'text'], "only_children_text")
generate_data(data, ['children'], "only_children")
generate_data(data, ['children', 'square_children'], "only_children_square")
generate_data(data, ['children', 'square_children', 'text'], "square")
generate_data(data, ['children', 'square_children', 'cube_children'], "only_cube")
generate_data(data, ['children', 'square_children', 'cube_children', 'fourth_children'], "only_fourth")
generate_data(data, ['children', 'text'], "stopwords_children_120", top_words=120, stop_words=stopwords)
generate_data(data, ['children', 'text'], "stopwords_children_50", top_words=50, stop_words=stopwords)
generate_data(data, ['children', 'text', 'len_text'], "stopwords_len_top50", top_words=50, stop_words=stopwords)
generate_data(data, ['children', 'text', 'len_text'], "len_top50", top_words=50, )
generate_data(data,
['children', 'text', 'len_text', 'len_sentence', 'sentiment_neg', 'sentiment_neu', 'sentiment_pos',
'sentiment_compound'], "children_sentiment_top10", top_words=10)
generate_data(data,
['children', 'len_text', 'len_sentence', 'sentiment_neg', 'sentiment_neu', 'sentiment_pos',
'sentiment_compound'], "children_sentiment")
generate_data(data, [
'children',
'square_children',
'len_text',
'sentiment_neg',
'sentiment_neu',
'sentiment_pos',
'text'
], "most_important_features",
# stop_words=stopwords,
top_words=57)
# two best models
generate_data(data, ['text', 'is_root', 'controversiality', 'children', 'len_text'], "lenght_text",
top_words=62)
generate_data(data, ['text', 'is_root', 'controversiality', 'children', 'square_children'], "children_all",
top_words=57)
generate_data(data, ['text', 'is_root', 'controversiality', 'children', 'len_text', 'square_children'],
"best_combination", top_words=57)
generate_data(data, ['text', 'is_root', 'controversiality', 'children', 'len_text', 'square_children'],
"best_combination1", top_words=62)
generate_data(data, ['text', 'is_root', 'controversiality', 'children', 'len_text', 'square_children'],
"best_combination2", top_words=60)
# Some experiments to find best features. Only the best selected.
# # Naive implementation for searching features
# f = [
# 'text',
# 'is_root', 'controversiality', 'children',
# # 'square_children',
# 'len_text',
# # 'len_sentence', 'sentiment_neg',
# # 'sentiment_neu',
# # 'sentiment_pos',
# # 'sentiment_compound'
# ]
#
# # import itertools
# # i = 0
# # for L in range(1, len(f) + 1):
# # for subset in itertools.combinations(f, L):
# # print(i, list(subset))
# # generate_data(data, list(subset), "test"+str(i))
# # i+=1
#
# # Vary words
#
# f = [
# # ['text', 'is_root', 'children', 'square_children'],
# # ['text', 'controversiality', 'children', 'square_children'],
# ['text', 'is_root', 'controversiality', 'children', 'square_children'], # 1
# # ['text', 'is_root', 'controversiality', 'children', 'sentiment_neg'],
# # ['text', 'is_root', 'controversiality', 'children', 'sentiment_neu'], # 2
# # ['text', 'is_root', 'controversiality', 'children', 'sentiment_pos'],
# # ['text', 'is_root', 'controversiality', 'children', 'len_text'],
# # ['text', 'is_root', 'controversiality', 'children', 'len_sentence'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'cube_children', 'fourth_children'], # *
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'fourth_children'], # *
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'cube_children', 'fourth_children'], # *
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'len_text'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'sentiment_neg'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'sentiment_neu'], # 3
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'sentiment_pos'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'len_text', 'sentiment_neg'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'len_text', 'sentiment_neu'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'len_text', 'sentiment_pos'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'sentiment_neg', 'sentiment_neu'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'sentiment_neg', 'sentiment_pos'],
# # ['text', 'is_root', 'controversiality', 'children', 'square_children', 'sentiment_neu', 'sentiment_pos']
# ]
#
# # i = 0
# # for l in f:
# # print(i, l)
# # generate_data(data, l, "test" + str(i), top_words=57)
# # i += 1
|
# --------------------------------------------------------
# mcan-vqa (Deep Modular Co-Attention Networks)
# Licensed under The MIT License [see LICENSE for details]
# Written by Yuhao Cui https://github.com/cuiyuhao1996
# --------------------------------------------------------
from cfgs.path_cfgs import PATH
# from path_cfgs import PATH
import os, torch, random
import numpy as np
from types import MethodType
class Cfgs(PATH):
def __init__(self):
super(Cfgs, self).__init__()
# Set Devices
# If using multi-GPU training, set e.g. '0, 1, 2' instead
self.gpu = '0,1'
# Set RNG For CPU And GPUs
self.seed = 444
# -------------------------
# ---- Version Control ----
# -------------------------
# Define a specific name to start new training
# self.VERSION = 'Anonymous_' + str(self.SEED)
self.version = str(self.seed)
# Resume training
self.resume = False
# Used in Resume training and testing
self.ckpt_version = self.version
self.ckpt_epoch = 0
# Absolute checkpoint path; 'CKPT_VERSION' and 'CKPT_EPOCH' will be overridden
self.ckpt_path = '/drive/qiyuan/mcan-vqa/'
# Print loss every step
self.verbose = True
# ------------------------------
# ---- Data Provider Params ----
# ------------------------------
# {'train', 'val', 'test'}
self.run_mode = 'train'
# Set True to evaluate offline
self.eval_every_epoch = True
# Set True to save the prediction vector (Ensemble)
self.test_save_pred = False
# Pre-load the features into memory to increase the I/O speed
self.preload = False
# Define the 'train' 'val' 'test' data split
# (EVAL_EVERY_EPOCH triggered when set {'train': 'train'})
self.split = {
'train': 'train',
'val': 'val',
'test': 'test',
}
# An external method to set the train split
self.train_split = 'train+val+vg'
# Set True to use pretrained word embedding
# (GloVe: spaCy https://spacy.io/)
self.use_glove = True
# Word embedding matrix size
# (token size x WORD_EMBED_SIZE)
self.word_embed_size = 300
# Max length of question sentences
self.max_token = 14
# Filter the answer by occurrence
# self.ANS_FREQ = 8
# Max length of extracted faster-rcnn 2048D features
# (bottom-up and Top-down: https://github.com/peteanderson80/bottom-up-attention)
self.img_feat_pad_size = 60
# Faster-rcnn 2048D features
self.img_feat_size = 1024 # was 2048
# Default training batch size: 64
self.batch_size = 256
# Multi-thread I/O
self.num_workers = 4
# Use pin memory
# (Warning: pin memory can accelerate GPU loading but may
# increase the CPU memory usage when NUM_WORKERS is large)
self.pin_mem = True
# Large models cannot train with batch size 64;
# gradient accumulation can split the batch to reduce GPU memory usage
# (Warning: BATCH_SIZE should be divisible by GRAD_ACCU_STEPS)
self.grad_accu_steps = 1
# Set 'external': use external shuffle method to implement training shuffle
# Set 'internal': use pytorch dataloader default shuffle method
self.shuffle_mode = 'internal'
# ------------------------
# ---- Network Params ----
# ------------------------
# Model depth
# (Encoder and Decoder use the same depth)
self.layer = 6
# Model hidden size
# (512 as default, bigger will be a sharp increase of gpu memory usage)
self.hidden_size = 512
# Multi-head number in MCA layers
# (Warning: HIDDEN_SIZE should be divisible by MULTI_HEAD)
self.multi_head = 8
# Dropout rate for all dropout layers
# (dropout can prevent overfitting: [Dropout: a simple way to prevent neural networks from overfitting])
self.dropout_rate = 0.3
# MLP size in flatten layers
self.flat_mlp_size = 512
# Flatten the last hidden to vector with {n} attention glimpses
self.flat_glimpses = 1
self.flat_out_size = 512 # was 1024
# --------------------------
# ---- Optimizer Params ----
# --------------------------
# The base learning rate
self.lr_base = 1e-5 # vit is 3e-5
# Learning rate decay ratio
self.lr_decay_rate = 0.2
# Learning rate decay at {x, y, z...} epoch
self.lr_decay_list = [10, 20]
# Max training epoch
self.max_epoch = 30
# Gradient clip
# (default: -1 means not using)
self.grad_norm_clip = 0.3
# Adam optimizer betas and eps
self.opt_betas = (0.9, 0.98)
self.opt_eps = 1e-9
def parse_to_dict(self, args):
args_dict = {}
for arg in dir(args):
if not arg.startswith('_') and not isinstance(getattr(args, arg), MethodType):
if getattr(args, arg) is not None:
args_dict[arg] = getattr(args, arg)
return args_dict
def add_args(self, args_dict):
for arg in args_dict:
setattr(self, arg, args_dict[arg])
def proc(self):
"""
set args
"""
assert self.run_mode in ['train', 'val', 'test']
# ------------ Devices setup
os.environ['CUDA_VISIBLE_DEVICES'] = self.gpu
self.n_gpu = len(self.gpu.split(','))
self.devices = [_ for _ in range(self.n_gpu)]
# torch.set_num_threads(2)
# ------------ Seed setup
# fix pytorch seed
torch.manual_seed(self.seed)
if self.n_gpu < 2:
torch.cuda.manual_seed(self.seed)
else:
torch.cuda.manual_seed_all(self.seed)
torch.backends.cudnn.deterministic = True
# fix numpy seed
np.random.seed(self.seed)
# fix random seed
random.seed(self.seed)
if self.ckpt_path is not None:
print('Warning: you are now using CKPT_PATH args, '
'CKPT_VERSION and CKPT_EPOCH will not work')
self.ckpt_version = self.ckpt_path.split('/')[-1] + '_'
# ------------ Split setup
self.split['train'] = self.train_split
# if 'val' in self.split['train'].split('+') or self.run_mode not in ['train']:
# self.eval_every_epoch = False
if self.run_mode not in ['test']:
self.test_save_pred = False
# ------------ Gradient accumulate setup
assert self.batch_size % self.grad_accu_steps == 0
self.sub_batch_size = int(self.batch_size / self.grad_accu_steps)
# Using a small eval batch size will reduce GPU memory usage
self.eval_batch_size = self.sub_batch_size
# ------------ Networks setup
# FeedForwardNet size in every MCA layer
self.ff_size = int(self.hidden_size * 4)
# Per-head hidden size used in the attention computation
assert self.hidden_size % self.multi_head == 0
self.hidden_size_head = int(self.hidden_size / self.multi_head)
def __str__(self):
for attr in dir(self):
if not attr.startswith('__') and not isinstance(getattr(self, attr), MethodType):
print('{ %-17s }->' % attr, getattr(self, attr))
return ''
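# A minimal usage sketch (assumed, not part of the original repo; the real entry script may
# differ). The CLI flags below are hypothetical; only Cfgs / parse_to_dict / add_args / proc
# come from the class above, and running this still requires the repo's PATH base class.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--run_mode', type=str, default=None)
    parser.add_argument('--batch_size', type=int, default=None)
    cli_args = parser.parse_args()

    cfgs = Cfgs()
    cfgs.add_args(cfgs.parse_to_dict(cli_args))  # keep only the non-None CLI overrides
    cfgs.proc()                                  # finalize devices, seeds and derived sizes
    print(cfgs)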
|
FreeSerifItalic9pt7bBitmaps = [
0x11, 0x12, 0x22, 0x24, 0x40, 0x0C, 0xDE, 0xE5, 0x40, 0x04, 0x82, 0x20,
0x98, 0x24, 0x7F, 0xC4, 0x82, 0x23, 0xFC, 0x24, 0x11, 0x04, 0x83, 0x20,
0x1C, 0x1B, 0x99, 0x4D, 0x26, 0x81, 0xC0, 0x70, 0x1C, 0x13, 0x49, 0xA4,
0xDA, 0xC7, 0xC1, 0x00, 0x80, 0x1C, 0x61, 0xCF, 0x0E, 0x28, 0x30, 0xA0,
0xC5, 0x03, 0x34, 0xE7, 0xAE, 0x40, 0xB1, 0x05, 0x84, 0x26, 0x20, 0x99,
0x84, 0x3C, 0x03, 0x80, 0x6C, 0x06, 0xC0, 0x78, 0x06, 0x01, 0xEF, 0x66,
0x24, 0x24, 0xC3, 0x8C, 0x10, 0xE3, 0x87, 0xCE, 0xFA, 0x08, 0x21, 0x08,
0x61, 0x8C, 0x30, 0xC3, 0x0C, 0x30, 0x41, 0x02, 0x00, 0x10, 0x40, 0x82,
0x0C, 0x30, 0xC3, 0x0C, 0x61, 0x84, 0x21, 0x08, 0x00, 0x30, 0xCA, 0x5E,
0x6A, 0x93, 0x08, 0x08, 0x04, 0x02, 0x01, 0x0F, 0xF8, 0x40, 0x20, 0x10,
0x08, 0x00, 0x56, 0xF0, 0xF0, 0x03, 0x02, 0x06, 0x04, 0x08, 0x08, 0x10,
0x30, 0x20, 0x60, 0x40, 0xC0, 0x0E, 0x0C, 0x8C, 0x6C, 0x36, 0x1F, 0x0F,
0x07, 0x87, 0xC3, 0x61, 0xB1, 0x88, 0x83, 0x80, 0x04, 0x70, 0xC3, 0x08,
0x21, 0x86, 0x10, 0x43, 0x08, 0xF8, 0x1C, 0x67, 0x83, 0x03, 0x02, 0x06,
0x0C, 0x08, 0x10, 0x20, 0x42, 0xFC, 0x0F, 0x08, 0xC0, 0x60, 0xC1, 0xE0,
0x38, 0x0C, 0x06, 0x03, 0x01, 0x01, 0x1F, 0x00, 0x01, 0x01, 0x81, 0x41,
0x61, 0x21, 0x11, 0x18, 0x88, 0xFF, 0x02, 0x03, 0x01, 0x00, 0x0F, 0x84,
0x04, 0x03, 0x80, 0x60, 0x18, 0x0C, 0x06, 0x03, 0x03, 0x03, 0x1E, 0x00,
0x01, 0x83, 0x87, 0x07, 0x03, 0x03, 0x73, 0xCD, 0x86, 0xC3, 0x61, 0xB1,
0x88, 0xC3, 0xC0, 0x7F, 0x40, 0x80, 0x80, 0x40, 0x40, 0x60, 0x20, 0x20,
0x10, 0x10, 0x18, 0x08, 0x00, 0x1E, 0x19, 0xCC, 0x66, 0x33, 0xB0, 0xE0,
0x50, 0xCC, 0xC3, 0x61, 0xB0, 0xCC, 0xC3, 0xC0, 0x0E, 0x19, 0x8C, 0x6C,
0x36, 0x1B, 0x0D, 0x86, 0xE6, 0x3F, 0x03, 0x03, 0x06, 0x0C, 0x00, 0x33,
0x00, 0x00, 0xCC, 0x33, 0x00, 0x00, 0x44, 0x48, 0x01, 0x83, 0x86, 0x1C,
0x0C, 0x03, 0x80, 0x30, 0x07, 0x00, 0x80, 0xFF, 0x80, 0x00, 0x00, 0x0F,
0xF8, 0xC0, 0x1C, 0x03, 0x80, 0x70, 0x18, 0x38, 0x70, 0xC0, 0x80, 0x00,
0x3C, 0x8C, 0x18, 0x30, 0xC3, 0x0C, 0x20, 0x40, 0x80, 0x06, 0x00, 0x0F,
0xC0, 0xC3, 0x0C, 0x04, 0xC7, 0xBC, 0x64, 0xE2, 0x27, 0x31, 0x39, 0x91,
0xCC, 0x93, 0x3B, 0x0E, 0x00, 0x1F, 0x80, 0x01, 0x00, 0x60, 0x14, 0x04,
0xC0, 0x98, 0x23, 0x07, 0xE1, 0x04, 0x20, 0x88, 0x1B, 0x8F, 0x80, 0x3F,
0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3E, 0x04, 0x61, 0x86, 0x30, 0xC4,
0x19, 0x86, 0x7F, 0x80, 0x07, 0x91, 0x86, 0x30, 0x26, 0x02, 0x60, 0x0C,
0x00, 0xC0, 0x0C, 0x00, 0xC0, 0x0C, 0x00, 0x61, 0x83, 0xE0, 0x3F, 0xC0,
0x63, 0x82, 0x0C, 0x30, 0x31, 0x81, 0x8C, 0x0C, 0x40, 0x66, 0x07, 0x30,
0x31, 0x03, 0x18, 0x71, 0xFE, 0x00, 0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80,
0x19, 0x81, 0xF8, 0x11, 0x03, 0x10, 0x30, 0x02, 0x04, 0x60, 0x8F, 0xF8,
0x3F, 0xF0, 0xC2, 0x08, 0x21, 0x80, 0x19, 0x81, 0xF8, 0x11, 0x03, 0x10,
0x30, 0x02, 0x00, 0x60, 0x0F, 0x80, 0x07, 0x91, 0x87, 0x30, 0x26, 0x02,
0x60, 0x0C, 0x00, 0xC1, 0xFC, 0x0C, 0xC0, 0xCC, 0x0C, 0x60, 0x83, 0xF0,
0x3E, 0x3C, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x18, 0x30, 0x7F, 0x81, 0x06,
0x0C, 0x18, 0x30, 0x60, 0x81, 0x06, 0x0C, 0x3C, 0x78, 0x1E, 0x18, 0x20,
0xC1, 0x83, 0x04, 0x18, 0x30, 0x41, 0x87, 0x80, 0x0F, 0x81, 0x80, 0x80,
0xC0, 0x60, 0x20, 0x30, 0x18, 0x0C, 0x04, 0x36, 0x1E, 0x00, 0x3E, 0x78,
0x61, 0x82, 0x10, 0x31, 0x01, 0xB0, 0x0E, 0x00, 0x58, 0x06, 0x60, 0x33,
0x01, 0x0C, 0x18, 0x61, 0xE7, 0xC0, 0x3E, 0x01, 0x80, 0x20, 0x0C, 0x01,
0x80, 0x30, 0x04, 0x01, 0x80, 0x30, 0x04, 0x0D, 0x83, 0x7F, 0xE0, 0x1C,
0x07, 0x0C, 0x0E, 0x0C, 0x14, 0x14, 0x1C, 0x14, 0x2C, 0x16, 0x4C, 0x26,
0x48, 0x26, 0x98, 0x27, 0x18, 0x27, 0x10, 0x42, 0x30, 0xF4, 0x7C, 0x38,
0x78, 0x60, 0x83, 0x04, 0x2C, 0x41, 0x22, 0x09, 0x10, 0x4D, 0x84, 0x28,
0x21, 0x41, 0x06, 0x10, 0x21, 0xE1, 0x00, 0x07, 0x83, 0x18, 0xC1, 0xB0,
0x36, 0x07, 0xC0, 0xF0, 0x3E, 0x06, 0xC0, 0xD8, 0x31, 0x8C, 0x1E, 0x00,
0x3F, 0xC1, 0x9C, 0x21, 0x8C, 0x31, 0x86, 0x31, 0x87, 0xE1, 0x80, 0x30,
0x04, 0x01, 0x80, 0x78, 0x00, 0x07, 0x83, 0x18, 0xC1, 0x98, 0x36, 0x07,
0xC0, 0xF0, 0x1E, 0x06, 0xC0, 0xD8, 0x31, 0x04, 0x13, 0x01, 0x80, 0x70,
0xB7, 0xE0, 0x3F, 0xC1, 0x8C, 0x21, 0x8C, 0x31, 0x8C, 0x3F, 0x04, 0xC1,
0x98, 0x31, 0x84, 0x31, 0x86, 0x78, 0x70, 0x1E, 0x4C, 0x63, 0x08, 0xC0,
0x38, 0x07, 0x00, 0x60, 0x0C, 0x43, 0x10, 0xC6, 0x62, 0x70, 0x7F, 0xE9,
0x8E, 0x31, 0x04, 0x01, 0x80, 0x30, 0x06, 0x00, 0x80, 0x30, 0x06, 0x00,
0x80, 0x7E, 0x00, 0x7C, 0xF3, 0x02, 0x30, 0x46, 0x04, 0x60, 0x46, 0x04,
0x40, 0x8C, 0x08, 0xC0, 0x8C, 0x10, 0xE3, 0x03, 0xC0, 0xF8, 0xEC, 0x0C,
0x81, 0x18, 0x43, 0x08, 0x62, 0x0C, 0x81, 0x90, 0x14, 0x03, 0x00, 0x60,
0x08, 0x00, 0xFB, 0xCE, 0x43, 0x0C, 0x86, 0x11, 0x8C, 0x43, 0x38, 0x86,
0xB2, 0x0D, 0x24, 0x1C, 0x50, 0x38, 0xA0, 0x21, 0x80, 0x42, 0x01, 0x04,
0x00, 0x3E, 0x71, 0x82, 0x0C, 0x40, 0xC8, 0x07, 0x00, 0x60, 0x06, 0x00,
0xB0, 0x13, 0x02, 0x18, 0x61, 0x8F, 0x3E, 0xF9, 0xC8, 0x23, 0x10, 0xC8,
0x34, 0x05, 0x01, 0x80, 0x40, 0x30, 0x0C, 0x03, 0x03, 0xE0, 0x3F, 0xE4,
0x19, 0x03, 0x00, 0xC0, 0x30, 0x0C, 0x03, 0x00, 0x40, 0x18, 0x06, 0x05,
0x81, 0x7F, 0xE0, 0x0E, 0x10, 0x20, 0x81, 0x02, 0x04, 0x10, 0x20, 0x40,
0x82, 0x04, 0x08, 0x1C, 0x00, 0x81, 0x04, 0x18, 0x20, 0xC1, 0x04, 0x08,
0x20, 0x41, 0x38, 0x20, 0x82, 0x08, 0x41, 0x04, 0x10, 0xC2, 0x08, 0x20,
0x8C, 0x00, 0x18, 0x18, 0x2C, 0x24, 0x46, 0x42, 0x83, 0xFF, 0x80, 0xD8,
0x80, 0x1F, 0x98, 0x98, 0x4C, 0x2C, 0x36, 0x33, 0x3A, 0xEE, 0x38, 0x08,
0x04, 0x02, 0x03, 0x71, 0xCC, 0xC6, 0xC3, 0x63, 0x21, 0x93, 0x8F, 0x00,
0x1F, 0x33, 0x60, 0xC0, 0xC0, 0xC0, 0xC4, 0x78, 0x01, 0x80, 0x40, 0x60,
0x20, 0xF1, 0x89, 0x8C, 0xC4, 0xC2, 0x63, 0x33, 0xAE, 0xE0, 0x0E, 0x65,
0x8B, 0x2F, 0x98, 0x31, 0x3C, 0x01, 0xE0, 0x40, 0x08, 0x02, 0x00, 0x40,
0x3E, 0x03, 0x00, 0x40, 0x08, 0x01, 0x00, 0x60, 0x0C, 0x01, 0x00, 0x20,
0x04, 0x01, 0x00, 0xC0, 0x00, 0x1E, 0x19, 0xD8, 0xCC, 0xE1, 0xC3, 0x01,
0xE0, 0xBC, 0x82, 0x41, 0x31, 0x0F, 0x00, 0x38, 0x08, 0x04, 0x02, 0x03,
0x39, 0x6C, 0xC6, 0x46, 0x63, 0x21, 0x11, 0xB8, 0xE0, 0x30, 0x00, 0xE2,
0x44, 0xC8, 0xCE, 0x06, 0x00, 0x00, 0x00, 0xC0, 0x83, 0x04, 0x08, 0x10,
0x60, 0x81, 0x02, 0x04, 0x70, 0x38, 0x10, 0x10, 0x10, 0x37, 0x22, 0x24,
0x38, 0x78, 0x48, 0x4D, 0xC6, 0x73, 0x32, 0x26, 0x64, 0x4C, 0xDE, 0x77,
0x39, 0x5E, 0xCC, 0xCC, 0xCE, 0x66, 0x62, 0x22, 0x11, 0x11, 0xB9, 0x8E,
0x77, 0x3B, 0x33, 0x62, 0x62, 0x42, 0x4D, 0xCE, 0x0F, 0x18, 0xD8, 0x7C,
0x3C, 0x3E, 0x1B, 0x18, 0xF0, 0x3B, 0x87, 0x31, 0x8C, 0x43, 0x31, 0x88,
0x62, 0x30, 0xF0, 0x60, 0x10, 0x04, 0x03, 0x80, 0x0F, 0x18, 0x98, 0x4C,
0x2C, 0x26, 0x33, 0x38, 0xEC, 0x04, 0x02, 0x03, 0x03, 0xC0, 0x76, 0x50,
0xC1, 0x06, 0x08, 0x10, 0x60, 0x1A, 0x6C, 0xC8, 0xC0, 0xD1, 0xB3, 0x5C,
0x23, 0xC8, 0xC4, 0x21, 0x18, 0xE0, 0xC3, 0x42, 0x42, 0xC6, 0x86, 0x8C,
0x9D, 0xEE, 0x62, 0xC4, 0x89, 0xA3, 0x47, 0x0C, 0x10, 0xE2, 0x2C, 0x44,
0xD8, 0x9D, 0x23, 0xA4, 0x65, 0x0C, 0xC1, 0x10, 0x19, 0x95, 0x43, 0x01,
0x80, 0xC0, 0xA0, 0x91, 0x8E, 0x70, 0x88, 0x46, 0x23, 0x20, 0x90, 0x50,
0x28, 0x18, 0x08, 0x08, 0x08, 0x18, 0x00, 0x3F, 0x42, 0x04, 0x08, 0x10,
0x20, 0x40, 0x72, 0x0E, 0x08, 0x61, 0x04, 0x30, 0x86, 0x08, 0x61, 0x04,
0x30, 0xC3, 0x8F, 0x00, 0xFF, 0xF0, 0x1E, 0x0C, 0x10, 0x20, 0xC1, 0x82,
0x04, 0x1C, 0x30, 0x40, 0x83, 0x04, 0x08, 0x20, 0x60, 0x99, 0x8E ]
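# Each glyph row below is [bitmapOffset, width, height, xAdvance, xOffset, yOffset]
# (assumed from the Adafruit-GFX-style layout of this converted font table).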
FreeSerifItalic9pt7bGlyphs = [
[ 0, 0, 0, 5, 0, 1 ], # 0x20 ' '
[ 0, 4, 12, 6, 1, -11 ], # 0x21 '!'
[ 6, 5, 4, 6, 3, -11 ], # 0x22 '"'
[ 9, 10, 12, 9, 0, -11 ], # 0x23 '#'
[ 24, 9, 15, 9, 1, -12 ], # 0x24 '$'
[ 41, 14, 12, 15, 1, -11 ], # 0x25 '%'
[ 62, 12, 12, 14, 1, -11 ], # 0x26 '&'
[ 80, 2, 4, 4, 3, -11 ], # 0x27 '''
[ 81, 6, 15, 6, 1, -11 ], # 0x28 '('
[ 93, 6, 15, 6, 0, -11 ], # 0x29 ')'
[ 105, 6, 8, 9, 3, -11 ],   # 0x2A '*'
[ 111, 9, 9, 12, 1, -8 ], # 0x2B '+'
[ 122, 2, 4, 5, 0, -1 ], # 0x2C ','
[ 123, 4, 1, 6, 1, -3 ], # 0x2D '-'
[ 124, 2, 2, 5, 0, -1 ], # 0x2E '.'
[ 125, 8, 12, 5, 0, -11 ], # 0x2F '/'
[ 137, 9, 13, 9, 1, -12 ], # 0x30 '0'
[ 152, 6, 13, 9, 1, -12 ], # 0x31 '1'
[ 162, 8, 12, 9, 1, -11 ], # 0x32 '2'
[ 174, 9, 12, 9, 0, -11 ], # 0x33 '3'
[ 188, 9, 12, 9, 0, -11 ], # 0x34 '4'
[ 202, 9, 12, 9, 0, -11 ], # 0x35 '5'
[ 216, 9, 13, 9, 1, -12 ], # 0x36 '6'
[ 231, 9, 12, 9, 1, -11 ], # 0x37 '7'
[ 245, 9, 13, 9, 1, -12 ], # 0x38 '8'
[ 260, 9, 13, 9, 0, -12 ], # 0x39 '9'
[ 275, 4, 8, 4, 1, -7 ], # 0x3A ':'
[ 279, 4, 10, 4, 1, -7 ],   # 0x3B ';'
[ 284, 9, 9, 10, 1, -8 ], # 0x3C '<'
[ 295, 9, 5, 12, 2, -6 ], # 0x3D '='
[ 301, 9, 9, 10, 1, -8 ], # 0x3E '>'
[ 312, 7, 12, 8, 2, -11 ], # 0x3F '?'
[ 323, 13, 12, 14, 1, -11 ], # 0x40 '@'
[ 343, 11, 11, 12, 0, -10 ], # 0x41 'A'
[ 359, 11, 12, 11, 0, -11 ], # 0x42 'B'
[ 376, 12, 12, 11, 1, -11 ], # 0x43 'C'
[ 394, 13, 12, 13, 0, -11 ], # 0x44 'D'
[ 414, 12, 12, 10, 0, -11 ], # 0x45 'E'
[ 432, 12, 12, 10, 0, -11 ], # 0x46 'F'
[ 450, 12, 12, 12, 1, -11 ], # 0x47 'G'
[ 468, 14, 12, 13, 0, -11 ], # 0x48 'H'
[ 489, 7, 12, 6, 0, -11 ], # 0x49 'I'
[ 500, 9, 12, 8, 0, -11 ], # 0x4A 'J'
[ 514, 13, 12, 12, 0, -11 ], # 0x4B 'K'
[ 534, 11, 12, 10, 0, -11 ], # 0x4C 'L'
[ 551, 16, 12, 15, 0, -11 ], # 0x4D 'M'
[ 575, 13, 12, 12, 0, -11 ], # 0x4E 'N'
[ 595, 11, 12, 12, 1, -11 ], # 0x4F 'O'
[ 612, 11, 12, 10, 0, -11 ], # 0x50 'P'
[ 629, 11, 15, 12, 1, -11 ], # 0x51 'Q'
[ 650, 11, 12, 11, 0, -11 ], # 0x52 'R'
[ 667, 10, 12, 8, 0, -11 ], # 0x53 'S'
[ 682, 11, 12, 11, 2, -11 ], # 0x54 'T'
[ 699, 12, 12, 13, 2, -11 ], # 0x55 'U'
[ 717, 11, 12, 12, 2, -11 ], # 0x56 'V'
[ 734, 15, 12, 16, 2, -11 ], # 0x57 'W'
[ 757, 12, 12, 12, 0, -11 ], # 0x58 'X'
[ 775, 10, 12, 11, 2, -11 ], # 0x59 'Y'
[ 790, 11, 12, 10, 0, -11 ], # 0x5A 'Z'
[ 807, 7, 15, 7, 0, -11 ], # 0x5B '['
[ 821, 6, 12, 9, 2, -11 ], # 0x5C '\'
[ 830, 6, 15, 7, 1, -11 ], # 0x5D ']'
[ 842, 8, 7, 8, 0, -11 ], # 0x5E '^'
[ 849, 9, 1, 9, 0, 2 ], # 0x5F '_'
[ 851, 3, 3, 5, 2, -11 ], # 0x60 '`'
[ 853, 9, 8, 9, 0, -7 ], # 0x61 'a'
[ 862, 9, 12, 9, 0, -11 ], # 0x62 'b'
[ 876, 8, 8, 7, 0, -7 ], # 0x63 'c'
[ 884, 9, 12, 9, 0, -11 ], # 0x64 'd'
[ 898, 7, 8, 7, 0, -7 ], # 0x65 'e'
[ 905, 11, 17, 8, -1, -12 ], # 0x66 'f'
[ 929, 9, 12, 8, 0, -7 ], # 0x67 'g'
[ 943, 9, 12, 9, 0, -11 ], # 0x68 'h'
[ 957, 4, 12, 4, 1, -11 ], # 0x69 'i'
[ 963, 7, 16, 5, -1, -11 ], # 0x6A 'j'
[ 977, 8, 12, 8, 0, -11 ], # 0x6B 'k'
[ 989, 4, 12, 5, 1, -11 ], # 0x6C 'l'
[ 995, 13, 8, 13, 0, -7 ], # 0x6D 'm'
[ 1008, 8, 8, 9, 0, -7 ], # 0x6E 'n'
[ 1016, 9, 8, 9, 0, -7 ], # 0x6F 'o'
[ 1025, 10, 12, 8, -1, -7 ], # 0x70 'p'
[ 1040, 9, 12, 9, 0, -7 ], # 0x71 'q'
[ 1054, 7, 8, 7, 0, -7 ], # 0x72 'r'
[ 1061, 7, 8, 6, 0, -7 ], # 0x73 's'
[ 1068, 5, 9, 4, 0, -8 ], # 0x74 't'
[ 1074, 8, 8, 9, 1, -7 ], # 0x75 'u'
[ 1082, 7, 8, 8, 1, -7 ], # 0x76 'v'
[ 1089, 11, 8, 12, 1, -7 ], # 0x77 'w'
[ 1100, 9, 8, 8, -1, -7 ], # 0x78 'x'
[ 1109, 9, 12, 9, 0, -7 ], # 0x79 'y'
[ 1123, 8, 9, 7, 0, -7 ], # 0x7A 'z'
[ 1132, 6, 15, 7, 1, -11 ],   # 0x7B '{'
[ 1144, 1, 12, 5, 2, -11 ], # 0x7C '|'
[ 1146, 7, 16, 7, 0, -12 ],   # 0x7D '}'
[ 1160, 8, 3, 10, 1, -5 ] ] # 0x7E '~'
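# Font container (assumed Adafruit-GFX-style layout):
# [bitmap array, glyph table, first ASCII code, last ASCII code, line advance in pixels]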
FreeSerifItalic9pt7b = [
FreeSerifItalic9pt7bBitmaps,
FreeSerifItalic9pt7bGlyphs,
0x20, 0x7E, 22 ]
# Approx. 1835 bytes
|
import asyncio
import asyncpg
from config import DB_BIND
QUERIES = open('migrate.sql', 'r').read()
def log(connection, message):
print(message)
async def main():
db = await asyncpg.connect(DB_BIND)
db.add_log_listener(log)
async with db.transaction():
await db.execute(QUERIES)
# populate facts if empty
if await db.fetchval('SELECT COUNT(id) FROM facts') == 0:
for fact in facts.strip().split('\n'):  # strip() avoids inserting empty rows from the blank first/last lines
await db.execute('INSERT INTO facts (content) VALUES ($1)', fact)
facts = """
If you somehow found a way to extract all of the gold from the bubbling core of our lovely little planet, you would be able to cover all of the land in a layer of gold up to your knees.
McDonalds calls frequent buyers of their food “heavy users.”
The average person spends 6 months of their lifetime waiting on a red light to turn green.
The largest recorded snowflake was in Keogh, MT during year 1887, and was 15 inches wide.
You burn more calories sleeping than you do watching television.
There are more lifeforms living on your skin than there are people on the planet.
Southern sea otters have flaps of skin under their forelegs that act as pockets. When diving, they use these pouches to store rocks and food.
In 1386 a pig in France was executed by public hanging for the murder of a child.
One in every five adults believe that aliens are hiding in our planet disguised as humans.
If you believe that you’re truly one in a million, there are still approximately 7,184 more people out there just like you.
A single cloud can weigh more than 1 million pounds.
James Buchanan, the 15th U.S. president continuously bought slaves with his own money in order to free them.
There are more possible iterations of a game of chess than there are atoms in the observable universe.
The average person walks the equivalent of three times around the world in a lifetime.
Men are 6 times more likely to be struck by lightning than women.
Coca-Cola would be green if coloring wasn’t added to it.
You cannot snore and dream at the same time.
The world’s oldest piece of chewing gum is over 9,000 years old!
A coyote can hear a mouse moving underneath a foot of snow.
Bolts of lightning can shoot out of an erupting volcano.
New York drifts about one inch farther away from London each year.
A U.S. dollar bill can be folded approximately 4,000 times in the same place before it will tear.
A sneeze travels about 100 miles per hour.
Earth has traveled more than 5,000 miles in the past 5 minutes.
It would take a sloth one month to travel one mile.
10% of the World’s population is left handed.
A broken clock is right two times every day.
According to Amazon, the most highlighted books on Kindle are the Bible, the Steve Jobs biography, and The Hunger Games.
Bob Marley’s last words to his son before he died were “Money can’t buy life.”
A mole can dig a tunnel that is 300 feet long in only one night.
A hippo’s wide open mouth is big enough to fit a 4-foot-tall child in.
Chewing gum while you cut an onion will help keep you from crying.
If you were to stretch a Slinky out until it’s flat, it would measure 87 feet long.
Al Capone’s business card said he was a used furniture dealer.
There are more collect calls on Father’s Day than on any other day of the year.
Banging your head against a wall burns 150 calories an hour.
95% of people text things they could never say in person.
A crocodile can’t poke its tongue out.
It is physically impossible for pigs to look up into the sky.
Guinness Book of Records holds the record for being the book most often stolen from Public Libraries.
Drying fruit depletes it of 30-80% of its vitamin and antioxidant content.
A 2010 study found that 48% of soda fountains contained fecal bacteria, and 11% contained E. Coli.
9 out of 10 Americans are deficient in potassium.
Blueberries will not ripen until they are picked.
About 150 people per year are killed by coconuts.
About half of all Americans are on a diet on any given day.
A hardboiled egg will spin, but a soft-boiled egg will not.
Avocados are poisonous to birds.
Chewing gum burns about 11 calories per hour.
The number of animals killed for meat every hour in the U.S. is 500,000.
If you try to suppress a sneeze, you can rupture a blood vessel in your head or neck and die.
Celery has negative calories! It takes more calories to eat a piece of celery than the celery has in it to begin with. It’s the same with apples!
More people are allergic to cow’s milk than any other food.
Only 8% of dieters will follow a restrictive weight loss plan (like the HCG Drops Diet, garcinia cambogia diet).
Coconut water can be used as blood plasma.
The word “gorilla” is derived from a Greek word meaning, “A tribe of hairy women.”
Prisoners in Canadian war camps during World War II were treated so well that a lot of them didn’t want to leave when the war was over.
Gorillas burp when they are happy.
In New York, it is illegal to sell a haunted house without telling the buyer.
In 2006 someone tried to sell New Zealand on eBay. The price got up to $3,000 before eBay shut it down.
It is considered good luck in Japan when a sumo wrestler makes your baby cry.
A man from Britain changed his name to Tim Pppppppppprice to make it harder for telemarketers to pronounce.
A woman from California once tried to sue the makers of Cap’n Crunch, because the Crunch Berries contained “no berries of any kind.”
Apple launched a clothing line in 1986. It was described as a “train wreck” by others.
In Japan, crooked teeth are considered cute and attractive.
A Swedish woman lost her wedding ring, and found it 16 years later – growing on a carrot in her garden.
Donald Duck comics were banned from Finland because he doesn’t wear pants.
The chance of you dying on the way to get lottery tickets is actually greater than your chance of winning.
Cherophobia is the fear of fun.
The toothpaste “Colgate” in Spanish translates to “go hang yourself.”
Pirates wore earrings because they believed it improved their eyesight.
Human thigh bones are stronger than concrete.
Cockroaches can live for several weeks with their heads cut off, because their brains are located inside their body. They would eventually die from being unable to eat.
Scientists have tracked butterflies that travel over 3,000 miles.
To produce a single pound of honey, a single bee would have to visit 2 million flowers.
The population is expected to rise to 10.8 billion by the year 2080.
You breathe on average about 8,409,600 times a year.
More than 60,000 people are flying over the United States in an airplane right now.
Hamsters run up to 8 miles at night on a wheel.
A waterfall in Hawaii goes up sometimes instead of down.
A church in the Czech Republic has a chandelier made entirely of human bones.
Under the Code of Hammurabi, bartenders who watered down beer were punished by execution.
Our eyes are always the same size from birth, but our nose and ears never stop growing.
During your lifetime, you will produce enough saliva to fill two swimming pools.
You are 1% shorter in the evening than in the morning.
The elephant is the only mammal that can’t jump!
Most dust particles in your house are made from dead skin!
If 33 million people held hands, they could make it all the way around the equator.
Earth is the only planet that is not named after a god.
The bloodhound is the only animal whose evidence is admissible in court.
You are born with 300 bones, but by the time you are an adult you only have 206.
A ten-gallon hat will only hold ¾ of a gallon.
Just like fingerprints, everyone has different tongue prints.
ATMs were originally thought to be failures, because the only users were prostitutes and gamblers who didn’t want to deal with tellers face to face.
Of all the words in the English language, the word “set” has the most definitions. The word “run” comes in close second.
A “jiffy” is the scientific name for 1/100th of a second.
One fourth of the bones in your body are located in your feet.
111,111,111 × 111,111,111 = 12,345,678,987,654,321
Blue-eyed people tend to have the highest tolerance of alcohol.
A traffic jam lasted for more than 10 days, with cars only moving 0.6 miles a day.
Every year more than 2500 left-handed people are killed from using right-handed products.
More than 50% of the people in the world have never made or received a telephone call.
The cigarette lighter was invented before the match.
Sea otters hold hands when they sleep so that they do not drift apart.
The Golden Poison Dart Frog’s skin has enough toxins to kill 100 people.
The male ostrich can roar just like a lion.
Mountain lions can whistle.
Cows kill more people than sharks do.
Cats have 32 muscles in each of their ears.
A tarantula can live without food for more than two years.
The tongue of a blue whale weighs more than most elephants!
Ever wonder where the phrase “It’s raining cats and dogs” comes from? In the 17th century many homeless cats and dogs would drown and float down the streets of England, making it look like it literally rained cats and dogs.
It takes about 3,000 cows to supply enough leather for the NFL for only one year.
Male dogs lift their legs when they are urinating for a reason. They are trying to leave their mark higher so that it gives off the message that they are tall and intimidating.
A hummingbird weighs less than a penny.
An ostrich’s eye is bigger than its brain.
Dogs are capable of understanding up to 250 words and gestures and have demonstrated the ability to do simple mathematical calculations.
A sheep, a duck and a rooster were the first passengers in a hot air balloon.
Birds don’t urinate.
A flea can jump up to 200 times its own height. That is the equivalent of a human jumping the Empire State Building.
There are 5 temples in Kyoto, Japan that have blood stained ceilings. The ceilings are made from the floorboards of a castle where warriors killed themselves after a long hold-off against an army. To this day, you can still see the outlines and footprints.
There is a snake, called the boomslang, whose venom causes you to bleed out from every orifice on your body. You may even turn blue from internal bleeding, and it can take up to 5 days to die from the bleeding.
Saturn’s density is low enough that the planet would float in water.
68% of the universe is dark energy, and 27% is dark matter; both are invisible, even with our powerful telescopes. This means we have only seen 5% of the universe from earth.
The founders of Google were willing to sell Google for $1 million to Excite in 1999, but Excite turned them down. Google is now worth $527 Billion.
In the past 20 years, scientists have found over 1,000 planets outside of our solar system.
There are 60,000 miles of blood vessels in the human body.
If a pregnant woman has organ damage, the baby in her womb sends stem cells to help repair the organ.
If you started with $0.01 and doubled your money every day, it would take 27 days to become a millionaire.
Only one person in two billion will live to be 116 or older.
A person can live without food for about a month, but only about a week without water.
On average, 12 newborns will be given to the wrong parents daily.
You can’t kill yourself by holding your breath.
Human birth control pills work on gorillas.
There are no clocks in Las Vegas gambling casinos.
Beetles taste like apples, wasps like pine nuts, and worms like fried bacon.
Months that begin on a Sunday will always have a “Friday the 13th.”
The placement of a donkey’s eyes in its head enables it to see all four feet at all times!
Some worms will eat themselves if they can’t find any food!
Dolphins sleep with one eye open!
It is impossible to sneeze with your eyes open.
In France, it is legal to marry a dead person.
Russia has a larger surface area than Pluto.
There’s an opera house on the U.S.–Canada border where the stage is in one country and half the audience is in another.
The harder you concentrate on falling asleep, the less likely you are to fall asleep.
You can’t hum while holding your nose closed.
Women have twice as many pain receptors on their body than men. But a much higher pain tolerance.
There are more stars in space than there are grains of sand on every beach in the world.
For every human on Earth there are 1.6 million ants. The total weight of all those ants, however, is about the same as all the humans.
On Jupiter and Saturn it rains diamonds.
It is impossible to lick your elbow.
A shrimp’s heart is in its head.
People say "Bless you" when you sneeze because when you sneeze, your heart stops for a millisecond.
In a study of 200,000 ostriches over a period of 80 years, no one reported a single case where an ostrich buried its head in the sand.
Rats and horses can’t vomit.
If you sneeze too hard, you can fracture a rib.
If you keep your eyes open by force when you sneeze, you might pop an eyeball out.
Rats multiply so quickly that in 18 months, two rats could have over a million descendants.
Wearing headphones for just an hour will increase the bacteria in your ear by 700 times.
In every episode of Seinfeld there is a Superman somewhere.
35% of the people who use personal ads for dating are already married.
23% of all photocopier faults worldwide are caused by people sitting on them and photocopying their butts.
Most lipstick contains fish scales.
Over 75% of people who read this will try to lick their elbow.
A crocodile can’t move its tongue and cannot chew. Its digestive juices are so strong that it can digest a steel nail.
Money notes are not made from paper, they are made mostly from a special blend of cotton and linen. In 1932, when a shortage of cash occurred in Tenino, Washington, USA, notes were made out of wood for a brief period.
The Grammy Awards were introduced to counter the threat of rock music. In the late 1950s, a group of record executives were alarmed by the explosive success of rock ‘n’ roll, considering it a threat to “quality” music.
Tea is said to have been discovered in 2737 BC by a Chinese emperor when some tea leaves accidentally blew into a pot of boiling water. The tea bag was introduced in 1908 by Thomas Sullivan.
Over the last 150 years the average height of people in industrialized nations has increased about 4 inches. In the 19th century, American men were the tallest in the world, averaging 5′6″. Today, the average height for American men is 5′7″, compared to 5′8″ for Swedes, and 5′8.5″ for the Dutch. The tallest nation in the world is the Watusis of Burundi.
In 1955 the richest woman in the world was Mrs. Hetty Green Wilks, who left an estate of $95 million in a will that was found in a tin box with four pieces of soap. Queen Elizabeth of Britain and Queen Beatrix of the Netherlands count under the 10 wealthiest women in the world.
Joseph Niepce developed the world’s first photographic image in 1827. Thomas Edison and William Kennedy-Laurie Dickson introduced the film camera in 1894. But the first projection of an image on a screen was made by a German priest. In 1646, Athanasius Kircher used a candle or oil lamp to project hand-painted images onto a white screen.
In 1935 a writer named Dudley Nichols refused to accept the Oscar for his movie The Informer because the Writers Guild was on strike against the movie studios. In 1970 George C. Scott refused the Best Actor Oscar for Patton. In 1972 Marlon Brando refused the Oscar for his role in The Godfather.
The system of democracy was introduced 2,500 years ago in Athens, Greece. The oldest existing governing body operates in Althing, Iceland. It was established in 930 AD.
If the amount of water in your body is reduced by just 1%, you’ll feel thirsty. If it is reduced by 10%, you’ll die.
According to a study by the Economic Research Service, 27% of all food production in Western nations ends up in garbage cans. Yet, 1.2 billion people are underfed – the same number of people who are overweight.
Camels are called “ships of the desert” because of the way they move, not because of their transport capabilities. A dromedary has one hump and a Bactrian camel two humps. The humps are used as fat storage. Thus, an undernourished camel will not have a hump.
In the Durango desert in Mexico, there’s a creepy spot called the “Zone of Silence.” You can’t pick up clear TV or radio signals. And locals say fireballs sometimes appear in the sky.
Ethernet is a registered trademark of Xerox, Unix is a registered trademark of AT&T.
Bill Gates’ first business was Traf-O-Data, a company that created machines which recorded the number of vehicles passing a given point on a road.
Uranus’ orbital axis is tilted at over 90 degrees.
The famed U.S. Geological Survey astronomer Mr. Eugene Shoemaker trained the Apollo astronauts about craters, but never made it into space. Mr. Shoemaker had wanted to be an astronaut but was rejected because of a medical problem. His ashes were placed on board the Lunar Prospector spacecraft before it was launched on January 6, 1998. NASA crashed the probe into a crater on the moon in an attempt to learn if there is water on the moon.
Outside the U.S., Ireland is the largest software producing country in the world.
The first fossilized specimen of Australopithecus afarenisis was named Lucy after the paleontologists’ favorite song “Lucy in the Sky with Diamonds,” by the Beatles.
FIGlet, an ASCII font converter program, stands for Frank, Ian and Glenn’s LETters.
Every human spent about half an hour as a single cell.
Every year about 98% of atoms in your body are replaced.
Hot water is heavier than cold water.
Plutonium – first weighed on August 20th, 1942, by University of Chicago scientists Glenn Seaborg and his colleagues – was the first man-made element.
If you went out into space, you would explode before you suffocated because there’s no air pressure.
The radioactive substance Americium-241 is used in many smoke detectors.
The original IBM-PCs, that had hard drives, referred to the hard drives as Winchester drives. This is due to the fact that the original Winchester drive had a model number of 3030. This is, of course, a Winchester firearm.
Sound travels 15 times faster through steel than through the air.
On average, half of all false teeth have some form of radioactivity.
Only one satellite has been ever been destroyed by a meteor: the European Space Agency’s Olympus in 1993.
Starch is used as a binder in the production of paper. It is the use of a starch coating that controls ink penetration when printing. Cheaper papers do not use as much starch, and this is why your elbows get black when you are leaning over your morning paper.
Sterling silver is not pure silver. Because pure silver is too soft to be used in most tableware it is mixed with copper in the proportion of 92.5% silver to 7.5% copper.
A ball of glass will bounce higher than a ball of rubber. A ball of solid steel will bounce even higher.
A chip of silicon a quarter-inch square has the capacity of the original 1949 ENIAC computer, which occupied a city block.
An ordinary TNT bomb involves atomic reaction and, thus, could be called an atomic bomb. What we call an A-bomb involves nuclear reactions and should be called a nuclear bomb.
At a glance, the Celsius scale makes more sense than the Fahrenheit scale for temperature measuring. But its creator, Anders Celsius, was an oddball scientist. When he first developed his scale, he made the freezing of water 100 degrees and the boiling 0 degrees. No one dared point this out to him, so fellow scientists waited until Celsius died to change the scale.
At a jet plane’s speed of 620 mph, the length of the plane becomes one atom shorter than its original length.
The first full moon to occur on the winter solstice, December 22, commonly called the first day of winter, happened in 1999. Since a full moon on the winter solstice occurred in conjunction with a lunar perigee (point in the moon’s orbit that is closest to Earth), the moon appeared about 14% larger than it does at apogee (the point in its elliptical orbit that is farthest from Earth). Since the Earth is also several million miles closer to the sun at that time of the year than in the summer, sunlight striking the moon was about 7% stronger making it brighter. Also, this was the closest perigee of the Moon of the year since the moon’s orbit is constantly deforming. In places where the weather was clear and there was a snow cover, even car headlights were superfluous.
According to security equipment specialists, security systems that utilize motion detectors won’t function properly if walls and floors are too hot. When an infrared beam is used in a motion detector, it will pick up a person’s body temperature of 98.6 °F compared to the cooler walls and floor. If the room is too hot, the motion detector won’t register a change in the radiated heat of that person’s body when it enters the room and breaks the infrared beam. Your home’s safety might be compromised if you turn your air conditioning off or set the thermostat too high while on summer vacation.
Western Electric successfully brought sound to motion pictures and introduced systems of mobile communications which culminated in the cellular telephone.
On December 23, 1947, Bell Telephone Laboratories in Murray Hill, N.J., held a secret demonstration of the transistor which marked the foundation of modern electronics.
The wick of a trick candle has small amounts of magnesium in them. When you light the candle, you are also lighting the magnesium. When someone tries to blow out the flame, the magnesium inside the wick continues to burn and, in just a split second (or two or three), relights the wick.
Ostriches are often not taken seriously. They can run faster than horses, and the males can roar like lions.
Seals used for their fur get extremely sick when taken aboard ships.
Sloths take two weeks to digest their food.
Guinea pigs and rabbits can’t sweat.
The pet food company Ralston Purina recently introduced, from its subsidiary Purina Philippines, power chicken feed designed to help roosters build muscles for cockfighting, which is popular in many areas of the world. According to the Wall Street Journal, the cockfighting market is huge: The Philippines has five million roosters used for exactly that.
The porpoise is second to man as the most intelligent animal on the planet.
Young beavers stay with their parents for the first two years of their lives before going out on their own.
Skunks can accurately spray their smelly fluid as far as ten feet.
Deer can’t eat hay.
Gopher snakes in Arizona are not poisonous, but when frightened they may hiss and shake their tails like rattlesnakes.
On average, dogs have better eyesight than humans, although not as colorful.
The duckbill platypus can store as many as six hundred worms in the pouches of its cheeks.
The lifespan of a squirrel is about nine years.
North American oysters do not make pearls of any value.
Many sharks lay eggs, but hammerheads give birth to live babies that look like very small duplicates of their parents. Young hammerheads are usually born headfirst, with the tip of their hammer-shaped head folded backward to make them more streamlined for birth.
Gorillas sleep as much as fourteen hours per day.
A biological reserve has been made for golden toads because they are so rare.
There are more than fifty different kinds of kangaroos.
Jellyfish like salt water. A rainy season often reduces the jellyfish population by putting more fresh water into normally salty waters where they live.
The female lion does ninety percent of the hunting.
The odds of seeing three albino deer at once are one in seventy-nine billion, yet one man in Boulder Junction, Wisconsin, took a picture of three albino deer in the woods.
Cats often rub up against people and furniture to lay their scent and mark their territory. They do it this way, as opposed to the way dogs do it, because they have scent glands in their faces.
Cats sleep up to eighteen hours a day, but never quite as deep as humans. Instead, they fall asleep quickly and wake up intermittently to check to see if their environment is still safe.
Catnip, or Nepeta cataria, is an herb with nepetalactone in it. Many think that when cats inhale nepetalactone, it affects hormones that arouse sexual feelings, or at least alter their brain functioning to make them feel “high.” Catnip was originally made, using nepetalactone as a natural bug repellant, but roaming cats would rip up the plants before they could be put to their intended task.
The nematode Caenorhabditis elegans ages the equivalent of five human years for every day they live, so they usually die after about fourteen days. When stressed, though, the worm goes into a comatose state that can last for two or more months. The human equivalent would be to sleep for about two hundred years.
You can tell the sex of a horse by its teeth. Most males have 40, females have 36.
The 57 on Heinz ketchup bottle represents the varieties of pickle the company once had.
Your stomach produces a new layer of mucus every two weeks – otherwise it will digest itself.
The Declaration of Independence was written on hemp paper.
A raisin dropped in a glass of fresh champagne will bounce up and down continuously from the bottom of the glass to the top.
Susan Lucci is the daughter of Phyllis Diller.
315 entries in Webster’s 1996 Dictionary were misspelled.
During the chariot scene in “Ben-Hur” a small red car can be seen in the distance.
Warren Beatty and Shirley MacLaine are brother and sister.
Orcas (killer whales) kill sharks by torpedoing up into the shark’s stomach from underneath, causing the shark to explode.
Donald Duck comics were banned from Finland because he doesn’t wear any pants.
Ketchup was sold in the 1830s as medicine.
Upper and lower case letters are named “upper” and “lower” because in the time when all original print had to be set in individual letters, the “upper case” letters were stored in the case on top of the case that stored the smaller, “lower case” letters.
Leonardo da Vinci could write with one hand and draw with the other at the same time.
Because metal was scarce, the Oscars given out during World War II were made of wood.
The name Wendy was made up for the book Peter Pan, there was never a recorded Wendy before!
There are no words in the dictionary that rhyme with: orange, purple, and silver!
Leonardo Da Vinci invented scissors.
A tiny amount of liquor on a scorpion will make it instantly go mad and sting itself to death.
The mask used by Michael Myers in the original “Halloween” was a Captain Kirk mask painted white.
If you have three quarters, four dimes, and four pennies, you have $1.19. You also have the largest amount of money in coins without being able to make change for a dollar.
The glue on Israeli postage stamps is certified kosher.
Astronauts are not allowed to eat beans before they go into space because passing wind in a space suit damages them.
The word “queue” is the only word in the English language that is still pronounced the same way when the last four letters are removed.
“Almost” is the longest word in the English language with all the letters in alphabetical order.
“Rhythm” is the longest English word without a vowel.
There is a city called Rome on every continent.
It’s against the law to have a pet dog in Iceland.
Your heart beats over 100,000 times a day.
Horatio Nelson, one of England’s most illustrious admirals was throughout his life, never able to find a cure for his sea-sickness.
The skeleton of Jeremy Bentham is present at all important meetings of the University of London.
Right-handed people live, on average, nine years longer than left-handed people.
Your ribs move about 5 million times a year, every time you breathe!
One quarter of the bones in your body, are in your feet!
The first known transfusion of blood was performed as early as 1667, when Jean-Baptiste, transfused two pints of blood from a sheep to a young man.
Fingernails grow nearly 4 times faster than toenails!
Women blink nearly twice as much as men.
Adolf Hitler was a vegetarian, and had only one testicle.
Honey is one of the only foods that do not spoil. Honey found in the tombs of Egyptian pharaohs has been tasted by archaeologists and found edible.
On average a hedgehog’s heart beats 300 times a minute.
More people are killed each year from bees than from snakes.
The average lead pencil will draw a line 35 miles long or write approximately 50,000 English words.
Camels have three eyelids to protect themselves from blowing sand.
The six official languages of the United Nations are: English, French, Arabic, Chinese, Russian and Spanish.
It’s against the law to burp, or sneeze in a church in Nebraska, USA.
The longest recorded flight of a chicken is 13 seconds.
Queen Elizabeth I. regarded herself as a paragon of cleanliness. She declared that she bathed once every three months, whether she needed it or not.
Slugs have 4 noses.
Owls are the only birds who can see the color blue.
A man named Charles Osborne had the hiccups for 69 years!
A giraffe can clean its ears with its 21-inch tongue!
The average person laughs 10 times a day!
If you yelled for 8 years, 7 months and 6 days you would have produced enough sound energy to heat one cup of coffee.
If you farted consistently for 6 years and 9 months, enough gas is produced to create the energy of an atomic bomb.
The human heart creates enough pressure when it pumps out to the body to squirt blood 30 feet.
A pig’s orgasm lasts 30 minutes.
The male praying mantis cannot copulate while its head is attached to its body. The female initiates sex by ripping the male’s head off.
The flea can jump 350 times its body length. It’s like a human jumping the length of a football field.
The catfish has over 27,000 taste buds.
Some lions mate over 50 times a day.
Butterflies taste with their feet.
The strongest muscle in the body is the tongue.
A cat’s urine glows under a black light.
Starfish have no brains.
Polar bears are left-handed.
Humans and dolphins are the only species that have sex for pleasure.
"""
if __name__ == '__main__':
asyncio.run(main())
|
'''
Creating the data in this format :
train_data = [
{
'context': "This tweet sentiment extraction challenge is great",
'qas': [
{
'id': "00001",
'question': "positive",
'answers': [
{
'text': "is great",
'answer_start': 43
}
]
}
]
}
]
Refer Docs : https://github.com/ThilinaRajapakse/simpletransformers#data-format
'''
import os
import json
import logging
import numpy as np
import pandas as pd
from simpletransformers.question_answering import QuestionAnsweringModel
BEST_MODEL_DIR = '../trained-models/simple-transformers'
def remove_nan_rows(df):
'''
Should be moved to utils.py
'''
pass
def return_answer_text(complete_text, selected_text):
index = complete_text.find(selected_text)
answer_dict = {'text' : selected_text , 'answer_start' : index}
return answer_dict
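# Note: the numpy rows handled below are assumed to follow the Kaggle
# tweet-sentiment-extraction CSV column order, i.e. [textID, text, selected_text, sentiment]
# for train.csv and [textID, text, sentiment] for test.csv.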
def generate_training_data_in_json(train_np_arr):
logging.info("Generating the json dump for training.")
output = []
for each_row in train_np_arr:
ctxt = each_row[1]
row_id = each_row[0]
question = each_row[3]
selected_answer = each_row[2]
answer = [return_answer_text(ctxt, selected_answer)]
question_and_answer_list = []
question_and_answer = {'id' : row_id, 'is_impossible' : False, 'question' : question , 'answers' : answer}
question_and_answer_list.append(question_and_answer)
output.append({'context' : ctxt, 'qas' : question_and_answer_list})
with open('../data/train.json', 'w') as output_file:
json.dump(output, output_file)
def generate_testing_data_in_json(test_np_arr):
logging.info("Logging.")
logging.info("Generating the json dump for training.")
output = []
for each_row in test_np_arr:
ctxt = each_row[1]
question = each_row[2]
question_and_answer = [{'question' : question, 'id' : each_row[0]}]
output.append({'context' : ctxt, 'qas' : question_and_answer})
with open('../data/test.json', 'w') as output_file:
json.dump(output, output_file)
return output
#def version_testing_data_in_json(test_np_arr):
def train():
model = QuestionAnsweringModel('distilbert',
'distilbert-base-uncased-distilled-squad',
args={'reprocess_input_data': True,
'overwrite_output_dir': True,
'learning_rate': 5e-5,
'num_train_epochs': 3,
'max_seq_length': 192,
'doc_stride': 64,
'fp16': False,
'best_model_dir': os.path.join(BEST_MODEL_DIR, 'distilbert-base-uncased-distilled-squad')
},
use_cuda=True)
model.train_model('../data/train.json')
def test(sub_df, test_array):
logging.info("Testing")
# Load the saved model.
model = QuestionAnsweringModel('distilbert', 'outputs/', args={})
predictions = model.predict(test_array)
predictions_df = pd.DataFrame.from_dict(predictions)
sub_df['selected_text'] = predictions_df['answer']
sub_df.to_csv('submission.csv', index=False)
print("sub file created")
def main():
train_df = pd.read_csv('../input/tweet-sentiment-extraction/train.csv')
test_df = pd.read_csv('../input/tweet-sentiment-extraction/test.csv')
sub_df = pd.read_csv('../input/tweet-sentiment-extraction/sample_submission.csv')
train_df.dropna(inplace=True)
test_df.dropna(inplace=True)
train_array = np.array(train_df)
test_array = np.array(test_df)
    #generate_training_data_in_json(train_array)
    test_list = generate_testing_data_in_json(test_array)
    #train()
    test(sub_df, test_list)
if __name__ == "__main__":
main()
|
from typing import Sequence, Tuple
from sympy.physics.units import Dimension, DimensionSystem
from sympy.physics.units.systems.si import dimsys_default
def extend(*args: Tuple[str, str, Dimension], dimsys: DimensionSystem = None) -> Tuple[DimensionSystem, Sequence[Dimension]]:
    '''Extends a dimension system by the given derived dimensions.
    Registering new derived dimensions is useful for simplifying expressions
    involving these dimensions. Take density, which we could define simply by
        density = si.mass/si.volume,
    and use in `pi_groups(density, ...)`. However, the result will be less
    readable than when density is registered as a derived dimension:
        newdimsys, density = extend(('density', 'rho', si.mass/si.volume), ...)
Params
------
args: Tuple[str, str, Dimension]
Sequence of derived dimensions to add. The first argument is the name,
the second a symbol (or None) and the last argument is the derived dimension.
dimsys: DimensionSystem, optional
Dimension system to extend. If not specified, extends the default system.
Returns
-------
dimsys: DimensionSystem
Extended dimension system
dims: Sequence[Dimension]
Derived dimensions
'''
if dimsys is None:
dimsys = dimsys_default
deps = dimsys.get_dimensional_dependencies
dims = [Dimension(dd[0], dd[1]) for dd in args]
depsdict = {dim: deps(dd[2]) for dim, dd in zip(dims, args)}
results = [dimsys.extend([], new_dim_deps=depsdict)] + dims
return results
def is_dimensionless(dim: Dimension, dimsys: DimensionSystem = None) -> bool:
'''Tests if the given dimension is dimensionless.'''
if dimsys is None:
dimsys = dimsys_default
return len(dimsys.get_dimensional_dependencies(dim)) == 0
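# Minimal usage sketch (illustrative only): register "density" as a named derived
# dimension and inspect it. The `mass` and `length` imports are assumed to be the
# base dimensions exported by sympy.physics.units.
if __name__ == '__main__':
    from sympy.physics.units import length, mass

    newdimsys, density = extend(('density', 'rho', mass / length**3))
    print(newdimsys.get_dimensional_dependencies(density))  # dependencies on mass and length
    print(is_dimensionless(density, newdimsys))             # False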
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import random
class TenArmedBanditGaussianRewardEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self, seed=42):
self._seed(seed)
self.num_bandits = 10
# each reward distribution is a gaussian described using mean and standard deviation
        self.reward_dist = [[self.np_random.uniform(0, 1), 0.5] for _ in range(self.num_bandits)]
self.action_space = spaces.Discrete(self.num_bandits)
self.observation_space = spaces.Discrete(1)
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
done = True
# sample reward using the corresponding reward distribution
        reward = self.np_random.normal(self.reward_dist[action][0], self.reward_dist[action][1])
return 0, reward, done, {}
def reset(self):
return 0
def render(self, mode='human'):
pass
def close(self):
pass
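if __name__ == '__main__':
    # Minimal interaction sketch (illustrative; uses the classic gym step API
    # returning (obs, reward, done, info), which this environment implements).
    env = TenArmedBanditGaussianRewardEnv(seed=0)
    obs = env.reset()
    for _ in range(5):
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        print(action, round(reward, 3), done)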
|
from pyrete.core.nodes import (
ReteGraph,
)
from pyrete.core.engine import (
RuleEngine,
)
from pyrete.core.data_layer import (
DataLayer,
)
from pyrete.core.variable_processor import (
VariableProcessor,
)
rule = {
'key': 'sample_rule',
'description': 'A sample rule',
'collections': [
'vehicles',
'persons',
'companies',
],
'variables': [
# {
# 'name': '$male_count',
# 'value': 'persons>>gender__get_frequency::M'
# },
# {
# 'name': '$persons_price',
# 'value': 'persons>>total_price'
# },
# {
# 'name': '$persons_sample',
# 'value': '__add::$persons_price||10'
# }
],
'when': {
'any': [
],
'all': [
# {
# 'name': '$male_count',
# 'operator': 'equal_to',
# 'value': 1
# },
{
'name': 'persons>>fav_color',
'operator': 'equal_to',
'value': 'red'
},
{
'name': 'vehicles>>wheels',
'operator': 'equal_to',
'value': 4
},
{
'name': 'persons>>fav_color',
'operator': 'equal_to',
'value': '^^vehicles>>color'
},
{
'name': 'persons>>budget',
'operator': 'greater_than_equal_to',
'value': '^^vehicles>>price'
},
{
'name': 'companies>>name',
'operator': 'equal_to',
'value': '^^vehicles>>company'
},
]},
'then': [
{
'key': 'award_points',
'trigger_type': 'print',
'params': [
{
'name': 'vehicle_model',
'value': 'vehicles>>model'
},
{
'name': 'person_name',
'value': 'persons>>name'
},
{
'name': 'company_name',
'value': 'companies>>name'
}
]
}]}
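# Convention note (inferred from this rule, not from pyrete documentation):
# 'collection>>field' addresses a field inside a collection, and a value prefixed
# with '^^' compares against a field taken from another collection in the same
# candidate combination.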
graph = ReteGraph()
graph.load_rule(rule)
# ---------------------- Fetch data from DB
data = DataLayer().get_data(
rules=[rule],
filter={},
limit=10)
# data['webhook'] = [{
# "_id": "1234",
# "gender": "F",
# "name": "akshata",
# "preference": "Maruti",
# "fav_color": "red"}]
# ---------------------- Rule variables
print('\n\nPROCESSING VARIABLES...')
VariableProcessor().process_variables(
data=data,
variable_objs=rule['variables'])
# ---------------------- Initiate rule engine
print('\n\nPROCESSING RULES...')
engine = RuleEngine()
trigger = engine.run_efficiently(
graphs=[graph],
data=data,
key=rule['key'],
email='[email protected]')
# ---------------------- Expected Output
# ....................................
# For ObjectNode: vehicles
# Items Processed: 5
# Items Passed: 3
# ....................................
# For ObjectNode: persons
# Items Processed: 4
# Items Passed: 1
# ....................................
# For ObjectNode: companies
# Items Processed: 3
# Items Passed: 3
# ...........................
# Data: {'persons': {u'fav_color': u'red', u'_id': ObjectId('5a1bec38814511659779e534'), u'name': u'akshata', u'budget': 50000000.0}, 'vehicles': {u'model': u'Omni', u'color': u'red', u'company': u'Maruti', u'wheels': 4.0, u'_id': ObjectId('5a0422c6bae3828177788d49'), u'price': 500000.0}, 'companies': {u'_id': ObjectId('5a1d26db4e6bc7ebf228ab9a'), u'name': u'Maruti'}}
# ...........................
# Data: {'persons': {u'fav_color': u'red', u'_id': ObjectId('5a1bec38814511659779e534'), u'name': u'akshata', u'budget': 50000000.0}, 'vehicles': {u'model': u'Swift', u'color': u'red', u'company': u'Maruti', u'wheels': 4.0, u'_id': ObjectId('5a042309bae3828177788d4b'), u'price': 6500000.0}, 'companies': {u'_id': ObjectId('5a1d26db4e6bc7ebf228ab9a'), u'name': u'Maruti'}}
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from setuptools import setup
import os
VERSION = "2.0.0"
def get_long_description():
with open(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md"),
encoding="utf8",
) as fp:
return fp.read()
setup(
name="iothub-client",
description="iothub-client is now azure-iot-device",
long_description=get_long_description(),
long_description_content_type="text/markdown",
version=VERSION,
author="Microsoft Corporation",
author_email="[email protected]",
license="MIT License",
license_files=("LICENSE",),
classifiers=["Development Status :: 7 - Inactive"],
install_requires=["azure-iot-device"],
)
|
# -*- coding: utf-8 -*-
import os
import logging
import requests
from wagtail.core import signals as wtcsig # type: ignore[import]
logger = logging.getLogger(__name__)
def trigger_netlify_build_hook(sender, **kwargs):
"""Tigger Netlify build hook."""
instance = kwargs['instance']
logger.info(
'Netlify build tiggered by: {0} - {1}'.format(sender, instance)
)
netlify_build_hook_url = os.getenv('NETLIFY_BUILD_HOOK_URL')
if not netlify_build_hook_url:
logger.error(
'No environment variable NETLIFY_BUILD_HOOK_URL found. '
            + 'Cannot trigger the frontend build.',
)
return None
response = requests.post(netlify_build_hook_url, data={})
try:
response.raise_for_status()
except requests.exceptions.HTTPError as error:
logger.error(
'Received error response from Netlify build hook:\n'
+ '\t{0}'.format(error),
)
else:
logger.info('Netlify build hook triggered successfully.')
wtcsig.page_published.connect(trigger_netlify_build_hook)
wtcsig.page_unpublished.connect(trigger_netlify_build_hook)
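# Wiring sketch (an assumption about project layout, not a requirement of this module):
# importing this module from an AppConfig.ready() hook guarantees the receivers above
# are connected when Django starts, e.g.
#
#   from django.apps import AppConfig
#
#   class CoreConfig(AppConfig):
#       name = 'core'
#
#       def ready(self):
#           from . import netlify_hooks  # noqa: F401  (this module)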
|
import pickle
classifier_f = open("naivebayes.pickle", "rb")
classifier = pickle.load(classifier_f)
classifier_f.close() |
from flask import Flask, request, redirect, render_template, send_file
from PrivateLib.PEKS.Othertools.utils import base64_to_byte, byte_to_base64
from werkzeug.datastructures import FileStorage
from PrivateLib.Receiver import *
from PrivateLib.Sender import *
from PrivateLib.Parser import *
import os, hashlib, requests
app = Flask(__name__)
UPLOAD_FOLDER = '/home/ubuntu/private_server/attachments/'
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
messages = {}
@app.route("/parse", methods=['GET'])
def subject_parse():
subject = request.args.get('subject')
attributes = parse_subject(subject)
attr_demo = { 'response': attributes }
return attr_demo
@app.route("/compose", methods=['GET', 'POST'])
def compose():
if request.method == 'GET':
token = request.args.get('token')
refresh_token = request.args.get('refresh_token')
return render_template("compose.html", token=token, refresh_token=refresh_token)
form_data = request.form
receiver = form_data.get('to', None)
cc_receivers = form_data.get('cc', None)
subject = form_data.get('subject', None)
attributs = form_data.get('AttrsList[]', None)
message = form_data.get('message', None)
files = request.files.getlist('attachments[]', None)
print(attributs.split(','))
if cc_receivers:
cc_receivers.replace(' ', '')
cc_receivers = cc_receivers.split(',')
the_attachments = None
if files[0].filename:
the_attachments = []
for file in files:
h = hashlib.blake2b(digest_size=8)
h.update(file.filename.encode('utf-8'))
hash_filename = h.hexdigest()
file.save(os.path.join(UPLOAD_FOLDER, hash_filename))
with open(os.path.join(UPLOAD_FOLDER, hash_filename), 'rb') as f:
data = f.read()
fchead, enc_fakename, enc_fdata = send(file.filename, byte_to_base64(data))
the_attachments.append( { 'name': file.filename, 'header': fchead, 'value': enc_fdata } )
cookies = {}
cookies['token'] = form_data['Cookies_token']
cookies['refresh_token'] = form_data['Cookies_refresh_token']
chead, enc_subject, enc_body = send(subject, message, AttrsList=attributs.split(','))
sending_data = { "Cookies": cookies, "Receiver": receiver, "Cc": cc_receivers,
"Subject": enc_subject, "Message": enc_body,
"Chead": chead, "Attachments": the_attachments }
result = requests.post('https://nsysunmail.ml/send', json=sending_data)
if result.text == "success":
print("Mail sends successfully.")
return redirect("https://nsysunmail.ml/")
else: return "Sorry,<br />Your authorization is expired,<br />please <a href='https://nsysunmail.ml'>re-authorize</a> and compose again !!"
@app.route("/show")
def show():
random_key = request.args.get('random_key')
if random_key in messages:
return render_template("read.html", userAddress=messages[random_key]['User'],
Receiver=messages[random_key]['Receiver'],
SUBJECT=messages[random_key]['Subject'],
SENDER=messages[random_key]['Sender'],
DATE=messages[random_key]['Date'],
mail_body=messages[random_key]['Message'],
attachments=messages[random_key]['Attachments'] )
else:
return redirect("https://nsysunmail.ml/404")
@app.route("/decrypt", methods=['POST'])
def decrypt():
random_key = os.urandom(16)
random_key = hashlib.sha224(random_key).hexdigest()
enc_data = request.get_json(force=True)
mail_body = enc_data['mail']
chead = enc_data['chead']
user = enc_data['user']
sender = enc_data['sender']
receiver = enc_data['receiver']
date_str = enc_data['date']
search_token = enc_data['token']
attachments_list = enc_data['attachments']
for file in attachments_list:
d_index = file['Content'].find('\n')
header = file['Content'][:d_index]
value = file['Content'][d_index:]
search_word = parse_subject(file['Name'])
search_word = ' '.join(search_word)
file_search_token = tokenGen(search_word)
file_name, file_content = unlock_mail(header, file_search_token, value)
file['File_url'] = "https://owenchen.cf/downloads/" + file_name
file['Content'] = None
h = hashlib.blake2b(digest_size=8)
h.update(file_name.encode('utf-8'))
name = h.hexdigest()
filepath = UPLOAD_FOLDER + name
with open(filepath, "wb") as f:
f.write(base64_to_byte(file_content))
subject, message = unlock_mail(chead, search_token, mail_body)
decrypted_data = {
"Subject": subject,
"Message": message,
"User": user,
"Sender": sender,
"Receiver": receiver,
"Date": date_str,
"Attachments": attachments_list
}
messages[random_key] = decrypted_data
return random_key
@app.route('/downloads/<path:filename>', methods=['GET'])
def download(filename):
h = hashlib.blake2b(digest_size=8)
h.update(filename.encode('utf-8'))
hash_filename = h.hexdigest()
path = "/home/ubuntu/private_server/attachments/" + hash_filename
return send_file(path, as_attachment=True, download_name=filename)
@app.route("/GetSearchToken", methods=['POST'])
def generate_token():
form_data = (request.form)['query']
classes = (request.form)['classes']
if form_data == "@all": return redirect('https://nsysunmail.ml/inbox/'+classes+'?query=all')
search_token = tokenGen(form_data)
print("Token generates successfully.")
search_url = 'https://nsysunmail.ml/inbox/'+classes+'?query=' + search_token
return redirect(search_url)
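if __name__ == '__main__':
    # Local development entry point (a sketch; in deployment this app is more likely
    # served by a WSGI server such as gunicorn behind the nsysunmail.ml frontend).
    app.run(host='0.0.0.0', port=5000)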
|
import re
from ..schema import types
from .external_documentation import ExternalDocumentation
from .paths import Paths
from .info import Info
from .tag import Tag
from .server import Server
from .components import Components
from .extensions import SpecificationExtensions
OPENAPI_VERSION = '3.0.1'
OpenApi = types.Schema(
name='OpenApi',
pattern_properties=SpecificationExtensions,
additional_properties=False,
properties={
'openapi': types.StringType(required=True, pattern=re.compile(r'^3\.\d+\.\d+$')),
'info': types.ObjectType(Info, required=True),
'servers': types.ArrayType(types.ObjectType(Server)),
'paths': types.ObjectType(
Paths,
required=True,
            messages={'additional_properties': "Path MUST start with a slash"}
),
'components': types.ObjectType(Components),
'security': types.DictType(types.ArrayType(types.StringType())),
'tags': types.ArrayType(types.ObjectType(Tag), unique_items=True, key='name'),
'externalDocs': types.ObjectType(ExternalDocumentation)
}
)
|
import os
import shutil
import random
import numpy as np
import cv2
import json
from detectron2.structures import BoxMode
"""
Set up directories and {train, val, test} splits according to a given configuration.
Also create a meta_info.json file in each split subdirectory containing the information
needed for the detectron2 input format (read back by get_dataset(path) and
get_cls_dataset(path)); a registration sketch using these readers follows get_cls_dataset below.
"""
def generate_datasets(data_path, config):
# Several assertions
assert(config["train"] > 0)
assert(config["val"] > 0)
assert(config["test"] >=0)
norm = config["train"] + config["val"] + config["test"]
# Create {train,val[,test]} directories
train_dir = os.path.join(data_path, 'train')
val_dir = os.path.join(data_path, 'val')
test_dir = os.path.join(data_path, 'test')
if os.path.exists(train_dir): shutil.rmtree(train_dir)
if os.path.exists(val_dir): shutil.rmtree(val_dir)
if os.path.exists(test_dir): shutil.rmtree(test_dir)
os.makedirs(train_dir)
os.makedirs(val_dir)
if config["test"] > 0:
os.makedirs(test_dir)
# Check if every label file has a corresponding image
images_dir = os.path.join(data_path, 'images')
labels_dir = os.path.join(data_path, 'labels')
label_files = os.listdir(labels_dir)
for label_file in label_files:
file_name = str(label_file).rsplit(".txt")[0]
assert(os.path.exists(os.path.join(images_dir, file_name+'.jpg')))
# Shuffle the labels list
random.seed(config["seed"])
random.shuffle(label_files)
# Get the splitting values
n_total = len(label_files)
n_train = int(np.floor(config["train"] * n_total / norm))
n_val = int(np.floor(config["val"] * n_total / norm) + n_train)
# Loop over every labels and store them into the according directory,
# along with a json file containing annotations {bbox, segmentation, category}
# for each image
dataset_dicts = []
split_offset = 0
output_dir = os.path.join(data_path, 'train')
for n, label_file in enumerate(label_files):
if n == n_train:
with open(os.path.join(train_dir, 'meta_info.json'), 'w') as f:
info = { "name": "train_dataset", "data": dataset_dicts }
json.dump(info, f, indent=4, separators=(',', ': '))
dataset_dicts = []
split_offset = n_train
output_dir = os.path.join(data_path, 'val')
elif n == n_val:
with open(os.path.join(val_dir, 'meta_info.json'), 'w') as f:
info = { "name": "val_dataset", "data": dataset_dicts }
json.dump(info, f, indent=4, separators=(',', ': '))
dataset_dicts = []
split_offset = n_val
output_dir = os.path.join(data_path, 'test')
else:
pass
img_name = str(label_file).replace('.txt', '.jpg')
shutil.copy(os.path.join(images_dir, img_name),os.path.join(output_dir, img_name))
record = {}
objs = []
filename = os.path.join(output_dir, img_name)
height, width = cv2.imread(filename).shape[:2]
record["file_name"] = filename
record["image_id"] = n-split_offset
record["height"] = height
record["width"] = width
with open(os.path.join(labels_dir, label_file)) as f:
for line in f.readlines():
words = line.split(' ')
assert(len(words) == 5)
category = words[0]
xmin = int(words[1])
xmax = int(words[2])
ymin = int(words[3])
ymax = int(words[4])
px = [xmin, xmax, xmax, xmin, xmin]
py = [ymin, ymin, ymax, ymax, ymin]
poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)]
poly = [p for x in poly for p in x]
obj = {
"bbox": [xmin, ymin, xmax, ymax],
"bbox_mode": BoxMode.XYXY_ABS,
"segmentation": [poly],
"category_id": int(config["mapping"][category]),
"iscrowd": 0
}
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if config["test"] > 0:
with open(os.path.join(test_dir, 'meta_info.json'), 'w') as f:
info = { "name": "test_dataset", "data": dataset_dicts }
json.dump(info, f, indent=4, separators=(',', ': '))
else:
with open(os.path.join(val_dir, 'meta_info.json'), 'w') as f:
info = { "name": "val_dataset", "data": dataset_dicts }
json.dump(info, f, indent=4, separators=(',', ': '))
def get_dataset(dataset_dir):
with open(os.path.join(dataset_dir, 'meta_info.json')) as f:
meta_info = json.load(f)
for element in meta_info['data']:
for anno in element['annotations']:
            # Because the dicts are round-tripped through JSON, the
            # <BoxMode.XYXY_ABS: 0> enum member gets converted to the plain int 0.
            # Later, when converting to COCO format, .value is looked up on that
            # int, which raises an error, so restore the enum here.
anno['bbox_mode'] = BoxMode.XYXY_ABS
return meta_info['data']
def get_cls_dataset(dataset_dir):
with open(os.path.join(dataset_dir, 'meta_info.json')) as f:
meta_info = json.load(f)
dict_list = []
image_dict = {}
for element in meta_info['data']:
image_dict = {
"file_name": element["file_name"],
"image_id": element["image_id"],
"height": element["height"],
"width": element["width"],
"label": 0 # intact
}
if len(element["annotations"]) > 0:
image_dict["label"] = 1 # defective
dict_list.append(image_dict)
return dict_list
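def register_with_detectron2(data_path, class_names):
    """Registration sketch (illustrative; the dataset names and class list here are
    assumptions): expose the generated splits through detectron2's catalogs so that a
    config can refer to them by name."""
    from detectron2.data import DatasetCatalog, MetadataCatalog
    for split in ('train', 'val', 'test'):
        split_dir = os.path.join(data_path, split)
        if not os.path.isdir(split_dir):
            continue
        name = 'xris_' + split
        DatasetCatalog.register(name, lambda d=split_dir: get_dataset(d))
        MetadataCatalog.get(name).set(thing_classes=class_names)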
if __name__ == '__main__':
# PATH to dataset
path = 'C:/Users/sprum/Workspace/Anaconda3/TFE/defect-detection/datasets/ADRIC-XRIS-FAL-SYN-SIMP'
# CONFIG
config = {
"seed": 1234, # seed to control randomness in split generation
"train": 0.7, # percentage of images in training dataset (must be >0)
"val": 0.1, # percentage of images in validation dataset (must be >0)
"test": 0.2, # percentage of images in test dataset (can be 0)
"mapping": { # mapping from cytomine label to detectron label
"42459392": 0, # here 2 kinds of annotation are merged into one
"42459398": 0
}
}
generate_datasets(path, config) |
from bs4 import BeautifulSoup
from bs4 import NavigableString
import requests
import json
import datetime
import re
monsterHolder = {}
monsterHolder['name'] = 'Pathfinder 2.0 monster list'
monsterHolder['date'] = datetime.date.today().strftime("%B %d, %Y")
attackEle = set(("Critical Success", "Success", "Failure", "Effect", "Frequency", "Requirement"))
def get_single(link):
details = {}
itemDetails = {}
res2 = requests.get(link)
res2.raise_for_status()
soup2 = BeautifulSoup(res2.text, 'lxml')
detail = soup2.find(lambda tag: tag.name=='span' and tag.has_attr('id') and tag['id']=="ctl00_MainContent_DetailedOutput")
attacks = soup2.find_all("span", {'class':'hanging-indent'})
knowledgeCheck = ""
inDamage = False
attack = {}
attackHolder = []
spellProc = False
spellHolder = []
spell = {}
for att in attacks:
inDamage = False
attack = {}
children2 = att.contents
for child2 in children2:
stringContents2 = str(child2)
if stringContents2.startswith("<"):
if child2.name == "b":
if "Spells" in stringContents2:
spellProc = True
if 'name' in spell:
spellHolder.append(spell)
spell = {}
spell['name'] = child2.text
else:
x = re.search("\A\d+(th|st|rd|nd)", child2.text)
if 'Cantrips' in child2.text:
if 'text' in spell:
spell['text'] += child2.text
else:
spell['text'] = child2.text
elif x:
if 'text' in spell:
spell['text'] += child2.text
else:
spell['text'] = child2.text
else:
spellProc = False
#not sure if this should be in the iff or not
if child2.text == "Damage":
inDamage = True
elif child2.text in attackEle:
attack['text'] += child2.text
else:
attack['name'] = child2.text
if child2.name == "i":
pass
if child2.name == "img":
attack['actions'] = child2['alt']
if child2.name == "a":
if spellProc:
if 'text' in spell:
spell['text'] += child2.text
else:
spell['text'] = child2.text
elif 'text' in attack:
attack['text'] += child2.text
else:
attack['text'] = child2.text
else:
if inDamage:
attack['damage'] = stringContents2
else:
if spellProc:
if 'text' in spell:
spell['text'] += stringContents2
else:
spell['text'] = stringContents2
else:
if 'text' in attack:
attack['text'] += stringContents2
else:
attack['text'] = stringContents2
attackHolder.append(attack)
traits = detail.find_all("span", {"class" : lambda L: L and L.startswith('trai')})
traitHolder = []
for trait in traits:
traitHolder.append(trait.text)
details['traits'] = traitHolder
children = detail.contents
detailHolder = []
tagType = ""
h1Count = 0
hrCount = 0
inActions = False
inAttacks = False
pastHp = False
actionHolder = []
action = {}
itemHolder = []
skillsHolder = []
string = ","
langHolder = []
attack = {}
for child in children:
stringContents = str(child)
if "All Monsters in" in stringContents:
break
if stringContents.startswith("<"):
#print(child.name,"|",tagType)
if child.name == "hr":
hrCount += 1
if hrCount == 1:
inActions = True
if hrCount == 2:
inActions = False
inAttacks = True
tagType = ""
if child.name == "h1":
h1Count += 1
if child.name == "h3":
tagType = ""
if child.name == "a":
try:
if child['class'][0] == "external-link" :
details['source'] = child.text
#print("In here 5")
tagType = ""
except:
pass
if not child.text.isspace():
#print(child.text,"|",tagType)
if spellProc:
if 'text' in spell:
spell['text'] += child.text
else:
spell['text'] = child.text
elif tagType == "Skills":
skillsHolder.append(child.text)
elif tagType == "Items":
itemHolder.append(child.text)
elif tagType == "Languages":
langHolder.append(child.text)
elif tagType == "Resistances":
if tagType in details:
details[tagType] += child.text
else:
details[tagType] = child.text
elif inActions:
if 'text' in action:
action['text'] += child.text
else:
action['text'] = child.text
if child.name == "u":
if not child.text.isspace():
if tagType == "Skills":
skillsHolder.append(child.text)
if tagType == "Items":
itemHolder.append(child.text)
if child.name == "b":
spellProc = False
if inActions and pastHp:
if child.text != "Trigger" and child.text != "Immunities" and child.text != "Resistances" and child.text != "Effect" and child.text != "Weaknesses":
#print("in here 1")
tagType = ""
if (len(action.keys()) > 0):
actionHolder.append(action)
action = {}
action['name'] = child.text
else:
#print(child.text)
if "Spells" not in child.text:
tagType = child.text
elif inAttacks:
if "Spells" in child.text:
spellProc = True
#print("in here 2")
tagType = ""
if 'name' in spell:
spellHolder.append(spell)
spell = {}
spell['name'] = child.text
continue
else:
spellProc = False
x = re.search("\A\d+(th|st|rd|nd)", child.text)
esc = re.escape("(")
y = re.search("\A"+esc +"\d+(th|st|rd|nd)",child.text)
if 'Cantrips' in child.text:
spellProc = True
if 'text' in spell:
spell['text'] += child.text
else:
spell['text'] = child.text
#print("IN here 3")
                            tagType = ""
elif x:
spellProc = True
if 'text' in spell:
spell['text'] += child.text
else:
spell['text'] = child.text
#print("In here 6")
                            tagType = ""
elif y:
spellProc = True
if 'text' in spell:
spell['text'] += child.text
else:
spell['text'] = child.text
#print("In here 7")
                            tagType = ""
else:
spellProc = False
#print("2nd:",child.text)
if 'name' in attack and (not tagType.startswith("Speed")) :
pass
else:
tagType = child.text
#print("3rd:",tagType)
else:
if(child.text != "Source" and child.text != "Trigger" and "Spells" not in child.text):
tagType = child.text
if "Recall Knowledge" in tagType:
startParen = stringContents.find("(")
endParen = stringContents.find(")")
knowledgeCheckStr = stringContents[startParen:endParen]
endForReal = knowledgeCheckStr.find("</a></u>")
knowledgeCheck = knowledgeCheckStr[32:endForReal]
if tagType == "HP":
pastHp = True
#print("In here 11:",tagType)
if child.name == "img":
if inActions:
action['action'] = child['alt']
else:
details['actions'] = child['alt']
if child.name == "i":
#print(spellProc,child.text)
if spellProc:
if 'text' in spell:
spell['text'] += child.text
else:
spell['text'] = child.text
elif tagType != "":
if not stringContents.isspace():
details[tagType] = stringContents
#print("In here 10:",tagType)
if child.name == "li":
if inActions:
action['Effect'] += child.text
if child.name == "ul":
if inActions:
action['Effect'] += child.text
#else:
#if not stringContents.isspace() :
#detailHolder.append(child.text)
else:
if tagType != "" :
if not stringContents.isspace():
if inActions and ('name' in action) and (tagType != "Speed"):
action[tagType] = stringContents
else:
if tagType == "Skills":
skillsHolder.append(stringContents)
elif tagType == "Items":
itemHolder.append(stringContents)
elif "Recall Knowledge" in tagType:
details['recallKnowledge'] = knowledgeCheck + stringContents
else:
if tagType in details:
details[tagType] += stringContents
else:
details[tagType] = stringContents
else:
if inActions:
if 'text' in action:
action['text'] += stringContents
else:
action['text'] = stringContents
elif h1Count < 2:
detailHolder.append(stringContents)
elif spellProc:
if 'text' in spell:
spell['text'] += stringContents
else:
spell['text'] = stringContents
#print("4th:",stringContents,"|type:",tagType)
#print(child)
#print("final:",stringContents)
if 'name' in action:
actionHolder.append(action)
if 'name' in spell:
spellHolder.append(spell)
details['spells'] = spellHolder
details['Languages'] = langHolder
details['skills'] = skillsHolder
details['items'] = itemHolder
details['actions'] = actionHolder
details['attacks'] = attackHolder
    details['text'] = " ".join(detailHolder)
return details
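# Illustrative shape of get_single()'s return value (keys assembled above; values
# are scraped text or lists of dicts):
#   {'source': ..., 'spells': [...], 'skills': [...], 'items': [...],
#    'actions': [...], 'attacks': [...], 'Languages': [...], 'text': ...,
#    plus one entry per parsed stat-block heading (e.g. 'HP', 'Speed')}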
def get_all():
monsters = []
res = requests.get("https://2e.aonprd.com/Monsters.aspx?Letter=All")
res.raise_for_status()
soup = BeautifulSoup(res.text, 'html.parser')
table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="ctl00_MainContent_TableElement")
rows = table.findAll(lambda tag: tag.name=='tr')
t = 0
for row in rows:
t += 1
#print(row)
#print("-----------------------------------")
monster = {}
entries = row.find_all(lambda tag: tag.name=='td')
#print(len(entries))
if entries is not None:
if len(entries) > 0:
monster['name'] = entries[0].find("a").text
monster['link'] = "https://2e.aonprd.com/"+entries[0].find("a")['href']
monster['family'] = entries[1].text
monster['level'] = int(entries[2].text)
monster['alignment'] = entries[3].text
monster['type'] = entries[4].text
monster['size'] = entries[5].text
monsters.append(monster)
#if t > 3:
#break
for monster in monsters:
print("Getting details for :",monster['name'])
monsterDetails = get_single(monster['link'])
for key in monsterDetails.keys():
monster[key] = monsterDetails[key]
return monsters
monsterHolder['monsters'] = get_all()
json_data = json.dumps(monsterHolder, indent=4)
#print(monsters)
filename = "monsters-v2-pf2.json"
f = open(filename, "w")
f.write(json_data)
f.close()
|
# -*- encoding: utf-8 -*-
# django apps
from django.db import models
class CardStatus(models.Model):
    ''' Bank card status '''
name = models.CharField(max_length=16, verbose_name='名称')
remark = models.TextField(blank=True, verbose_name='备注')
def __str__(self):
return self.name
class CardOperateType(models.Model):
    ''' Bank card operation type '''
name = models.CharField(max_length=16, verbose_name='名称')
remark = models.TextField(blank=True, verbose_name='备注')
def __str__(self):
return self.name
class Card(models.Model):
    ''' Bank card '''
balance = models.IntegerField(verbose_name='余额', default=0)
balance_available = models.IntegerField(verbose_name='可用金额', default=0)
balance_freeze = models.IntegerField(verbose_name='冻结金额', default=0)
status = models.ForeignKey(
'CardStatus',
on_delete=models.CASCADE,
verbose_name='状态',
)
def __str__(self):
return '{card_id} - {balance}'.format(
card_id=self.id,
balance=self.balance,
)
def name(self):
return self.cardinfo.name
name.short_description = '姓名'
def to_json(self):
info = {
'id': self.id,
'balance': self.balance,
'balance_available': self.balance_available,
'balance_freeze': self.balance_freeze,
'status': self.status_id,
}
return info
class CardInfo(models.Model):
    ''' User information '''
name = models.CharField(max_length=64, verbose_name='姓名')
phone = models.CharField(max_length=64, verbose_name='电话', blank=True)
email = models.EmailField(blank=True)
card = models.OneToOneField(
'Card',
on_delete=models.DO_NOTHING,
)
def __str__(self):
return self.name
class CardHistory(models.Model):
    ''' Bank card transaction history '''
time = models.DateTimeField(auto_now_add=True, verbose_name='时间')
remark = models.TextField(verbose_name='说明')
card = models.ForeignKey(
'Card',
on_delete=models.DO_NOTHING,
verbose_name='银行卡',
)
operate = models.ForeignKey(
'CardOperateType',
on_delete=models.DO_NOTHING,
verbose_name='操作类型',
)
def __str__(self):
return '{time} - {card_id} - {operator}'.format(
time=self.time.isoformat(),
card_id=self.card.id,
            operator=self.operate.name,
)
|
import unittest
import sys
import os
sys.path.append(os.path.dirname(os.getcwd()))
import cjkstr
class TestCJKStr(unittest.TestCase):
def test_count_cjk_chars(self):
self.assertEqual(cjkstr.count_cjk_chars("hello"), 0)
self.assertEqual(cjkstr.count_cjk_chars("測試一下"), 4)
self.assertEqual(cjkstr.count_cjk_chars("測試 English"), 2)
self.assertEqual(cjkstr.count_cjk_chars("試してみる"), 5)
self.assertEqual(cjkstr.count_cjk_chars("Python 的日文叫パイソン"), 8)
self.assertEqual(cjkstr.count_cjk_chars("파파이썬 테스트"), 7)
self.assertEqual(cjkstr.count_cjk_chars("Python 的韓文叫파이썬"), 7)
with self.assertRaises(TypeError):
cjkstr.count_cjk_chars(1)
with self.assertRaises(TypeError):
cjkstr.count_cjk_chars(23.4)
with self.assertRaises(TypeError):
cjkstr.count_cjk_chars(True)
with self.assertRaises(TypeError):
cjkstr.count_cjk_chars(5+3j)
|
class persona():
    def __init__(self, nombre, apellido, cedula, telefono, direccion):
        self.nombre = nombre
        self.apellido = apellido
        self.cedula = cedula
        self.telefono = telefono
        self.direccion = direccion
    def __repr__(self):
        return "nombre: "+self.nombre+" apellido: "+self.apellido+" cedula: "+str(self.cedula)+" telefono: "+str(self.telefono)+" direccion: "+self.direccion
    def guardar(lista):
        archivo = open("persona.csv", "w")
        for p in lista:
            archivo.write(p.nombre+","+p.apellido+","+p.cedula+","+p.telefono+","+p.direccion+"\n")
        archivo.close()
    def cargar():
        archivo = open("persona.csv")
        lista = []
        for linea in archivo:
            dato = linea.strip().split(",")
            per = persona(dato[0], dato[1], dato[2], dato[3], dato[4])
            lista.append(per)
        archivo.close()
        return lista
def nueva ():
nombre= input("ingrese nombre")
apellido=input("ingrese apellido")
cedula=input("ingrese cedula")
telefono=input("Ingrese telefono")
direccion=input("ingrese direccion")
return persona (nombre,apellido,cedula,telefono,direccion)
def buscar (lista,nombre):
resultado=[]
for p in lista:
if p.nombre==nombre:
resultado.append(p)
return resultado
print("AGENDA")
print ("MENU PRINCIPAL \n 1-Guardar un Contacto \n 2-Cargar datos \n 3-Buscar un contacto \n 4-SALIR")
opciones=["1","2","3","4"]
x=""
lista_personas=[]
while x!="4":
x=input ("ingrese opcion deseada")
if x=="1":
p1=persona.nueva()
lista_personas.append(p1)
persona.guardar(lista_personas)
elif x=="2":
lista_personas= persona.cargar()
elif x=="3":
nombre=input("ingrese nombre que desea buscar: ")
lis=persona.buscar(lista_personas,nombre)
for p in lis:
print(p)
else:
print ("Usted ha finalizado el programa.")
a=input()
# end of program
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-03 15:59
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('markets3', '0005_auto_20160803_1024'),
]
operations = [
migrations.AlterField(
model_name='market',
name='local_customer_service_notes',
field=models.CharField(blank=True, max_length=200, null=True, verbose_name='notes'),
),
migrations.AlterField(
model_name='market',
name='misc10',
field=ckeditor.fields.RichTextField(blank=True, help_text='Website traffic - grey box1', null=True),
),
migrations.AlterField(
model_name='market',
name='misc11',
field=ckeditor.fields.RichTextField(blank=True, help_text='Website traffic - grey box2', null=True),
),
migrations.AlterField(
model_name='market',
name='misc12',
field=ckeditor.fields.RichTextField(blank=True, help_text='Website traffic - grey box3', null=True),
),
migrations.AlterField(
model_name='market',
name='misc13',
field=ckeditor.fields.RichTextField(blank=True, help_text='Website traffic - grey box4', null=True),
),
migrations.AlterField(
model_name='market',
name='misc14',
field=ckeditor.fields.RichTextField(blank=True, help_text='Demographic profile', null=True),
),
migrations.AlterField(
model_name='market',
name='misc15',
field=ckeditor.fields.RichTextField(blank=True, help_text='Product upload process', null=True),
),
migrations.AlterField(
model_name='market',
name='misc16',
field=ckeditor.fields.RichTextField(blank=True, help_text='Customer support', null=True),
),
migrations.AlterField(
model_name='market',
name='misc17',
field=ckeditor.fields.RichTextField(blank=True, help_text='Local return address (Yes/No)', null=True),
),
migrations.AlterField(
model_name='market',
name='misc18',
field=ckeditor.fields.RichTextField(blank=True, help_text='Return rates', null=True),
),
migrations.AlterField(
model_name='market',
name='misc19',
field=ckeditor.fields.RichTextField(blank=True, help_text='Marketing and merchandising', null=True),
),
migrations.AlterField(
model_name='market',
name='misc20',
field=ckeditor.fields.RichTextField(blank=True, help_text='Local incorporation', null=True),
),
migrations.AlterField(
model_name='market',
name='misc21',
field=ckeditor.fields.RichTextField(blank=True, help_text='Local bank account', null=True),
),
migrations.AlterField(
model_name='market',
name='misc22',
field=ckeditor.fields.RichTextField(blank=True, help_text='Exclusivity', null=True),
),
migrations.AlterField(
model_name='market',
name='misc23',
field=ckeditor.fields.RichTextField(blank=True, help_text='Translation', null=True),
),
migrations.AlterField(
model_name='market',
name='misc24',
field=ckeditor.fields.RichTextField(blank=True, help_text='Payment time', null=True),
),
migrations.AlterField(
model_name='market',
name='misc25',
field=ckeditor.fields.RichTextField(blank=True, help_text='Exchange rate', null=True),
),
migrations.AlterField(
model_name='market',
name='misc26',
field=ckeditor.fields.RichTextField(blank=True, help_text='Bond required', null=True),
),
]
|
from typing import List
def transform_to_string(groups: List[List[str]]) -> str:
    # Sort each sublist, join its items with commas, then join the groups with semicolons.
    return ';'.join([','.join([str(x) for x in sorted(sublist)]) for sublist in groups]) |
import argparse
from pprint import pprint
import platform
def parse_args(version):
'''
Parse arguments.
'''
# Basic argument parsing.
parser = argparse.ArgumentParser(
description='EDAPI: Elite Dangerous API Tool',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
# Version
parser.add_argument('--version',
action='version',
version='%(prog)s ' + version)
# Debug
parser.add_argument("--debug",
action="store_true",
default=False,
help="Output additional debug info.")
# colors
default = (platform.system() == 'Windows')
parser.add_argument("--no-color",
dest="nocolor",
action="store_true",
default=default,
help="Disable the use of ansi colors in output.")
# Base file name.
parser.add_argument("--basename",
default="edapi",
help='Base file name. This is used to construct the\
cookie and vars file names.')
# vars file
parser.add_argument("--vars",
action="store_true",
default=False,
help="Output a file that sets environment variables\
for credits and current system/station.")
# Import from JSON
parser.add_argument("--import",
metavar="FILE",
dest="json_file",
default=None,
help="Import API info from a JSON file instead of the\
API. Used mostly for debugging purposes.")
# Export to JSON
parser.add_argument("--export",
metavar="FILE",
default=None,
help="Export API response to a file as JSON.")
# EDDN
parser.add_argument("--eddn",
action="store_true",
default=False,
help="Post price, shipyards, and outfitting to the \
EDDN.")
# keys
parser.add_argument("--keys",
action="append",
nargs="*",
help="Instead of normal import, display raw API data\
given a set of dictionary keys.")
# tree
parser.add_argument("--tree",
action="store_true",
default=False,
help="Used with --keys. If present will print all\
                        content below the specified key.")
# Hashing CMDR name
parser.add_argument("--hash",
action="store_true",
default=False,
help="Obfuscate commander name for EDDN.")
# Force login
parser.add_argument("--login",
action="store_true",
default=False,
help="Clear any cached user login cookies and force\
login. (Doesn't clear the machine token)")
# Parse the command line.
args = parser.parse_args()
if args.debug:
pprint(args)
return args
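# Illustrative call sketch (not part of the original module); the version string
# passed in is hypothetical:
#
#   args = parse_args('1.0.0')
#   if args.export:
#       print('Will export the API response to', args.export)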
|
# Copyright (C) [2022] by Cambricon, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=useless-object-inheritance, too-many-instance-attributes
# pylint: disable=attribute-defined-outside-init, too-many-statements
# pylint: disable=too-many-arguments, too-many-locals
"""Expm1 operator implementation using BANGPy TCP API."""
import bangpy
from bangpy import tcp
from bangpy.tcp.util import round_up, round_down
from bangpy.common import utils, load_op_by_type
from bangpy.platform.bang_config import ALIGN_LENGTH, TARGET
from bangpy.tcp.runtime import TaskType
DTYPES = [bangpy.float16, bangpy.float32]
TARGET_LIST = ["mlu370-s4", "mlu220-m2", "mlu270", "mlu290"]
KERNEL_NAME = "expm1"
class Expm1(object):
def __init__(self, dtype, target, task_num):
self.dtype = dtype
self.target = target
self.task_num = task_num
self.bp = tcp.TCP(target)
self.dim_n = self.bp.SizeVar("dim_n")
self.dim_h = self.bp.SizeVar("dim_h")
self.dim_w = self.bp.SizeVar("dim_w")
self.dim_c = self.bp.SizeVar("dim_c")
self.nram_size = TARGET(target).nram_size
self.dtype_sz = dtype.bytes
self.buffer_one = self.bp.Scalar(
name="ONE",
dtype=self.dtype,
value=1,
)
self.buffer_in = self.bp.Buffer(
shape=(self.dim_n, self.dim_h, self.dim_w, self.dim_c),
dtype=self.dtype,
name="buffer_in",
scope="global"
)
self.buffer_out = self.bp.Buffer(
shape=(self.dim_n, self.dim_h, self.dim_w, self.dim_c),
dtype=self.dtype,
name="buffer_out",
scope="global"
)
self.single_nram_size = round_down(
(self.nram_size - 30 * 1024) // 4 // self.dtype_sz, ALIGN_LENGTH
)
self.bp.launch_task(self.task_num, 1, 1)
def compute(self):
self.bp.exp(self.buffer_in_n, self.buffer_in_n)
self.bp.subtract(self.buffer_out_n, self.buffer_in_n, self.buffer_one)
def compute_body(self):
data_num = self.bp.Scalar(dtype=bangpy.int32, name="data_num")
data_num.assign(self.dim_n * self.dim_h * self.dim_w * self.dim_c)
average_core = self.bp.Scalar(dtype=bangpy.int32, name="average_core")
average_core.assign(data_num / self.task_num)
remain_core = self.bp.Scalar(dtype=bangpy.int32, name="remain")
remain_core.assign(data_num % self.task_num)
# flatten
flatten_buffer_in = self.buffer_in.reshape((data_num,))
flatten_buffer_out = self.buffer_out.reshape((data_num,))
task_id = self.bp.taskId
core_start = task_id * average_core
core_end = core_start + average_core
repeat = average_core // self.single_nram_size
remain = average_core % self.single_nram_size
with self.bp.for_range(0, repeat, stage=1) as i:
start = core_start + i * self.single_nram_size
end = start + self.single_nram_size
# nram
self.buffer_in_n = self.bp.Buffer(
shape=(self.single_nram_size,),
name="INPUT_N",
dtype=self.dtype,
scope="nram",
)
self.buffer_out_n = self.bp.Buffer(
shape=(self.single_nram_size,),
name="OUTPUT_N",
dtype=self.dtype,
scope="nram",
)
with self.bp.block(stage_scope="data_copy"):
self.bp.memcpy(self.buffer_in_n, flatten_buffer_in[start:end])
with self.bp.block(stage_scope="compute"):
self.compute()
with self.bp.block(stage_scope="data_copy"):
self.bp.memcpy(flatten_buffer_out[start:end], self.buffer_out_n)
with self.bp.if_scope(remain != 0):
start = core_start + repeat * self.single_nram_size
end = start + remain
self.bp.memcpy(self.buffer_in_n[:remain], flatten_buffer_in[start:end])
self.compute()
self.bp.memcpy(flatten_buffer_out[start:end], self.buffer_out_n[:remain])
with self.bp.if_scope(remain_core != 0):
with self.bp.if_scope(task_id == self.task_num - 1):
start = task_id * average_core
end = start + remain_core
self.bp.memcpy(self.buffer_in_n[:remain_core], flatten_buffer_in[start:end])
self.compute()
self.bp.memcpy(flatten_buffer_out[start:end], self.buffer_out_n[:remain_core])
self.buffer_out = flatten_buffer_out.reshape((self.dim_n, self.dim_h, self.dim_w, self.dim_c))
return self.bp.BuildBANG(
inputs=[
self.buffer_in
],
outputs=[
self.buffer_out
],
kernel_name=KERNEL_NAME,
)
@tcp.register_mlu_op(DTYPES, TARGET_LIST, KERNEL_NAME)
def build_expm1(dtype=None, target=None):
task_num = TARGET(target).cluster_num * TARGET(target).core_num
f = Expm1(dtype, target, task_num).compute_body()
return f
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Nekbone(Package):
"""NEK5000 emulation software called NEKbone. Nekbone captures the basic
structure and user interface of the extensive Nek5000 software.
Nek5000 is a high order, incompressible Navier-Stokes solver based on
the spectral element method."""
homepage = "https://github.com/Nek5000/Nekbone"
url = "https://github.com/Nek5000/Nekbone/tarball/v17.0"
tags = ['proxy-app', 'ecp-proxy-app']
version('17.0', 'cc339684547614a0725959e41839fec1', git='https://github.com/Nek5000/Nekbone.git')
version('develop', git='https://github.com/Nek5000/Nekbone.git')
# Variants
variant('mpi', default=True, description='Build with MPI')
# dependencies
depends_on('mpi', when='+mpi')
@run_before('install')
def fortran_check(self):
if not self.compiler.fc:
msg = 'Nekbone can not be built without a Fortran compiler.'
raise RuntimeError(msg)
def install(self, spec, prefix):
mkdir(prefix.bin)
fc = self.compiler.fc
cc = self.compiler.cc
if '+mpi' in spec:
fc = spec['mpi'].mpif77
cc = spec['mpi'].mpicc
# Install Nekbone in prefix.bin
install_tree("../Nekbone", prefix.bin.Nekbone)
# Install scripts in prefix.bin
nekpmpi = 'test/example1/nekpmpi'
makenek = 'test/example1/makenek'
install(makenek, prefix.bin)
install(nekpmpi, prefix.bin)
with working_dir(prefix.bin):
filter_file(r'^SOURCE_ROOT\s*=.*', 'SOURCE_ROOT=\"' +
prefix.bin.Nekbone + '/src\"', 'makenek')
filter_file(r'^CC\s*=.*', 'CC=\"' + cc + '\"', 'makenek')
filter_file(r'^F77\s*=.*', 'F77=\"' + fc + '\"', 'makenek')
if '+mpi' not in spec:
filter_file(r'^#IFMPI=\"false\"', 'IFMPI=\"false\"', 'makenek')
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.task.target_restriction_mixins import (
DeprecatedSkipAndDeprecatedTransitiveGoalOptionsRegistrar,
HasSkipAndTransitiveGoalOptionsMixin,
)
class FmtGoalRegistrar(DeprecatedSkipAndDeprecatedTransitiveGoalOptionsRegistrar):
@classmethod
def register_options(cls, register):
super().register_options(register)
register("--only", type=str, default=None, fingerprint=True, advanced=True,
help="Only run the specified formatter. Currently the only accepted values are "
"`scalafix` or not setting any value.")
class FmtTaskMixin(HasSkipAndTransitiveGoalOptionsMixin):
"""A mixin to combine with code formatting tasks."""
goal_options_registrar_cls = FmtGoalRegistrar
target_filtering_enabled = True
|
from rest_framework import generics, mixins
# NOTE: Users and UsersSerializer are referenced but not imported in the original
# snippet; they are assumed to come from this app's models/serializers modules.
class user_list(mixins.ListModelMixin, generics.GenericAPIView):
    queryset = Users.objects.all()
    serializer_class = UsersSerializer
    def get(self, request, *args, **kwargs):
        return self.list(request, *args, **kwargs) |
'''
Storage for common doc strings and templates shared in non-related classes and methods.
'''
class DOC_TEMPLATE:
#---------------------------------------------------------------------------
# functions
to_html = '''
Return an HTML table representation of this {class_name} using standard TABLE, TR, and TD tags. This is not a complete HTML page.
Args:
config: Optional :py:class:`static_frame.DisplayConfig` instance.
'''
to_html_datatables = '''
    Return a complete HTML representation of this {class_name} using the DataTables JS library for table navigation and search. The page links to CDNs for JS resources, and thus will not fully render without an internet connection.
Args:
fp: optional file path to write; if not provided, a temporary file will be created. Note: the caller is responsible for deleting this file.
show: if True, the file will be opened with a webbrowser.
config: Optional :py:class:`static_frame.DisplayConfig` instance.
Returns:
Absolute file path to the file written.
'''
reindex = dict(
count='''Positive integer values drop that many outer-most levels; negative integer values drop that many inner-most levels.'''
)
    clip = '''Apply a clip operation to this {class_name}. Note that clip operations can be applied to object types, but cannot be applied to non-numerical objects (e.g., strings, None)'''
index_init = dict(
args = '''
Args:
labels: Iterable of hashable values to be used as the index labels.
name: A hashable object to name the Index.
loc_is_iloc: Optimization when a contiguous integer index is provided as labels. Generally only set by internal clients.
dtype: Optional dtype to be used for labels.'''
)
from_pandas = dict(
own_data='''own_data: If True, the underlying NumPy data array will be made immutable and used without a copy.''',
own_index='''own_index: If True, the underlying NumPy index label array will be made immutable and used without a copy.''',
own_columns='''own_columns: If True, the underlying NumPy column label array will be made immutable and used without a copy.''',
)
container_init = dict(
index='''index: Optional index initializer. If provided in addition to data values, lengths must be compatible.''',
columns='''columns: Optional column initializer. If provided in addition to data values, lengths must be compatible.
''',
own_index='''own_index: Flag passed index as ownable by this {class_name}. Primarily used by internal clients.''',
own_data='''own_data: Flag the data values as ownable by this {class_name}. Primarily used by internal clients.''',
own_columns='''own_columns: Flag passed columns as ownable by this {class_name}. Primarily used by internal clients.'''
)
def doc_inject(*, selector=None, **kwargs):
def decorator(f):
nonlocal selector
selector = f.__name__ if selector is None else selector
# get doc string, template with decorator args, then template existing doc string
doc_src = getattr(DOC_TEMPLATE, selector)
if isinstance(doc_src, str):
doc = doc_src.format(**kwargs)
f.__doc__ = f.__doc__.format(doc)
else: # assume it is a dictionary
# try to format each value
doc = {k: v.format(**kwargs) for k, v in doc_src.items()}
f.__doc__ = f.__doc__.format(**doc)
return f
return decorator
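# Illustrative usage sketch (hypothetical method, not part of this module): the
# decorated function's docstring is expected to contain a format placeholder that
# doc_inject fills from DOC_TEMPLATE.
#
#   @doc_inject(selector='clip', class_name='Frame')
#   def clip(self, lower=None, upper=None):
#       '''{}'''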
|
import rospy
from rospy.exceptions import ROSInterruptException
def handle_shutdown_exception(func):
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except ROSInterruptException as e:
if str(e) == 'rospy shutdown':
rospy.logwarn('Service failed to connect due to a shutdown interrupt.')
else:
raise e
return wrapped
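# Illustrative usage sketch (hypothetical service call, not part of this module):
#
#   @handle_shutdown_exception
#   def call_reset(proxy):
#       return proxy()  # a rospy.ServiceProxy created elsewhere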
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring,unused-import,reimported
import json
import pytest # type: ignore
import json_schema_tangle_weave.cli as cli
def test_main_ok_json():
job = ['name_of_schema.json']
    assert cli.main(job) == 0
def test_main_ok_json_md():
job = ['name_of_prose.json.md']
    assert cli.main(job) == 0
def test_main_nok_unforeseen():
job = ['name_of_prose.unforeseen']
    assert cli.main(job) == 1
|
from jeri.core.backends.backend import Backend
import re
import requests
def _get(url, params=None):
response = requests.get(url, params=params)
if response.status_code != 200:
raise RuntimeError('API call failed')
return response.json()
def _get_all(url, offset=0):
parameters = {'offset': offset}
response = _get(url, parameters)
return (response['objects'], response['meta']['total_count'])
class TastypieBackend(Backend):
def __init__(self, base_url):
if base_url is None or base_url == "":
raise RuntimeError('TastypieBackend cannot have an empty base_url')
self.base_url = base_url if base_url.endswith('/') else base_url + '/'
def get(self, model, **kwargs):
if 'uri' in kwargs:
            obj = _get(self._uri_to_url(model, kwargs['uri']))
            return obj
def get_all(self, model, **kwargs):
objects, count = _get_all(self._url(model))
offset = len(objects)
while offset != count:
objects.extend(_get_all(self._url(model), offset)[0])
offset = len(objects)
return objects
@property
def id_key(self):
return 'id'
@property
def uri_key(self):
return 'resource_uri'
def _url(self, model):
return self.base_url + model._meta.endpoint + '/'
def _uri_to_url(self, model, uri):
expr = r'.*/{}/(?P<id>[0-9]*)/?'.format(model._meta.endpoint)
match = re.match(expr, uri)
return self._url(model) + match['id'] + '/'
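    # Illustrative mapping sketch (hypothetical endpoint 'book' and base_url):
    #   base_url = 'http://example.com/api/v1/'
    #   uri      = '/api/v1/book/42/'
    #   result   = 'http://example.com/api/v1/book/42/'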
|
#!/usr/bin/python
###########################################################################
#
# Copyright 2019 Dell, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import sys
import time
import json
import ast
from rpipe_utils import pipestr
import cli_client as cc
from scripts.render_cli import show_cli_output
import urllib3
urllib3.disable_warnings()
nat_type_map = {"snat" : "SNAT", "dnat": "DNAT"}
nat_protocol_map = {"icmp": "1", "tcp": "6", "udp": "17"}
clear_nat_map = {"translations": "ENTRIES", "statistics": "STATISTICS"}
config = True
def invoke_api(func, args=[]):
global config
api = cc.ApiClient()
# Enable/Disable NAT Feature
if func == 'patch_openconfig_nat_nat_instances_instance_config_enable':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/config/enable', id=args[0])
if args[1] == "True":
body = { "openconfig-nat:enable": True }
else:
body = { "openconfig-nat:enable": False }
return api.patch(path,body)
# Config NAT Timeout
elif func == 'patch_openconfig_nat_nat_instances_instance_config_timeout':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/config/timeout', id=args[0])
body = { "openconfig-nat:timeout": int(args[1]) }
return api.patch(path, body)
# Config NAT TCP Timeout
elif func == 'patch_openconfig_nat_nat_instances_instance_config_tcp_timeout':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/config/tcp-timeout', id=args[0])
body = { "openconfig-nat:tcp-timeout": int(args[1]) }
return api.patch(path, body)
# Config NAT UDP Timeout
elif func == 'patch_openconfig_nat_nat_instances_instance_config_udp_timeout':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/config/udp-timeout', id=args[0])
body = { "openconfig-nat:udp-timeout": int(args[1]) }
return api.patch(path, body)
# Config NAT Static basic translation entry
elif func == 'patch_openconfig_nat_nat_instances_instance_nat_mapping_table_nat_mapping_entry_config':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-mapping-table/nat-mapping-entry={externaladdress}/config', id=args[0], externaladdress=args[1])
body = { "openconfig-nat:config" : { "internal-address": args[2]} }
l = len(args)
if l >= 4:
body["openconfig-nat:config"].update( {"type": nat_type_map[args[3]] } )
if l == 5:
body["openconfig-nat:config"].update( {"twice-nat-id": int(args[4])} )
return api.patch(path, body)
# Remove NAT Static basic translation entry
elif func == 'delete_openconfig_nat_nat_instances_instance_nat_mapping_table_nat_mapping_entry':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-mapping-table/nat-mapping-entry={externaladdress}', id=args[0], externaladdress=args[1])
return api.delete(path)
# Remove all NAT Static basic translation entries
elif func == 'delete_openconfig_nat_nat_instances_instance_nat_mapping_table':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-mapping-table', id=args[0])
return api.delete(path)
# Config NAPT Static translation entry
elif func == 'patch_openconfig_nat_nat_instances_instance_napt_mapping_table_napt_mapping_entry_config':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/napt-mapping-table/napt-mapping-entry={externaladdress},{protocol},{externalport}/config', id=args[0],externaladdress=args[1],protocol=nat_protocol_map[args[2]],externalport=args[3])
body = { "openconfig-nat:config" : {"internal-address": args[4], "internal-port": int(args[5])} }
l = len(args)
if l >= 7:
body["openconfig-nat:config"].update( {"type": nat_type_map[args[6]] } )
if l == 8:
body["openconfig-nat:config"].update( {"twice-nat-id": int(args[7])} )
return api.patch(path, body)
# Remove NAPT Static translation entry
elif func == 'delete_openconfig_nat_nat_instances_instance_napt_mapping_table_napt_mapping_entry':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/napt-mapping-table/napt-mapping-entry={externaladdress},{protocol},{externalport}', id=args[0],externaladdress=args[1],protocol=nat_protocol_map[args[2]],externalport=args[3])
return api.delete(path)
# Config NAT Pool
elif func == 'patch_openconfig_nat_nat_instances_instance_nat_pool_nat_pool_entry_config':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-pool/nat-pool-entry={poolname}/config', id=args[0],poolname=args[1])
ip = args[2].split("-")
if len(ip) == 1:
body = { "openconfig-nat:config": {"IP-ADDRESS": args[2]} }
else:
body = { "openconfig-nat:config": {"IP-ADDRESS-RANGE": args[2]} }
if len(args) > 3:
body["openconfig-nat:config"].update( {"nat-port": args[3] } )
return api.patch(path, body)
# Remove all NAPT Static basic translation entries
elif func == 'delete_openconfig_nat_nat_instances_instance_napt_mapping_table':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/napt-mapping-table', id=args[0])
return api.delete(path)
# Remove NAT Pool
elif func == 'delete_openconfig_nat_nat_instances_instance_nat_pool_nat_pool_entry':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-pool/nat-pool-entry={poolname}',id=args[0],poolname=args[1])
return api.delete(path)
# Remove all NAT Pools
elif func == 'delete_openconfig_nat_nat_instances_instance_nat_pool':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-pool', id=args[0])
return api.delete(path)
# Config NAT Binding
elif func == 'patch_openconfig_nat_nat_instances_instance_nat_acl_pool_binding_nat_acl_pool_binding_entry_config':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-acl-pool-binding/nat-acl-pool-binding-entry={name}/config', id=args[0],name=args[1])
body = { "openconfig-nat:config": {"nat-pool": args[2] }}
# ACL Name
acl_name = args[3].split("=")[1]
if acl_name != "" :
body["openconfig-nat:config"].update( {"access-list": acl_name } )
# NAT Type
nat_type = args[4].split("=")[1]
if nat_type != "":
body["openconfig-nat:config"].update( {"type": nat_type_map[nat_type] } )
# Twice NAT ID
twice_nat_id = args[5].split("=")[1]
if twice_nat_id != "":
body["openconfig-nat:config"].update( {"twice-nat-id": int(twice_nat_id)} )
return api.patch(path, body)
# Remove NAT Binding
elif func == 'delete_openconfig_nat_nat_instances_instance_nat_acl_pool_binding_nat_acl_pool_binding_entry_config':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-acl-pool-binding/nat-acl-pool-binding-entry={name}/config', id=args[0],name=args[1])
return api.delete(path)
# Remove all NAT Bindings
elif func == 'delete_openconfig_nat_nat_instances_instance_nat_acl_pool_binding':
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-acl-pool-binding', id=args[0])
return api.delete(path)
# Config NAT Zone
elif func == 'patch_openconfig_interfaces_ext_interfaces_interface_nat_zone_config_nat_zone':
path = cc.Path('/restconf/data/openconfig-interfaces:interfaces/interface={name}/openconfig-interfaces-ext:nat-zone/config/nat-zone', name=args[1])
body = { "openconfig-interfaces-ext:nat-zone": int(args[2]) }
return api.patch(path, body)
# Remove NAT Zone
elif func == 'delete_openconfig_interfaces_ext_interfaces_interface_nat_zone_config_nat_zone':
path = cc.Path('/restconf/data/openconfig-interfaces:interfaces/interface={name}/openconfig-interfaces-ext:nat-zone/config/nat-zone', name=args[1])
return api.delete(path)
# Clear NAT Translations/Statistics
elif func == 'rpc_nat_clear':
path = cc.Path('/restconf/operations/sonic-nat:clear_nat')
body = {"sonic-nat:input":{"nat-param": clear_nat_map[args[1]]}}
return api.post(path,body)
# Get NAT Global Config
elif func == 'get_openconfig_nat_nat_instances_instance_config':
config = False
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/config', id=args[0])
return api.get(path)
# Get NAT Bindings
elif func == 'get_openconfig_nat_nat_instances_instance_nat_acl_pool_binding':
config = False
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-acl-pool-binding', id=args[0])
return api.get(path)
# Get NAT Pools
elif func == 'get_openconfig_nat_nat_instances_instance_nat_pool':
config = False
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-pool', id=args[0])
return api.get(path)
## Get NAT Translations
elif func == 'get_openconfig_nat_nat_instances_instance_nat_mapping_table':
config = False
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-mapping-table', id=args[0])
return api.get(path)
elif func == 'get_openconfig_nat_nat_instances_instance_napt_mapping_table':
config = False
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/napt-mapping-table', id=args[0])
return api.get(path)
elif func == 'get_openconfig_nat_nat_instances_instance_nat_twice_mapping_table':
config = False
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/nat-twice-mapping-table', id=args[0])
return api.get(path)
elif func == 'get_openconfig_nat_nat_instances_instance_napt_twice_mapping_table':
config = False
path = cc.Path('/restconf/data/openconfig-nat:nat/instances/instance={id}/napt-twice-mapping-table', id=args[0])
return api.get(path)
# Get all L3 interfaces (needed for NAT Zone)
elif func == 'get_sonic_interface_sonic_interface_interface':
config = False
path = cc.Path('/restconf/data/sonic-interface:sonic-interface/INTERFACE')
return api.get(path)
elif func == 'get_sonic_vlan_interface_sonic_vlan_interface_vlan_interface':
config = False
path = cc.Path('/restconf/data/sonic-vlan-interface:sonic-vlan-interface/VLAN_INTERFACE')
return api.get(path)
elif func == 'get_sonic_portchannel_interface_sonic_portchannel_interface_portchannel_interface':
config = False
path = cc.Path('/restconf/data/sonic-portchannel-interface:sonic-portchannel-interface/PORTCHANNEL_INTERFACE')
return api.get(path)
elif func == 'get_sonic_loopback_interface_sonic_loopback_interface_loopback_interface':
config = False
path = cc.Path('/restconf/data/sonic-loopback-interface:sonic-loopback-interface/LOOPBACK_INTERFACE')
return api.get(path)
else:
return api.cli_not_implemented(func)
def get_response_dict(response):
api_response = {}
if response.ok():
if response.content is not None:
# Get Command Output
api_response = response.content
else:
        print(response.error_message())
return api_response
def get_nat_napt_tables(args):
response = {}
resp = invoke_api('get_openconfig_nat_nat_instances_instance_nat_mapping_table', args)
resp = get_response_dict(resp)
response.update(resp)
resp = invoke_api('get_openconfig_nat_nat_instances_instance_napt_mapping_table', args)
resp = get_response_dict(resp)
response.update(resp)
return response
def get_twice_nat_napt_tables(args):
response = {}
resp = invoke_api('get_openconfig_nat_nat_instances_instance_nat_twice_mapping_table', args)
resp = get_response_dict(resp)
response.update(resp)
resp = invoke_api('get_openconfig_nat_nat_instances_instance_napt_twice_mapping_table', args)
resp = get_response_dict(resp)
response.update(resp)
return response
def get_nat_translations(func, args):
response = {}
resp = get_nat_napt_tables(args)
response.update(resp)
resp = get_twice_nat_napt_tables(args)
response.update(resp)
return response
def get_nat_zones(func,args):
output = {}
# Get INTERFACE Table
response = invoke_api("get_sonic_interface_sonic_interface_interface")
api_response = get_response_dict(response)
if 'sonic-interface:INTERFACE' in api_response and \
'INTERFACE_LIST' in api_response['sonic-interface:INTERFACE']:
for val in api_response['sonic-interface:INTERFACE']['INTERFACE_LIST']:
if 'nat_zone' in val and 'portname' in val:
output.update( {val['portname']: val['nat_zone']} )
# Get VLAN_INTERFACE table
response1 = invoke_api("get_sonic_vlan_interface_sonic_vlan_interface_vlan_interface")
api_response1 = get_response_dict(response1)
if 'sonic-vlan-interface:VLAN_INTERFACE' in api_response1 and \
'VLAN_INTERFACE_LIST' in api_response1['sonic-vlan-interface:VLAN_INTERFACE']:
for val in api_response1['sonic-vlan-interface:VLAN_INTERFACE']['VLAN_INTERFACE_LIST']:
if 'nat_zone' in val and 'vlanName' in val:
output.update( {val['vlanName']: val['nat_zone']} )
# Get PORTCHANNEL_INTERFACE table
response2 = invoke_api("get_sonic_portchannel_interface_sonic_portchannel_interface_portchannel_interface")
api_response2 = get_response_dict(response2)
if 'sonic-portchannel-interface:PORTCHANNEL_INTERFACE' in api_response2 and \
'PORTCHANNEL_INTERFACE_LIST' in api_response2['sonic-portchannel-interface:PORTCHANNEL_INTERFACE']:
for val in api_response2['sonic-portchannel-interface:PORTCHANNEL_INTERFACE']['PORTCHANNEL_INTERFACE_LIST']:
if 'nat_zone' in val and 'pch_name' in val:
output.update( {val['pch_name']: val['nat_zone']} )
# Get LOOPBACK_INTERFACE table
response3 = invoke_api("get_sonic_loopback_interface_sonic_loopback_interface_loopback_interface")
api_response3 = get_response_dict(response3)
if 'sonic-loopback-interface:LOOPBACK_INTERFACE' in api_response3 and \
'LOOPBACK_INTERFACE_LIST' in api_response3['sonic-loopback-interface:LOOPBACK_INTERFACE']:
for val in api_response3['sonic-loopback-interface:LOOPBACK_INTERFACE']['LOOPBACK_INTERFACE_LIST']:
if 'nat_zone' in val and 'loIfName' in val:
output.update( {val['loIfName']: val['nat_zone']} )
return output
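# Illustrative shape of the mapping returned above (hypothetical interface names
# and zone values): {'Ethernet0': 1, 'Vlan100': 2, 'PortChannel1': 1, 'Loopback0': 0}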
def get_count(count, table_name, l):
table_count_map = {
'openconfig-nat:nat-mapping-table': ['static_nat', 'dynamic_nat'],
'openconfig-nat:napt-mapping-table': ['static_napt', 'dynamic_napt'],
'openconfig-nat:nat-twice-mapping-table': ['static_twice_nat', 'dynamic_twice_nat'],
'openconfig-nat:napt-twice-mapping-table': ['static_twice_napt', 'dynamic_twice_napt'],
}
if 'state' in l:
count['total_entries'] += 1
if 'state' in l and 'entry-type' in l['state']:
if l['state']['entry-type'] == 'openconfig-nat:STATIC':
count[table_count_map[table_name][0]]+=1;
else:
count[table_count_map[table_name][1]]+=1;
if 'state' in l and 'type' in l['state']:
if l['state']['type'] == 'openconfig-nat:SNAT':
count['snat_snapt']+=1;
else:
count['dnat_dnapt']+=1
return
def get_nat_translations_count(func, args):
response = get_nat_translations(func, args)
count = { 'static_nat': 0,
'static_napt': 0,
'dynamic_nat': 0,
'dynamic_napt': 0,
'static_twice_nat': 0,
'static_twice_napt': 0,
'dynamic_twice_nat': 0,
'dynamic_twice_napt': 0,
'snat_snapt': 0,
'dnat_dnapt': 0,
'total_entries': 0
}
for key in response:
for entry in response[key]:
for l in response[key][entry]:
get_count(count, key, l)
return count
def get_stats(key, l):
stats = { "protocol": "all",
"source": "---",
"destination": "---",
"pkts": "0",
"bytes": "0"}
if key == "openconfig-nat:napt-mapping-table" or key == "openconfig-nat:napt-twice-mapping-table" :
for k,v in nat_protocol_map.items():
if v == str(l["protocol"]):
stats["protocol"] = k
if key == "openconfig-nat:nat-mapping-table":
if 'type' in l['state'] and l['state']['type'] == 'openconfig-nat:SNAT' :
stats["source"] = l['external-address']
elif 'type' in l['state'] and l['state']['type'] == 'openconfig-nat:DNAT' :
stats["destination"] = l['external-address']
elif key == "openconfig-nat:napt-mapping-table":
addr = l['external-address']+":"+str(l['external-port'])
if 'type' in l['state'] and l['state']['type'] == 'openconfig-nat:SNAT' :
stats["source"] = addr
elif 'type' in l['state'] and l['state']['type'] == 'openconfig-nat:DNAT' :
stats["destination"] = addr
elif key == "openconfig-nat:nat-twice-mapping-table":
stats["source"] = l["src-ip"]
stats["destination"] = l["dst-ip"]
else:
stats["source"] = l["src-ip"]+":"+str(l["src-port"])
stats["destination"] = l["dst-ip"]+":"+str(l["dst-port"])
if 'state' in l and 'counters' in l['state']:
if 'nat-translations-bytes' in l['state']['counters']:
stats["bytes"] = l['state']['counters']['nat-translations-bytes']
if 'nat-translations-pkts' in l['state']['counters']:
stats["pkts"] = l['state']['counters']['nat-translations-pkts']
return stats
def get_nat_statistics(func, args):
resp = []
response = get_nat_translations(func, args)
for key in response:
for entry in response[key]:
for l in response[key][entry]:
if 'state' in l and 'counters' in l['state']:
stats = get_stats(key, l)
if stats is not None:
resp.append(stats)
return resp
def get_configs(table_name, l):
configs = {
'nat_type': "dnat",
'ip_protocol': "all",
'global_ip': "",
'global_l4_port': "----",
'local_ip': "",
'local_l4_port': "----",
'twice_nat_id': "----"
}
if 'config' not in l:
return None
# IP Protocol
if 'openconfig-nat:napt-mapping-table' == table_name:
if 'config' in l and 'protocol' in l['config']:
proto = l['config']['protocol']
for key,val in nat_protocol_map.items():
if val == str(proto):
configs['ip_protocol'] = key
# Nat Type
if 'config' in l and 'type' in l['config']:
if l['config']['type'] == "openconfig-nat:SNAT":
configs['nat_type'] = "snat"
# Global IP
if 'config' in l and 'external-address' in l['config']:
configs['global_ip'] = l['config']['external-address']
# Global L4 Port
if 'config' in l and 'external-port' in l['config']:
configs['global_l4_port'] = l['config']['external-port']
# Local IP
if 'config' in l and 'internal-address' in l['config']:
configs['local_ip'] = l['config']['internal-address']
# Local L4 Port
if 'config' in l and 'internal-port' in l['config']:
configs['local_l4_port'] = l['config']['internal-port']
# Twice NAT ID
if 'config' in l and 'twice-nat-id' in l['config']:
configs['twice_nat_id'] = l['config']['twice-nat-id']
return configs
def get_nat_static_configs(func,args):
response = get_nat_napt_tables(args)
resp = []
for key in response:
for entry in response[key]:
for l in response[key][entry]:
configs = get_configs(key, l)
if configs is not None:
resp.append(configs)
return resp
def get_nat_configs(func, args):
api_response = {}
# Get Global data
response = invoke_api('get_openconfig_nat_nat_instances_instance_config', args)
api_response['globals'] = get_response_dict(response)
# Get Static configs
api_response['static'] = get_nat_static_configs(func,args)
# Get Pools
response = invoke_api('get_openconfig_nat_nat_instances_instance_nat_pool', args)
api_response['pools'] = get_response_dict(response)
# Get Bindings
response = invoke_api('get_openconfig_nat_nat_instances_instance_nat_acl_pool_binding', args)
api_response['bindings'] = get_response_dict(response)
# Get Zones
api_response['zones'] = get_nat_zones(func,args)
return api_response
def run(func, args):
global config
try:
args.insert(0,"0") # NAT instance 0
if func == 'get_nat_translations':
api_response = get_nat_translations(func,args)
elif func == 'get_nat_zones':
api_response = get_nat_zones(func,args)
elif func == 'get_nat_statistics':
api_response = get_nat_statistics(func,args)
elif func == 'get_nat_translations_count':
api_response = get_nat_translations_count(func,args)
elif func == 'get_nat_static_configs':
api_response = get_nat_static_configs(func,args)
elif func == 'get_nat_configs':
api_response = get_nat_configs(func,args)
else:
response = invoke_api(func, args)
api_response = get_response_dict(response)
if config == False:
show_cli_output(args[1], api_response)
except Exception as e:
print("Failure: %s\n" %(e))
if __name__ == '__main__':
pipestr().write(sys.argv)
func = sys.argv[1]
run(func, sys.argv[2:])
|
import subprocess
import pyautogui
import time
repeattime = 20
i = 0
subprocess.call([r"BROWSER_PATH"]) # browser launching
time.sleep(5)
while(i <= repeattime):
pyautogui.click(819, 58) # click on url field
pyautogui.hotkey("ctrl", "v")# past the target link from clipboard
pyautogui.press("enter")
time.sleep(6)
pyautogui.click(711, 730) #disagree the privacy policy (doesn't appear every time)
pyautogui.click(618, 461)# click on the pixel adress of the choice
pyautogui.click(697, 511)# click to confirm the vote
time.sleep(2)
pyautogui.hotkey("ctrl", "shift", "u") #reboot the user on Tor, other browser will need other shortcut
i = i+1
time.sleep(2)
# new identity = crtl shift + u
# new path to website = ctrl shift + l https://strawpoll.com/paqzfx7v6
|
#!/usr/bin/python3
# coding: utf-8
import random
import sys
import string
def chooseWord():
word='swordfish'
return word
def displayWord(word, letters):
"""
Fonction permettant d'afficher le mot word
en n'affichant que les lettres contenues dans
le tableau letters
Renvoit True si le mot est totalement découvert
False sinon.
"""
    covered = True
    for i in range(len(word)):
        if word[i] in letters:
            print(word[i], end='')
        else:
            print('*', end='')
            covered = False
    print()
    return covered
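# Illustrative behaviour sketch (not part of the original script):
#   displayWord('swordfish', ['s', 'w']) prints "sw*****s*" and returns False.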
def extractLetter(line):
    line = line.lower()
    ret = None
    for i in range(len(line)):
        if line[i] in string.ascii_lowercase:
            ret = line[i]
            break
    return ret
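# Illustrative behaviour sketch (not part of the original script):
#   extractLetter('A!') -> 'a'    extractLetter('123') -> None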
# Variable initialisation:
compteur = 6  # Number of wrong guesses remaining
letters = []
secretWord = chooseWord()
print('#Debug:', secretWord)
found = displayWord(secretWord, letters)
while (not found) and compteur > 0:
    # Show how many wrong guesses are left
    print('Il vous reste', compteur, 'mauvaises réponses.')
    # One round of play
    print('Entrer une lettre:', end=' ', flush=True)
    # Read a letter from keyboard input
    # letter = input()[0]
    line = sys.stdin.readline()
    letter = extractLetter(line)
    if letter is None:
        # No usable letter on this line: prompt again (the original code would
        # crash on a membership test against None).
        continue
    letters.append(letter)
    if letter in secretWord:
        # The letter is in the word: no penalty
        pass
    else:
        # Otherwise, lose one chance
        compteur -= 1
    found = displayWord(secretWord, letters)
if compteur > 0:
print('Bien joué !')
else:
print('Perdu, pendu !')
|
'''
Worldbank schema
=================
Schema for Worldbank sociodemographic data. Excuse the verbose
variable names, but the alternative would have been raw codes like
'NYGDPMKTPSAKD', so I opted for an auto-generated human-readable schema.
'''
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import INTEGER, DECIMAL
from sqlalchemy.types import FLOAT, VARCHAR
from sqlalchemy import Column
Base = declarative_base()
class WorldbankCountry(Base):
__tablename__ = 'worldbank_countries'
# Metadata
id = Column(VARCHAR(3), primary_key=True)
capitalCity = Column(VARCHAR(19))
incomeLevel = Column(VARCHAR(19))
iso2Code = Column(VARCHAR(2), index=True)
latitude = Column(DECIMAL(6, 4), index=True)
longitude = Column(DECIMAL(6, 4), index=True)
year = Column(INTEGER, primary_key=True)
lendingType = Column(VARCHAR(14))
name = Column(VARCHAR(54))
region = Column(VARCHAR(26), index=True)
adminregion = Column(VARCHAR(26))
# Data (note that long names have been truncated to 64 chars in line with MySQL rules)
age_dependency_ratio_pc_of_working_age_population = Column(FLOAT)
barro_lee_perce_of_popul_age_25_with_tertia_school_comple_tertia = Column(FLOAT)
barro_lee_percentage_of_population_age_25_with_no_education = Column(FLOAT)
gdp_constant_2010_us_millions_seas_adj = Column(FLOAT)
gini_index_world_bank_estimate = Column(FLOAT)
life_expectancy_at_birth_total_years = Column(FLOAT)
mortality_rate_infant_per_1_000_live_births = Column(FLOAT)
population_total = Column(FLOAT)
poverty_headcoun_ratio_at_national_poverty_lines_pc_of_populatio = Column(FLOAT)
rural_population_pc_of_total_population = Column(FLOAT)
urban_population_pc_of_total = Column(FLOAT)
|
from django.contrib.postgres.aggregates.general import ArrayAgg, BoolOr
from django.contrib.postgres.search import SearchRank
from django.db.models import (
Exists,
F,
IntegerField,
Q,
Value,
Case,
When,
BooleanField,
OuterRef,
FilteredRelation,
)
from django.db.models import QuerySet
from dataworkspace.apps.datasets.constants import DataSetType, UserAccessType, TagType
from dataworkspace.apps.datasets.forms import SearchDatasetsFilters
from dataworkspace.apps.datasets.models import (
ReferenceDataset,
DataSet,
DataSetVisualisation,
VisualisationCatalogueItem,
)
from dataworkspace.apps.datasets.utils import (
dataset_type_to_manage_unpublished_permission_codename,
)
def _get_datasets_data_for_user_matching_query(
datasets: QuerySet,
query,
id_field,
user,
):
#####################################################################
# Filter out datasets that the user is not allowed to even know about
visibility_filter = Q(published=True)
if datasets.model is ReferenceDataset:
if user.has_perm(
dataset_type_to_manage_unpublished_permission_codename(DataSetType.REFERENCE)
):
visibility_filter |= Q(published=False)
if datasets.model is DataSet:
if user.has_perm(
dataset_type_to_manage_unpublished_permission_codename(DataSetType.MASTER)
):
visibility_filter |= Q(published=False, type=DataSetType.MASTER)
if user.has_perm(
dataset_type_to_manage_unpublished_permission_codename(DataSetType.DATACUT)
):
visibility_filter |= Q(published=False, type=DataSetType.DATACUT)
if datasets.model is VisualisationCatalogueItem:
if user.has_perm(
dataset_type_to_manage_unpublished_permission_codename(DataSetType.VISUALISATION)
):
visibility_filter |= Q(published=False)
datasets = datasets.filter(visibility_filter)
#######################################################
# Filter out datasets that don't match the search terms
search_filter = Q()
if datasets.model is DataSet and query:
search_filter |= Q(sourcetable__table=query)
if query:
search_filter |= Q(search_vector=query)
datasets = datasets.filter(search_filter)
# Annotate with rank so we can order by this
datasets = datasets.annotate(search_rank=SearchRank(F("search_vector"), query))
#########################################################################
# Annotate datasets for filtering in Python and showing totals in filters
# has_access
if datasets.model is ReferenceDataset:
datasets = datasets.annotate(has_access=Value(True, BooleanField()))
if datasets.model is DataSet or datasets.model is VisualisationCatalogueItem:
if datasets.model is DataSet:
datasets = datasets.annotate(
user_permission=FilteredRelation(
"datasetuserpermission", condition=Q(datasetuserpermission__user=user)
),
)
if datasets.model is VisualisationCatalogueItem:
datasets = datasets.annotate(
user_permission=FilteredRelation(
"visualisationuserpermission",
condition=Q(visualisationuserpermission__user=user),
),
)
datasets = datasets.annotate(
has_access=BoolOr(
Case(
When(
Q(
user_access_type__in=[
UserAccessType.REQUIRES_AUTHENTICATION,
UserAccessType.OPEN,
]
)
| (
Q(
user_access_type=UserAccessType.REQUIRES_AUTHORIZATION,
user_permission__user__isnull=False,
)
)
| Q(authorized_email_domains__contains=[user.email.split("@")[1]]),
then=True,
),
default=False,
output_field=BooleanField(),
)
),
)
# is_bookmarked
if datasets.model is ReferenceDataset:
datasets = datasets.annotate(
user_bookmark=FilteredRelation(
"referencedatasetbookmark", condition=Q(referencedatasetbookmark__user=user)
)
)
if datasets.model is DataSet:
datasets = datasets.annotate(
user_bookmark=FilteredRelation(
"datasetbookmark", condition=Q(datasetbookmark__user=user)
)
)
if datasets.model is VisualisationCatalogueItem:
datasets = datasets.annotate(
user_bookmark=FilteredRelation(
"visualisationbookmark", condition=Q(visualisationbookmark__user=user)
)
)
datasets = datasets.annotate(
is_bookmarked=BoolOr(
Case(
When(user_bookmark__user__isnull=False, then=True),
default=False,
output_field=BooleanField(),
)
),
)
# is_subscribed
if datasets.model is ReferenceDataset or datasets.model is VisualisationCatalogueItem:
datasets = datasets.annotate(is_subscribed=Value(False, BooleanField()))
if datasets.model is DataSet:
datasets = datasets.annotate(
user_subscription=FilteredRelation(
"subscriptions", condition=Q(subscriptions__user=user)
),
)
datasets = datasets.annotate(
is_subscribed=BoolOr(
Case(
When(user_subscription__user__isnull=False, then=True),
default=False,
output_field=BooleanField(),
)
)
)
# tags
datasets = datasets.annotate(
source_tag_ids=ArrayAgg("tags", filter=Q(tags__type=TagType.SOURCE), distinct=True)
)
datasets = datasets.annotate(
topic_tag_ids=ArrayAgg("tags", filter=Q(tags__type=TagType.TOPIC), distinct=True)
)
# data_type
if datasets.model is ReferenceDataset:
datasets = datasets.annotate(data_type=Value(DataSetType.REFERENCE, IntegerField()))
if datasets.model is DataSet:
datasets = datasets.annotate(data_type=F("type"))
if datasets.model is VisualisationCatalogueItem:
datasets = datasets.annotate(data_type=Value(DataSetType.VISUALISATION, IntegerField()))
# is_open_data
if datasets.model is ReferenceDataset:
datasets = datasets.annotate(is_open_data=Value(False, BooleanField()))
if datasets.model is DataSet or datasets.model is VisualisationCatalogueItem:
datasets = datasets.annotate(
is_open_data=Case(
When(user_access_type=UserAccessType.OPEN, then=True),
default=False,
output_field=BooleanField(),
)
)
# has_visuals
if datasets.model is ReferenceDataset or datasets.model is VisualisationCatalogueItem:
datasets = datasets.annotate(has_visuals=Value(False, BooleanField()))
if datasets.model is DataSet:
datasets = datasets.annotate(
has_visuals=Case(
When(
Exists(DataSetVisualisation.objects.filter(dataset_id=OuterRef("id"))),
then=True,
),
default=False,
output_field=BooleanField(),
)
)
return datasets.values(
id_field,
"name",
"slug",
"short_description",
"search_rank",
"source_tag_ids",
"topic_tag_ids",
"data_type",
"published",
"published_at",
"is_open_data",
"has_visuals",
"has_access",
"is_bookmarked",
"is_subscribed",
)
def _sorted_datasets_and_visualisations_matching_query_for_user(query, user, sort_by):
"""
Retrieves all master datasets, datacuts, reference datasets and visualisations (i.e. searchable items)
and returns them, sorted by incoming sort field, default is desc(search_rank).
"""
master_and_datacut_datasets = _get_datasets_data_for_user_matching_query(
DataSet.objects.live(),
query,
id_field="id",
user=user,
)
reference_datasets = _get_datasets_data_for_user_matching_query(
ReferenceDataset.objects.live(),
query,
id_field="uuid",
user=user,
)
visualisations = _get_datasets_data_for_user_matching_query(
VisualisationCatalogueItem.objects.live(), query, id_field="id", user=user
)
# Combine all datasets and visualisations and order them.
sort_fields = sort_by.split(",")
all_datasets = (
master_and_datacut_datasets.union(reference_datasets)
.union(visualisations)
.order_by(*sort_fields)
)
return all_datasets
def search_for_datasets(user, filters: SearchDatasetsFilters, matcher) -> tuple:
all_datasets_visible_to_user_matching_query = (
_sorted_datasets_and_visualisations_matching_query_for_user(
query=filters.query,
user=user,
sort_by=filters.sort_type,
)
)
# Filter out any records that don't match the selected filters. We do this in Python, not the DB, because we need
# to run varied aggregations on the datasets in order to count how many records will be available if users apply
# additional filters and this was difficult to do in the DB. This process will slowly degrade over time but should
# be sufficient while the number of datasets is relatively low (hundreds/thousands).
datasets_matching_query_and_filters = list(
filter(
lambda d: matcher(
d,
bool(filters.unpublished),
bool(filters.open_data),
bool(filters.with_visuals),
filters.use,
filters.data_type,
filters.source_ids,
filters.topic_ids,
filters.user_accessible,
filters.user_inaccessible,
filters.my_datasets,
),
all_datasets_visible_to_user_matching_query,
)
)
return all_datasets_visible_to_user_matching_query, datasets_matching_query_and_filters
|
class Environment(object):
def __init__(self, base_url):
self.base_url = base_url
|
import logging
from ..job import Job
from ..types import QueueName, WorkerNumber, SysExcInfoType
logger = logging.getLogger(__name__)
class LoggingMiddleware:
def process_job(self, job: Job, queue: QueueName, worker_num: WorkerNumber) -> None:
logger.info("Running job {}".format(job))
def process_result(self, job: Job, result: bool, duration: float) -> None:
logger.info("Finished job => {!r} (Time taken: {:.2f}s)".format(
result,
duration,
))
def process_exception(self, job: Job, duration: float, *exc_info: SysExcInfoType) -> None:
logger.exception("Exception when processing job (duration: {:.2f}s)".format(
duration,
))
|
'''
Write a program that displays on the screen all the even numbers in the range from 1 to 50.
'''
# my solution
for par in range(2,51,2):
    print(par)
print('Fim!')
# course solution
'''The course solution turned out similar to mine.'''
|
from django.test import TestCase
from django.utils import timezone
from django.core import mail
from mock import patch
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.models import TestBaseUser
from bluebottle.test.factory_models.tasks import TaskFactory, TaskMemberFactory
from bluebottle.utils.model_dispatcher import get_taskmember_model
from bluebottle.test.factory_models.orders import OrderFactory
from bluebottle.test.factory_models.donations import DonationFactory
from bluebottle.test.factory_models.projects import ProjectPhaseFactory, ProjectFactory
from bluebottle.test.factory_models.fundraisers import FundraiserFactory
from bluebottle.test.utils import InitProjectDataMixin
TASKS_MEMBER_MODEL = get_taskmember_model()
class BlueBottleUserManagerTestCase(TestCase):
"""
Test case for the model manager of the abstract user model.
"""
def test_create_user(self):
"""
Tests the manager ``create_user`` method.
"""
user = TestBaseUser.objects.create_user(email='[email protected]')
self.assertEqual(user.username, 'john_doe')
self.assertTrue(user.is_active)
self.assertFalse(user.is_superuser)
self.assertFalse(user.is_staff)
def test_create_user_no_email_provided(self):
"""
Tests exception raising when trying to create a new user without
providing an email.
"""
self.assertRaisesMessage(
ValueError,
'The given email address must be set',
TestBaseUser.objects.create_user,
email='')
class BlueBottleUserTestCase(InitProjectDataMixin, TestCase):
"""
Test case for the implementation of the abstract user model.
"""
def setUp(self):
self.user = BlueBottleUserFactory.create()
@patch('django.utils.timezone.now')
def test_update_deleted_timestamp(self, mock):
"""
Tests the ``update_deleted_timestamp`` method, checking that the
timestamp is properly set up when the user is not active any more.
"""
timestamp = timezone.now()
mock.return_value = timestamp
self.user.is_active = False
self.user.update_deleted_timestamp()
self.assertEqual(self.user.deleted, timestamp)
def test_update_deleted_timestamp_active_user(self):
"""
Tests that the ``update_deleted_timestamp`` method resets the timestamp
to ``None`` if the user becomes active again.
"""
self.user.is_active = False
self.user.update_deleted_timestamp()
# Now the user is inactive, so ``deleted`` attribute is set. Let's
# reactivate it again and check that is reset.
self.user.is_active = True
self.user.update_deleted_timestamp()
self.assertIsNone(self.user.deleted)
def test_generate_username_from_email(self):
"""
Tests the ``generate_username`` method when no username was provided.
It should create the username from the name of the user email.
"""
user = BlueBottleUserFactory.create(username='', first_name='', last_name='')
user.generate_username()
email_name, domain_part = user.email.strip().rsplit('@', 1)
self.assertEqual(user.username, email_name)
def test_generate_username_from_names(self):
"""
Tests the ``generate_username`` method when no username was provided
but ``first_name`` and ``last_name`` are defined.
"""
user = BlueBottleUserFactory.create(username='', first_name=u'John', last_name=u'Doe')
user.generate_username()
self.assertEqual(user.username, 'johndoe')
def test_get_full_name(self):
"""
Tests the ``get_full_name`` method.
"""
self.user.first_name = 'John'
self.user.last_name = 'Doe'
self.user.save()
self.assertEqual(self.user.get_full_name(), 'John Doe')
def test_get_short_name(self):
"""
Tests the ``get_short_name`` method.
"""
self.user.first_name = 'John'
self.user.last_name = 'Doe'
self.user.save()
self.assertEqual(self.user.get_short_name(), 'John')
def test_welcome_mail(self):
"""
        Test that a welcome mail is sent when a user is created and the setting is enabled.
        In the settings, SEND_WELCOME_MAIL is set to False.
"""
from django.conf import settings
settings.SEND_WELCOME_MAIL = True
mail.outbox = []
self.assertEqual(len(mail.outbox), 0)
new_user = TestBaseUser.objects.create_user(email='[email protected]')
self.assertEqual(len(mail.outbox), 1)
self.assertTrue("Welcome" in mail.outbox[0].subject) #We need a better way to verify the right mail is loaded
self.assertEqual(mail.outbox[0].recipients()[0], new_user.email)
settings.SEND_WELCOME_MAIL = False
def test_no_welcome_mail(self):
"""
        Test that no welcome mail is sent when a user is created while the setting is disabled (the default).
"""
mail.outbox = []
self.assertEqual(len(mail.outbox), 0) #The setup function also creates a user and generates a mail
new_user = TestBaseUser.objects.create_user(email='[email protected]')
self.assertEqual(len(mail.outbox), 0)
def test_calculate_task_count(self):
"""
        Test that the task_count property on a user is calculated correctly. We count a) tasks where the user is the
        task author and b) TaskMembers where the user has the applied, accepted or realized status.
"""
self.init_projects()
self.assertEqual(self.user.task_count, 0)
task = TaskFactory.create(author=self.user)
self.assertEqual(self.user.task_count, 1)
taskmember = TaskMemberFactory.create(
member=self.user,
status=TASKS_MEMBER_MODEL.TaskMemberStatuses.applied,
task=task
)
self.assertEqual(self.user.task_count, 2)
uncounted_taskmember = TaskMemberFactory.create(
member=self.user,
status=TASKS_MEMBER_MODEL.TaskMemberStatuses.stopped,
task=task
)
self.assertEqual(self.user.task_count, 2)
def test_calculate_donation_count(self):
""" Test the counter for the number of donations a user has done """
self.init_projects()
self.assertEqual(self.user.donation_count, 0)
order = OrderFactory.create(user=self.user)
donation = DonationFactory.create(amount=1000, order=order)
self.assertEqual(self.user.donation_count, 1)
def test_calculate_project_count(self):
""" Test the counter for the number of projects a user has started """
self.init_projects()
self.assertEqual(self.user.project_count, 0)
project = ProjectFactory.create(owner=self.user)
self.assertEqual(self.user.project_count, 1)
project2 = ProjectFactory.create(owner=self.user)
self.assertEqual(self.user.project_count, 2)
def test_calculate_fundraiser_count(self):
""" Test the counter for the number of fundraisers a user is owner of """
self.init_projects()
self.assertEqual(self.user.fundraiser_count, 0)
fundraiser = FundraiserFactory.create(amount=4000, owner=self.user)
self.assertEqual(self.user.fundraiser_count, 1)
fundraiser2 = FundraiserFactory.create(amount=4000, owner=self.user)
self.assertEqual(self.user.fundraiser_count, 2)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 17 17:37:48 2019
@author: salim
@Practica
"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt  # for plotting
#-------------------------- 1- Connect to the data directory ----------------------------------------------------
# Change working directory
os.chdir('/Users/salim/Desktop/EDEM/Python/Code')
#---------------------------- Read the 2011 file ------------------------------
rentals_2011 = pd.read_csv("washington_bike_rentals_2011.csv", sep=';', decimal=',')
#------------------------------- Quality control --------------------------------
# Check the dimensions (a quality control based on what the file contains)
rentals_2011.shape
# Show the head of the file // file_name.head()
rentals_2011.head()
# Show the end of the file // file_name.tail()
rentals_2011.tail()
# QC ---> OK
x = rentals_2011.loc[:, "workingday"]
x.describe()
# Plot
plt.hist(x)
# Check the variable type: it is nominal, so use percentages.
# Mean, and std for the description
# Percentage
# CREATE PERCENTAGE
# CREATE A TABLE. WHAT DO I WANT TO ANALYSE? THE WEATHERSIT COLUMN; HOW THE COLUMN HAS TO BE NAMED -> COUNT
mytable = pd.crosstab(index=rentals_2011["workingday"], columns="number_of_days_off_on")
# Sum all the numbers in my table for the %
n = mytable.sum()
mytable2 = (mytable/n)*100
mytable2.describe()
# Represent it as a barplot --> bar chart // why here?
plt.bar(mytable2.index, mytable2['number_of_days_off_on'], color='Red')
# TUNE THE PLOT BY ADDING FIELD LABELS
# BARCHART: say what to call it and on which field
objects = ('DayOff', 'WorkingDay')  # uncommented: needed by the xticks call below
# HERE WE TELL IT WHICH LABELS TO USE FOR THE INDEX
plt.bar(mytable2.index, mytable2['number_of_days_off_on'])
plt.xticks(mytable2.index, objects)
plt.show()
plt.bar(mytable2.index, mytable2["number_of_days_off_on"], edgecolor="black")
objects = ('DAY OFF', 'WorkingDay')
plt.xticks(mytable2.index, objects)
plt.ylabel("Percentage")
#plt.xlabel ("Figure 1. Percentage of DAY OFF/ON")
plt.title("Figure 1. Percentage of DAY OFF/ON")
z = rentals_2011['cnt']
# plot
plt.hist(z, edgecolor='black')
# steps of 1000 to arrange the x axis below
plt.xticks(np.arange(0, 10000, step=1000))
# plt.xticks(ticks)  # commented out: 'ticks' was never defined; the line above already sets the ticks
#plt.title('Figure 1.Daily Bicycle rentals in washington DC ' '\n' 'by Capital bikeshar')
plt.ylabel('Frequency')
plt.xlabel('Number of rented bicycles')
# ADD DESCRIPTIVE, saving it in a variable so it can be accessed later
plt.hist(x, bins=10, edgecolor='black')
plt.xticks(np.arange(0,))
rentals_weather_2012 = pd.read_csv('rentals_weather_2012.csv', sep=';', decimal=',')
weather_2011 = pd.read_csv('weather_washington_2011.csv', sep=';', decimal=',')
rentals_weather_2011 = pd.merge(weather_2011, rentals_2011, on='day')
rentals_weather_2011 = rentals_weather_2011.drop(columns=['dteday_y'])
rentals_weather_2011.to_csv('rentals_weather_2011.csv')
rentals_weather_11_12 = rentals_weather_2011.append(rentals_weather_2012, ignore_index=True)
rentals_weather_11_12.to_csv('rentals_weather_11_12.csv')
|
"""
Constants for DCA
"""
# App imports.
from dca.utils import default
DCA_DIR = default('DCA_DIR', 'dca_dir')
DCA_PORT = default('DCA_PORT', '50051')
DCA_SUFFIX = default('DCA_SUFFIX', 'dca')
DCA_CUSTOM = default('DCA_CUSTOM', 'dca_custom')
DCA_AUTO_PACKAGE = default('DCA_AUTO_PACKAGE', 'auto_dca')
# Derived constants
DCA_AUTO_MODULE = f'{DCA_DIR}.{DCA_AUTO_PACKAGE}'
DCA_AUTO_FILE = f'{DCA_DIR}/{DCA_AUTO_PACKAGE}.py'
# Fields to ignore while dictifying
DCA_IGNORE_FIELDS = default(
'DCA_IGNORE_FIELDS', ['created_on', 'updated_on', 'id'])
# Fields to remove while generating model
DCA_REMOVE_FIELDS = default('DCA_REMOVE_FIELDS', [])
# `DCA_AST_MAP` basically says how to extract data
# from a given AST `Assign` object
DCA_AST_MAP = default('DCA_AST_MAP', dict(
Num=lambda kw: kw.value.n,
Str=lambda kw: kw.value.s,
Name=lambda kw: kw.value.id,
NameConstant=lambda kw: kw.value.value,
Attribute=lambda kw: '%s.%s' % (kw.value.value.id, kw.value.attr),
)) |
#!/usr/bin/env python3
import mistune
def parse_markdown(filename):
""" Takes a .md file and returns a parsed version in HTML.
Parses a markdown file using mistune, returns a string which
contains the parsed information in HTML.
Args:
filename: the file to be parse, must be a .md file.
Returns:
Variable which contains the parsed markdown as HTML.
"""
with open(filename, mode="r", encoding="utf8") as f:
file_md = [lines for lines in f]
file_string = " ".join(file_md)
markdown = mistune.markdown(file_string)
return markdown
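# Minimal usage sketch (not part of the original module); "README.md" is a
# hypothetical input path, any UTF-8 markdown file would do.
if __name__ == "__main__":
    html = parse_markdown("README.md")
    print(html)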
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 23:01:19 2020
@author: GÜRAY
"""
##################################################
## ## ## ## ### ## ## ### ##
#### #### ### ## ## ## ## ## ### ## ### ## ### ##
#### #### ### ## ### # # ## ### ## ### ## ### ##
#### #### ### ## ## ## ## ## ## ### ## ### ##
#### #### ## ## ## ### ## ### ## ### ##
##################################################
# Aim: Converting .nc files automatically to .csv
"""
Content:
I have already downloaded a large number of MERRA-2 .nc files. I will first
combine all of them into a single netCDF file, then convert it to a .csv
file, which will be easier to work with in Microsoft Excel later.
"""
# Part 0: Importing relevant packages
import os
import xarray
# Part 1: NetCDF merging part
# Set your working directory here accordingly!!!! Be careful:
# this is the folder where all of your .nc files are present.
os.chdir("####")
"""
'MERRA2_400' is present in all the .nc files I want to integrate, so
change MERRA2_400 to whatever your target files collectively include.
concat_dim is the dimension you want to concatenate along. Since this is
expected to be a time-series integration, and the time is specified with
"time" in these .nc files, it can remain like that.
combine="by_coords" is related to coordinates, but here we have only one
coordinate, so for this purpose it is relatively ineffective.
"""
ds = xarray.open_mfdataset('MERRA2_400*.nc', combine='by_coords',
                           concat_dim="time")
ds.to_netcdf('merrarad.nc')  # This creates the 'merrarad.nc' file in your current wd
# Part 2: NetCDF to csv or xlsx format
import xarray as xr
# In the following ("") specify the path of your combined time-series .nc
nc = xr.open_dataset("####")
# 'rad.csv' is the name of the .csv file that is about to be generated
nc.to_dataframe().to_csv('rad.csv')
# Happy Birthday to your combined time-series .csv file!
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from .models import *
from django import forms
from ckeditor.widgets import CKEditorWidget
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
from cambiaahora.utils import *
class ResultadoInline(admin.TabularInline):
model = Resultado
extra = 1
def formfield_for_dbfield(self, db_field, **kwargs):
# This method will turn all TextFields into giant TextFields
if isinstance(db_field, models.TextField):
return forms.CharField(label=u'Descripción',
widget=forms.Textarea(attrs={'cols': 60, 'rows':4, 'class': 'docx'}))
return super(ResultadoInline, self).formfield_for_dbfield(db_field, **kwargs)
class ProyectoAdmin(admin.ModelAdmin):
list_display = ['codigo', 'organizacion', 'inicio', 'finalizacion', 'contacto']
filter_horizontal = ['municipios']
inlines = [ResultadoInline, ]
    # Overriding the method to filter the objects
def queryset(self, request):
if request.user.is_superuser or request.user.has_perm('fadcanic.view_programa'):
return Proyecto.objects.all()
return Proyecto.objects.filter(organizacion__admin=request.user)
def get_form(self, request, obj=None, ** kwargs):
if request.user.is_superuser:
form = super(ProyectoAdmin, self).get_form(request, ** kwargs)
else:
form = super(ProyectoAdmin, self).get_form(request, ** kwargs)
form.base_fields['organizacion'].queryset = request.user.organizacion_set.all()
#form.base_fields['proyecto'].queryset = request.user.organizacion_set.all()
return form
admin.site.register(Proyecto, ProyectoAdmin)
admin.site.register(Resultado)
admin.site.register(Organizador)
class Precedencia_Participantes_Inline(admin.TabularInline):
model = Precedencia_Participantes
extra = 1
class ActividadAdmin(admin.ModelAdmin):
list_filter = ['resultado__aporta_a', 'organizacion', 'proyecto', 'persona_organiza', 'fecha']
search_fields = ['nombre_actividad', 'organizacion__nombre_corto', 'persona_organiza__nombre','municipio__nombre']
list_display = ['nombre_actividad', 'organizacion', 'fecha', 'tipo']
    formfield_overrides = {
        # Note: the dict key was duplicated, so only the CKEditorWidget override takes effect.
        # models.TextField: {'widget': forms.Textarea(attrs={'cols': 50, 'rows': 4, 'class': 'docx'})},
        models.TextField: {'widget': CKEditorWidget()},
    }
inlines = [Precedencia_Participantes_Inline]
def get_form(self, request, obj=None, ** kwargs):
if request.user.is_superuser:
self.exclude = ()
self.fieldsets = [
(None, {'fields': [('organizacion', 'proyecto'), 'tipo', 'persona_organiza', 'comite', 'nombre_actividad','objetivo_actividad','fecha',
'municipio', 'comunidad']}),
('Tipo, tema y ejes de actividad', {'fields': ['tipo_actividad', 'tema_actividad', 'ejes_transversales']}),
('Participantes por sexo', {'fields': [('hombres', 'mujeres'),]}),
('Participantes por edad', {'fields': [('menor_12', 'mayor_12', 'mayor_18', 'mayor_30', 'no_dato'),]}),
('Participantes por identidad étnica', {'fields': [('creole', 'miskito', 'ulwa', 'no_dato1'),
('rama', 'mestizo', 'mayagna', 'garifuna'),
('extranjero',)]}),
('Participantes por tipo', {'fields': [('estudiante', 'docente', 'periodista', 'no_dato2'),
('lideres', 'representantes', 'comunitarios')]}),
(None, {'fields': ['resultado',]}),
('Evaluacion de hombres', {'fields': [('relevancia', 'efectividad'), ('aprendizaje', 'empoderamiento'), 'participacion']}),
('Evaluacion de mujeres', {'fields': [('relevancia_m', 'efectividad_m'), ('aprendizaje_m', 'empoderamiento_m'), 'participacion_m']}),
('Recursos', {'fields': [('foto1', 'foto2', 'foto3'), 'video', 'comentarios','logros','dificultades','acuerdos']}),
(None, {'fields': ['aprobacion','user']}),
]
else:
self.exclude = ('aprobacion',)
self.fieldsets = [
(None, {'fields': [('organizacion', 'proyecto'), 'tipo', 'persona_organiza', 'comite', 'nombre_actividad','objetivo_actividad','fecha',
'municipio', 'comunidad']}),
('Tipo, tema y ejes de actividad', {'fields': ['tipo_actividad', 'tema_actividad', 'ejes_transversales']}),
('Participantes por sexo', {'fields': [('hombres', 'mujeres'),]}),
('Participantes por edad', {'fields': [('menor_12', 'mayor_12', 'mayor_18', 'mayor_30', 'no_dato'),]}),
('Participantes por identidad étnica', {'fields': [('creole', 'miskito', 'ulwa', 'no_dato1'),
('rama', 'mestizo', 'mayagna', 'garifuna'),
('extranjero',)]}),
('Participantes por tipo', {'fields': [('estudiante', 'docente', 'periodista', 'no_dato2'),
('lideres', 'representantes', 'comunitarios')]}),
(None, {'fields': ['resultado',]}),
('Evaluacion de hombres', {'fields': [('relevancia', 'efectividad'), ('aprendizaje', 'empoderamiento'), 'participacion']}),
('Evaluacion de mujeres', {'fields': [('relevancia_m', 'efectividad_m'), ('aprendizaje_m', 'empoderamiento_m'), 'participacion_m']}),
('Recursos', {'fields': [('foto1', 'foto2', 'foto3'), 'video', 'comentarios','logros','dificultades','acuerdos']}),
(None, {'fields': ['user',]}),
]
#form.base_fields['organizacion'].queryset = request.user.organizacion_set.all()
return super(ActividadAdmin, self).get_form(request, obj=None, **kwargs)
    # Overriding the method to filter the objects
def queryset(self, request):
if request.user.is_superuser or request.user.has_perm('fadcanic.view_programa'):
return Actividad.objects.all()
return Actividad.objects.filter(organizacion__admin=request.user)
def save_model(self, request, obj, form, change):
        # Save the object
        obj.save()
        # Send the notification email
if not obj.user.is_superuser:
try:
subject, from_email, to = 'Nueva Actividad de Cambia ahora', '[email protected]', arreglo_mail
text_content = "Una nueva actividad ha sido enviada, del usuario " + \
str(obj.user) + ', ' + \
' Si decia revisarla dar clic al siguiente enlace' + \
' http://www.cambiaahora.com/admin/contraparte/actividad/' + str(obj.id)
html_content = "Una nueva actividad ha sido enviada, del usuario " + \
str(obj.user) + ', ' + \
' Si decia revisarla dar clic al siguiente enlace' + \
' http://www.cambiaahora.com/admin/contraparte/actividad/' + str(obj.id)
msg = EmailMultiAlternatives(subject, text_content, from_email, arreglo_mail)
msg.attach_alternative(html_content, "text/html")
msg.send()
except:
pass
class Media:
js = ('/static/actividades/js/actividad.js', )
admin.site.register(Actividad, ActividadAdmin)
class OutputAdmin(admin.ModelAdmin):
list_display = ['_hash', 'date', 'time']
#admin.site.register(Output, OutputAdmin)
|
import itertools
import rpy2.rlike.indexing as rli
class OrdDict(dict):
""" Implements the Ordered Dict API defined in PEP 372.
When `odict` becomes part of collections, this class
should inherit from it rather than from `dict`.
This class differs a little from the Ordered Dict
proposed in PEP 372 by the fact that:
not all elements have to be named. None as a key value means
an absence of name for the element.
"""
def __init__(self, c=[]):
if isinstance(c, TaggedList) or isinstance(c, OrdDict):
c = c.iteritems()
elif isinstance(c, dict):
#FIXME: allow instance from OrdDict ?
            raise ValueError('A regular dictionary does not ' +\
                             'preserve the order of its keys.')
super(OrdDict, self).__init__()
self.__l = []
l = self.__l
for k,v in c:
self[k] = v
def __copy__(self):
cp = OrdDict(c = tuple(self.iteritems()))
return cp
def __cmp__(self, o):
raise(Exception("Not yet implemented."))
    def __eq__(self, o):
raise(Exception("Not yet implemented."))
def __getitem__(self, key):
if key is None:
raise ValueError("Unnamed items cannot be retrieved by key.")
i = super(OrdDict, self).__getitem__(key)
return self.__l[i][1]
def __iter__(self):
l = self.__l
for e in l:
k = e[0]
if k is None:
continue
else:
yield k
def __len__(self):
return len(self.__l)
    def __ne__(self, o):
raise(Exception("Not yet implemented."))
def __repr__(self):
s = 'o{'
for k,v in self.iteritems():
s += "'" + str(k) + "': " + str(v) + ", "
s += '}'
return s
def __reversed__(self):
raise(Exception("Not yet implemented."))
def __setitem__(self, key, value):
""" Replace the element if the key is known,
and conserve its rank in the list, or append
it if unknown. """
if key is None:
self.__l.append((key, value))
return
if self.has_key(key):
i = self.index(key)
self.__l[i] = (key, value)
else:
self.__l.append((key, value))
super(OrdDict, self).__setitem__(key, len(self.__l)-1)
def byindex(self, i):
""" Fetch a value by index (rank), rather than by key."""
return self.__l[i]
def index(self, k):
""" Return the index (rank) for the key 'k' """
return super(OrdDict, self).__getitem__(k)
def items(self):
""" Return an ordered list of all key/value pairs """
res = [self.byindex(i) for i in xrange(len(self.__l))]
return tuple(res)
def iteritems(self):
""" OD.iteritems() -> an iterator over the (key, value) items of D """
return iter(self.__l)
def reverse(self):
""" Reverse the order of the elements in-place (no copy)."""
l = self.__l
n = len(self.__l)
for i in xrange(n/2):
tmp = l[i]
l[i] = l[n-i-1]
kv = l[i]
if kv is not None:
super(OrdDict, self).__setitem__(kv[0], i)
l[n-i-1] = tmp
kv = tmp
if kv is not None:
super(OrdDict, self).__setitem__(kv[0], n-i-1)
def sort(self, cmp=None, key=None, reverse=False):
raise(Exception("Not yet implemented."))
class TaggedList(list):
""" A list for which each item has a 'tag'.
:param l: list
:param tag: optional sequence of tags
"""
def __add__(self, tl):
try:
tags = tl.tags
except AttributeError, ae:
raise ValueError('Can only concatenate TaggedLists.')
res = TaggedList(list(self) + list(tl),
tags = self.tags + tl.tags)
return res
def __delitem__(self, y):
super(TaggedList, self).__delitem__(y)
self.__tags.__delitem__(y)
def __delslice__(self, i, j):
super(TaggedList, self).__delslice__(i, j)
self.__tags.__delslice__(i, j)
def __iadd__(self, y):
super(TaggedList, self).__iadd__(y)
if isinstance(y, TaggedList):
self.__tags.__iadd__(y.tags)
else:
self.__tags.__iadd__([None, ] * len(y))
return self
def __imul__(self, y):
restags = self.__tags.__imul__(y)
resitems = super(TaggedList, self).__imul__(y)
return self
@staticmethod
def from_iteritems(tagval):
res = TaggedList([])
for k,v in tagval.iteritems():
res.append(v, tag=k)
return res
def __init__(self, seq, tags = None):
super(TaggedList, self).__init__(seq)
if tags is None:
tags = [None, ] * len(seq)
if len(tags) != len(seq):
raise ValueError("There must be as many tags as seq")
self.__tags = list(tags)
def __setslice__(self, i, j, y):
super(TaggedList, self).__setslice__(i, j, y)
#FIXME: handle TaggedList ?
#self.__tags.__setslice__(i, j, [None, ])
def append(self, obj, tag = None):
""" Append an object to the list
:param obj: object
:param tag: object
"""
super(TaggedList, self).append(obj)
self.__tags.append(tag)
def extend(self, iterable):
""" Extend the list with an iterable object.
:param iterable: iterable object
"""
if isinstance(iterable, TaggedList):
itertags = iterable.itertags()
else:
itertags = [None, ] * len(iterable)
for tag, item in itertools.izip(itertags, iterable):
self.append(item, tag=tag)
def insert(self, index, obj, tag=None):
"""
Insert an object in the list
:param index: integer
:param obj: object
:param tag: object
"""
super(TaggedList, self).insert(index, obj)
self.__tags.insert(index, tag)
def items(self):
"""
Return a tuple of all pairs (tag, item).
:rtype: tuple of 2-element tuples (tag, item)
"""
res = [(tag, item) for tag, item in itertools.izip(self.__tags, self)]
return tuple(res)
def iterontag(self, tag):
"""
iterate on items marked with one given tag.
:param tag: object
"""
i = 0
for onetag in self.__tags:
if tag == onetag:
yield self[i]
i += 1
def iteritems(self):
""" OD.iteritems() -> an iterator over the (key, value) items of D """
for tag, item in itertools.izip(self.__tags, self):
yield (tag, item)
def itertags(self):
"""
iterate on tags.
:rtype: iterator
"""
for tag in self.__tags:
yield tag
def pop(self, index=None):
"""
Pop the item at a given index out of the list
:param index: integer
"""
if index is None:
index = len(self) - 1
res = super(TaggedList, self).pop(index)
self.__tags.pop(index)
return res
def remove(self, value):
"""
Remove a given value from the list.
:param value: object
"""
found = False
for i in xrange(len(self)):
if self[i] == value:
found = True
break
if found:
self.pop(i)
def reverse(self):
""" Reverse the order of the elements in the list. """
super(TaggedList, self).reverse()
self.__tags.reverse()
def sort(self, reverse = False):
"""
Sort in place
"""
o = rli.order(self, reverse = reverse)
super(TaggedList, self).sort(reverse = reverse)
self.__tags = [self.__tags[i] for i in o]
def __get_tags(self):
return tuple(self.__tags)
def __set_tags(self, tags):
if len(tags) == len(self.__tags):
self.__tags = tuple(tags)
else:
raise ValueError("The new list of tags should have the same length as the old one")
tags = property(__get_tags, __set_tags)
def settag(self, i, t):
"""
Set tag 't' for item 'i'.
:param i: integer (index)
:param t: object (tag)
"""
self.__tags[i] = t
# class DataFrame(ArgsDict):
# def __init__(self, s):
# super(ArgsDict, self).__init__(s)
# if len(self) > 0:
# nrows = len(self[0])
# for i, v in enumerate(self):
# if len(v) != nrows:
# raise ValueError("Expected length %i for element %i"
# %(nrows, i))
|
import sys
import matplotlib.pyplot as plt
import torch
sys.path.append("..")
from pfhedge.instruments import BrownianStock
from pfhedge.instruments import EuropeanOption
from pfhedge.nn import BlackScholes
if __name__ == "__main__":
options_list = []
strikes_list = []
for call in (True, False):
for strike in torch.arange(70, 180, 10):
option = EuropeanOption(BrownianStock(), call=call, strike=strike)
options_list.append(option)
strikes_list.append(strike)
spot = torch.linspace(50, 200, 100)
t = options_list[0].maturity
v = options_list[0].ul().sigma
plt.figure()
total_vega = torch.zeros_like(spot)
for option, strike in zip(options_list, strikes_list):
lm = (spot / strike).log()
vega = BlackScholes(option).vega(lm, t, v) / (strike ** 2)
total_vega += vega
if option.call:
# 2 is for call and put
plt.plot(spot.numpy(), 2 * vega.numpy())
plt.plot(spot.numpy(), total_vega.numpy(), color="k", lw=2)
plt.savefig("./output/options-vega.png")
|
from typing import List
from math import gcd
from fractions import Fraction
"""
example matrix:
[
[0, 1, 0, 0, 0, 1],
[4, 0, 0, 3, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]
"""
def transform_matrix(m: List[List]):
"""
example matrix becomes
[
[0, 1/2, 0, 0, 0, 1/2],
[4/9, 0, 0, 1/3, 2/9, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]
]
"""
l = len(m)
for i in range(l):
row_sum = sum(m[i])
if row_sum == 0:
m[i][i] = 1
else:
for j in range(l):
m[i][j] = Fraction(m[i][j], row_sum)
def get_submatrix(matrix: List[List], rows: List, cols: List) -> List[List]:
new_matrix = []
for row in rows:
current_row = []
for col in cols:
current_row.append(matrix[row][col])
new_matrix.append(current_row)
return new_matrix
def get_q_matrix(matrix: List[List], transient_states: List) -> List[List]:
"""
For example matrix
q_matrix = [
[0, 1/2],
[4/9, 0]
]
"""
return get_submatrix(matrix, transient_states, transient_states)
def get_r_matrix(matrix: List[List], transient_states: List, absorbing_states: List) -> List[List]:
"""
For example matrix
r_matrix = [
[0, 0, 0, 1/2],
[0, 1/3, 2/9, 0]
]
"""
return get_submatrix(matrix, transient_states, absorbing_states)
def make_2d_list(n: int, m: int) -> List[List]:
a = []
for row in range(n):
a += [[0]*m]
return a
def make_identity(n: int) -> List[List]:
"""
for n=3, it returns
[
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
]
"""
matrix = make_2d_list(n, n)
for i in range(n):
matrix[i][i] = 1
return matrix
def subtract_matrices(a: List[List], b: List[List]) -> List[List]:
new_matrix = []
n, m = len(a), len(b)
for i in range(n):
row = []
for j in range(m):
row.append(a[i][j] - b[i][j])
new_matrix.append(row)
return new_matrix
def multiply_matrices(a: List[List], b: List[List]) -> List[List]:
"""
Multiply two matrices a and b
matrix a of size A X B and matrix b of size B X C
would yield a matrix c of size A X C
"""
ar, ac, bc = len(a), len(a[0]), len(b[0])
c = make_2d_list(ar, bc)
for i in range(ar):
for j in range(bc):
prod = Fraction(0, 1)
for k in range(ac):
prod += a[i][k] * b[k][j]
c[i][j] = prod
return c
def multiply_row_of_square_matrix(matrix: List[List], row: int, k: int) -> List[List]:
n = len(matrix)
identity = make_identity(n)
identity[row][row] = k
return multiply_matrices(identity, matrix)
def add_multiple_of_row_of_square_matrix(matrix: List[List], source_row: int, k: int, target_row: int):
"""
add k * source_row to target_row of matrix m
"""
n = len(matrix)
row_operator = make_identity(n)
row_operator[target_row][source_row] = k
return multiply_matrices(row_operator, matrix)
def invert_matrix(matrix: List[List]) -> List[List]:
n = len(matrix)
inverse = make_identity(n)
for col in range(n):
diagonal_row = col
k = Fraction(1, matrix[diagonal_row][col])
matrix = multiply_row_of_square_matrix(matrix, diagonal_row, k)
inverse = multiply_row_of_square_matrix(inverse, diagonal_row, k)
source_row = diagonal_row
for target_row in range(n):
if source_row != target_row:
k = -matrix[target_row][col]
matrix = add_multiple_of_row_of_square_matrix(matrix, source_row, k, target_row)
inverse = add_multiple_of_row_of_square_matrix(inverse, source_row, k, target_row)
return inverse
def lcm(a: int, b: int) -> int:
result = a * b // gcd(a, b)
return result
def lcm_for_arrays(args: List) -> int:
array_length = len(args)
if array_length <= 2:
return lcm(*args)
initial = lcm(args[0], args[1])
i = 2
while i < array_length:
initial = lcm(initial, args[i])
i += 1
return initial
def solution(m):
"""
For example matrix
transient_states = [0, 1]
absorbing_states = [2, 3, 4, 5]
"""
transient_states = []
absorbing_states = []
for i in range(len(m)):
row = m[i]
if sum(row) == 0:
absorbing_states.append(i)
else:
transient_states.append(i)
transform_matrix(m)
q = get_q_matrix(m, transient_states)
r = get_r_matrix(m, transient_states, absorbing_states)
identity = make_identity(len(q))
diff = subtract_matrices(identity, q)
inverse = invert_matrix(diff)
result = multiply_matrices(inverse, r)
print('initial result', result)
denominator = lcm_for_arrays([item.denominator for item in result[0]])
result = [item.numerator * denominator // item.denominator for item in result[0]]
result.append(denominator)
return result
if __name__ == "__main__":
m1 = [
[0, 1, 0, 0, 0, 1],
[4, 0, 0, 3, 2, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]
]
print(solution(m1))
|
"""
Class for reading pre-processed OpenFOAM data in the PyTorch tensor format
Alternatively one can use foamReader for directly reading OpenFOAM output files.
However the latter is slower, thus pre-processing is encouraged.
===
Distributed by: Notre Dame CICS (MIT License)
- Associated publication:
url: https://www.sciencedirect.com/science/article/pii/S0021999119300464
doi: https://doi.org/10.1016/j.jcp.2019.01.021
github: https://github.com/cics-nd/rans-uncertainty
===
"""
from utils.log import Log
import sys, random, re, os
import torch as th
# Default tensor type
dtype = th.DoubleTensor
class TorchReader():
"""
Utility for reading in pre-processed OpenFoam tensor files.
"""
def __init__(self):
self.lg = Log()
def loadTensor(self, fileName):
"""
Read in tensor
"""
try:
self.lg.log('Attempting to read file: '+str(fileName))
self.lg.log('Parsing file...')
t0 = th.load(fileName)
self.lg.success('Data field file successfully read.')
except OSError as err:
print("OS error: {0}".format(err))
return
except IOError as err:
print("File read error: {0}".format(err))
return
except:
print("Unexpected error:{0}".format(sys.exc_info()[0]))
return
return t0
def readScalarTh(self, timeStep, fieldName, dirPath = '.'):
data0 = self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath),str(timeStep),fieldName))
try:
data = data0.squeeze(1)
except:
data = data0
return data
def readVectorTh(self, timeStep, fieldName, dirPath = '.'):
return self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath),str(timeStep),fieldName)).type(dtype)
def readTensorTh(self, timeStep, fieldName, dirPath = '.'):
data0 = self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath),str(timeStep),fieldName)).type(dtype)
#Reshape into [nCells,3,3] Tensor
return data0.view(data0.size()[0],3,-1)
def readSymTensorTh(self, timeStep, fieldName, dirPath = '.'):
data0 = self.loadTensor('{}/{}/{}-torch.th'.format(str(dirPath),str(timeStep),fieldName)).type(dtype)
#Reshape into [nCells,3,3] Tensor
return data0.view(data0.size()[0],3,-1)
def readCellCenters(self, timeStep, dirPath='.'):
return self.loadTensor('{}/{}/cellCenters-torch.th'.format(str(dirPath),str(timeStep))).type(dtype) |
from basis_modules.helpers.testing import TestImporter
from basis_modules.modules import square
test_payments = TestImporter(
function_key="square.import_payments",
module=square,
# params={},
params_from_env={"access_token": "TEST_SQUARE_ACCESS_TOKEN"},
expected_records_cnt=100,
expected_records_field="total_money",
)
test_orders = TestImporter(
function_key="square.import_orders",
module=square,
# params={},
params_from_env={"access_token": "TEST_SQUARE_ACCESS_TOKEN"},
expected_records_cnt=1,
expected_records_field="id",
)
def test(interactive=False):
test_payments.run(interactive=interactive)
test_orders.run(interactive=interactive)
if __name__ == "__main__":
test(interactive=True)
|
import unittest
import pandas as pd
import os
import pytest
import numpy as np
from pandas.testing import assert_frame_equal
import src.join_df as join_df
class Test_join_df(unittest.TestCase):
def test_main(self):
emotion = pd.read_csv("test/csv_test/join_emotion_test.csv")
characters = pd.read_csv("test/csv_test/join_characters_test.csv")
res = join_df.main("", "", "test/csv_test/out.csv", emotion, characters)
out = pd.read_csv("test/csv_test/join_result_test.csv")
out.sort_index(axis=1) == res.sort_index(axis=1)
out = out[
[
"Unnamed: 0",
"bow_emotion",
"emotion",
"similarity_emotion",
"tb_emotion",
"text",
"characters",
]
]
# assert_frame_equal(res, out)
|
# Internal libraries
import os
import itertools
# Internal modules
from contact import Contact
from ticket import Ticket
# External libraries
from pymongo import MongoClient, errors
class Database(object):
"""docstring for Database."""
def __init__(self):
self.client = MongoClient(os.getenv('DB_HOST'), 27017)
self.db = self.client['db_freshdesk']
self.tickets = self.db.tickets
self.contacts = self.db.contacts
self._ticket = Ticket()
self._contact = Contact()
def insert_data(self, collection='contact'):
        if collection not in ('ticket', 'contact'):
            raise ValueError('The collection must be ticket or contact')
data = self._contact.data_contact()
db_collection = self.contacts
if collection == 'ticket':
data = self._ticket.data_tickets()
db_collection = self.tickets
for row in data:
try:
db_collection.insert_one(row)
except errors.DuplicateKeyError:
print("User already exists")
return True
def check_data(self, collection):
if not collection in ('ticket', 'contact'):
raise ValueError('The collection must be ticket or contact')
db_collection = self.db.tickets if collection == 'ticket' else self.db.contacts
return list(db_collection.find())
def update_date_big(self, collection):
key = 'updated_at'
try:
return collection.find(
{},{key: 1, '_id': 0}
).sort(key, -1).limit(1)[0][key]
except IndexError as ie:
return '1-1-1'
def update_data(self, collection):
updated_at_big = self.update_date_big(collection)
update_records_found = self._ticket.data_tickets(
f'&updated_since={updated_at_big}')
for update in itertools.islice(update_records_found, 1, None):
collection.update_one(
{'_id': update['_id']},
{'$set': update},
upsert=True
)
return update_records_found
def update_in_db(self):
self.update_data(self.db.tickets)
return True
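# Minimal usage sketch (not part of the original module). It assumes DB_HOST
# points at a reachable MongoDB instance and that the Ticket/Contact helpers
# can reach the Freshdesk API.
if __name__ == '__main__':
    db = Database()
    db.insert_data(collection='ticket')
    print(len(db.check_data('ticket')), 'tickets stored')
    db.update_in_db()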
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Monitors a directory tree for changes using mtime polling."""
import os
import threading
import warnings
class MtimeFileWatcher(object):
"""Monitors a directory tree for changes using mtime polling."""
def __init__(self, directory):
self._directory = directory
self._quit_event = threading.Event()
self._filename_to_mtime = None
self._has_changes = False
self._has_changes_lock = threading.Lock()
self._watcher_thread = threading.Thread(target=self._watch_changes)
self._watcher_thread.daemon = True
def start(self):
"""Start watching a directory for changes."""
self._watcher_thread.start()
def quit(self):
"""Stop watching a directory for changes."""
self._quit_event.set()
def has_changes(self):
"""Returns True if the watched directory has changed since the last call.
start() must be called before this method.
Returns:
Returns True if the watched directory has changed since the last call to
has_changes or, if has_changes has never been called, since start was
called.
"""
with self._has_changes_lock:
has_changes = self._has_changes
self._has_changes = False
return has_changes
def _watch_changes(self):
while not self._quit_event.wait(1):
self._check_for_changes()
def _check_for_changes(self):
if self._has_changed_paths():
with self._has_changes_lock:
self._has_changes = True
def _has_changed_paths(self):
self._filename_to_mtime, old_filename_to_mtime = (
self._generate_filename_to_mtime(), self._filename_to_mtime)
return (old_filename_to_mtime is not None and
self._filename_to_mtime != old_filename_to_mtime)
def _generate_filename_to_mtime(self):
filename_to_mtime = {}
num_files = 0
for dirname, dirnames, filenames in os.walk(self._directory,
followlinks=True):
for filename in filenames + dirnames:
if num_files == 10000:
warnings.warn(
'There are too many files in your application for '
'changes in all of them to be monitored. You may have to '
'restart the development server to see some changes to your '
'files.')
return filename_to_mtime
num_files += 1
path = os.path.join(dirname, filename)
try:
mtime = os.path.getmtime(path)
except (IOError, OSError):
pass
else:
filename_to_mtime[path] = mtime
return filename_to_mtime
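# Minimal usage sketch (not part of the original module): poll the current
# directory and report whether anything changed; the interval and iteration
# count are illustrative only.
if __name__ == '__main__':
    import time
    watcher = MtimeFileWatcher('.')
    watcher.start()
    try:
        for _ in range(5):
            time.sleep(2)
            print('changes detected:', watcher.has_changes())
    finally:
        watcher.quit()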
|
from tractseg.config.PeakRegHP import HP as PeakRegHP
class HP(PeakRegHP):
DATASET = "HCP_32g"
RESOLUTION = "2.5mm"
FEATURES_FILENAME = "32g_25mm_peaks" |
from aiogram.dispatcher.filters.state import StatesGroup, State
class CheckoutState(StatesGroup):
check_cart = State()
name = State()
phone = State()
address = State()
confirm = State() |
# -*- coding: utf-8 -*-
import fileinput
from lxml import etree
import os
from geodata.address_formatting.formatter import AddressFormatter
#class AddressFormatter(object):
# CATEGORY = 'category'
# NEAR = 'near'
# ATTENTION = 'attention'
# CARE_OF = 'care_of'
# HOUSE = 'house'
# HOUSE_NUMBER = 'house_number'
# PO_BOX = 'po_box'
# ROAD = 'road'
# BUILDING = 'building'
# ENTRANCE = 'entrance'
# STAIRCASE = 'staircase'
# LEVEL = 'level'
# UNIT = 'unit'
# INTERSECTION = 'intersection'
# SUBDIVISION = 'subdivision'
# METRO_STATION = 'metro_station'
# SUBURB = 'suburb'
# CITY_DISTRICT = 'city_district'
# CITY = 'city'
# ISLAND = 'island'
# STATE = 'state'
# STATE_DISTRICT = 'state_district'
# POSTCODE = 'postcode'
# COUNTRY_REGION = 'country_region'
# COUNTRY = 'country'
# WORLD_REGION = 'world_region'
def license_xml_gz_reader(gz_filename):
with fileinput.hook_compressed(gz_filename, "rb") as fi:
parser = etree.iterparse(fi, events=("start", "end"))
match_tag = license_xml_gz_header()
is_address_place_begin = False
for (event, elem) in parser:
#print(event, elem.tag)
if event == 'start' and elem.tag == 'address_place':
is_address_place_begin = True
record = {}
continue
if event == 'end' and elem.tag == 'address_place':
is_address_place_begin = False
empty = len(record)
for val in record.values():
if val == None:
empty -= 1
if empty > 0:
yield record
continue
if event == 'end':
continue
if is_address_place_begin and elem.tag in match_tag:
idx = match_tag.index(elem.tag)
record[idx] = elem.text
continue
"""
http://lxml.de/parsing.html#modifying-the-tree
Based on Liza Daly's fast_iter
http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
See also http://effbot.org/zone/element-iterparse.htm
"""
elem.clear()
# Also eliminate now-empty references from the root node to elem
for ancestor in elem.xpath('ancestor-or-self::*'):
while ancestor.getprevious() is not None:
del ancestor.getparent()[0]
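# Illustrative usage (not part of the original module; the .gz path below is
# hypothetical). Each yielded record is a dict keyed by the position of the
# matched tag in license_xml_gz_header():
#
#   for record in license_xml_gz_reader('data/license.xml.gz'):
#       print(record)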
def license_xml_gz_header():
return ["index", "region", "city", "street"] |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import storage_disable_uniform_bucket_level_access
import storage_enable_uniform_bucket_level_access
import storage_get_uniform_bucket_level_access
def test_get_uniform_bucket_level_access(bucket, capsys):
storage_get_uniform_bucket_level_access.get_uniform_bucket_level_access(
bucket.name
)
out, _ = capsys.readouterr()
assert (
"Uniform bucket-level access is disabled for {}.".format(bucket.name)
in out
)
def test_enable_uniform_bucket_level_access(bucket, capsys):
short_name = storage_enable_uniform_bucket_level_access
short_name.enable_uniform_bucket_level_access(
bucket.name
)
out, _ = capsys.readouterr()
assert (
"Uniform bucket-level access was enabled for {}.".format(bucket.name)
in out
)
def test_disable_uniform_bucket_level_access(bucket, capsys):
short_name = storage_disable_uniform_bucket_level_access
short_name.disable_uniform_bucket_level_access(
bucket.name
)
out, _ = capsys.readouterr()
assert (
"Uniform bucket-level access was disabled for {}.".format(bucket.name)
in out
)
|
import json
import requests
from loguru import logger
import time
import re
import execjs
import base64
import os
import pandas as pd
from urllib.parse import urljoin
import urllib.parse
import random
# Random delay of 0 to y seconds
def delay_0_y_s(random_delay_num):
y = float(random_delay_num)
time.sleep(random.random() * y)
sess = requests.Session()
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36 Edg/94.0.992.50',
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Origin": "https://www.aqistudy.cn"
}
def getJS():
    # Request the home page
r = sess.get('https://www.aqistudy.cn/historydata/daydata.php?city=%E6%9D%AD%E5%B7%9E&month=202110',
headers=HEADERS)
logger.info(f'首页的状态码:{r.status_code}')
text = r.text
    # Get the name of the JS file loaded by the home page
url = re.findall('<script type="text/javascript" src="(.*?)"></script>', text)[1]
t = int(time.time())
params = (
('v', str(t)),
)
    # Request the JS code
response = sess.get(f'https://www.aqistudy.cn/historydata/{url}', headers=HEADERS,
params=params)
logger.info(f"请求js的状态码:{response.status_code}")
    eval_text = re.search(r'eval\((.*)\)', response.text).group(1)  # a JS function is returned; strip the eval
    eval_text = execjs.eval(eval_text)  # execute the JS to obtain the base64-encoded JS
    num = eval_text.count('dweklxde')  # count how many rounds of base64 were applied: 0, 1 or 2
logger.info('num={}'.format(num))
    if num != 0:  # base64 encoding was applied
        eval_text = re.search(r"'(.*?)'", eval_text).group(1)  # extract the base64-encoded code for decoding
for i in range(num):
eval_text = base64.b64decode(eval_text.encode()).decode()
        enc_func_name = re.search('if \(!(.*?)\)(.*?){(.*?)var (.*?)=(.*?)\((.*?)ajax', eval_text, re.S).group(
            5).strip()  # name of the encryption function
        dec_func_name = re.search('success: function \((.*?)\)(.*?)= (.*?)\(', eval_text, re.S).group(3)  # name of the decryption function
        data_name = re.search('data: {(.*?):(.*?)}', eval_text).group(1).strip()  # parameter name used in the POST request
    else:  # the generated JS is not base64-encoded
enc_func_name = re.search('if\(!(.*?)\)\{var (.*?)=(.*?)\((.*?)ajax', eval_text).group(3).strip()
dec_func_name = func_name = re.search('success:function\((.*?)\)(.*?)=(.*?)\(', eval_text).group(3)
data_name = re.search('data:{(.*?):(.*?)}', eval_text).group(1).strip()
logger.info("data_name={}".format(data_name))
logger.info("enc_func_name={}".format(enc_func_name))
logger.info("dec_func_name={}".format(dec_func_name))
# logger.info("eval_text={}".format(eval_text))
return data_name, enc_func_name, dec_func_name, eval_text
def getParames(func_name, text, query):
    # Concatenate the decoded JS with the extracted static JS and execute it
node = execjs.get()
with open('./1.js', 'r', encoding='utf-8') as f:
buf = f.read() + text
with open('tmp.js', 'w', encoding='utf-8') as jsf:
jsf.write(buf)
ctx = node.compile(buf)
# print('call=', func_name, query);
sign = ctx.call(func_name, 'GETDAYDATA', query)
# logger.info(sign)
return sign, ctx
def decrypt(data, dec_func_name, ctx):
    # Decrypt the fetched data
data = ctx.call(dec_func_name, data)
logger.info(data)
return data
def getEncryptData(data_name, sign):
    # Call the API to fetch the encrypted data
data = {}
data[data_name] = sign
response = sess.post('https://www.aqistudy.cn/historydata/api/historyapi.php', headers=HEADERS, data=data)
logger.info(f"请求数据的状态码:{response.status_code}")
# logger.info(response.text)
return response.text, response.status_code
def get_year_months(start_year, start_month, end_year, end_month):
start_year, start_month, end_year, end_month = [int(i) for i in [start_year, start_month, end_year, end_month]]
year_months = []
if start_year < end_year:
for year in range(start_year, end_year + 1):
if year == start_year:
if start_month > 12 or start_month < 1:
raise ValueError
else:
for month in range(start_month, 13):
year_months.append(year * 100 + month)
elif year == end_year:
if end_month > 12 or end_month < 1:
raise ValueError
else:
for month in range(1, end_month + 1):
year_months.append(year * 100 + month)
else:
for month in range(1, 13):
year_months.append(year * 100 + month)
elif start_year == end_year:
if start_month <= end_month:
for month in range(start_month, end_month + 1):
year_months.append(start_year * 100 + month)
return year_months
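# Illustrative example (not from the original script):
# get_year_months(2020, 11, 2021, 2) -> [202011, 202012, 202101, 202102]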
if __name__ == '__main__':
city_set = ['郑州', '开封', '洛阳', '平顶山', '安阳', '鹤壁', '新乡', '焦作', '濮阳', '许昌', '漯河', '三门峡', '南阳', '商丘', '周口', '驻马店', '济源']
    year_months = get_year_months(2014, 1, 2021, 10)  # includes the last month of the last year
os.environ["EXECJS_RUNTIME"] = 'Node'
if execjs.get().name != 'Node.js (V8)':
logger.error('未能获取到node js')
exit(-1)
logger.info(execjs.get().name)
rps_code = 0
data_name = None
enc_func_name = None
dec_func_name = None
text = None
dataFrame = None
for k in range(0, len(city_set)):
city_chinese_name = city_set[k]
        # URL-encode the Chinese city name
city_name = urllib.parse.quote(city_chinese_name)
# print(city_name)
city_name = urllib.parse.unquote(city_name)
# print(city_name)
dataFrame = None
for year_month in year_months:
delay_0_y_s(2)
            # Request the data
while 1:
logger.info(f'开启请求数据:{city_chinese_name}{year_month}')
try:
if rps_code == 0:
data_name, enc_func_name, dec_func_name, text = getJS()
                query = {'city': city_chinese_name, 'month': str(year_month)}  # query parameters
sign, ctx = getParames(enc_func_name, text, query)
data, rps_code = getEncryptData(data_name, sign)
aqi_rsp_data = decrypt(data, dec_func_name, ctx)
except:
logger.warning('except retry!!')
rps_code = 0
else:
if rps_code == 200:
break
            # Process the data
dataFrame_tmp = pd.json_normalize(json.loads(aqi_rsp_data), record_path=['result', 'data', 'items'])
dataFrame_tmp.set_index('time_point', inplace=True)
            # Append the data
if dataFrame is None:
dataFrame = pd.DataFrame(dataFrame_tmp)
else:
dataFrame = pd.concat([dataFrame, dataFrame_tmp], axis=0)
        # Save the data
dataFrame_test = pd.DataFrame(dataFrame)
write = pd.ExcelWriter(f'{city_chinese_name}.xlsx')
dataFrame_test.to_excel(write)
logger.info(f'保存为:{city_chinese_name}.xlsx')
write.save()
|
# __all__ = [
# "ClinicalData",
# "ClinicalYesNoEnum",
# "DiseaseStageEnum",
# "DiseaseGradeEnum",
# "EcogPsEnum",
# "EthnicityEnum",
# "GenderEnum",
# "ModAnnArborEnum",
# "MostRecentTreatmentEnum",
# "PriorTherapyTypeEnum",
# "RaceEnum",
# "RecentTreatmentResponseEnum",
# ]
# from sqlalchemy import (
# CheckConstraint,
# Column,
# Enum,
# ForeignKeyConstraint,
# Integer,
# Numeric,
# String,
# )
# from .model_core import MetadataModel
# from .trial_metadata import Participant
# RaceEnum = Enum(
# "White",
# "American Indian or Alaska Native",
# "Black or African American",
# "Asian",
# "Native Hawaiian or Other Pacific Islander",
# "Other",
# "Unknown",
# "Not reported",
# "Not allowed to collect",
# name="race_enum",
# )
# GenderEnum = Enum(
# "Female", "Male", "Unknown", "Unspecified", "Not reported", name="gender_enum",
# )
# EthnicityEnum = Enum(
# "Hispanic or Latino",
# "Not Hispanic or Latino",
# "Unknown",
# "Not reported",
# "Not allowed to collect",
# name="ethnicity_enum",
# )
# ClinicalYesNoEnum = Enum(
# "Yes", "No", "Unknown", "Not applicable", name="clinical_yes_no_enum",
# )
# PriorTherapyTypeEnum = Enum(
# "Anti-Retroviral Therapy",
# "Antisense",
# "Bone Marrow Transplant",
# "Chemotherapy not otherwise specified (NOS)",
# "Chemotherapy multiple agents systemic",
# "Chemotherapy non-cytotoxic",
# "Chemotherapy single agent systemic",
# "Drug and/or immunotherapy",
# "Gene Transfer",
# "Hematopoietic stem cell",
# "Hormonal Therapy",
# "Image Directed local therapy",
# "No prior therapy",
# "Oncolytic virotherapy",
# "Prior therapy not otherwise specified (NOS)",
# "Radiation Therapy",
# "Surgery",
# "Therapy (NOS)",
# "Vaccine",
# name="prior_therapy_type_enum",
# )
# ModAnnArborEnum = Enum(
# "I", "II", "III", "IV", "Unknown", "Not Applicable", name="mod_ann_arbor_enum",
# )
# EcogPsEnum = Enum(
# "0",
# "1",
# "2",
# "3",
# "4",
# "5",
# "Data Missing",
# "Unknown",
# "Not Applicable",
# "Not Reported",
# name="ecog_ps_enum",
# )
# MostRecentTreatmentEnum = Enum(
# "ABVD",
# "BEACOPP",
# "MOPP",
# "ICE",
# "Gemcitabine",
# "Vinblastine",
# "Bendamustine",
# "Brentuximab Vedotin",
# "Lenalidomide (Revlimid)",
# "RAD001 (Everolimus)",
# "Rituximab",
# "Autologous Stem Cell Transplant",
# "Allogeneic Stem Cell Transplant",
# "Other - Experimental Targeted",
# "Therapy",
# "Other - Immunotherapy",
# "Other - not specified",
# name="most_recent_treatment_enum",
# )
# RecentTreatmentResponseEnum = Enum(
# "Complete Remission",
# "Partial Remission",
# "Stable Disease",
# "Progressive Disease",
# "Unevaluable",
# "Not Reported",
# "Unknown",
# name="recent_treatment_response_enum",
# )
# DiseaseStageEnum = Enum(
# "0",
# "0a",
# "0is",
# "I",
# "IA",
# "IA1",
# "IA2",
# "IB",
# "IB1",
# "IB2",
# "IC",
# "II",
# "IIA",
# "IIA1",
# "IIA2",
# "IIB",
# "IIC",
# "IIC1",
# "III",
# "IIIA",
# "IIIB",
# "IIIC",
# "IIIC1",
# "IIIC2",
# "IS",
# "IV",
# "IVA",
# "IVB",
# "IVC",
# "Tis",
# "X",
# "Unknown",
# "Not Reported",
# name="disease_stage_enum",
# )
# DiseaseGradeEnum = Enum(
# "G1",
# "G2",
# "G3",
# "G4",
# "GX",
# "GB",
# "High Grade",
# "Low Grade",
# "Unknown",
# "Not Reported",
# "Not Applicable",
# "Data Missing",
# name="disease_grade_enum",
# )
# class ClinicalData(MetadataModel):
# __tablename__ = "clinical_data"
# trial_id = Column(String, primary_key=True)
# cimac_participant_id = Column(String, primary_key=True)
# __table_args__ = (
# ForeignKeyConstraint(
# [trial_id, cimac_participant_id],
# [Participant.trial_id, Participant.cimac_participant_id],
# ),
# )
# # demographics
# race = Column(
# RaceEnum,
# doc="An arbitrary classification based on physical characteristics; a group of persons related by common descent or heredity (U.S. Center for Disease Control).",
# )
# gender = Column(GenderEnum, doc="Sex of the participant.",)
# ethnicity = Column(EthnicityEnum, doc="Ethnicity of the participant.",)
# age = Column(
# Numeric,
# CheckConstraint("age >= 0"),
# doc="The age of the subject expressed in years",
# )
# # history
# prior_surgery = Column(
# ClinicalYesNoEnum,
# doc="An indication whether or not there was any surgery history to report.",
# )
# prior_radiation_therapy = Column(
# ClinicalYesNoEnum,
# doc="An indication whether or not there was any radiation therapy history to report.",
# )
# prior_immunotherapy = Column(
# ClinicalYesNoEnum,
# doc="An indication whether or not there was any immunotherapy history to report.",
# )
# number_prior_systemic_treatments = Column(
# Integer,
# CheckConstraint("number_prior_systemic_treatments >= 0"),
# doc="Total number of patient's prior systemic treatments, if any.",
# )
# prior_therapy_type = Column(
# PriorTherapyTypeEnum,
# doc="Text term that describes the kind of treatment administered.",
# )
# # disease & baseline
# mod_ann_arbor_stage = Column(
# ModAnnArborEnum,
# doc="Text term that represents the clinical stage for lymphoma using the Ann Arbor Lymphoma Staging System.",
# )
# ecog_ps = Column(
# EcogPsEnum,
# doc="The ECOG functional performance status of the patient/participant.",
# )
# years_from_initial_diagnosis = Column(
# Numeric,
# CheckConstraint("years_from_initial_diagnosis >= 0"),
# doc="Time at enrollment since initial diagnosis, in years.",
# )
# type_of_most_recent_treatment = Column(
# MostRecentTreatmentEnum,
# doc="Type of most recent treatment administered to the patient.",
# )
# response_to_most_recent_treatment = Column(
# RecentTreatmentResponseEnum,
# doc="Type of most recent treatment administered to the patient.",
# )
# duration_of_remission = Column(
# Numeric,
# CheckConstraint("duration_of_remission >= 0"),
# doc="Duration of remission, in months.",
# )
# years_from_recent_treatment = Column(
# Numeric,
# CheckConstraint("years_from_recent_treatment >= 0"),
# doc="Time at enrollment since most recent treatment, in years.",
# )
# disease_stage = Column(
# DiseaseStageEnum,
# doc="Neoplasm American Joint Committee on Cancer Clinical Group Stage. Stage group determined from clinical information on the tumor (T), regional node (N) and metastases (M) and by grouping cases with similar prognosis for cancer.",
# )
# disease_grade = Column(
# DiseaseGradeEnum,
# doc="Numeric value to express the degree of abnormality of cancer cells, a measure of differentiation and aggressiveness.",
# )
|
import pytest
from pathlib import Path
from pyomrx.utils.cv2_utils import *
from pyomrx.core.circle import Circle
@pytest.fixture
def empty_circle(res_folder):
image_path = str(Path(res_folder) / 'empty_circle.png')
image = load_and_check_image(image_path)
return Circle(image, 6)
def test_empty_circle_is_not_filled(empty_circle):
assert not empty_circle.is_filled
@pytest.fixture
def filled_circle(res_folder):
image_path = str(Path(res_folder) / 'filled_circle.png')
image = load_and_check_image(image_path)
return Circle(image, 6)
def test_filled_circle_is_filled(filled_circle):
assert filled_circle.is_filled
if __name__ == '__main__':
pytest.main(['-k', 'test_circle', '-svv'])
|
from typing import List, Optional, Iterable, Any
class SerializerFieldGenerator:
# Default django serializer field names.
UUID_SERIALIZER_FIELD_NAME = 'serializers.UUIDField'
DATE_SERIALIZER_FIELD_NAME = 'serializers.DateField'
DATETIME_SERIALIZER_FIELD_NAME = 'serializers.DateTimeField'
FOREIGN_KEY_SERIALIZER_FIELD_NAME = 'LazyField'
INTEGER_SERIALIZER_FIELD_NAME = 'serializers.IntegerField'
JSON_SERIALIZER_FIELD_NAME = 'serializers.JSONField'
CHAR_SERIALIZER_FIELD_NAME = 'serializers.CharField'
BOOLEAN_SERIALIZER_FIELD_NAME = 'serializers.BooleanField'
DECIMAL_SERIALIZER_FIELD_NAME = 'serializers.DecimalField'
CHOICES_SERIALIZER_FIELD_NAME = 'serializers.ChoiceField'
# Additional options.
DEFAULT_SERIALIZER_FIELD_NAME = JSON_SERIALIZER_FIELD_NAME
DEFAULT_CHAR_SERIALIZER_FIELD_MAX_LENGTH = 255
DEFAULT_DECIMAL_SERIALIZER_FIELD_DECIMAL_PLACES = 2
DEFAULT_DECIMAL_SERIALIZER_FIELD_MAX_DIGITS = 10
    # JSON schema type names (mapped to serializer field names below).
STRING_TYPE = 'string'
DECIMAL_TYPE = 'number'
INTEGER_TYPE = 'integer'
BOOLEAN_TYPE = 'boolean'
    # Mapping from schema string formats to django serializer field names.
STRING_FORMAT_TO_FIELD_NAME = {
'uuid': UUID_SERIALIZER_FIELD_NAME,
'date': DATE_SERIALIZER_FIELD_NAME,
'date-time': DATETIME_SERIALIZER_FIELD_NAME,
}
EXCLUDED_FIELD_NAMES = []
@staticmethod
def _to_python_kwargs(val: dict) -> str:
"""
>>> SerializerFieldGenerator._to_python_kwargs({'foo': 1})
'foo=1'
>>> SerializerFieldGenerator._to_python_kwargs({'foo': 1, 'b': 'xx'})
'foo=1, b=xx'
>>> SerializerFieldGenerator._to_python_kwargs({})
''
"""
return ', '.join([f"{k}={v}" for k, v in val.items()])
@staticmethod
def get_field_ref(schema: dict) -> Optional[str]:
if '$ref' in schema:
return schema['$ref']
try:
return next(
item['$ref']
for keyword in ['oneOf', 'anyOf', 'allOf'] if
keyword in schema
for item in schema[keyword] if '$ref' in item)
except StopIteration:
return None
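    # Illustrative note: for {'allOf': [{'$ref': '#/definitions/Order'}]} this
    # returns '#/definitions/Order', as it does for a bare {'$ref': ...};
    # it returns None when the schema carries no reference at all.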
def _get_field_name_from_format(self, field_format: str) -> str:
""" Get django serializer field name
according to a schema string field format"""
return self.STRING_FORMAT_TO_FIELD_NAME.get(
field_format, self.CHAR_SERIALIZER_FIELD_NAME,
)
def _get_field_args(self, schema: dict, field_name: str) -> List[str]:
"""Get django ref serializer names"""
if field_name == self.FOREIGN_KEY_SERIALIZER_FIELD_NAME \
and self.get_field_ref(schema):
ref = self.get_field_ref(schema)
serializer_ref = ref.split('/')[-1]
try:
domain = next(
schema.get(key)[0].get('x-domain')
for key in ('oneOf', 'anyOf', 'allOf')
if schema.get(key))
except StopIteration:
domain = None
if domain:
return [f"'{domain.lower()}.{serializer_ref}'"]
return [f"'{serializer_ref}'"]
return []
def _str_to_python(self, item: Any):
mapping = {
'true': True,
'false': False,
'none': None,
}
if isinstance(item, str) and item.lower() in mapping:
return mapping.get(item.lower())
return item
def _to_python_value(self, iterable: Iterable):
return list(map(self._str_to_python, iterable))
def _get_field_kwargs( # noqa: too-complex
self, schema: dict, serializer_field_name: str,
name: str, serializer_name: str, required: List[str],
) -> dict:
"""Get django serializer field kwargs options"""
kwargs = {
'label': repr(schema.get('title', '')),
'help_text': repr(schema.get('description', '')),
'allow_blank': True,
# TODO: need to review.
# 'allow_null': schema.get('nullable', False),
'allow_null': True,
}
if schema.get('enum'):
enums = self._to_python_value(schema.get('enum'))
            enum_names = schema.get('x-enumNames') or enums
            kwargs['choices'] = repr(tuple(zip(enums, enum_names)))
if name == 'guid':
kwargs.pop('allow_null', None)
kwargs.pop('allow_blank', None)
kwargs['read_only'] = True
        if serializer_field_name == self.DECIMAL_SERIALIZER_FIELD_NAME:
            kwargs['max_digits'] = (
                self.DEFAULT_DECIMAL_SERIALIZER_FIELD_MAX_DIGITS)
            kwargs['decimal_places'] = (
                self.DEFAULT_DECIMAL_SERIALIZER_FIELD_DECIMAL_PLACES)
            kwargs.pop('allow_blank', None)
if serializer_field_name == self.BOOLEAN_SERIALIZER_FIELD_NAME:
kwargs.pop('allow_blank', None)
if serializer_field_name == self.INTEGER_SERIALIZER_FIELD_NAME:
kwargs.pop('allow_blank', None)
if serializer_field_name == self.UUID_SERIALIZER_FIELD_NAME:
kwargs.pop('allow_blank', None)
if serializer_field_name == self.CHAR_SERIALIZER_FIELD_NAME:
kwargs['max_length'] = schema.get(
'maxLength', self.DEFAULT_CHAR_SERIALIZER_FIELD_MAX_LENGTH)
elif serializer_field_name in \
self.STRING_FORMAT_TO_FIELD_NAME.values():
# TODO: need to review.
# kwargs.pop('allow_null', None)
kwargs.pop('allow_blank', None)
elif serializer_field_name == self.FOREIGN_KEY_SERIALIZER_FIELD_NAME:
            type_schema = schema.get('properties', {}).get('type', {})
            enum = type_schema.get('enum', [])
if enum and isinstance(enum, list):
kwargs['path'] = repr(f'Base{enum[0]}Serializer')
kwargs.pop('allow_blank', None)
elif serializer_field_name == self.JSON_SERIALIZER_FIELD_NAME:
items = schema.get('items')
if items and isinstance(items, dict) and items.get('title'):
kwargs['help_text'] = repr(items.get('title'))
kwargs.pop('allow_null', None)
kwargs.pop('allow_blank', None)
kwargs['default'] = 'dict'
return kwargs
def _construct_field_definition(
self, serializer_field_name, field_args, field_kwargs,
) -> str:
args = ''
if field_args:
args = ", ".join(field_args) + ', '
kwargs = self._to_python_kwargs(field_kwargs)
return f"{serializer_field_name}({args}{kwargs})"
def get_serializer_field_name(self, schema: dict) -> str:
"""Returns django field name by the schema key"""
property_type = schema.get('type')
refs = self.get_field_ref(schema)
if refs or (property_type == "object"
and schema.get('properties', {}).get('type', {}).get(
'enum', [])):
serializer_field_name = self.FOREIGN_KEY_SERIALIZER_FIELD_NAME
elif schema.get('enum'):
serializer_field_name = self.CHOICES_SERIALIZER_FIELD_NAME
elif property_type == self.STRING_TYPE:
field_format = schema.get('format', 'unknown')
serializer_field_name = self._get_field_name_from_format(
field_format)
elif property_type == self.INTEGER_TYPE:
serializer_field_name = self.INTEGER_SERIALIZER_FIELD_NAME
elif property_type == self.BOOLEAN_TYPE:
serializer_field_name = self.BOOLEAN_SERIALIZER_FIELD_NAME
elif property_type == self.DECIMAL_TYPE:
serializer_field_name = self.DECIMAL_SERIALIZER_FIELD_NAME
else:
serializer_field_name = self.DEFAULT_SERIALIZER_FIELD_NAME
return serializer_field_name
def __call__(self, schema: dict, name: str, serializer_name: str,
required: List[str]):
serializer_field_name = self.get_serializer_field_name(schema)
args = self._get_field_args(schema, serializer_field_name)
kwargs = self._get_field_kwargs(
schema, serializer_field_name, name, serializer_name, required)
return self._construct_field_definition(serializer_field_name, args,
kwargs)
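if __name__ == '__main__':
    # Minimal sketch of how the generator might be invoked; the schema
    # snippet and the names below are illustrative assumptions only.
    _generator = SerializerFieldGenerator()
    print(_generator(
        {'type': 'string', 'format': 'date', 'title': 'Start date'},
        name='start_date',
        serializer_name='OrderSerializer',
        required=[],
    ))
    # Expected, given the kwargs rules above:
    # serializers.DateField(label='Start date', help_text='', allow_null=True)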
|
import json
from dataclasses import dataclass
from typing import Optional
from konduto.api.resources.konduto_order_status import KondutoOrderStatus
@dataclass
class KondutoOrderStatusRequest:
status: KondutoOrderStatus
comments: Optional[str] = None
@property
def json(self) -> str:
return json.dumps(dict(status=self.status.value, comments=self.comments or ""))
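# Minimal usage sketch (illustrative; assumes KondutoOrderStatus exposes an
# APPROVE member, which is not verified here):
#
#   request = KondutoOrderStatusRequest(status=KondutoOrderStatus.APPROVE)
#   payload = request.json  # e.g. '{"status": "approve", "comments": ""}'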
|
import re
import json
import logging
from cassandra.cluster import Cluster
from flask import Flask, render_template, url_for, request, redirect, session, Response, jsonify
app = Flask(__name__)
app.secret_key = 'walnutfish774'
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == "POST":
if request.form.get('loginButton') == 'Login':
return render_template('login.html')
elif request.form.get('registerButton') == 'Register':
return render_template('register.html')
else:
return render_template('index.html')
else:
return render_template('index.html')
##################### LOGIN PAGE ROUTE #####################
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == "POST":
if request.form.get('homeButton') == 'backToHome':
return render_template('index.html')
elif request.form.get('loginButtonFinal') == 'loginButton':
uname_email = request.form['uname_email']
password = request.form['password']
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_username_status = keyspace_connect.execute("select status from loginusername where username='" + uname_email + "'")
row_status_username = []
for row in rows_username_status:
row_status_username.append(row.status)
keyspace_connect.shutdown()
cluster.shutdown()
if len(row_status_username) == 0:
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_email_status = keyspace_connect.execute("select status from loginemail where email='" + uname_email + "'")
keyspace_connect.shutdown()
cluster.shutdown()
row_status_email = []
for row in rows_email_status:
row_status_email.append(row.status)
if len(row_status_email) == 0:
msg = "No Such Username or Email in Records"
return render_template('login.html', msg=msg)
elif len(row_status_email) > 0:
row_status_email = jsonify(row_status_email)
row_status_email = str(row_status_email.data)
row_status_email = row_status_email[8:]
row_status_email = row_status_email[:-7]
if row_status_email == 'inactive':
msg = "Email Depreciated"
return render_template('login.html', msg=msg)
elif row_status_email == 'active':
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_email_password = keyspace_connect.execute("select password from loginemail where email='" + uname_email + "'")
keyspace_connect.shutdown()
cluster.shutdown()
row_password_email = []
for row in rows_email_password:
row_password_email.append(row.password)
row_password_email = jsonify(row_password_email)
row_password_email = str(row_password_email.data)
row_password_email = row_password_email[8:]
row_password_email = row_password_email[:-7]
if password == row_password_email:
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_email_password = keyspace_connect.execute("select * from loginemail where email='" + uname_email + "'")
keyspace_connect.shutdown()
cluster.shutdown()
row_email_email = []
row_email_username = []
for row in rows_email_password:
row_email_email.append(row.email)
row_email_username.append(row.username)
row_email_email = jsonify(row_email_email)
row_email_email = str(row_email_email.data)
row_email_email = row_email_email[8:]
row_email_email = row_email_email[:-7]
row_email_username = jsonify(row_email_username)
row_email_username = str(row_email_username.data)
row_email_username = row_email_username[8:]
row_email_username = row_email_username[:-7]
session['loggedin'] = True
session['id'] = row_email_email
session['username'] = row_email_username
return redirect(url_for('tutorial'))
#return render_template('tutorial.html', uname_email=uname_email, password=password)
#Start Session
else:
msg = "Incorrect Password"
return render_template('login.html', msg=msg)
elif len(row_status_username) > 0:
row_status_username = jsonify(row_status_username)
row_status_username = str(row_status_username.data)
row_status_username = row_status_username[8:]
row_status_username = row_status_username[:-7]
if row_status_username == 'inactive':
msg = "Username Depreciated"
return render_template('login.html', msg=msg)
elif row_status_username == 'active':
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_username_password = keyspace_connect.execute("select password from loginusername where username='" + uname_email + "'")
keyspace_connect.shutdown()
cluster.shutdown()
row_password_username = []
for row in rows_username_password:
row_password_username.append(row.password)
row_password_username = jsonify(row_password_username)
row_password_username = str(row_password_username.data)
row_password_username = row_password_username[8:]
row_password_username = row_password_username[:-7]
if password == row_password_username:
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_username_password = keyspace_connect.execute("select * from loginusername where username='" + uname_email + "'")
keyspace_connect.shutdown()
cluster.shutdown()
row_username_email = []
row_username_username = []
for row in rows_username_password:
row_username_email.append(row.email)
row_username_username.append(row.username)
row_username_email = jsonify(row_username_email)
row_username_email = str(row_username_email.data)
row_username_email = row_username_email[8:]
row_username_email = row_username_email[:-7]
row_username_username = jsonify(row_username_username)
row_username_username = str(row_username_username.data)
row_username_username = row_username_username[8:]
row_username_username = row_username_username[:-7]
session['loggedin'] = True
session['id'] = row_username_email
session['username'] = row_username_username
return redirect(url_for('tutorial'))
else:
msg = "Incorrect Password"
return render_template('login.html', msg=msg)
else:
return render_template('login.html')
else:
return render_template('login.html')
####################### REGISTER PAGE ROUTE ###################
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == "POST":
if request.form.get('homeButton') == 'backToHome':
return render_template('index.html')
else:
return render_template('register.html')
else:
return render_template('register.html')
###################### TUTORIAL PAGE ROUTE ####################
@app.route('/tutorial', methods=['GET', 'POST'])
def tutorial():
if 'loggedin' in session:
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_email_data = keyspace_connect.execute("select * from loginemail where email='" + session['id'] + "'")
keyspace_connect.shutdown()
cluster.shutdown()
row_email_username2 = []
for row in rows_email_data:
row_email_username2.append(row.username)
row_email_username2 = jsonify(row_email_username2)
row_email_username2 = str(row_email_username2.data)
row_email_username2 = row_email_username2[8:]
row_email_username2 = row_email_username2[:-7]
return render_template('tutorial.html', row_email_username2=row_email_username2)
if request.method == "POST":
if request.form.get('logoutButton') == 'logout':
session.pop('loggedin', None)
session.pop('id', None)
session.pop('username', None)
            return redirect(url_for('index'))
#change this to logout session later
elif request.form.get('tutorialButton') == 'tutorial':
if 'loggedin' in session:
cluster = Cluster(['192.168.1.236'], port=9042)
keyspace_connect = cluster.connect('accounts')
rows_email_data2 = keyspace_connect.execute("select * from loginemail where email='" + session['id'] + "'")
keyspace_connect.shutdown()
cluster.shutdown()
row_email_username3 = []
for row in rows_email_data2:
row_email_username3.append(row.username)
row_email_username3 = jsonify(row_email_username3)
row_email_username3 = str(row_email_username3.data)
row_email_username3 = row_email_username3[8:]
row_email_username3 = row_email_username3[:-7]
return render_template('tutorial.html', row_email_username3=row_email_username3)
elif request.form.get('addRecordButton') == 'addRecord':
return render_template('addRecords.html')
elif request.form.get('checkRecordButton') == 'checkRecord':
return render_template('checkRecords.html')
elif request.form.get('dataPolicyButton') == 'dataPolicy':
return render_template('dataPolicy.html')
elif request.form.get('profileButton') == 'profile':
return render_template('profile.html')
else:
return redirect(url_for('tutorial'))
else:
return redirect(url_for('tutorial'))
else:
        return redirect(url_for('index'))
@app.route('/addrecords', methods=['GET', 'POST'])
def addrecord():
if request.method == "POST":
if request.form.get('logoutButton') == 'logout':
return render_template('index.html')
#change this to logout session later
elif request.form.get('tutorialButton') == 'tutorial':
return render_template('tutorial.html')
elif request.form.get('addRecordButton') == 'addRecord':
return render_template('addrecords.html')
elif request.form.get('checkRecordButton') == 'checkRecord':
return render_template('checkrecords.html')
elif request.form.get('dataPolicyButton') == 'dataPolicy':
return render_template('datapolicy.html')
elif request.form.get('profileButton') == 'profile':
return render_template('profile.html')
#All other Elif Statements with Form Validation and Cassandra Input
else:
return render_template('addrecords.html')
else:
return render_template('addrecords.html')
@app.route('/checkrecords', methods=['GET', 'POST'])
def checkrecord():
if request.method == "POST":
if request.form.get('logoutButton') == 'logout':
return render_template('index.html')
#change this to logout session later
elif request.form.get('tutorialButton') == 'tutorial':
return render_template('tutorial.html')
elif request.form.get('addRecordButton') == 'addRecord':
return render_template('addrecords.html')
elif request.form.get('checkRecordButton') == 'checkRecord':
return render_template('checkrecords.html')
elif request.form.get('dataPolicyButton') == 'dataPolicy':
return render_template('datapolicy.html')
elif request.form.get('profileButton') == 'profile':
return render_template('profile.html')
#All other Elif Statements with Form Validation and Cassandra Input
else:
            return render_template('checkrecords.html')
    else:
        return render_template('checkrecords.html')
@app.route('/datapolicy', methods=['GET', 'POST'])
def datapolicy():
if request.method == "POST":
if request.form.get('logoutButton') == 'logout':
return render_template('index.html')
#change this to logout session later
elif request.form.get('tutorialButton') == 'tutorial':
return render_template('tutorial.html')
elif request.form.get('addRecordButton') == 'addRecord':
return render_template('addrecords.html')
elif request.form.get('checkRecordButton') == 'checkRecord':
return render_template('checkrecords.html')
elif request.form.get('dataPolicyButton') == 'dataPolicy':
return render_template('datapolicy.html')
elif request.form.get('profileButton') == 'profile':
return render_template('profile.html')
#All other Elif Statements with Form Validation and Cassandra Input
else:
            return render_template('datapolicy.html')
    else:
        return render_template('datapolicy.html')
@app.route('/profile', methods=['GET', 'POST'])
def profiley():
if request.method == "POST":
if request.form.get('logoutButton') == 'logout':
return render_template('index.html')
#change this to logout session later
elif request.form.get('tutorialButton') == 'tutorial':
return render_template('tutorial.html')
elif request.form.get('addRecordButton') == 'addRecord':
return render_template('addrecords.html')
elif request.form.get('checkRecordButton') == 'checkRecord':
return render_template('checkrecords.html')
elif request.form.get('dataPolicyButton') == 'dataPolicy':
return render_template('datapolicy.html')
elif request.form.get('profileButton') == 'profile':
return render_template('profile.html')
#All other Elif Statements with Form Validation and Cassandra Input
else:
return render_template('profile.html')
else:
return render_template('profile.html')
if __name__ == '__main__':
    app.run(debug=True)
|
# Doubly linked list solution
class DlinkedNode():
def __init__(self):
self.key = 0
self.value = 0
self.next = None
self.prev = None
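# The cache below pairs a dict (O(1) key lookup) with this doubly linked list
# (O(1) unlink/insert), so get and put both run in constant time; the node
# just before the tail sentinel is always the least recently used entry.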
class LRUCache():
def __init__(self, capacity: int):
self.capacity = capacity
self.size = 0
self.cache = {}
self.head = DlinkedNode()
self.tail = DlinkedNode()
self.head.next = self.tail
self.tail.prev = self.head
def _add_node(self, node):
""" 始终放在head的右边 """
node.prev = self.head
node.next = self.head.next
self.head.next.prev = node
self.head.next = node
def _remove_node(self, node):
"""删除一个节点"""
_prev = node.prev
_next = node.next
_prev.next = _next
_next.prev = _prev
def _move_to_head(self, node):
"""
先删除再增加
:param node:
:return:
"""
self._remove_node(node)
self._add_node(node)
def _pop_tail(self):
"""
删除最后一个节点的前一个
:return:
"""
res = self.tail.prev
self._remove_node(res)
return res
def get(self, key: int) -> int:
node = self.cache.get(key, None)
if not node:
return -1
self._move_to_head(node)
return node.value
def put(self, key: int, value: int) -> None:
node = self.cache.get(key, None)
if not node:
node = DlinkedNode()
node.key = key
node.value = value
self.size += 1
self.cache[key] = node
self._add_node(node)
if self.size > self.capacity:
tail = self._pop_tail()
del self.cache[tail.key]
self.size -= 1
else:
node.value = value
self._move_to_head(node)
if __name__ == "__main__" :
lru = LRUCache(2)
lru.put(1,1)
lru.put(2,2)
a = lru.get(1)
lru.put(3,3)
b = lru.get(2)
lru.put(4,4)
c = lru.get(1)
d = lru.get(3)
e = lru.get(4)
    print(a, b, c, d, e)  # expected: 1 -1 -1 3 4 (keys 2 and 1 were evicted)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import json
from graphscope.framework.utils import unify_type
from graphscope.proto import types_pb2
class GraphSchema:
"""Hold schema of a graph.
Attributes:
oid_type (str): Original ID type
vid_type (str): Internal ID representation
        vdata_type (str): Type of the data held by each vertex (simple graph only)
        edata_type (str): Type of the data held by each edge (simple graph only)
vertex_labels (list): Label names of vertex
edge_labels (list): Label names of edge
vertex_properties (list(dict)): Properties of each vertex label
edge_properties (list(dict)): Properties of each edge label
edge_relationships (list(list(tuple))): Source label and destination label of each edge label
"""
def __init__(self):
self._oid_type = None
self._vid_type = None
# simple graph only
self._vdata_type = types_pb2.INVALID
# simple graph only
self._edata_type = types_pb2.INVALID
self._schema = None
# list of names
self._vertex_labels = []
# list of names
self._edge_labels = []
# relationships of edges
self._edge_relationships = []
# list of dict (each label corresponds to a dict: {property_name: type})
self._vertex_properties = []
# list of dict (each label corresponds to a dict: {property_name: type})
self._edge_properties = []
def get_schema_from_def(self, schema_def):
"""Decode informations from proto message, generated by engine.
Args:
schema_def (`GraphSchemaDef`): Proto message defined in `proto/graph_def.proto`.
Raises:
ValueError: If the schema is not valid.
"""
self._oid_type = schema_def.oid_type
self._vid_type = schema_def.vid_type
# simple graph schema.
if schema_def.vdata_type:
self._vdata_type = unify_type(schema_def.vdata_type)
if schema_def.edata_type:
self._edata_type = unify_type(schema_def.edata_type)
# property graph schema
if schema_def.property_schema_json:
try:
self._schema = json.loads(schema_def.property_schema_json)
if self._schema:
for item in self._schema["types"]:
if item["type"] == "VERTEX":
self._vertex_labels.append(item["label"])
self._vertex_properties.append(
{
prop["name"]: unify_type(prop["data_type"])
for prop in item["propertyDefList"]
}
)
elif item["type"] == "EDGE":
self._edge_labels.append(item["label"])
self._edge_properties.append(
{
prop["name"]: unify_type(prop["data_type"])
for prop in item["propertyDefList"]
}
)
relation = [
(rel["srcVertexLabel"], rel["dstVertexLabel"])
for rel in item["rawRelationShips"]
]
self._edge_relationships.append(relation)
except Exception as e:
raise ValueError("Invalid property graph schema") from e
def init_nx_schema(self, gs_schema=None):
"""Schema for `nx.Graph`
Args:
gs_schema (`GraphSchema`, optional): schema of a graphscope `Graph`. Defaults to None.
"""
if gs_schema is not None:
for props in gs_schema.vertex_properties:
self._vertex_properties[0].update(props)
for props in gs_schema.edge_properties:
self._edge_properties[0].update(props)
else:
self._vertex_labels.append("_")
self._edge_labels.append("_")
self._vertex_properties.append(dict())
self._edge_properties.append(dict())
self._edge_relationships.append([("_", "_")])
def __repr__(self):
s = "oid_type: {}\nvid_type: {}\n".format(self._oid_type, self._vid_type)
if (
self._vdata_type != types_pb2.INVALID
and self._edata_type != types_pb2.INVALID
):
s += "vdata_type: {}\nedata_type: {}\n".format(
types_pb2.DataType.Name(self._vdata_type),
types_pb2.DataType.Name(self._edata_type),
)
for index, label in enumerate(self._vertex_labels):
props = [
(prop_name, types_pb2.DataType.Name(prop_type))
for prop_name, prop_type in self._vertex_properties[index].items()
]
s += "label: {}\ntype: VERTEX\nproperties: {}\n\n".format(label, props)
for index, label in enumerate(self.edge_labels):
props = [
(prop_name, types_pb2.DataType.Name(prop_type))
for prop_name, prop_type in self._edge_properties[index].items()
]
s += """label: {}\ntype: EDGE\nproperties: {}\nrelations: {}\n\n""".format(
label, props, self._edge_relationships[index]
)
return s
def __str__(self):
return self.__repr__()
@property
def oid_type(self):
return self._oid_type
@property
def vid_type(self):
return self._vid_type
@property
def vdata_type(self):
        # NB: a simple graph only contains a single vertex property.
return self._vdata_type
@property
def edata_type(self):
        # NB: a simple graph only contains a single edge property.
return self._edata_type
@property
def vertex_labels(self):
return self._vertex_labels
@property
def edge_labels(self):
return self._edge_labels
@property
def edge_relationships(self):
return self._edge_relationships
@property
def vertex_label_num(self):
return len(self._vertex_labels)
@property
def edge_label_num(self):
return len(self._edge_labels)
@property
def vertex_properties(self):
return self._vertex_properties
@property
def edge_properties(self):
return self._edge_properties
def vertex_label_index(self, label):
return self.vertex_labels.index(label)
def edge_label_index(self, label):
return self.edge_labels.index(label)
def vertex_property_index(self, label_id, prop):
return list(self.vertex_properties[label_id].keys()).index(prop)
def edge_property_index(self, label_id, prop):
return list(self.edge_properties[label_id].keys()).index(prop)
def add_vertex_properties(self, properties) -> bool:
for key, value in properties.items():
try:
prop_type = unify_type(type(value))
if key in self._vertex_properties[0]:
if prop_type != self._vertex_properties[0][key]:
return False
else:
self._vertex_properties[0][key] = prop_type
except TypeError:
return False
return True
def add_edge_properties(self, properties) -> bool:
for key, value in properties.items():
try:
prop_type = unify_type(type(value))
if key in self._edge_properties[0]:
if prop_type != self._edge_properties[0][key]:
return False
else:
self._edge_properties[0][key] = prop_type
except TypeError:
return False
return True
def clear(self):
self._oid_type = None
self._vid_type = None
self._vdata_type = None
self._edata_type = None
self._schema = None
self._vertex_labels.clear()
self._edge_labels.clear()
        self._vertex_properties.clear()
        self._edge_properties.clear()
        self._edge_relationships.clear()
def signature(self):
return hashlib.sha256("{}".format(self.__repr__()).encode("utf-8")).hexdigest()
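if __name__ == "__main__":
    # Minimal sketch (illustrative only; assumes `unify_type` accepts Python
    # builtin types, as add_vertex_properties implies): build the default
    # schema used for nx.Graph and register two vertex properties on "_".
    _schema = GraphSchema()
    _schema.init_nx_schema()
    _ok = _schema.add_vertex_properties({"weight": 1.0, "name": "node-0"})
    print(_ok, _schema.vertex_labels, _schema.vertex_properties[0])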
|
"""Module that creates and initialises application."""
import logging
import os
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_login import LoginManager
from flask_session import Session
from flask_migrate import Migrate
# from flask_paranoid import Paranoid
from logging.handlers import SMTPHandler, RotatingFileHandler
from .database import db, User
from .views import web
from .errors import error
from .auth.views import auth
from config import config
from .email import mail
sess = Session()
bootstrap = Bootstrap()
moment = Moment()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
migrate = Migrate()
# paranoid = Paranoid()
@login_manager.user_loader
def load_user(user_id):
"""Load a user for Flask-Login."""
return User.query.get(int(user_id))
def create_app(config_name):
"""Create Flask app."""
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
sess.init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
app.register_blueprint(web)
app.register_blueprint(error)
app.register_blueprint(auth)
# paranoid.init_app(app)
# paranoid.redirect_view = '/'
if not app.debug:
if app.config['MAIL_SERVER']:
authentication = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
authentication = (
app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='PFT Failure',
credentials=authentication, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler(
'logs/btt.log', maxBytes=10240, backupCount=10)
formatter = (
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
)
file_handler.setFormatter(logging.Formatter(formatter))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('PFT startup')
return app
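# Minimal usage sketch (illustrative; 'development' is an assumed key of the
# imported `config` mapping, not verified here):
#
#   app = create_app('development')
#   app.run()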
|
import math
from . import _singleton_music_analyzer
name = "Music Expand"
start_string = name + " started!"
description = "Changes color with music"
schema = {}
schema.update(_singleton_music_analyzer.music_vis_schema)
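# Each of the three aggregated bands lights a window centred on the middle of
# the strip: its half-width is a logistic squash of the band level (0-1023)
# scaled to half the strip length, and a pixel covered by several bands gets
# the product of their hue factors before being lit at full saturation/value.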
def update(lights, step, state):
app = state[_singleton_music_analyzer.MUSIC_VIS_FIELD]
center = lights.size / 2
hues = [(1, 0)]*lights.size
bands = [app.bands[0], (app.bands[1] + app.bands[2] + app.bands[3])/3, (app.bands[4] + app.bands[5] + app.bands[6])/3]
for i, band in enumerate(bands):
color_h = i/len(bands)
val = (1/(1+math.exp(-(10*(band/1023)-7))))*(lights.size/2)
for n in range(int(center-val), int(center+val)):
h, s = hues[n]
hues[n] = (h*color_h, s+1)
lights.clear()
for n, (h, s) in enumerate(hues):
        if s != 0:
lights.set_pixel_hsv(n, h, 1, 1)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from cliff import command
from osc_placement_tree import dot
from osc_placement_tree import tree
# These fields are provided by placement but after processing they are
# represented by the model itself so these fields can be dropped from the data
# store
DROP_DATA_FIELDS = [
    "links",  # unused
    "parent_provider_uuid",  # represented by node relationships
    "root_provider_uuid",  # unused
]
# These fields are not included in the generated output if they are not
# explicitly requested by the user
DEFAULT_HIDDEN_FIELDS = [
"generation",
"resource_provider_generation",
"min_unit",
"max_unit",
"step_size",
]
class ClientAdapter(object):
def __init__(self, client):
self.client = client
def get(self, url):
return self.client.request("GET", url).json()
def _get_field_filter(parsed_args):
if parsed_args.fields:
fields = parsed_args.fields.split(",")
return lambda name: name in fields
else:
return lambda name: name not in DEFAULT_HIDDEN_FIELDS
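# For example (illustrative): `--fields name,uuid` keeps only those two keys,
# while omitting --fields keeps everything except DEFAULT_HIDDEN_FIELDS.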
def _get_uuid_form_name_or_uuid(client, uuid_or_name):
try:
uuid.UUID(uuid_or_name)
rp_uuid = uuid_or_name
except ValueError:
rp_name_to_uuid = {
rp["name"]: rp["uuid"]
for rp in client.get("/resource_providers")["resource_providers"]
}
rp_uuid = rp_name_to_uuid.get(uuid_or_name)
if not rp_uuid:
            raise ValueError("%s does not exist" % uuid_or_name)
return rp_uuid
# This inherits directly from cliff as it wants to emit other than a simple
# table on the output
class ShowProviderTree(command.Command):
"""Show the tree of resource providers"""
def get_parser(self, prog_name):
parser = super(ShowProviderTree, self).get_parser(prog_name)
parser.add_argument(
"uuid_or_name",
metavar="<uuid_or_name>",
help="UUID or name of one of the provider in the tree to show",
)
parser.add_argument(
"--fields",
metavar="<fields>",
help="The coma separated list of field names of the resource "
"provider to include in the output.",
default="",
)
parser.add_argument(
"--show_consumers",
help="Includes consumers in the result",
nargs="?",
const=True,
default=False,
)
return parser
def take_action(self, parsed_args):
http = self.app.client_manager.placement_tree
client = ClientAdapter(http)
rp_uuid = _get_uuid_form_name_or_uuid(client, parsed_args.uuid_or_name)
graph = tree.make_rp_tree(
client, rp_uuid, drop_fields=DROP_DATA_FIELDS
)
if parsed_args.show_consumers:
tree.extend_rp_graph_with_consumers(client, graph)
print(
dot.graph_to_dot(
graph, field_filter=_get_field_filter(parsed_args)
)
)
class ListProviderTree(command.Command):
"""Show the whole RP graph"""
def get_parser(self, prog_name):
parser = super(ListProviderTree, self).get_parser(prog_name)
parser.add_argument(
"--fields",
metavar="<fields>",
help="The coma separated list of field names of the resource "
"provider to include in the output.",
default="",
)
parser.add_argument(
"--show_consumers",
help="Includes consumers in the result",
nargs="?",
const=True,
default=False,
)
return parser
def take_action(self, parsed_args):
http = self.app.client_manager.placement_tree
client = ClientAdapter(http)
graph = tree.make_rp_trees(client, drop_fields=DROP_DATA_FIELDS)
if parsed_args.show_consumers:
tree.extend_rp_graph_with_consumers(client, graph)
print(
dot.graph_to_dot(
graph, field_filter=_get_field_filter(parsed_args)
)
)
|
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from app.views.home_views import home_page
from app.views.notes_view import create_note, delete_note, edit_note, details_note
from app.views.profile_views import profile_view, profile_create, profile_delete
urlpatterns = [
path('', home_page, name='home page'),
path('profile', profile_view, name='profile'),
path('profile/create', profile_create, name='profile create'),
path('profile/delete', profile_delete, name='profile delete'),
path('note/create', create_note, name='create note'),
path('note/edit/<int:pk>/', edit_note, name='edit note'),
path('note/delete/<int:pk>/', delete_note, name='delete note'),
path('note/details/<int:pk>/', details_note, name='details note')
]
|
from django.test import TestCase
from nose.tools import *
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from opinions.forms import OpinionStatementForm
from opinions.models import Opinion
from opinions.models import StatementRevision
from opinions.models import Voice
from phillyleg.models import LegFile
class Test__Opinion_latest:
def setup(self):
User.objects.all().delete()
Opinion.objects.all().delete()
LegFile.objects.all().delete()
self.me = User.objects.create_user('me', 'pass', 'email')
self.legfile = LegFile.objects.create(key=123, id='456')
@istest
def should_return_the_latest_revision (self):
opinion = Opinion.objects.create(opiner=self.me, target=self.legfile)
StatementRevision.objects.create(opinion=opinion, statement='blah1', position='support')
StatementRevision.objects.create(opinion=opinion, statement='blah2', position='oppose')
StatementRevision.objects.create(opinion=opinion, statement='blah3', position='abstain')
StatementRevision.objects.create(opinion=opinion, statement='blah4', position='oppose')
assert_equal(opinion.latest.statement, 'blah4')
assert_equal(opinion.latest.position, 'oppose')
class Test__Voice_expressOpinionAbout:
def setup(self):
User.objects.all().delete()
Opinion.objects.all().delete()
LegFile.objects.all().delete()
self.me = User.objects.create_user('me', 'pass', 'email')
self.legfile = LegFile.objects.create(key=123, id='456')
@istest
def should_create_an_opinion_with_the_given_target (self):
voice = Voice(self.me)
voice.express_opinion_about(self.legfile, statement='blah1', position='support')
opinion = Opinion.objects.all()[0]
assert_equal(opinion.opiner, self.me)
assert_equal(opinion.target, self.legfile)
assert_equal(opinion.latest.statement, 'blah1')
assert_equal(opinion.latest.position, 'support')
@istest
def sets_up_deferred_saving_if_commit_is_False (self):
voice = Voice(self.me)
opinion = voice.express_opinion_about(self.legfile, statement='blah1', position='support', commit=False)
assert_is_none(opinion.pk)
assert hasattr(voice, 'save_revision')
class Test__Voice_revise:
def setup(self):
User.objects.all().delete()
Opinion.objects.all().delete()
LegFile.objects.all().delete()
self.me = User.objects.create_user('me', 'pass', 'email')
self.legfile = LegFile.objects.create(key=123, id='456')
self.opinion = Opinion.objects.create(opiner=self.me, target=self.legfile)
StatementRevision.objects.create(opinion=self.opinion, statement='blah1', position='oppose')
@istest
def should_add_a_revision_with_the_given_properties (self):
voice = Voice(self.me)
voice.revise(self.opinion, statement='blah2')
revision = StatementRevision.objects.order_by('-datetime')[0]
assert_equal(revision.statement, 'blah2')
assert_equal(revision.position, 'oppose')
voice.revise(self.opinion, position='support')
revision = StatementRevision.objects.order_by('-datetime')[0]
assert_equal(revision.statement, 'blah2')
assert_equal(revision.position, 'support')
voice.revise(self.opinion, statement='blah3', position='abstain')
revision = StatementRevision.objects.order_by('-datetime')[0]
assert_equal(revision.statement, 'blah3')
assert_equal(revision.position, 'abstain')
@istest
def sets_up_deferred_saving_if_commit_is_False (self):
voice = Voice(self.me)
voice.revise(self.opinion, statement='blah1', position='support', commit=False)
num_revisions = len(self.opinion.revisions.all())
assert_equal(num_revisions, 1)
assert hasattr(voice, 'save_revision')
class Test__OpinionStatementForm_save:
def setup(self):
User.objects.all().delete()
Opinion.objects.all().delete()
LegFile.objects.all().delete()
self.me = User.objects.create_user('me', 'pass', 'email')
self.legfile = LegFile.objects.create(key=123, id='456')
@istest
def creates_a_new_opinion_if_one_by_the_user_for_the_target_does_not_exist (self):
form_data = {
'opiner': self.me.pk,
'target_type': ContentType.objects.get_for_model(LegFile).pk,
'target_id': self.legfile.pk,
'position': 'support',
'statement': 'It is a good idea.',
}
form = OpinionStatementForm(data=form_data)
assert form.is_valid(), form.errors
form.save()
num_opinions = len(Opinion.objects.all())
num_revisions = len(StatementRevision.objects.all())
assert_equal(num_opinions, 1)
assert_equal(num_revisions, 1)
@istest
def creates_a_new_revision_if_an_opinion_by_the_user_for_the_target_exists (self):
opinion = Opinion.objects.create(opiner=self.me, target=self.legfile)
StatementRevision.objects.create(opinion=opinion, statement='blah1', position='oppose')
form_data = {
'opiner': self.me.pk,
'target_type': ContentType.objects.get_for_model(LegFile).pk,
'target_id': self.legfile.pk,
'position': 'support',
'statement': 'It is a good idea.',
}
form = OpinionStatementForm(data=form_data)
assert form.is_valid(), form.errors
form.save()
num_opinions = len(Opinion.objects.all())
num_revisions = len(StatementRevision.objects.all())
assert_equal(num_opinions, 1)
assert_equal(num_revisions, 2)
@istest
def sets_up_deferred_saving_if_commit_is_False (self):
form_data = {
'opiner': self.me.pk,
'target_type': ContentType.objects.get_for_model(LegFile).pk,
'target_id': self.legfile.pk,
'position': 'support',
'statement': 'It is a good idea.',
}
form = OpinionStatementForm(data=form_data)
assert form.is_valid(), form.errors
opinion = form.save(commit=False)
assert_is_none(opinion.pk)
assert hasattr(form, 'save_m2m')
|