| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
changes/buildsteps/lxc.py | bowlofstew/changes | 0 | 7000 | from __future__ import absolute_import
from changes.buildsteps.default import DefaultBuildStep
class LXCBuildStep(DefaultBuildStep):
"""
Similar to the default build step, except that it runs the client using
the LXC adapter.
"""
def can_snapshot(self):
return True
def get_label(self):
return 'Build via Changes Client (LXC)'
def get_client_adapter(self):
return 'lxc'
def get_allocation_params(self, jobstep):
params = super(LXCBuildStep, self).get_allocation_params(jobstep)
params['memory'] = str(self.resources['mem'])
params['cpus'] = str(self.resources['cpus'])
return params
| 2.359375 | 2 |
swapidemo1.py | anvytran-dev/mycode | 0 | 7001 | <gh_stars>0
#!/usr/bin/env python3
"""Star Wars API HTTP response parsing"""
# requests is used to send HTTP requests (get it?)
import requests
URL= "https://swapi.dev/api/people/1"
def main():
"""sending GET request, checking response"""
# SWAPI response is stored in "resp" object
    resp = requests.get(URL)
# what kind of python object is "resp"?
print("This object class is:", type(resp), "\n")
# what can we do with it?
print("Methods/Attributes include:", dir(resp))
if __name__ == "__main__":
main()
| 3.640625 | 4 |
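The demo above only inspects the response object itself. As a minimal follow-up sketch (assuming the same `https://swapi.dev/api/people/1` endpoint is reachable; field names follow the public SWAPI schema for a people record), the JSON body can be decoded and individual fields read like this:

```python
import requests

URL = "https://swapi.dev/api/people/1"

def show_person():
    """Fetch one SWAPI record and print a few decoded fields."""
    resp = requests.get(URL)
    # Bail out early on a non-200 status instead of parsing an error page.
    resp.raise_for_status()
    person = resp.json()  # dict parsed from the JSON body
    print("Name:", person.get("name"))
    print("Height:", person.get("height"))
    print("Films appeared in:", len(person.get("films", [])))

if __name__ == "__main__":
    show_person()
```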
src/biota_models/vegetation/model/constants_json_create.py | Deltares/NBSDynamics | 2 | 7002 | <reponame>Deltares/NBSDynamics
import json
schema = {
"Spartina": {
"ColStart": "2000-04-01",
"ColEnd": "2000-05-31",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.05,
"initial shoot length": 0.015,
"initial diameter": 0.003,
"start growth period": "2000-04-01",
"end growth period": "2000-10-31",
"start winter period": "2000-11-30",
"maximum plant height": [0.8, 1.3],
"maximum diameter": [0.003, 0.005],
"maximum root length": [0.2, 1],
"maximum years in LifeStage": [1, 19],
"numStem": [700, 700], # 3.5. number of stems per m2
"iniCol_frac": 0.6, # 3.6. initial colonization fraction (0-1)
"Cd": [1.1, 1.15], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.4, 0.4], # 3.11. flooding mortality threshold
"floMort_slope": [0.25, 0.25], # 3.12. flooding mortality slope
"vel_thres": [0.15, 0.25], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.4, 0.4], # 3.15 max height during winter time
},
"Salicornia": {
"ColStart": "2000-02-15",
"ColEnd": "2000-04-30",
"random": 20,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 1,
"Number LifeStages": 1,
"initial root length": 0.15,
"initial shoot length": 0.05,
"initial diameter": 0.01,
"start growth period": "2000-02-15",
"end growth period": "2000-10-15",
"start winter period": "2000-11-01",
"maximum plant height": [0.4, 0],
"maximum diameter": [0.015, 0],
"maximum root length": [0.05, 0],
"maximum years in LifeStage": [1, 0],
"numStem": [190, 0], # 3.5. number of stems per m2
"iniCol_frac": 0.2, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0], # 3.7. drag coefficient
"desMort_thres": [400, 1], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 1], # 3.10. dessication mortality slope
"floMort_thres": [0.5, 1], # 3.11. flooding mortality threshold
"floMort_slope": [0.12, 1], # 3.12. flooding mortality slope
"vel_thres": [0.15, 1], # 3.13. flow velocity threshold
"vel_slope": [3, 1], # 3.14. flow velocity slope
"maxH_winter": [0.0, 0.0], # 3.15 max height during winter time
},
"Puccinellia": {
"ColStart": "2000-03-01",
"ColEnd": "2000-04-30",
"random": 7,
"mud_colonization": [0.0, 0.0],
"fl_dr": 0.005,
"Maximum age": 20,
"Number LifeStages": 2,
"initial root length": 0.02,
"initial shoot length": 0.05,
"initial diameter": 0.004,
"start growth period": "2000-03-01",
"end growth period": "2000-11-15",
"start winter period": "2000-11-30",
"maximum plant height": [0.2, 0.35],
"maximum diameter": [0.004, 0.005],
"maximum root length": [0.15, 0.15],
"maximum years in LifeStage": [1, 19],
"numStem": [6500, 6500], # 3.5. number of stems per m2
"iniCol_frac": 0.3, # 3.6. initial colonization fraction (0-1)
"Cd": [0.7, 0.7], # 3.7. drag coefficient
"desMort_thres": [400, 400], # 3.9. dessication mortality threshold
"desMort_slope": [0.75, 0.75], # 3.10. dessication mortality slope
"floMort_thres": [0.35, 0.35], # 3.11. flooding mortality threshold
"floMort_slope": [0.4, 0.4], # 3.12. flooding mortality slope
"vel_thres": [0.25, 0.5], # 3.13. flow velocity threshold
"vel_slope": [3, 3], # 3.14. flow velocity slope
"maxH_winter": [0.2, 0.2], # 3.15 max height during winter time
},
}
with open("constants_veg.json", "w") as write_file:
json.dump(schema, write_file, indent=4)
| 1.78125 | 2 |
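The script above only writes the file. A minimal sketch of the reverse direction, loading `constants_veg.json` and pulling one species' parameters back out (species and key names taken from the schema written above):

```python
import json

# Load the constants file produced by the script above.
with open("constants_veg.json") as f:
    constants = json.load(f)

spartina = constants["Spartina"]
print("Spartina max plant height per life stage:", spartina["maximum plant height"])
print("Spartina stems per m2:", spartina["numStem"])
```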
format/format.bzl | harshad-deo/TorchVI | 0 | 7003 | def _replace_formatted(ctx, manifest, files):
out = ctx.actions.declare_file(ctx.label.name)
# this makes it easier to add variables
file_lines = [
"""#!/bin/bash -e
WORKSPACE_ROOT="${1:-$BUILD_WORKSPACE_DIRECTORY}" """,
"""RUNPATH="${TEST_SRCDIR-$0.runfiles}"/""" + ctx.workspace_name,
"""RUNPATH=(${RUNPATH//bin/ })
RUNPATH="${RUNPATH[0]}"bin
echo $WORKSPACE_ROOT
echo $RUNPATH
while read original formatted; do
if [[ ! -z "$original" ]] && [[ ! -z "$formatted" ]]; then
if ! cmp -s "$WORKSPACE_ROOT/$original" "$RUNPATH/$formatted"; then
echo "Formatting $original"
cp "$RUNPATH/$formatted" "$WORKSPACE_ROOT/$original"
fi
fi
done < "$RUNPATH"/""" + manifest.short_path,
]
file_content = "\n".join(file_lines)
ctx.actions.write(
output = out,
content = file_content,
)
files.append(manifest)
return [DefaultInfo(files = depset(files), executable = out)]
def _build_format_py(ctx):
files = []
manifest_content = []
for src in ctx.files.srcs:
if src.is_source:
file = ctx.actions.declare_file("{}.format.output".format(src.short_path))
files.append(file)
ctx.actions.run(
arguments = [src.path, file.path],
executable = ctx.executable._fmt,
outputs = [file],
inputs = [src, ctx.file._style],
)
manifest_content.append("{} {}".format(src.short_path, file.short_path))
manifest = ctx.actions.declare_file("format/{}/manifest.txt".format(ctx.label.name))
ctx.actions.write(manifest, "\n".join(manifest_content) + "\n")
return manifest, files
def _format_py_impl(ctx):
manifest, files = _build_format_py(ctx)
return _replace_formatted(ctx, manifest, files)
format_py = rule(
implementation = _format_py_impl,
executable = True,
attrs = {
"srcs": attr.label_list(
allow_files = [".py"],
mandatory = True,
),
"_fmt": attr.label(
cfg = "host",
default = "//format:format_py",
executable = True,
),
"_style": attr.label(
allow_single_file = True,
default = ":setup.cfg",
),
},
)
| 2.359375 | 2 |
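The rule above is only defined, never instantiated, in this file. A hypothetical BUILD snippet (Starlark, the Python-like Bazel language used above; the load path, target name, and glob pattern are assumptions) might wire it up like this:

```python
# BUILD file sketch -- label and glob pattern are illustrative only.
load("//format:format.bzl", "format_py")

format_py(
    name = "format",
    srcs = glob(["**/*.py"]),
)
```

Because the rule is executable, something like `bazel run //:format` would then run the generated shell script, which copies each formatted output back over its source file when the two differ.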
Stream-3/Full-Stack-Development/10.Custom-User-And-Email-Authentication/2.Custom-User-Model/auth_demo/accounts/models.py | GunnerJnr/_CodeInstitute | 4 | 7004 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import AbstractUser, UserManager
from django.db import models
from django.utils import timezone
# Create your models here.
# Create our new user class
class AccountUserManager(UserManager):
def _create_user(self, username, email, password, is_staff, is_supervisor, **extra_fields):
"""
Creates and saves a User with the given username, email and password.
:param username:
:param email:
:param password:
:param is_staff:
:param is_supervisor:
:param extra_fields:
:return:
"""
now = timezone.now()
if not email:
raise ValueError('The given username must be set')
email = self.normalize_email(email)
user = self.model(username=email, email=email,
is_staff=is_staff, is_active=True,
is_supervisor=is_supervisor,
date_joined=now, **extra_fields)
user.set_password(password)
user.save(using=self.db)
return user
class User(AbstractUser):
# now that we've abstracted this class we can add any
# number of custom attribute to our user class
# in later units we'll be adding things like payment details!
    objects = AccountUserManager()  # must be named "objects" so Django uses this manager by default
| 2.984375 | 3 |
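For a custom user model like this to take effect, Django also has to be pointed at it in settings. A minimal sketch (the app label `accounts` is inferred from the file path and is an assumption):

```python
# settings.py (sketch)
# Tell Django's auth framework to use the custom User model defined in
# accounts/models.py instead of django.contrib.auth.models.User.
AUTH_USER_MODEL = "accounts.User"
```

Elsewhere in the project, `django.contrib.auth.get_user_model()` is the usual way to refer to the model so code keeps working if this setting changes.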
histoGAN.py | mahmoudnafifi/HistoGAN | 169 | 7005 | <filename>histoGAN.py
"""
If you find this code useful, please cite our paper:
<NAME>, <NAME>, and <NAME>. "HistoGAN:
Controlling Colors of GAN-Generated and Real Images via Color Histograms."
In CVPR, 2021.
@inproceedings{afifi2021histogan,
title={Histo{GAN}: Controlling Colors of {GAN}-Generated and Real Images via
Color Histograms},
author={<NAME> Brubaker, <NAME>. and Brown, <NAME>.},
booktitle={CVPR},
year={2021}
}
"""
from tqdm import tqdm
from histoGAN import Trainer, NanException
from histogram_classes.RGBuvHistBlock import RGBuvHistBlock
from datetime import datetime
import torch
import argparse
from retry.api import retry_call
import os
from PIL import Image
from torchvision import transforms
import numpy as np
SCALE = 1 / np.sqrt(2.0)
def train_from_folder(
data='./dataset/',
results_dir='./results',
models_dir='./models',
name='test',
new=False,
load_from=-1,
image_size=128,
network_capacity=16,
transparent=False,
batch_size=2,
gradient_accumulate_every=8,
num_train_steps=150000,
learning_rate=2e-4,
num_workers=None,
save_every=1000,
generate=False,
save_noise_latent=False,
target_noise_file=None,
target_latent_file=None,
num_image_tiles=8,
trunc_psi=0.75,
fp16=False,
fq_layers=[],
fq_dict_size=256,
attn_layers=[],
hist_method='inverse-quadratic',
hist_resizing='sampling',
hist_sigma=0.02,
hist_bin=64,
hist_insz=150,
alpha=2,
target_hist=None,
aug_prob=0.0,
dataset_aug_prob=0.0,
aug_types=None):
model = Trainer(
name,
results_dir,
models_dir,
batch_size=batch_size,
gradient_accumulate_every=gradient_accumulate_every,
image_size=image_size,
network_capacity=network_capacity,
transparent=transparent,
lr=learning_rate,
num_workers=num_workers,
save_every=save_every,
trunc_psi=trunc_psi,
fp16=fp16,
fq_layers=fq_layers,
fq_dict_size=fq_dict_size,
attn_layers=attn_layers,
hist_insz=hist_insz,
hist_bin=hist_bin,
hist_sigma=hist_sigma,
hist_resizing=hist_resizing,
hist_method=hist_method,
aug_prob=aug_prob,
dataset_aug_prob=dataset_aug_prob,
aug_types=aug_types
)
if not new:
model.load(load_from)
else:
model.clear()
if generate:
now = datetime.now()
timestamp = now.strftime("%m-%d-%Y_%H-%M-%S")
if save_noise_latent and not os.path.exists('temp'):
os.mkdir('./temp')
if save_noise_latent and not os.path.exists(f'./temp/{name}'):
os.mkdir(f'./temp/{name}')
if target_hist is None:
raise Exception('No target histogram or image is given')
extension = os.path.splitext(target_hist)[1]
if extension == '.npy':
hist = np.load(target_hist)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif str.lower(extension) == '.jpg' or str.lower(extension) == '.png':
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
img = Image.open(target_hist)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(target_hist)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/{samples_name}')
elif extension == '':
files = [os.path.join(target_hist, f) for f in os.listdir(target_hist) if
os.path.isfile(os.path.join(target_hist, f))]
histblock = RGBuvHistBlock(insz=hist_insz, h=hist_bin,
resizing=hist_resizing, method=hist_method,
sigma=hist_sigma,
device=torch.cuda.current_device())
transform = transforms.Compose([transforms.ToTensor()])
for f in files:
extension = os.path.splitext(f)[1]
if extension == '.npy':
hist = np.load(f)
h = torch.from_numpy(hist).to(device=torch.cuda.current_device())
        elif str.lower(extension) == '.jpg' or str.lower(extension) == '.png':
img = Image.open(f)
img = torch.unsqueeze(transform(img), dim=0).to(
device=torch.cuda.current_device())
h = histblock(img)
else:
print(f'Warning: File extension of {f} is not supported.')
continue
if num_image_tiles > 1:
num_image_tiles = num_image_tiles - num_image_tiles % 2
for i in range(int(np.log2(num_image_tiles))):
h = torch.cat((h, h), dim=0)
samples_name = ('generated-' +
f'{os.path.basename(os.path.splitext(f)[0])}'
f'-{timestamp}')
model.evaluate(samples_name, hist_batch=h,
num_image_tiles=num_image_tiles,
save_noise_latent=save_noise_latent,
load_noise_file=target_noise_file,
load_latent_file=target_latent_file)
print(f'sample images generated at {results_dir}/{name}/'
f'{samples_name}')
else:
print('The file extension of target image is not supported.')
raise NotImplementedError
return
print('\nStart training....\n')
print(f'Alpha = {alpha}')
model.set_data_src(data)
for _ in tqdm(range(num_train_steps - model.steps), mininterval=10.,
desc=f'{name}<{data}>'):
retry_call(model.train, fargs=[alpha], tries=3, exceptions=NanException)
if _ % 50 == 0:
model.print_log()
def get_args():
parser = argparse.ArgumentParser(description='Train/Test HistoGAN.')
parser.add_argument('--data', dest='data', default='./dataset/')
parser.add_argument('--results_dir', dest='results_dir',
default='./results_HistoGAN')
parser.add_argument('--models_dir', dest='models_dir', default='./models')
parser.add_argument('--target_hist', dest='target_hist', default=None)
parser.add_argument('--name', dest='name', default='histoGAN_model')
parser.add_argument('--new', dest='new', default=False)
parser.add_argument('--load_from', dest='load_from', default=-1)
parser.add_argument('--image_size', dest='image_size', default=256, type=int)
parser.add_argument('--network_capacity', dest='network_capacity', default=16,
type=int)
parser.add_argument('--transparent', dest='transparent', default=False)
parser.add_argument('--batch_size', dest='batch_size', default=2, type=int)
parser.add_argument('--gradient_accumulate_every',
dest='gradient_accumulate_every', default=8, type=int)
parser.add_argument('--num_train_steps', dest='num_train_steps',
default=1500000, type=int)
parser.add_argument('--learning_rate', dest='learning_rate', default=2e-4,
type=float)
parser.add_argument('--num_workers', dest='num_workers', default=None)
parser.add_argument('--save_every', dest='save_every', default=5000,
type=int)
parser.add_argument('--generate', dest='generate', default=False)
parser.add_argument('--save_noise_latent', dest='save_n_l', default=False)
parser.add_argument('--target_noise_file', dest='target_n', default=None)
parser.add_argument('--target_latent_file', dest='target_l', default=None)
parser.add_argument('--num_image_tiles', dest='num_image_tiles',
default=16, type=int)
parser.add_argument('--trunc_psi', dest='trunc_psi', default=0.75,
type=float)
  parser.add_argument('--fp16', dest='fp16', default=False)
parser.add_argument('--fq_layers', dest='fq_layers', default=[])
parser.add_argument('--fq_dict_size', dest='fq_dict_size', default=256,
type=int)
parser.add_argument('--attn_layers', dest='attn_layers', default=[])
parser.add_argument('--gpu', dest='gpu', default=0, type=int)
parser.add_argument('--hist_bin', dest='hist_bin', default=64, type=int)
parser.add_argument('--hist_insz', dest='hist_insz', default=150, type=int)
parser.add_argument('--hist_method', dest='hist_method',
default='inverse-quadratic')
parser.add_argument('--hist_resizing', dest='hist_resizing',
default='interpolation')
parser.add_argument('--hist_sigma', dest='hist_sigma', default=0.02,
type=float)
parser.add_argument('--alpha', dest='alpha', default=2, type=float)
parser.add_argument('--aug_prob', dest='aug_prob', default=0.0, type=float,
help='Probability of discriminator augmentation. It '
'applies operations specified in --aug_types.')
parser.add_argument('--dataset_aug_prob', dest='dataset_aug_prob',
default=0.0, type=float,
help='Probability of dataset augmentation. It applies '
'random cropping')
parser.add_argument('--aug_types', dest='aug_types',
default=['translation', 'cutout'], nargs='+',
help='Options include: translation, cutout, and color')
return parser.parse_args()
if __name__ == "__main__":
args = get_args()
torch.cuda.set_device(args.gpu)
train_from_folder(
data=args.data,
results_dir=args.results_dir,
models_dir=args.models_dir,
name=args.name,
new=args.new,
load_from=args.load_from,
image_size=args.image_size,
network_capacity=args.network_capacity,
transparent=args.transparent,
batch_size=args.batch_size,
gradient_accumulate_every=args.gradient_accumulate_every,
num_train_steps=args.num_train_steps,
learning_rate=args.learning_rate,
num_workers=args.num_workers,
save_every=args.save_every,
generate=args.generate,
save_noise_latent=args.save_n_l,
target_noise_file=args.target_n,
target_latent_file=args.target_l,
num_image_tiles=args.num_image_tiles,
trunc_psi=args.trunc_psi,
fp16=args.fp16,
fq_layers=args.fq_layers,
fq_dict_size=args.fq_dict_size,
attn_layers=args.attn_layers,
hist_method=args.hist_method,
hist_resizing=args.hist_resizing,
hist_sigma=args.hist_sigma,
hist_bin=args.hist_bin,
hist_insz=args.hist_insz,
target_hist=args.target_hist,
alpha=args.alpha,
aug_prob=args.aug_prob,
dataset_aug_prob=args.dataset_aug_prob,
aug_types=args.aug_types
)
| 2.03125 | 2 |
apps/careeropportunity/migrations/0003_careeropportunity_deadline.py | Kpaubert/onlineweb4 | 32 | 7006 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-10-05 18:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("careeropportunity", "0002_careeropportunity_job_type")]
operations = [
migrations.AddField(
model_name="careeropportunity",
name="deadline",
field=models.DateField(blank=True, null=True, verbose_name="søknadsfrist"),
)
]
| 1.515625 | 2 |
benchmark/python/ffi/benchmark_ffi.py | grygielski/incubator-mxnet | 211 | 7007 | <filename>benchmark/python/ffi/benchmark_ffi.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import timeit
import itertools
import argparse
import os
class OpArgMngr(object):
"""Operator argument manager for storing operator workloads."""
args = {}
@staticmethod
def add_workload(funcname, *args, **kwargs):
if "_specifier" not in kwargs:
_specifier = funcname
else:
_specifier = kwargs["_specififer"]
del kwargs["_specififer"]
if _specifier in OpArgMngr.args:
raise ValueError("duplicate {}".format(_specifier))
OpArgMngr.args[_specifier] = {'args': args, 'kwargs': kwargs, 'funcname': funcname}
def generate_workloads():
array_pool = {}
shapes = []
for ndim in range(4):
shapes.extend(list(itertools.product(range(4), repeat=ndim)))
for shape in shapes:
name = 'x'.join(str(i) for i in shape)
if name in array_pool:
raise ValueError("duplicate array {}".format(name))
array_pool[name] = dnp.ones(shape)
return array_pool
def prepare_workloads():
pool = generate_workloads()
OpArgMngr.add_workload("zeros", (2, 2))
OpArgMngr.add_workload("full", (2, 2), 10)
OpArgMngr.add_workload("identity", 3)
OpArgMngr.add_workload("ones", (2, 2))
OpArgMngr.add_workload("einsum", "ii", pool['2x2'], optimize=False)
OpArgMngr.add_workload("unique", pool['1'], return_index=True, return_inverse=True, return_counts=True, axis=-1)
OpArgMngr.add_workload("dstack", (pool['2x1'], pool['2x1'], pool['2x1'], pool['2x1']))
OpArgMngr.add_workload("polyval", dnp.arange(10), pool['2x2'])
OpArgMngr.add_workload("ediff1d", pool['2x2'], pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("nan_to_num", pool['2x2'])
OpArgMngr.add_workload("tri", 2, 3, 4)
OpArgMngr.add_workload("tensordot", pool['2x2'], pool['2x2'], ((1, 0), (0, 1)))
OpArgMngr.add_workload("cumsum", pool['3x2'], axis=0, out=pool['3x2'])
OpArgMngr.add_workload("random.shuffle", pool['3'])
OpArgMngr.add_workload("equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("not_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("greater_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("less_equal", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("maximum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("minimum", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("sum", pool['2x2'], axis=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("std", pool['2x2'], axis=0, ddof=0, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("var", pool['2x2'], axis=0, ddof=1, keepdims=True, out=pool['1x2'])
OpArgMngr.add_workload("average", pool['2x2'], weights=pool['2'], axis=1, returned=True)
OpArgMngr.add_workload("histogram", pool['2x2'], bins=10, range=(0.0, 10.0))
OpArgMngr.add_workload("add", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("cross", pool['2'], pool['2'])
OpArgMngr.add_workload("linalg.eig", pool['3x3'])
OpArgMngr.add_workload("linalg.eigh", pool['3x3'])
OpArgMngr.add_workload("linalg.det", pool['3x3'])
OpArgMngr.add_workload("linalg.slogdet", pool['3x3'])
OpArgMngr.add_workload("linalg.matrix_rank", pool['3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.svd", pool['3x3'])
OpArgMngr.add_workload("linalg.cholesky", pool['1x1'])
OpArgMngr.add_workload("linalg.qr", pool['3x3'])
OpArgMngr.add_workload("linalg.lstsq", pool['2x1'], pool['2'], rcond=None)
OpArgMngr.add_workload("linalg.eigvals", pool['1x1'])
OpArgMngr.add_workload("linalg.eigvalsh", pool['1x1'], UPLO='L')
OpArgMngr.add_workload("linalg.inv", pool['1x1'])
OpArgMngr.add_workload("linalg.pinv", pool['2x3x3'], pool['1'], hermitian=False)
OpArgMngr.add_workload("linalg.solve", pool['1x1'], pool['1'])
OpArgMngr.add_workload("linalg.tensorinv", pool['1x1'], ind=2)
OpArgMngr.add_workload("linalg.norm", pool['3x3'])
OpArgMngr.add_workload("linalg.tensorsolve", pool['1x1x1'], pool['1x1x1'], (2, 0, 1))
OpArgMngr.add_workload("tile", pool['2x2'], 1)
OpArgMngr.add_workload("trace", pool['2x2'])
OpArgMngr.add_workload("transpose", pool['2x2'])
OpArgMngr.add_workload("split", pool['3x3'], (0, 1, 2), axis=1)
OpArgMngr.add_workload("vstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("argmax", pool['3x2'], axis=-1)
OpArgMngr.add_workload("argmin", pool['3x2'], axis=-1)
OpArgMngr.add_workload("atleast_1d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_2d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("atleast_3d", pool['2'], pool['2x2'])
OpArgMngr.add_workload("argsort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("sort", pool['3x2'], axis=-1)
OpArgMngr.add_workload("indices", dimensions=(1, 2, 3))
OpArgMngr.add_workload("subtract", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("multiply", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mod", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("remainder", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("true_divide", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("power", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("lcm", pool['2x2'].astype('int32'), pool['2x2'].astype('int32'))
OpArgMngr.add_workload("diff", pool['2x2'], n=1, axis=-1)
OpArgMngr.add_workload("inner", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.multinomial", n=2, pvals=[1/6.]*6, size=(2,2))
OpArgMngr.add_workload("random.rand", 3, 2)
OpArgMngr.add_workload("random.randn", 2, 2)
OpArgMngr.add_workload("nonzero", pool['2x2'])
OpArgMngr.add_workload("tril", pool['2x2'], k=0)
OpArgMngr.add_workload("random.choice", pool['2'], size=(2, 2))
OpArgMngr.add_workload("take", pool['2'], dnp.array([1,0], dtype='int64'))
OpArgMngr.add_workload("clip", pool['2x2'], 0, 1)
OpArgMngr.add_workload("expand_dims", pool['2x2'], axis=0)
OpArgMngr.add_workload("broadcast_to", pool['2x2'], (2, 2, 2))
OpArgMngr.add_workload("full_like", pool['2x2'], 2)
OpArgMngr.add_workload("zeros_like", pool['2x2'])
OpArgMngr.add_workload("ones_like", pool['2x2'])
OpArgMngr.add_workload("bitwise_and", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_xor", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("bitwise_or", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("copysign", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("arctan2", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("hypot", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("ldexp", pool['2x2'].astype(int), pool['2x2'].astype(int))
OpArgMngr.add_workload("logical_and", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_or", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("logical_xor", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("random.uniform", low=0, high=1, size=1)
OpArgMngr.add_workload("random.exponential", scale=2, size=(2,2))
OpArgMngr.add_workload("random.rayleigh", scale=2, size=(2,2))
OpArgMngr.add_workload("random.weibull", a=2, size=(2,2))
OpArgMngr.add_workload("random.pareto", a=2, size=(2,2))
OpArgMngr.add_workload("random.power", a=2, size=(2,2))
OpArgMngr.add_workload("random.logistic", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("random.gumbel", loc=2, scale=2, size=(2,2))
OpArgMngr.add_workload("where", pool['2x3'], pool['2x3'], pool['2x1'])
OpArgMngr.add_workload("may_share_memory", pool['2x3'][:0], pool['2x3'][:1])
OpArgMngr.add_workload('squeeze', pool['2x2'], axis=None)
OpArgMngr.add_workload("pad", pool['2x2'], pad_width=((1,2),(1,2)), mode="constant")
OpArgMngr.add_workload("prod", pool['2x2'], axis=1, dtype="float64", keepdims=False)
OpArgMngr.add_workload("around", pool['2x2'], decimals=0)
OpArgMngr.add_workload("round", pool['2x2'], decimals=1)
OpArgMngr.add_workload("repeat", pool['2x2'], repeats=1, axis=None)
OpArgMngr.add_workload("diagflat", pool['2x2'], k=1)
OpArgMngr.add_workload("diag", pool['2x2'], k=1)
OpArgMngr.add_workload("diagonal", pool['2x2x2'], offset=-1, axis1=0, axis2=1)
OpArgMngr.add_workload("diag_indices_from", pool['2x2'])
OpArgMngr.add_workload("bincount", dnp.arange(3, dtype=int), pool['3'], minlength=4)
OpArgMngr.add_workload("percentile", pool['2x2x2'], 80, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("quantile", pool['2x2x2'], 0.8, axis=0, out=pool['2x2'],\
interpolation='midpoint')
OpArgMngr.add_workload("all", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("any", pool['2x2x2'], axis=(0, 1),\
out=dnp.array([False, False], dtype=bool), keepdims=False)
OpArgMngr.add_workload("roll", pool["2x2"], 1, axis=0)
OpArgMngr.add_workload("rot90", pool["2x2"], 2)
OpArgMngr.add_workload("column_stack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("hstack", (pool['3x3'], pool['3x3'], pool['3x3']))
OpArgMngr.add_workload("triu", pool['3x3'])
OpArgMngr.add_workload("array_split", pool['2x2'], 2, axis=1)
OpArgMngr.add_workload("vsplit", pool['2x2'], 2)
OpArgMngr.add_workload("hsplit", pool['2x2'], 2)
OpArgMngr.add_workload("dsplit", pool['2x2x2'], 2)
OpArgMngr.add_workload("arange", 10)
OpArgMngr.add_workload("concatenate", (pool['1x2'], pool['1x2'], pool['1x2']), axis=0)
OpArgMngr.add_workload("append", pool['2x2'], pool['1x2'], axis=0)
OpArgMngr.add_workload("insert", pool['3x2'], 1, pool['1x1'], axis=0)
OpArgMngr.add_workload("delete", pool['3x2'], 1, axis=0)
OpArgMngr.add_workload("blackman", 12)
OpArgMngr.add_workload("eye", 5)
OpArgMngr.add_workload("hamming", 12)
OpArgMngr.add_workload("hanning", 12)
OpArgMngr.add_workload("linspace", 0, 10, 8, endpoint=False)
OpArgMngr.add_workload("logspace", 2.0, 3.0, num=4, base=2.0, dtype=onp.float32)
OpArgMngr.add_workload("matmul", pool['2x2'], pool['2x2'])
OpArgMngr.add_workload("mean", pool['2x2'], axis=0, keepdims=True)
OpArgMngr.add_workload("random.gamma", 1, size=(2, 3))
OpArgMngr.add_workload("random.normal", 1, size=(2, 3))
OpArgMngr.add_workload("max", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("min", pool["2x2"], axis=0, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amax", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
OpArgMngr.add_workload("amin", pool["2x2"], axis=1, out=pool['2'], keepdims=False)
unary_ops = ['negative', 'reciprocal', 'abs', 'sign', 'rint', 'ceil', 'floor',
'bitwise_not', 'trunc', 'fix', 'square', 'sqrt', 'cbrt', 'exp',
'log', 'log10', 'log2', 'log1p', 'expm1', 'logical_not', 'isnan',
'isinf', 'isposinf', 'isneginf', 'isfinite', 'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan', 'degrees', 'radians', 'sinh', 'cosh',
'tanh', 'arcsinh', 'arccosh', 'arctanh'] # 'rad2deg', 'deg2rad' cannot run without tvm
for unary_op in unary_ops:
if unary_op == "bitwise_not":
OpArgMngr.add_workload(unary_op, dnp.ones((2, 2), dtype=int))
else:
OpArgMngr.add_workload(unary_op, pool['2x2'])
def benchmark_helper(f, *args, **kwargs):
number = 10000
return timeit.timeit(lambda: f(*args, **kwargs), number=number) / number
def get_op(module, funcname):
funcname = funcname.split(".")
for fname in funcname:
module = getattr(module, fname)
return module
def run_benchmark(packages):
results = {}
for (k, v) in OpArgMngr.args.items():
result = {}
for (name, package) in packages.items():
print('{}.{} running...'.format(name, k))
op = get_op(package["module"], v["funcname"])
args = [package["data"](arg) for arg in v["args"]]
kwargs = {k: package["data"](v) for (k, v) in v["kwargs"].items()}
benchmark = benchmark_helper(op, *args, **kwargs)
result[name] = benchmark
results[k] = result
return results
def show_results(results):
print("{:>24}{:>24}{:>24}".format("name", "package", "time(us)"))
for (specifier, d) in results.items():
for (k, v) in d.items():
print("{:>24}{:>24}{:>24}".format(specifier, k, v * 10 ** 6))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('ffi_type')
parsed = parser.parse_args()
if parsed.ffi_type == "cython":
os.environ['MXNET_ENABLE_CYTHON'] = '1'
os.environ['MXNET_ENFORCE_CYTHON'] = '1'
elif parsed.ffi_type == "ctypes":
os.environ['MXNET_ENABLE_CYTHON'] = '0'
else:
raise ValueError("unknown ffi_type {}",format(parsed.ffi_type))
os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
import mxnet as mx
import numpy as onp
from mxnet import np as dnp
mx.npx.set_np(dtype=False)
packages = {
"onp": {
"module": onp,
"data": lambda arr: arr.asnumpy() if isinstance(arr, dnp.ndarray) else arr
},
"dnp": {
"module": dnp,
"data": lambda arr: arr
}
}
prepare_workloads()
results = run_benchmark(packages)
show_results(results)
| 2.0625 | 2 |
first-floor.py | levabd/smart-climat-daemon | 0 | 7008 | #!/usr/bin/env python3
import json
import argparse
import re
import datetime
import paramiko
import requests
# cmd ['ssh', 'smart',
# 'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
# cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from miio import chuangmi_plug
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
state = {}
with open('/home/pi/smart-climat-daemon/ac_state.json') as f:
    state = json.load(f)
plug_type = 'chuangmi.plug.m1'
def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
"""Check for valid mac addresses."""
if not pat.match(mac.upper()):
raise argparse.ArgumentTypeError(
'The MAC address "{}" seems to be in the wrong format'.format(mac))
return mac
def turn_on_humidifier():
"""Turn on humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.on()
def turn_off_humidifier():
"""Turn off humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.off()
def check_if_ac_off():
"""Check if AC is turned off."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=<KEY>'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) and ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if response.json()['props']['boot'] == 0:
return True
return False
return None
def check_if_ac_cool():
"""Check if AC is turned for a automate cooling."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=<KEY>'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) or ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '001':
return False
if not response.json()['props']['wdNumber'] == 25:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
def check_if_ac_heat():
"""Check if AC is turned for a automate heating."""
status_url = 'http://smart.levabd.pp.ua:2003/status/key/27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) and ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '100':
return False
if not response.json()['props']['wdNumber'] == 23:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
def turn_on_heat_ac():
"""Turn on AC on a first floor for a heating if it was not."""
if (state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1:
return
heat_url = 'http://smart.levabd.pp.ua:2003/heat/key/27fbc501b51b47663e77c46816a'
ac_heat = check_if_ac_heat()
if ac_heat is not None:
if not ac_heat:
state['triedTurnedHeat'] = 1
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(heat_url)
print(response.json())
else:
if state['triedTurnedHeat'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 1
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_on_cool_ac():
"""Turn on AC on a first floor for a cooling if it was not."""
if (state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1:
return
cool_url = 'http://smart.levabd.pp.ua:2003/cool/key/<KEY>'
ac_cool = check_if_ac_cool()
if ac_cool is not None:
if not ac_cool:
state['triedTurnedCool'] = 1
state['wasTurnedCool'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(cool_url)
print(response.json())
else:
if state['triedTurnedCool'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 1
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_off_ac():
"""Turn off AC on a first floor."""
if (state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1:
return
turn_url = 'http://smart.levabd.pp.ua:2003/power-off/key/27fbc501b51b47663e77c46816a'
ac_off = check_if_ac_off()
if ac_off is not None:
if not ac_off:
state['triedTurnedOff'] = 1
state['wasTurnedOff'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(turn_url)
print(response.json())
else:
if state['triedTurnedOff'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 1
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def record_temp_humid(temperature, humidity):
"""Record temperature and humidity data for web interface monitor"""
dicty = {
"temperature": temperature,
"humidity": humidity
}
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('smart.levabd.pp.ua', port = 2001, username='levabd', password='<PASSWORD>.')
sftp = ssh.open_sftp()
with sftp.open('smart-home-temp-humidity-monitor/lr.json', 'w') as outfile:
json.dump(dicty, outfile)
ssh.close()
def poll_temp_humidity():
"""Poll data frstate['triedTurnedOff']om the sensor."""
today = datetime.datetime.today()
backend = BluepyBackend
poller = MiTempBtPoller('58:2d:34:38:c0:91', backend)
temperature = poller.parameter_value(MI_TEMPERATURE)
humidity = poller.parameter_value(MI_HUMIDITY)
print("Month: {}".format(today.month))
print("Getting data from Mi Temperature and Humidity Sensor")
print("FW: {}".format(poller.firmware_version()))
print("Name: {}".format(poller.name()))
print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
return (today, temperature, humidity)
# scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def list_backends(_):
"""List all available backends."""
backends = [b.__name__ for b in available_backends()]
print('\n'.join(backends))
def main():
"""Main function."""
# check_if_ac_cool()
(today, temperature, humidity) = poll_temp_humidity()
# Record temperature and humidity for monitor
record_temp_humid(temperature, humidity)
try:
if (humidity > 49) and (today.month < 10) and (today.month > 4):
turn_off_humidifier()
if (humidity < 31) and (today.month < 10) and (today.month > 4):
turn_on_humidifier()
if (humidity < 31) and ((today.month > 9) or (today.month < 5)):
turn_on_humidifier()
if (humidity > 49) and ((today.month > 9) or (today.month < 5)):
turn_off_humidifier()
# Prevent Sleep of Xiaomi Smart Plug
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=0,
lazy_discover=True,
model='chuangmi.plug.m1')
print(hummidifier_plug.status())
except Exception:
print("Can not connect to humidifier")
# clear env at night
if today.hour == 4:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
if (today.hour > -1) and (today.hour < 7):
turn_off_ac()
if (temperature > 26.4) and (today.month < 6) and (today.month > 4) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 26.4) and (today.month < 10) and (today.month > 8) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 27.3) and (today.month < 9) and (today.month > 5) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature < 23.5) and (today.month < 10) and (today.month > 4):
turn_off_ac()
# _if (temperature < 20) and ((today.month > 9) or (today.month < 5)) and (today.hour < 24) and (today.hour > 9):
# turn_on_heat_ac()
if (temperature > 22) and ((today.month > 9) or (today.month < 5)):
turn_off_ac()
if __name__ == '__main__':
main()
| 2.296875 | 2 |
reservior_classification.py | Optimist-Prime/QML-for-MNIST-classification | 1 | 7009 | <reponame>Optimist-Prime/QML-for-MNIST-classification
import pickle
from sklearn.neural_network import MLPClassifier
train = pickle.load(open('train_pca_reservoir_output_200samples.pickle','rb'))
test = pickle.load(open('test_pca_reservoir_output_50samples.pickle','rb'))
train_num = 200
test_num = 50
mlp = MLPClassifier(hidden_layer_sizes=(2000,), max_iter=100, alpha=1e-5,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1, batch_size= 20)
mlp.fit(train[0], train[1][:train_num])
print("Training set score: %f" % mlp.score(train[0], train[1][:train_num]))
print("Test set score: %f" % mlp.score(test[0], test[1][:test_num]))
| 2.765625 | 3 |
util.py | delmarrerikaine/LPG-PCA | 1 | 7010 | <filename>util.py
import numpy as np
import pandas as pd
from skimage import io
import skimage.measure as measure
import os
from lpg_pca_impl import denoise
def getNoisedImage(originalImage, variance):
# return random_noise(originalImage, mode='gaussian', var=variance)
np.random.seed(42)
noise = np.random.normal(size=originalImage.shape)
noise = noise/np.sqrt(np.power(noise, 2).mean())
noisedImage = originalImage + variance*noise
return noisedImage
def clip(img):
img = np.minimum(np.ones(img.shape), img)
img = np.maximum(np.zeros(img.shape), img)
return img
def readImg(path):
return io.imread(path, as_gray=True).astype('float64')/255.0
def showImg(img, name):
print(name)
img = clip(img)
io.imshow((img*255.0).astype('uint8'))
def saveImg(img, path):
img = clip(img)
io.imsave(path, (img*255.0).astype('uint8'))
def compare_psnr(img1, img2):
return measure.compare_psnr(img1, img2)
def compare_ssim(img1, img2):
return measure.compare_ssim(img1, img2)
def generate_images(img_name='mri'):
experiments_folder = 'experiments'
noise_variances = [10, 20, 30, 40]
for noise_variance in noise_variances:
corrected_noise_variance = noise_variance / 255.0
original_img = readImg(os.path.join('images', img_name + '.png'))
noised_img = getNoisedImage(original_img, corrected_noise_variance)
noised_file_name = img_name + '_noised_' + str(noise_variance) + '.png'
saveImg(noised_img, os.path.join(experiments_folder, noised_file_name))
print(noised_file_name + ' started.')
denoised_img = denoise(noised_img, noise_variance)
denoised_file_name = img_name + '_denoised_' + str(noise_variance) + '.png'
saveImg(denoised_img, os.path.join(experiments_folder, denoised_file_name))
print(denoised_file_name + ' finished.')
print("noised PSNR: " + str(compare_psnr(original_img, noised_img)) + ", SSIM: " + str(compare_ssim(original_img, noised_img)))
print("denoised PSNR: " + str(compare_psnr(original_img, denoised_img)) + ", SSIM: " + str(compare_ssim(original_img, denoised_img)))
def generate_latex_tables():
df = pd.read_csv('data.csv')
df = df.round(2)
image_texts = np.array([])
temp_directory = os.path.join(os.path.dirname(__file__), 'temp')
if not os.path.exists(temp_directory):
os.makedirs(temp_directory)
for image_name in list(set(df['image_name'])):
image_df = df[df['image_name'] == image_name]
image_df['denoise_lpg_pca'] = image_df['denoise_psnr_lpg_pca'].map(str) + '(' + image_df['denoise_ssim_lpg_pca'].map(str) + ')'
image_df['denoise_mf'] = image_df['denoise_psnr_mf'].map(str) + '(' + image_df['denoise_ssim_mf'].map(str) + ')'
image_df['denoise_nlm'] = image_df['denoise_psnr_nlm'].map(str) + '(' + image_df['denoise_ssim_nlm'].map(str) + ')'
image_df['denoise_bm3d'] = image_df['denoise_psnr_bm3d'].map(str) + '(' + image_df['denoise_ssim_bm3d'].map(str) + ')'
image_df = image_df[['sigma', 'denoise_lpg_pca', 'denoise_mf', 'denoise_nlm', 'denoise_bm3d']]
image_df['sigma'] = image_df['sigma'].map(int)
image_df.columns = ['sigma', 'LPG-PCA', 'MF', "NLM", 'BM3D']
path = os.path.join(temp_directory, image_name + '.tex')
image_df.to_latex(path, index=False, column_format='lrrrr')
with open(path, 'r') as file:
image_text = file.read()
image_text = image_text.replace(' ', '').replace(r'\toprule', r'\toprule &&' + image_name + r'\\ \midrule')
image_text = r'\noindent\begin{minipage}{.5\linewidth}' + '\n' + image_text + '\n' + r'\end{minipage}'
image_text = image_text.replace('\n\n', '\n').replace('sigma&', '$\\sigma$&')
image_texts = np.append(image_texts, image_text)
os.remove(path)
result = '\n'.join(image_texts)
filename = 'tables.tex'
with open(filename, "w+") as file:
file.write(result)
if(len(os.listdir(temp_directory))) == 0:
os.rmdir(temp_directory)
| 2.5 | 2 |
ui/ui.py | kringen/wingnut | 0 | 7011 | <filename>ui/ui.py
import redis
from rq import Queue, Connection
from flask import Flask, render_template, Blueprint, jsonify, request
import tasks
import rq_dashboard
from wingnut import Wingnut
app = Flask(
__name__,
template_folder="./templates",
static_folder="./static",
)
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
@app.route("/", methods=["GET"])
def home():
return render_template("main/home.html")
@app.route("/tasks", methods=["POST"])
def run_task():
task_type = request.form["type"]
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue()
task = q.enqueue(tasks.create_task, task_type)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@app.route("/mode", methods=["POST"])
def set_mode():
task_type = request.form["type"]
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue("mode")
task = q.enqueue(tasks.set_mode, task_type)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@app.route("/tasks/<task_id>", methods=["GET"])
def get_status(task_id):
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue()
task = q.fetch_job(task_id)
if task:
response_object = {
"status": "success",
"data": {
"task_id": task.get_id(),
"task_status": task.get_status(),
"task_result": task.result,
},
}
else:
response_object = {"status": "error"}
return jsonify(response_object)
@app.route("/configuration", methods=["GET"])
def get_configuration():
wingnut = Wingnut()
response_object = {
"status": "success",
"data": {
"servoPin": wingnut.servoPin,
"leftMotorPin1": wingnut.leftMotorPin1,
"leftMotorPin1": wingnut.leftMotorPin2,
"leftMotorEnablePin": wingnut.leftMotorEnablePin,
"rightMotorPin1": wingnut.rightMotorPin1,
"rightMotorPin1": wingnut.rightMotorPin2,
"rightMotorEnablePin": wingnut.rightMotorEnablePin,
"sonarTriggerPin": wingnut.sonarTriggerPin,
"sonarEchoPin": wingnut.sonarEchoPin
}
}
return jsonify(response_object)
@app.route("/diagnostics", methods=["GET"])
def get_diagnostics():
r = redis.Redis()
diagnostics = {}
diagnostics["power_level"] = r.get("power_level").decode("utf-8")
diagnostics["temperature"] = r.get("temperature").decode("utf-8")
diagnostics["free_memory_mb"] = r.get("free_memory_mb").decode("utf-8")
diagnostics["free_disk_space"] = r.get("free_disk_space").decode("utf-8")
response_object = {
"status": "success",
"data": {
"diagnostics": diagnostics
}
}
return jsonify(response_object)
if __name__ == "__main__":
app.run(host="0.0.0.0",debug=1)
| 2.359375 | 2 |
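A quick way to exercise the endpoints defined above from another Python process. This is a sketch that assumes the Flask app is running locally on port 5000 (Flask's default) with Redis and an RQ worker available:

```python
import time
import requests

BASE = "http://localhost:5000"  # assumed host/port for the Flask app above

# Enqueue a task via POST /tasks, then poll GET /tasks/<id> until the worker finishes it.
resp = requests.post(f"{BASE}/tasks", data={"type": "1"})
task_id = resp.json()["data"]["task_id"]

while True:
    status = requests.get(f"{BASE}/tasks/{task_id}").json()
    if status["data"]["task_status"] in ("finished", "failed"):
        print(status)
        break
    time.sleep(1)
```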
pytaboola/__init__.py | Openmail/pytaboola | 0 | 7012 | <reponame>Openmail/pytaboola<filename>pytaboola/__init__.py<gh_stars>0
from pytaboola.client import TaboolaClient | 1.09375 | 1 |
omkar/code.py | omi28/ga-learner-dst-repo | 0 | 7013 | # --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#New record
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
data.shape
cenus=np.concatenate((new_record,data),axis=0)
cenus.shape
print(cenus)
age=cenus[:,0]
max_age=age.max()
print(max_age)
min_age=age.min()
mean_age=np.mean(age)
age_std=np.std(age)
race=cenus[:,2]
print(race)
race_0=(race==0)
len_0=len(race[race_0])
print(len_0)
race_1=(race==1)
len_1=len(race[race_1])
race_2=(race==2)
race_3=(race==3)
race_4=(race==4)
len_2=len(race[race_2])
len_3=len(race[race_3])
len_4=len(race[race_4])
minority_race=3
print(minority_race)
senior_citizen=(age>60)
working_hour_sum=sum(cenus[:,6][senior_citizen])
print(working_hour_sum)
senior_citizen_len=len(age[senior_citizen])
avg_working_hours=working_hour_sum/senior_citizen_len
avg_working_hours=round(avg_working_hours,2)
education_num=cenus[:,1]
print(education_num)
high=education_num>10
#high=education_num[high]
print(high)
low=education_num<=10
#low=education_num[low]
print(low)
INCOME=cenus[:,7][high]
print(INCOME)
print(np.mean(INCOME))
avg_pay_high=round(np.mean(INCOME),2)
print(avg_pay_high)
LOW_AVG=cenus[:,7][low]
avg_pay_low=round(np.mean(LOW_AVG),2)
print(avg_pay_low)
#Code starts here
| 2.828125 | 3 |
test/present.py | jchampio/apache-websocket | 8 | 7014 | <reponame>jchampio/apache-websocket<filename>test/present.py
#! /usr/bin/env python
#
# Presents the results of an Autobahn TestSuite run in TAP format.
#
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import StrictVersion
import json
import os.path
import sys
import textwrap
import yamlish
def filter_report(report):
"""Filters a test report dict down to only the interesting keys."""
INTERESTING_KEYS = [
'behavior',
'behaviorClose',
'expected',
'received',
'expectedClose',
'remoteCloseCode'
]
return { key: report[key] for key in INTERESTING_KEYS }
def prepare_description(report):
"""Constructs a description from a test report."""
raw = report['description']
# Wrap to at most 80 characters.
wrapped = textwrap.wrap(raw, 80)
description = wrapped[0]
if len(wrapped) > 1:
# If the text is longer than one line, add an ellipsis.
description += '...'
return description
#
# MAIN
#
# Read the index.
results_dir = 'test-results'
with open(os.path.join(results_dir, 'index.json'), 'r') as index_file:
index = json.load(index_file)['AutobahnPython']
# Sort the tests by numeric ID so we print them in a sane order.
test_ids = list(index.keys())
test_ids.sort(key=StrictVersion)
# Print the TAP header.
print('TAP version 13')
print('1..{0!s}'.format(len(test_ids)))
count = 0
skipped_count = 0
failed_count = 0
for test_id in test_ids:
count += 1
passed = True
skipped = False
report = None
result = index[test_id]
# Try to get additional information from this test's report file.
try:
path = os.path.join(results_dir, result['reportfile'])
with open(path, 'r') as f:
report = json.load(f)
description = prepare_description(report)
except Exception as e:
description = '[could not load report file: {0!s}]'.format(e)
test_result = result['behavior']
close_result = result['behaviorClose']
# Interpret the result for this test.
if test_result != 'OK' and test_result != 'INFORMATIONAL':
if test_result == 'UNIMPLEMENTED':
skipped = True
else:
passed = False
elif close_result != 'OK' and close_result != 'INFORMATIONAL':
passed = False
# Print the TAP result.
print(u'{0} {1} - [{2}] {3}{4}'.format('ok' if passed else 'not ok',
count,
test_id,
description,
' # SKIP unimplemented' if skipped
else ''))
# Print a YAMLish diagnostic for failed tests.
if report and not passed:
output = filter_report(report)
diagnostic = yamlish.dumps(output)
for line in diagnostic.splitlines():
print(' ' + line)
if not passed:
failed_count += 1
if skipped:
skipped_count += 1
# Print a final result.
print('# Autobahn|TestSuite {0}'.format('PASSED' if not failed_count else 'FAILED'))
print('# total {0}'.format(count))
print('# passed {0}'.format(count - failed_count - skipped_count))
print('# skipped {0}'.format(skipped_count))
print('# failed {0}'.format(failed_count))
exit(0 if not failed_count else 1)
| 2.296875 | 2 |
softwarecollections/scls/migrations/0004_other_repos_default_values.py | WEBZCC/softwarecollections | 39 | 7015 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scls', '0003_other_repos'),
]
operations = [
migrations.AlterField(
model_name='otherrepo',
name='arch',
field=models.CharField(default='', blank=True, verbose_name='Architecture', max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='command',
field=models.TextField(default='', blank=True, verbose_name='Command'),
),
migrations.AlterField(
model_name='otherrepo',
name='icon',
field=models.CharField(default='', blank=True, verbose_name='Icon', choices=[('centos', 'centos'), ('epel', 'epel'), ('fedora', 'fedora'), ('rhel', 'rhel')], max_length=20),
),
migrations.AlterField(
model_name='otherrepo',
name='version',
field=models.CharField(default='', blank=True, verbose_name='Distribution version', max_length=20),
),
]
| 1.539063 | 2 |
python/Excel/enumerateCells.py | davidgjy/arch-lib | 0 | 7016 | <filename>python/Excel/enumerateCells.py<gh_stars>0
import openpyxl
wb = openpyxl.load_workbook('example.xlsx')
sheet = wb.get_sheet_by_name('Sheet1')
rows = sheet.get_highest_row()
cols = sheet.get_highest_column()
for i in range(1, rows + 1):
for j in range(1, cols + 1):
print('%s: %s' % (sheet.cell(row=i, column=j).coordinate, sheet.cell(row=i, column=j).value))
print('---------------------------------------------')
| 3.34375 | 3 |
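The snippet above relies on openpyxl's old accessor methods (`get_sheet_by_name`, `get_highest_row`, `get_highest_column`), which later openpyxl releases deprecated and then removed. A rough equivalent against the current API might look like this (same `example.xlsx`/`Sheet1` assumptions as above):

```python
import openpyxl

wb = openpyxl.load_workbook('example.xlsx')
sheet = wb['Sheet1']  # index the workbook by sheet name
# Walk every used cell, printing its coordinate and value, one separator per row.
for row in sheet.iter_rows(min_row=1, max_row=sheet.max_row,
                           max_col=sheet.max_column):
    for cell in row:
        print('%s: %s' % (cell.coordinate, cell.value))
    print('---------------------------------------------')
```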
plugins/polio/migrations/0029_campaign_country.py | BLSQ/iaso-copy | 29 | 7017 | <filename>plugins/polio/migrations/0029_campaign_country.py
# Generated by Django 3.1.13 on 2021-10-04 11:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("iaso", "0107_auto_20211001_1845"),
("polio", "0028_remove_campaign_budget_first_draft_submitted_at"),
]
operations = [
migrations.AddField(
model_name="campaign",
name="country",
field=models.ForeignKey(
blank=True,
help_text="Country for campaign, set automatically from initial_org_unit",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="campaigns_country",
to="iaso.orgunit",
),
),
]
| 1.617188 | 2 |
CurrencyExchange.py | aarana14/CurrencyExchange | 0 | 7018 | #import external libraries used in code
import requests, json
import pycountry
print('Currency Exchange')
currencies = []
def findCurrency():
    #Finds all available currencies
allCurrency = (list(pycountry.currencies))
for x in allCurrency:
y = str(x)
y = y[18:21]
#Adds the value of their ISO to the "currencies" list
currencies.append(y)
#Organizes all values in "currency" list
currecyDisplay = ''
inline = 0
for cs in currencies:
currecyDisplay += cs + ' | '
inline += 1
#Allows up to 26 ISOs to be in one line
if inline >= 26:
currecyDisplay += '\n '
inline = 0
#Displays all currency ISOs to user
    print('Available Currencies:\n',currecyDisplay)
def help():
#Ask user if they need help
questions = input('Type ? for help or Enter to continue: ')
#If user inputs "?" run help procedure
if questions == '?':
#Display information order
print('--------\nCurrency Exchange Help\nISO currency codes are three-letter alphabetic codes that represent the various currencies\n\nCurrency ISO:\nCurrency Name:\n--------')
#Obtains information of all currencies
allCurrency = (list(pycountry.currencies))
#For each currency obtain the ISO and the name of currency
#Display ISO and Data
for x in allCurrency:
y = str(x)
w = y[18:21]
n = int(y.index(',', y.index(',') + 1))
z = y[30:n-1]
print(w)
print(z + '\n')
print('--------\n')
#Else user does not input "?" continue program
else:
pass
def userInput():
#Program try asking user for data input
try:
fromCurrency = input('From (ISO): ').upper()
toCurrency = input('To (ISO): ').upper()
currencyAmount = input('Amount: ')
currencyAmount = int(currencyAmount.replace(',', ''))
    #If the entered data is not the correct type, inform the user
except ValueError:
print('Amount Is A Number Value')
#Return inputed data
return currencyAmount, fromCurrency, toCurrency
def checkInfo(fromC, toC, currencyA, check):
#"validCurrency" value increses as data inputed if verified
validCurrency = 0
#Check if inputed ISO is valid
#If values are valid the vlue of "validCurrency" is increased
for givenCurrencies in currencies:
if fromC == givenCurrencies:
validCurrency += 1
for givenCurrencies in currencies:
if toC == givenCurrencies:
validCurrency += 1
#Check if "validCurrency" meets necessary verification value
#Check if "validCurrency" is not 2 (Data is not valid) or inputed amount data is not the correct value
if validCurrency != 2 or type(currencyA) != int:
#Let user know data is invalid
print('Information Invalid\n')
#Ask user if they need help
help()
#Reset "validCurrency"
validCurrency = 0
#Set "check" as False
checks = False
#If type of data is correct and valid "check" is set to True
else:
checks = True
return fromC, toC, currencyA, checks
def dataInput():
#Data has not been checked yet, therefore "check" is False
check = False
#While the data is not valid or not checked repeat data input and data check
while check == False:
currencyAmount, fromCurrency, toCurrency = userInput()
fromC, toC, currencyA, check = checkInfo(fromCurrency, toCurrency, currencyAmount, check)
#Once data is valid and checked return values
return fromC, toC, currencyA
def userData():
    #Empty until the user confirms whether the provided information is correct
correctInfo = ''
#While the user does not approve of data, repeat data input and data check
while correctInfo != 'y':
fromC, toC, currencyA = dataInput()
#Display data user has inputed after being checked and validated
print('\nFrom:',fromC)
print('To:',toC)
print('Amount:', currencyA)
#Ask user if the data provided is correct
correctInfo = input('Is the information correct (y/n)?: ').lower()
print('')
help()
#Once data is approved by user, return values
return currencyA, fromC, toC
def realTimeRate(from_currency, to_currency):
#API key provided by Alpha Vanatage
api_key = "<KEY>"
#Define "url" where data is stored
#"url" varies from user selected data
url = ('https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=%s&to_currency=%s&apikey=%s' % (from_currency, to_currency, api_key))
#Get response from reqest of "url"
req = requests.get(url)
#Obtain json format and set data for python to read
#"Result" has nested dictionaries
result = req.json()
#Display exchange rate information to user
print("Realtime Currency Exchange Rate for",
result["Realtime Currency Exchange Rate"]
["2. From_Currency Name"], "to",
result["Realtime Currency Exchange Rate"]
["4. To_Currency Name"], "is",
result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'], to_currency)
#Return the value of exchange
return float(result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'])
def completeExchange(rate, cAmount, fCurrency, tCurrency):
#Total of the "to" currency is the rate times the amount of the "from" currency
total = rate * cAmount
end = ' '
    #Keep the program running until the user presses the Enter key
while end == ' ':
print('\n%s %s is %.2f %s' % (cAmount, fCurrency, total, tCurrency))
end = input('Press Enter To Close')
if __name__ == "__main__":
findCurrency()
help()
currencyAmount, fromCurrency, toCurrency = userData()
rate = realTimeRate(fromCurrency, toCurrency)
completeExchange(rate, currencyAmount, fromCurrency, toCurrency)
| 3.828125 | 4 |
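findCurrency and help above recover each ISO code and currency name by slicing str(x), which depends on the exact repr layout of pycountry objects. A less fragile sketch (assuming the installed pycountry exposes alpha_3 and name attributes on currency records, which current releases do) reads the fields directly:

import pycountry

def list_currencies():
    # Print "ISO - name" for every known currency without string slicing
    for currency in pycountry.currencies:
        print(currency.alpha_3, '-', currency.name)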
atcoder/corp/codethxfes2014a_e.py | knuu/competitive-programming | 1 | 7019 | <gh_stars>1-10
r, c, m = map(int, input().split())
n = int(input())
op = [list(map(lambda x: int(x) - 1, input().split())) for _ in range(n)]
board = [[0 for _ in range(c)] for _ in range(r)]
for ra, rb, ca, cb in op:
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
board[j][k] += 1
cnt = 0
for i in range(r):
for j in range(c):
board[i][j] %= 4
if board[i][j] == 0:
cnt += 1
for i in range(n):
ra, rb, ca, cb = op[i]
cnti = cnt
for j in range(ra, rb + 1):
for k in range(ca, cb + 1):
if board[j][k] == 0:
cnti -= 1
elif board[j][k] == 1:
cnti += 1
if cnti == m:
print(i + 1)
| 2.59375 | 3 |
scripts/analyse_bse.py | QU-XIAO/yambopy | 21 | 7020 | # Copyright (C) 2018 <NAME>, <NAME>
# All rights reserved.
#
# This file is part of yambopy
#
from __future__ import print_function
from builtins import range
from yambopy import *
from qepy import *
import json
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import operator
def analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack, text, draw ):
"""
Using ypp, you can study the convergence of BSE calculations in 2 ways:
Create a .png of all absorption spectra relevant to the variable you study
Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
The script reads from <folder> all results from <variable> calculations for processing.
The resulting pictures and data files are saved in the ./analyse_bse/ folder.
By default, the graphical interface is deactivated (assuming you run on a cluster because of ypp calls).
See line 2 inside the script.
"""
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if pack: # True by default, False if -np used
print('Packing ...')
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
print('Packing done.')
else:
print('Packing skipped.')
# importing data from .json files in <folder>
print('Importing...')
data = YamboAnalyser(folder)
# extract data according to relevant var
invars = data.get_inputfiles_tag(var)
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(list(invars.items()), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
print('Files detected: ',keys)
# unit of the input value
unit = invars[keys[0]]['variables'][var][1]
######################
# Output-file filename
######################
os.system('mkdir -p analyse_bse')
outname = './analyse_%s/%s_%s'%(folder,folder,var)
# Array that will contain the output
excitons = []
# Loop over all calculations
for key in keys:
jobname=key.replace('.json','')
print(jobname)
# input value
# BndsRn__ is a special case
if var.startswith('BndsRnX'):
# format : [1, nband, ...]
inp = invars[key]['variables'][var][0][1]
else:
inp = invars[key]['variables'][var][0]
print('Preparing JSON file. Calling ypp if necessary.')
### Creating the 'absorptionspectra.json' file
# It will contain the exciton energies
y = YamboOut(folder=folder,save_folder=folder)
# Args : name of job, SAVE folder path, folder where job was run path
a = YamboBSEAbsorptionSpectra(jobname,path=folder)
# Get excitons values (runs ypp once)
a.get_excitons(min_intensity=exc_int,max_energy=exc_max_E,Degen_Step=exc_degen)
# Write .json file with spectra and eigenenergies
a.write_json(filename=outname)
### Loading data from .json file
f = open(outname+'.json')
data = json.load(f)
f.close()
print('JSON file prepared and loaded.')
### Plotting the absorption spectra
# BSE spectra
plt.plot(data['E/ev[1]'], data['EPS-Im[2]'],label=jobname,lw=2)
# # Axes : lines for exciton energies (disabled, would make a mess)
# for n,exciton in enumerate(data['excitons']):
# plt.axvline(exciton['energy'])
### Creating array with exciton values (according to settings)
l = [inp]
for n,exciton in enumerate(data['excitons']):
if n <= exc_n-1:
l.append(exciton['energy'])
excitons.append(l)
if text:
header = 'Columns : '+var+' (in '+unit+') and "bright" excitons eigenenergies in order.'
print(excitons)
np.savetxt(outname+'.dat',excitons,header=header)
#np.savetxt(outname,excitons,header=header,fmt='%1f')
print(outname+'.dat')
else:
print('-nt flag : no text produced.')
if draw:
plt.xlabel('$\omega$ (eV)')
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.legend()
#plt.draw()
#plt.show()
plt.savefig(outname+'.png', bbox_inches='tight')
print(outname+'.png')
else:
print('-nd flag : no plot produced.')
print('Done.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Study convergence on BS calculations using ypp calls.')
pa = parser.add_argument
pa('folder', help='Folder containing SAVE and convergence runs.' )
pa('variable', help='Variable tested (e.g. FFTGvecs)' )
pa('-ne','--numbexc', help='Number of excitons to read beyond threshold', default=2,type=int)
pa('-ie','--intexc', help='Minimum intensity for excitons to be considered bright', default=0.05,type=float)
pa('-de','--degenexc', help='Energy threshold under which different peaks are merged (eV)', default=0.01,type=float)
pa('-me','--maxexc', help='Energy threshold after which excitons are not read anymore (eV)', default=8.0,type=float)
pa('-np','--nopack', help='Skips packing o- files into .json files', action='store_false')
pa('-nt','--notext', help='Skips writing the .dat file', action='store_false')
pa('-nd','--nodraw', help='Skips drawing (plotting) the abs spectra', action='store_false')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
folder = args.folder
var = args.variable
exc_n = args.numbexc
exc_int = args.intexc
exc_degen = args.degenexc
exc_max_E = args.maxexc
pack = args.nopack
    text = args.notext
    draw = args.nodraw
analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack=pack, text=text, draw=draw )
| 2.859375 | 3 |
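For reference, the convergence study described in the module docstring can be driven either from the shell or programmatically. The sketch below assumes the script is importable as a module named analyse_bse and reuses the parser's default thresholds; the folder name and variable are only examples:

# Shell form: python analyse_bse.py bse_conv FFTGvecs
from analyse_bse import analyse_bse  # assumed module name for the script above

analyse_bse('bse_conv', 'FFTGvecs',
            exc_n=2, exc_int=0.05, exc_degen=0.01, exc_max_E=8.0,
            pack=True, text=True, draw=True)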
halmodule.py | richteer/pyfatafl | 0 | 7021 | <reponame>richteer/pyfatafl<gh_stars>0
from module import XMPPModule
import halutils
import pyfatafl as hnefatafl  # the rest of the module refers to it as hnefatafl
class Game():
    def __init__(self, mod, p1, p2):
        self.players = [p1, p2]
        self.mod = mod
        self.xmpp = mod.xmpp
        self.b = None
        self.turn = False  # index into self.players; the challenger moves first
        self.xmpp.sendMsg(p2, "You have been challenged to play Hnefatafl by {}, reply with '!hnefatafl accept' to begin!".format(p1))
    def begin(self):
        # Send initial board state
        self.b = hnefatafl.Board()
        self.turn = False # For now, make the challenger be first
        self._sendBoard()
    def _sendBoard(self):
        for i in self.players:
            self.xmpp.sendMsg(i, self.b.getPtBoard() + "\n\n" + "It is {}'s ({}) turn".format(self.players[self.turn], "white" if self.turn else "black"))
    def msg(self, player, string):
        if player != self.players[self.turn]:
            self.xmpp.sendMsg(player, "Sorry, it is not your turn!")
            return
        m = hnefatafl.Move()
        string = "{} {}".format("w" if self.turn else "b", string)
        try:
            m.parse(string, self.b)
        except:
            self.xmpp.sendMsg(player, "Invalid move format, see !help hnefatafl")
            return
        try:
            self.b.move(m)
            self.turn = not self.turn  # pass the turn to the other player
            self._sendBoard()
        except Exception as e: # TODO: Have been errors
            self.xmpp.sendMsg(player, str(e))
        if self.b.over:
            for i in self.players:
                self.xmpp.sendMsg(i, "Game over! {} wins!".format(self.b.over))
                del self.mod.sessions[i]
# Commented to avoid loading before its ready
class Hnefatafl(XMPPModule):
sessions = {}
def recvMsg(self, msg):
cmd, args = halutils.splitArgList(msg)
if cmd == "!hnefatafl":
if args[0] == "challenge":
if len(args) != 2:
self.xmpp.reply(msg, "Need to the JID of a target")
return
                elif args[1] == msg['from'].bare:
                    self.xmpp.reply(msg, "You can't challenge yourself...")
                    return
# TODO: Validate JID here
g = Game(self, msg['from'].bare, args[1])
                self.sessions[msg['from'].bare] = g
self.sessions[args[1]] = g
self.xmpp.reply(msg, "Challenge sent!")
elif args[0] == "accept":
if msg['from'].bare not in self.sessions:
self.xmpp.reply(msg, "You have not been challenged!")
return
self.sessions[msg['from'].bare].begin()
elif args[0] == "surrender":
if msg['from'].bare not in self.sessions:
self.xmpp.reply(msg, "You aren't currently in a session")
return
for p in [p for p in self.sessions[msg['from'].bare].players]:
del self.sessions[p]
        elif msg['from'].bare in self.sessions:
self.sessions[msg['from'].bare].msg(msg['from'].bare, msg['body'])
def help(self, string):
if string in ["!hnefatafl", "hnefatafl"]:
return '''
usage: !hnefatafl <command> [arg]
Commands:
challenge <jid> - Send a challenge to JID
accept - Accept a challenge from JID, and begin game
surrender - Surrender the game
'''
return '''
Hnefatafl by XMPP! Play a game against someone through this bot.
Features:
!hnefatafl - Command to challenge, accept, and surrender games
Note: This module will ignore any MUC messages, or other indirect messages
Another Note: This will likely be unplayable if not using a monospace font :)
'''
| 2.71875 | 3 |
tools/acetz.py | arkhipenko/AceTime | 1 | 7022 | <gh_stars>1-10
from typing import cast, Optional
from datetime import datetime, tzinfo, timedelta
from zonedbpy import zone_infos
from zone_processor.zone_specifier import ZoneSpecifier
from zone_processor.inline_zone_info import ZoneInfo
__version__ = '1.1'
class acetz(tzinfo):
"""An implementation of datetime.tzinfo using the ZoneSpecifier class
from AceTime/tools.
"""
def __init__(self, zone_info: ZoneInfo):
self.zone_info = zone_info
self.zs = ZoneSpecifier(zone_info, use_python_transition=True)
def utcoffset(self, dt: Optional[datetime]) -> timedelta:
assert dt
self.zs.init_for_year(dt.year)
offset_info = self.zs.get_timezone_info_for_datetime(dt)
if not offset_info:
raise Exception(
f'Unknown timezone info for '
f'{dt.year:04}-{dt.month:02}-{dt.day:02} '
f'{dt.hour:02}:{dt.minute:02}:{dt.second:02}'
)
return timedelta(seconds=offset_info.total_offset)
def dst(self, dt: Optional[datetime]) -> timedelta:
assert dt
self.zs.init_for_year(dt.year)
offset_info = self.zs.get_timezone_info_for_datetime(dt)
if not offset_info:
raise Exception(
f'Unknown timezone info for '
f'{dt.year:04}-{dt.month:02}-{dt.day:02} '
f'{dt.hour:02}:{dt.minute:02}:{dt.second:02}'
)
return timedelta(seconds=offset_info.dst_offset)
def tzname(self, dt: Optional[datetime]) -> str:
assert dt
self.zs.init_for_year(dt.year)
offset_info = self.zs.get_timezone_info_for_datetime(dt)
if not offset_info:
raise Exception(
f'Unknown timezone info for '
f'{dt.year:04}-{dt.month:02}-{dt.day:02} '
f'{dt.hour:02}:{dt.minute:02}:{dt.second:02}'
)
return offset_info.abbrev
def zone_specifier(self) -> ZoneSpecifier:
return self.zs
def gettz(zone_name: str) -> acetz:
zone_info = cast(ZoneInfo, zone_infos.ZONE_INFO_MAP.get(zone_name))
if not zone_info:
raise Exception(f"Zone '{zone_name}' not found")
return acetz(zone_info)
| 2.796875 | 3 |
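A small usage sketch for the wrapper above (the zone name is only an example and relies on the bundled zonedbpy database containing it; the module is assumed to be importable as acetz):

from datetime import datetime
from acetz import gettz

tz = gettz('America/Los_Angeles')
dt = datetime(2021, 6, 1, 12, 0, 0, tzinfo=tz)
print(dt.utcoffset(), dt.dst(), dt.tzname())  # offset, DST shift and abbreviation via ZoneSpecifier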
z2/part2/interactive/jm/random_fuzzy_arrows_1/554539540.py | kozakusek/ipp-2020-testy | 1 | 7023 | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 554539540
"""
"""
random actions, total chaos
"""
board = gamma_new(6, 8, 3, 17)
assert board is not None
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 3) == 1
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 5, 1) == 1
assert gamma_move(board, 2, 1, 7) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_golden_move(board, 3, 3, 4) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 1, 3) == 1
assert gamma_move(board, 1, 3, 5) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 3, 2, 2) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 5, 4) == 1
assert gamma_move(board, 3, 0, 4) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 2, 1, 4) == 1
assert gamma_move(board, 2, 1, 6) == 1
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 1, 4, 2) == 1
board251673140 = gamma_board(board)
assert board251673140 is not None
assert board251673140 == (".2....\n"
".2....\n"
"...1..\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
".1...2\n"
".3....\n")
del board251673140
board251673140 = None
assert gamma_move(board, 2, 4, 3) == 0
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 3, 4, 5) == 1
assert gamma_move(board, 3, 3, 0) == 1
assert gamma_free_fields(board, 3) == 29
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 3, 0, 5) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 0, 7) == 1
board281476409 = gamma_board(board)
assert board281476409 is not None
assert board281476409 == ("12....\n"
".2....\n"
"3..13.\n"
"32...2\n"
"131.1.\n"
"113.1.\n"
"31...2\n"
".3.3..\n")
del board281476409
board281476409 = None
assert gamma_move(board, 2, 5, 1) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_busy_fields(board, 3) == 8
assert gamma_move(board, 1, 5, 4) == 0
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 6, 3) == 0
assert gamma_move(board, 2, 4, 4) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 3, 0, 1) == 0
assert gamma_free_fields(board, 3) == 24
assert gamma_move(board, 1, 1, 7) == 0
assert gamma_move(board, 1, 2, 1) == 1
board412285252 = gamma_board(board)
assert board412285252 is not None
assert board412285252 == ("12....\n"
".2....\n"
"3..13.\n"
"32..22\n"
"131.1.\n"
"113.1.\n"
"311..2\n"
"13.3..\n")
del board412285252
board412285252 = None
assert gamma_move(board, 2, 1, 6) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_free_fields(board, 3) == 23
assert gamma_golden_move(board, 3, 4, 4) == 1
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 6) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_free_fields(board, 2) == 22
assert gamma_move(board, 3, 5, 5) == 1
assert gamma_move(board, 3, 5, 5) == 0
assert gamma_free_fields(board, 3) == 21
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 5, 7) == 1
assert gamma_move(board, 2, 0, 6) == 1
assert gamma_move(board, 2, 5, 6) == 1
assert gamma_move(board, 3, 2, 2) == 0
assert gamma_move(board, 1, 5, 2) == 1
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 3, 3, 1) == 1
assert gamma_move(board, 1, 5, 1) == 0
assert gamma_free_fields(board, 1) == 16
assert gamma_move(board, 2, 4, 2) == 0
assert gamma_move(board, 3, 4, 1) == 1
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 7, 4) == 0
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 0, 5) == 0
assert gamma_busy_fields(board, 2) == 7
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 1, 5) == 1
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 4, 1) == 0
assert gamma_move(board, 3, 0, 3) == 0
assert gamma_move(board, 3, 1, 5) == 0
assert gamma_move(board, 1, 2, 4) == 1
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_busy_fields(board, 1) == 16
assert gamma_move(board, 2, 3, 5) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 1, 0, 6) == 0
assert gamma_move(board, 2, 5, 5) == 0
assert gamma_golden_move(board, 2, 2, 2) == 1
assert gamma_move(board, 1, 5, 5) == 0
assert gamma_free_fields(board, 1) == 13
assert gamma_move(board, 2, 2, 6) == 1
assert gamma_move(board, 2, 5, 6) == 0
assert gamma_move(board, 3, 4, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 1, 3, 5) == 0
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 2, 7, 3) == 0
assert gamma_move(board, 2, 3, 1) == 0
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 0, 2) == 0
assert gamma_move(board, 1, 3, 3) == 1
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 7, 3) == 0
assert gamma_move(board, 3, 5, 1) == 0
assert gamma_move(board, 1, 7, 2) == 0
board481507094 = gamma_board(board)
assert board481507094 is not None
assert board481507094 == ("12...1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board481507094
board481507094 = None
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 2, 5, 4) == 0
assert gamma_busy_fields(board, 2) == 10
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 2, 7, 4) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_golden_possible(board, 3) == 0
assert gamma_move(board, 2, 7, 2) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_busy_fields(board, 3) == 11
assert gamma_move(board, 1, 7, 2) == 0
assert gamma_move(board, 1, 1, 6) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 1, 7) == 0
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 1, 6, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 7) == 1
board984249076 = gamma_board(board)
assert board984249076 is not None
assert board984249076 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board984249076
board984249076 = None
assert gamma_move(board, 1, 4, 1) == 0
assert gamma_golden_possible(board, 1) == 1
board492321582 = gamma_board(board)
assert board492321582 is not None
assert board492321582 == ("122..1\n"
"2221.2\n"
"31.133\n"
"321.32\n"
"13111.\n"
"112.11\n"
"311332\n"
"1323..\n")
del board492321582
board492321582 = None
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_golden_possible(board, 2) == 0
assert gamma_move(board, 3, 2, 3) == 0
assert gamma_move(board, 1, 7, 3) == 0
assert gamma_move(board, 1, 4, 3) == 0
assert gamma_move(board, 2, 2, 4) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_move(board, 2, 0, 4) == 0
assert gamma_move(board, 2, 2, 6) == 0
assert gamma_move(board, 3, 5, 2) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_move(board, 3, 0, 5) == 0
assert gamma_move(board, 1, 0, 5) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 5, 6) == 0
assert gamma_move(board, 3, 2, 1) == 0
gamma_delete(board)
| 2.234375 | 2 |
examples/run_chemistry_parser.py | ZhuoyuWei/transformers | 0 | 7024 | <filename>examples/run_chemistry_parser.py<gh_stars>0
# coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2019 The HuggingFace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning seq2seq models for sequence generation."""
import argparse
import functools
import logging
import os
import random
import sys
sys.path.append(r'../')
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
AutoTokenizer,
BertForMaskedLM,
BertConfig,
PreTrainedEncoderDecoder,
Model2Models,
)
from utils_summarization import (
CNNDailyMailDataset,
encode_for_summarization,
fit_to_block_size,
build_lm_labels,
build_mask,
compute_token_type_ids,
)
from utils_chemistry import (ChemistryDataset,)
'''
class InputExample(object):
def __init__(self,example_id,question_input,question_varible_output=None,condition_output=None):
self.example_id=example_id
self.question_input=question_input
self.question_varible_output=question_varible_output
self.condition_output=condition_output
'''
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# ------------
# Load dataset
# ------------
def load_and_cache_examples(args, tokenizer, prefix="train"):
dataset = ChemistryDataset(tokenizer, prefix=prefix, data_dir=args.data_dir)
return dataset
def collate(data, tokenizer, input_block_size,output_block_size):
""" List of tuple as an input. """
question_inputs=[]
question_varible_outputs=[]
condition_outputs=[]
for i,example in enumerate(data):
question_input=tokenizer.encode(example.question_input)
question_input=fit_to_block_size(question_input, input_block_size, tokenizer.pad_token_id)
question_inputs.append(question_input)
if example.question_varible_output is not None:
question_varible_output=tokenizer.encode(example.question_varible_output)
else:
question_varible_output=tokenizer.build_inputs_with_special_tokens([])
question_varible_output=fit_to_block_size(question_varible_output, output_block_size, tokenizer.pad_token_id)
question_varible_outputs.append(question_varible_output)
if example.condition_output is not None:
condition_output=tokenizer.encode(example.condition_output)
else:
condition_output=tokenizer.build_inputs_with_special_tokens([])
condition_output=fit_to_block_size(condition_output, output_block_size, tokenizer.pad_token_id)
condition_outputs.append(condition_output)
question_inputs = torch.tensor(question_inputs)
question_varible_outputs = torch.tensor(question_varible_outputs)
condition_outputs = torch.tensor(condition_outputs)
question_inputs_mask = build_mask(question_inputs, tokenizer.pad_token_id)
question_varible_outputs_mask = build_mask(question_varible_outputs, tokenizer.pad_token_id)
condition_outputs_mask = build_mask(condition_outputs, tokenizer.pad_token_id)
question_varible_outputs_mask_lm_labels = build_lm_labels(question_varible_outputs, tokenizer.pad_token_id)
condition_outputs_mask_lm_labels = build_lm_labels(condition_outputs, tokenizer.pad_token_id)
return (
question_inputs,
[question_varible_outputs,condition_outputs],
question_inputs_mask,
[question_varible_outputs_mask,condition_outputs_mask],
[question_varible_outputs_mask_lm_labels,condition_outputs_mask_lm_labels],
)
# ----------
# Optimizers
# ----------
class BertSumOptimizer(object):
""" Specific optimizer for BertSum.
As described in [1], the authors fine-tune BertSum for abstractive
summarization using two Adam Optimizers with different warm-up steps and
learning rate. They also use a custom learning rate scheduler.
[1] Liu, Yang, and <NAME>. "Text summarization with pretrained encoders."
arXiv preprint arXiv:1908.08345 (2019).
"""
def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
self.encoder = model.encoder
self.decoders = model.decoders
self.lr = lr
self.warmup_steps = warmup_steps
self.decoders_parameters=[]
for decoder in model.decoders:
self.decoders_parameters+=decoder.parameters()
self.optimizers = {
"encoder": Adam(
model.encoder.parameters(),
lr=lr["encoder"],
betas=(beta_1, beta_2),
eps=eps,
),
"decoder": Adam(
self.decoders_parameters,
lr=lr["decoder"],
betas=(beta_1, beta_2),
eps=eps,
),
}
self._step = 0
def _update_rate(self, stack):
return self.lr[stack] * min(
self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-0.5)
)
    def zero_grad(self):
        # Both per-stack optimizers live in the self.optimizers dict
        for optimizer in self.optimizers.values():
            optimizer.zero_grad()
def step(self):
self._step += 1
for stack, optimizer in self.optimizers.items():
new_rate = self._update_rate(stack)
for param_group in optimizer.param_groups:
param_group["lr"] = new_rate
optimizer.step()
# ------------
# Train
# ------------
def train(args, model, tokenizer):
""" Fine-tune the pretrained model on the corpus. """
set_seed(args)
# Load the data
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_dataset = load_and_cache_examples(args, tokenizer, "train")
train_sampler = RandomSampler(train_dataset)
model_collate_fn = functools.partial(collate, tokenizer=tokenizer,
input_block_size=args.input_block_size,output_block_size=args.output_block_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=model_collate_fn,
)
# Training schedule
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = t_total // (
len(train_dataloader) // args.gradient_accumulation_steps + 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare the optimizer
#lr = {"encoder": 0.002, "decoder": 0.2}
lr = {"encoder": args.encoder_lr, "decoder": args.decoder_lr}
#warmup_steps = {"encoder": 20000, "decoder": 10000}
warmup_steps = {"encoder": args.encoder_warmup, "decoder": args.decoder_warmup}
optimizer = BertSumOptimizer(model, lr, warmup_steps)
# Train
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps
# * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
model.zero_grad()
train_iterator = trange(args.num_train_epochs, desc="Epoch", disable=False)
global_step = 0
tr_loss = 0.0
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)
for step, batch in enumerate(epoch_iterator):
source, target, encoder_mask, decoder_mask, lm_labels = batch
#print('source: {}'.format(source))
#print('target: {}'.format(target))
feed_source=None
feed_targets=[None]*len(target)
feed_encoder_mask=None
feed_decoder_masks=[None]*len(decoder_mask)
feed_lm_labels=[None]*len(lm_labels)
feed_source = source.to(args.device)
for i in range(len(target)):
feed_targets[i] = target[i].to(args.device)
feed_encoder_mask = encoder_mask.to(args.device)
for i in range(len(decoder_mask)):
feed_decoder_masks[i] = decoder_mask[i].to(args.device)
for i in range(len(lm_labels)):
feed_lm_labels[i] = lm_labels[i].to(args.device)
model.train()
#print('debug by zhuoyu: source = {}'.format(source))
#print('debug by zhuoyu: target = {}'.format(target))
#print('debug by zhuoyu, device:')
#print('feed source {}'.format(feed_source.device))
#print('feed target {}'.format([str(feed_target.device) for feed_target in feed_targets]))
#print('feed encoder mask {}'.format(feed_encoder_mask.device))
#print('feed decoder masks {}'.format([str(feed_decoder_mask.device) for feed_decoder_mask in feed_decoder_masks]))
#print('feed lm labels {}'.format([str(feed_lm_label.device) for feed_lm_label in feed_lm_labels]))
outputs = model(
feed_source,
feed_targets,
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks,
decoder_lm_labels=feed_lm_labels,
)
loss=0
for i in range(len(model.decoders)):
#print('outputs[{}][0] type: {}'.format(i,type(outputs[i][0])))
loss += outputs[i][0]
#print(loss)
if args.gradient_accumulation_steps > 1:
loss /= args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
model.zero_grad()
global_step += 1
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
# ------------
# Train
# ------------
def evaluate(args, model, tokenizer, prefix=""):
set_seed(args)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_dataset = load_and_cache_examples(args, tokenizer, prefix="dev")
#for example in eval_dataset.examples:
# print(example.example_id)
# print(example.question_input)
# print(example.question_varible_output)
# print(example.condition_output)
#exit(-1)
eval_sampler = SequentialSampler(eval_dataset)
model_collate_fn = functools.partial(collate, tokenizer=tokenizer,
input_block_size=args.input_block_size,output_block_size=args.output_block_size)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,collate_fn=model_collate_fn,
)
# multi-gpu evaluate
#if args.n_gpu > 1:
# model = torch.nn.DataParallel(model)
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
fout=open(os.path.join(args.output_dir,"dev.res"),'w',encoding='utf-8')
fdebug=open(os.path.join(args.output_dir,"dev.debug.res"),'w',encoding='utf-8')
for batch in tqdm(eval_dataloader, desc="Evaluating"):
source, target, encoder_mask, decoder_mask, lm_labels = batch
#print('[SOURCE]: {}'.format(source))
#print('[TARGET]: {}'.format(target))
#source = source.to(args.device)
#target = target.to(args.device)
#encoder_mask = encoder_mask.to(args.device)
#decoder_mask = decoder_mask.to(args.device)
#lm_labels = lm_labels.to(args.device)
feed_source = None
feed_targets = [None] * len(target)
feed_encoder_mask = None
feed_decoder_masks = [None] * len(decoder_mask)
feed_lm_labels = [None] * len(lm_labels)
feed_source = source.to(args.device)
for i in range(len(target)):
feed_targets[i] = target[i].to(args.device)
feed_encoder_mask = encoder_mask.to(args.device)
for i in range(len(decoder_mask)):
feed_decoder_masks[i] = decoder_mask[i].to(args.device)
for i in range(len(lm_labels)):
feed_lm_labels[i] = lm_labels[i].to(args.device)
with torch.no_grad():
if args.decoding_type=='decoding':
tokens_roles=[]
for i in range(len(feed_targets)):
outputs_ids=model.decoding(
feed_source,
feed_targets[i],
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks[i],
decoder_lm_labels=feed_lm_labels[i],
decoder=model.decoders[i]
#fdebug=fdebug,
)
print('outputs size: {}'.format(outputs_ids.size()))
outputs_ids =outputs_ids.cpu().numpy()
batch_tokens=[]
for idx in outputs_ids:
tokens = []
for id in idx:
#print('{}\t{}'.format(id,type(id)))
tokens.append(tokenizer.ids_to_tokens.get(int(id), tokenizer.unk_token))
batch_tokens.append(tokens)
tokens_roles.append(batch_tokens)
def subtoken2token(subtokens):
token=""
tokens=[]
for subtoken in subtokens:
if subtoken.startswith("##"):
token+=subtoken[2:]
else:
if token!="":
tokens.append(token)
token=subtoken
if token!="":
tokens.append(token)
return tokens
for i in range(len(tokens_roles[0])):
fout.write('\t'.join([' '.join(subtoken2token(tokens_roles[0][i]))
,' '.join(subtoken2token(tokens_roles[1][i]))]) + '\n')
else:
print('debug eva input:')
print('feed_source={}'.format(feed_source))
print('feed_targets={}'.format(feed_targets))
print('feed_encoder_mask={}'.format(feed_encoder_mask))
print('feed_decoder_masks={}'.format(feed_decoder_masks))
print('feed_lm_labels={}'.format(feed_lm_labels))
outputs = model(
feed_source,
feed_targets,
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks,
decoder_lm_labels=feed_lm_labels,
#fdebug=fdebug,
)
ans_seqs=[[],[]]
for i in range(len(model.decoders)):
print(outputs[i][1].size())
predicted_scores=outputs[i][1].argmax(-1).cpu().numpy().tolist()
for idx in predicted_scores:
tokens = []
for id in idx:
tokens.append(tokenizer.ids_to_tokens.get(id, tokenizer.unk_token))
ans_seqs[i].append(tokens)
for i in range(len(ans_seqs[0])):
fout.write('\t'.join([' '.join(ans_seqs[0][i]),' '.join(ans_seqs[1][i])]) + '\n')
# print('debug by zhuoyu, predicted_scores size={}'.format(predicted_scores.size()))
#eval_loss += lm_loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
perplexity = torch.exp(torch.tensor(eval_loss))
result = {"perplexity": perplexity}
# Save the evaluation's results
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
#with open(os.path.join(args.output_dir,"dev.res"),'w',encoding='utf-8') as fout:
fout.flush()
fout.close()
fdebug.flush()
fdebug.close()
return result
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input training data file (a text file).",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Optional parameters
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--do_evaluate",
type=bool,
default=False,
help="Run model evaluation on out-of-sample data.",
)
parser.add_argument("--do_train", type=bool, default=False, help="Run training.")
parser.add_argument(
"--do_overwrite_output_dir",
type=bool,
default=False,
help="Whether to overwrite the output dir.",
)
parser.add_argument(
"--encoder_model_name_or_path",
default="bert-base-cased",
type=str,
help="The model checkpoint to initialize the encoder's weights with.",
)
parser.add_argument(
"--decoder_model_name_or_path",
default="/data/zhuoyu/semantic_parsing/models",
type=str,
help="The model checkpoint to initialize the decoder's weights with.",
)
parser.add_argument(
"--model_type",
default="bert",
type=str,
help="The decoder architecture to be fine-tuned.",
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--to_cpu", default=False, type=bool, help="Whether to force training on CPU."
)
parser.add_argument(
"--num_train_epochs",
default=10,
type=int,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for eval.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--input_block_size",
default=256,
type=int,
help="Max seq length for input",
)
parser.add_argument(
"--output_block_size",
default=64,
type=int,
help="Max seq length for output",
)
parser.add_argument(
"--trained_checkpoints",
default="",
type=str,
help="trained_checkpoints",
)
parser.add_argument(
"--decoding_type",
default="pnt",
type=str,
help="",
)
parser.add_argument(
"--encoder_lr",
default=5e-4,
type=float,
help="encoder's learning rate",
)
parser.add_argument(
"--decoder_lr",
default=5e-4,
type=float,
help="encoder's learning rate",
)
parser.add_argument(
"--encoder_warmup",
default=10,
type=int,
help="encoder's learning rate",
)
parser.add_argument(
"--decoder_warmup",
default=100,
type=int,
help="encoder's learning rate",
)
parser.add_argument("--seed", default=42, type=int)
parser.add_argument(
"--decoder_version",
default="v1",
type=str,
help="",
)
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.do_overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --do_overwrite_output_dir to overwrite.".format(
args.output_dir
)
)
# Set up training device
if args.to_cpu or not torch.cuda.is_available():
args.device = torch.device("cpu")
args.n_gpu = 0
else:
args.device = torch.device("cuda")
args.n_gpu = torch.cuda.device_count()
print(args.n_gpu)
# Load pretrained model and tokenizer. The decoder's weights are randomly initialized.
tokenizer = AutoTokenizer.from_pretrained(args.encoder_model_name_or_path
,never_split=['[unused0]','[unused1]','[unused2]','[unused3]'])
#config = BertConfig.from_pretrained(args.model_name_or_path)
#config.num_hidden_layers=3
#config.is_decoder=True
#decoder_model = BertForMaskedLM(config)
decoder_models=[BertForMaskedLM.from_pretrained(args.decoder_model_name_or_path),
BertForMaskedLM.from_pretrained(args.decoder_model_name_or_path)]
model = Model2Models.from_pretrained(
args.encoder_model_name_or_path, decoder_model=decoder_models
)
#model = Model2Model.from_pretrained(
# args.model_name_or_path, decoder_model=None
#)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
0,
args.device,
args.n_gpu,
False,
False,
)
logger.info("Training/evaluation parameters %s", args)
# Train the model
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if args.do_train:
model.to(args.device)
global_step, tr_loss = train(args, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, "training_arguments.bin"))
# Evaluate the model
results = {}
if args.do_evaluate:
checkpoints = [args.trained_checkpoints]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
encoder_checkpoint = os.path.join(checkpoint, "encoder")
decoder_checkpoint_question_varibles = os.path.join(checkpoint, "decoder_0")
decoder_checkpoint_conditions = os.path.join(checkpoint, "decoder_1")
decoder_models = [BertForMaskedLM.from_pretrained(decoder_checkpoint_question_varibles),
BertForMaskedLM.from_pretrained(decoder_checkpoint_conditions)]
model = Model2Models.from_pretrained(
encoder_checkpoint, decoder_model=decoder_models
)
model.to(args.device)
#model = PreTrainedEncoderDecoder.from_pretrained(
# encoder_checkpoint, decoder_checkpoint
#)
#model = Model2Model.from_pretrained(encoder_checkpoint)
#model.to(args.device)
results = "placeholder"
evaluate(args,model,tokenizer,"test")
return results
if __name__ == "__main__":
main()
| 2.109375 | 2 |
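BertSumOptimizer._update_rate above applies the same warm-up-then-decay rule to both stacks; a standalone sketch of that schedule (the base rate and warm-up length below are arbitrary example values, not defaults of this script):

def scheduled_rate(base_lr, step, warmup_steps):
    # Mirrors BertSumOptimizer._update_rate for a single stack
    return base_lr * min(step ** (-0.5), step * warmup_steps ** (-0.5))

for step in (1, 10, 100, 1000):
    print(step, scheduled_rate(0.002, step, warmup_steps=100))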
envergo/geodata/management/commands/import_shapefiles.py | MTES-MCT/envergo | 0 | 7025 | from django.contrib.gis.gdal import DataSource
from django.contrib.gis.utils import LayerMapping
from django.core.management.base import BaseCommand
from envergo.geodata.models import Zone
class Command(BaseCommand):
help = "Importe des zones à partir de shapefiles."
def add_arguments(self, parser):
parser.add_argument("shapefile", type=str)
def handle(self, *args, **options):
shapefile = options["shapefile"]
ds = DataSource(shapefile)
mapping = {"code": "CODEZONE", "polygon": "POLYGON"}
lm = LayerMapping(Zone, ds, mapping)
self.stdout.write(self.style.SUCCESS("Importing"))
lm.save(verbose=True)
| 2.09375 | 2 |
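The command above is registered under its module name, so besides "python manage.py import_shapefiles <path>" it can also be invoked from code (the shapefile path is a placeholder):

from django.core.management import call_command

# Equivalent to: python manage.py import_shapefiles path/to/zones.shp
call_command('import_shapefiles', 'path/to/zones.shp')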
vimfiles/bundle/ultisnips/test/test_AnonymousExpansion.py | duanqiaobb/vim-for-java | 0 | 7026 | from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Anonymous Expansion {{{#
class _AnonBase(_VimTest):
args = ''
def _extra_options_pre_init(self, vim_config):
vim_config.append('inoremap <silent> %s <C-R>=UltiSnips#Anon(%s)<cr>'
% (EA, self.args))
class Anon_NoTrigger_Simple(_AnonBase):
args = '"simple expand"'
keys = 'abc' + EA
wanted = 'abcsimple expand'
class Anon_NoTrigger_AfterSpace(_AnonBase):
args = '"simple expand"'
keys = 'abc ' + EA
wanted = 'abc simple expand'
class Anon_NoTrigger_BeginningOfLine(_AnonBase):
args = r"':latex:\`$1\`$0'"
keys = EA + 'Hello' + JF + 'World'
wanted = ':latex:`Hello`World'
class Anon_NoTrigger_FirstCharOfLine(_AnonBase):
args = r"':latex:\`$1\`$0'"
keys = ' ' + EA + 'Hello' + JF + 'World'
wanted = ' :latex:`Hello`World'
class Anon_NoTrigger_Multi(_AnonBase):
args = '"simple $1 expand $1 $0"'
keys = 'abc' + EA + '123' + JF + '456'
wanted = 'abcsimple 123 expand 123 456'
class Anon_Trigger_Multi(_AnonBase):
args = '"simple $1 expand $1 $0", "abc"'
keys = '123 abc' + EA + '123' + JF + '456'
wanted = '123 simple 123 expand 123 456'
class Anon_Trigger_Simple(_AnonBase):
args = '"simple expand", "abc"'
keys = 'abc' + EA
wanted = 'simple expand'
class Anon_Trigger_Twice(_AnonBase):
args = '"simple expand", "abc"'
keys = 'abc' + EA + '\nabc' + EX
wanted = 'simple expand\nabc' + EX
class Anon_Trigger_Opts(_AnonBase):
args = '"simple expand", ".*abc", "desc", "r"'
keys = 'blah blah abc' + EA
wanted = 'simple expand'
# End: Anonymous Expansion #}}}
| 2.078125 | 2 |
data_converter/data_converter.py | jkchen2/JshBot-plugins | 1 | 7027 | import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
@plugins.command_spawner
def get_commands(bot):
return [Command('convertdata', hidden=True, elevated_level=3)]
async def get_response(bot, context):
for guild in bot.guilds:
convert_core(bot, guild)
if 'tags.py' in bot.plugins:
convert_tags(bot, guild)
return Response("Converted.")
def convert_core(bot, guild):
if data.get(bot, 'core', None, guild_id=guild.id):
logger.warn("Guild %s (%s) already had core converted", guild.name, guild.id)
return
base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
if 'disabled' in base_data:
# TODO: Iterate through toggled commands
pass
if 'blocked' in base_data:
replacement = []
for entry in base_data['blocked']:
replacement.append(int(entry))
base_data['blocked'] = replacement
if 'muted_channels' in base_data:
replacement = []
for entry in base_data['muted_channels']:
replacement.append(int(entry))
base_data['muted_channels'] = replacement
if 'moderators' in base_data:
del base_data['moderators']
if base_data:
for key, value in base_data.items():
data.add(bot, 'core', key, value, guild_id=guild.id)
data.remove(bot, 'base', None, guild_id=guild.id)
def convert_tags(bot, guild):
if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
logger.warn("Guild %s (%s) already had tags converted", guild.name, guild.id)
return
tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
add_tag = bot.plugins['tags.py']._add_tag
#key,value,length,volume,name,flags,author,hits,created,last_used,last_used_by,complex,extra
for key, tag in tags.items():
to_insert = [
key, # key
tag['value'], # value
tag['length'], # length
tag['volume'], # volume
tag['name'], # name
tag['flags'], # flags
int(tag['author']), # author
tag['hits'], # hits
int(tag['created']), # created
int(tag['last_used']), # last_used
None, # last_used_by
{}, # complex
{} # extra
]
add_tag(bot, to_insert, guild.id)
data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
| 2.171875 | 2 |
tut2.py | ankit98040/TKINTER-JIS | 0 | 7028 | from tkinter import *
from PIL import Image, ImageTk
#python image library
#imagetk supports jpg image
a1 = Tk()
a1.geometry("455x244")
#for png image
#photo = PhotoImage(file="filename.png")
#a2 = Label(image = photo)
#a2.pack()
image = Image.open("PJXlVd.jpg")
photo = ImageTk.PhotoImage(image)
a2 = Label(image = photo)
a2.pack()
a1.mainloop() | 3.34375 | 3 |
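The commented lines above sketch the PNG route; a matching minimal program (file name is a placeholder) that works with Tk 8.6+ and keeps an extra reference so the image is not garbage-collected:

from tkinter import Tk, Label, PhotoImage

root = Tk()
photo = PhotoImage(file="filename.png")  # Tk 8.6+ reads PNG directly, no PIL needed
label = Label(root, image=photo)
label.image = photo  # keep a reference so the image survives garbage collection
label.pack()
root.mainloop()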
dataset.py | mintanwei/IPCLs-Net | 0 | 7029 | import os
import torch
from PIL import Image
from read_csv import csv_to_label_and_bbx
import numpy as np
from torch.utils.data import Subset, random_split, ConcatDataset
class NBIDataset(object):
def __init__(self, root, transforms, nob3=False):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations.csv"), nob3)
def __getitem__(self, idx):
img_path = os.path.join(self.root, "images", self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBINewDataset(object):
def __init__(self, root, transforms, train=True):
self.root = root
self.transforms = transforms
if train:
self.path = os.path.join(root, "train")
else:
self.path = os.path.join(root, "test")
self.imgs = list(sorted(os.listdir(self.path)))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations_all.csv"), img_names=self.imgs)
def __getitem__(self, idx):
img_path = os.path.join(self.path, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBIFullDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
self.path = os.path.join(root, "all")
self.imgs = list(sorted(os.listdir(self.path)))
self.boxes = csv_to_label_and_bbx(os.path.join(self.root, "annotations.csv"), img_names=self.imgs)
def __getitem__(self, idx):
img_path = os.path.join(self.path, self.imgs[idx])
img = Image.open(img_path).convert("RGB")
annotations = self.boxes[self.imgs[idx]]
boxes = annotations['bbx']
labels = annotations['labels']
# FloatTensor[N, 4]
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# Int64Tensor[N]
labels = torch.as_tensor(labels, dtype=torch.int64)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
iscrowd = torch.zeros((labels.size()[0],), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["image_id"] = image_id
# target["image_path"] = img_path
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, target
def __len__(self):
return len(self.imgs)
class NBIDenseDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = list(sorted(os.listdir(os.path.join(root, "images"))))
def __getitem__(self, idx):
img_path = os.path.join(self.root, "images", self.imgs[idx])
img = Image.open(img_path).convert("RGB")
density_path = os.path.join(self.root, "density_maps")
density_map = np.load(os.path.join(density_path, self.imgs[idx][:-4] + ".npy"))
density_map = torch.from_numpy(density_map)
if self.transforms is not None:
img = self.transforms(img)
# target = self.transforms(target)
return img, density_map
def __len__(self):
return len(self.imgs)
class NBIPatchDataset(object):
def __init__(self, root, transforms):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.imgs = [x for x in list(sorted(os.listdir(root))) if x[-3:] == "png"]
self.ans = np.load(os.path.join(root, "ans.npy"), allow_pickle=True).item()
def __getitem__(self, idx):
# img_path = os.path.join(self.root, "images", self.imgs[idx])
# img = Image.open(img_path).convert("RGB")
# density_path = os.path.join(self.root, "density_maps")
# density_map = np.load(os.path.join(density_path, self.imgs[idx][:-4] + ".npy"))
# density_map = torch.from_numpy(density_map)
#
# if self.transforms is not None:
# img = self.transforms(img)
# # target = self.transforms(target)
return self.imgs[idx]
def __len__(self):
return len(self.imgs)
def split_index(K=5, len=100):
idx = list(range(len))
final_list = []
for i in range(K):
final_list.append(idx[(i*len)//K:((i+1)*len)//K])
return final_list
def k_fold_index(K=5, len=100, fold=0):
split = split_index(K, len)
val = split[fold]
train = []
for i in range(K):
if i != fold:
train = train + split[i]
return train, val
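# Illustration of the helpers above: split_index(K=3, len=10) returns
# [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]], and k_fold_index(K=3, len=10, fold=1)
# returns train = [0, 1, 2, 6, 7, 8, 9] and val = [3, 4, 5].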
def stat_dataset(dataset):
class_ids = {1: "A", 2: "B1", 3: "B2", 4: "B3"}
stats = {"A": 0, "B1": 0, "B2": 0, "B3": 0}
for img, target in dataset:
for k in target['labels']:
stats[class_ids[int(k)]] += 1
print(stats)
def NBIFiveFoldDataset(transforms):
ds = NBIFullDataset(root="./NBI_full_dataset/", transforms=transforms)
# n = len(ds)
# for i in range(5):
# train_idx, val_idx = k_fold_index(5, n, i)
# train_subset = Subset(ds, train_idx)
# val_subset = Subset(ds, val_idx)
# print("Fold: %d" % i, len(train_subset), len(val_subset))
# stat_dataset(train_subset)
# stat_dataset(val_subset)
torch.manual_seed(13)
all_subsets = random_split(ds, [46, 46, 46, 45, 45])
fold_i_subsets = []
for i in range(5):
val_subset = all_subsets[i]
train_subset = ConcatDataset([all_subsets[j] for j in range(5) if j != i])
fold_i_subsets.append({"train": train_subset, "val": val_subset})
# print("Fold: %d" % i, len(train_subset), len(val_subset))
# stat_dataset(train_subset)
# stat_dataset(val_subset)
return fold_i_subsets
if __name__ == '__main__':
# ds = NBIFiveFoldDataset(None)
di = "aaa".encode("UTF-8")
result = eval(di)
print(result)
| 2.625 | 3 |
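The detection-style datasets above return (image, target-dict) pairs, so a DataLoader needs a collate function that keeps targets as a tuple instead of trying to stack them. A minimal sketch, assuming this file is importable as dataset and the NBI_full_dataset folder with its annotations is present (transform and batch size are arbitrary choices):

import torchvision.transforms as T
from torch.utils.data import DataLoader

from dataset import NBIFullDataset

ds = NBIFullDataset(root="./NBI_full_dataset/", transforms=T.ToTensor())
loader = DataLoader(ds, batch_size=2, shuffle=True,
                    collate_fn=lambda batch: tuple(zip(*batch)))

images, targets = next(iter(loader))  # tuple of image tensors, tuple of target dicts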
design_patterns/chapter5/mymath.py | FeliciaMJ/PythonLearningJourney | 0 | 7030 | # coding: utf-8
import functools
def memoize(fn):
known = dict()
@functools.wraps(fn)
def memoizer(*args):
if args not in known:
known[args] = fn(*args)
return known[args]
return memoizer
@memoize
def nsum(n):
    '''Return the sum of the first n numbers'''
assert(n >= 0), 'n must be >= 0'
return 0 if n == 0 else n + nsum(n-1)
@memoize
def fibonacci(n):
    '''Return the n-th Fibonacci number'''
assert(n >= 0), 'n must be >= 0'
return n if n in (0, 1) else fibonacci(n-1) + fibonacci(n-2)
if __name__ == '__main__':
from timeit import Timer
measure = [{'exec': 'fibonacci(100)', 'import': 'fibonacci',
'func': fibonacci}, {'exec': 'nsum(200)', 'import': 'nsum',
'func': nsum}]
for m in measure:
t = Timer('{}'.format(m['exec']), 'from __main__ import \
{}'.format(m['import']))
print('name: {}, doc: {}, executing: {}, time: \
{}'.format(m['func'].__name__, m['func'].__doc__,
m['exec'], t.timeit()))
| 2.796875 | 3 |
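The hand-rolled memoize above caches results in a plain dict keyed by the positional arguments; the standard library provides the same behaviour, so an equivalent sketch can lean on functools.lru_cache instead:

import functools

@functools.lru_cache(maxsize=None)
def fibonacci(n):
    '''Return the n-th Fibonacci number'''
    assert(n >= 0), 'n must be >= 0'
    return n if n in (0, 1) else fibonacci(n-1) + fibonacci(n-2)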
transforms/__init__.py | yangyuke001/emotion-expression.shufflenetv2 | 3 | 7031 | <reponame>yangyuke001/emotion-expression.shufflenetv2
from .transforms import *
| 0.867188 | 1 |
codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py | adRenaud/research | 1 | 7032 | <filename>codes/elastoplasticity_spectralAnalysis/planeStress/slowWavePlaneStressSigDriven.py
#!/usr/bin/python
import numpy as np
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import scipy.optimize
from matplotlib import animation
from scipy.integrate import ode
import pdb
# Material parameters
rho = 7800.
E = 2.e11
nu = 0.3
mu = 0.5*E/(1.+nu)
kappa = E/(3.*(1.-2.*nu))
lamb = kappa-2.*mu/3.
sigy = 100.0e6
H = 100.08e6
beta=(6.*mu**2)/(3.*mu+H)
def tangentModulus(sigma,lamb,mu,beta,tangent):
H=np.zeros((3,3))
# |H1111 H1112 H1122|
# H =|H1211 H1212 H1222|
# |H2211 H2212 H2222|
# sigma = [sig11 , sig12 , sig22 , sig33 ]
sigDev = computeDeviatoricPart(sigma)
sigdnorm2=np.dot(sigDev,sigDev)
BETA=beta/sigdnorm2
s11=sigDev[0];s12=sigDev[1]/np.sqrt(2.);s22=sigDev[2];s33=sigDev[3]
## Plane stress tangent modulus Hijkl = Hijkl - Hij33*H33kl/H3333
H1133=(lamb -BETA*s11*s33)
H1233=(-BETA*s12*s33)
H1122=(lamb -BETA*s11*s22)
H2222=(lamb+2.*mu -BETA*s22**2)
H1222=(-BETA*s12*s22)
H2233=(lamb-BETA*s22*s33)
H3333=(lamb+2.*mu-BETA*s33*s33)
if tangent=='planeStress':
H[0,0]=lamb+2.*mu - BETA*s11**2 -H1133*H1133/H3333
H[0,1]=-BETA*s11*s12 -H1133*H1233/H3333
H[0,2]=lamb-BETA*s11*s22 -H1133*H2233/H3333
H[1,0]=-BETA*s12*s11-H1233*H1133/H3333
H[1,1]=mu-BETA*s12**2 -H1233*H1233/H3333
H[1,2]=-BETA*s12*s22-H1233*H2233/H3333
H[2,0]=lamb - BETA*s11*s22 -H2233*H1133/H3333
H[2,1]=-BETA*s22*s12 -H2233*H1233/H3333
H[2,2]=lamb+2.*mu-BETA*s22**2 -H2233*H2233/H3333
elif tangent=='thinWalled':
H[0,0]=lamb+2.*mu - BETA*s11**2 -H1122*(H1122+H1133)/(H2233+H2222)
H[0,1]=-BETA*s11*s12 -H1222*(H1122+H1133)/(H2233+H2222)
H[0,2]=lamb-BETA*s11*s22
H[1,0]=-BETA*s12*s11-H1122*(H1222+H1233)/(H2233+H2222)
H[1,1]=mu-BETA*s12**2-H1222*(H1222+H1233)/(H2233+H2222)
H[1,2]=-BETA*s12*s22
H[2,0]=lamb - BETA*s11*s22
H[2,1]=-BETA*s22*s12
H[2,2]=lamb+2.*mu-BETA*s22**2
else :
H[0,0]=lamb+2.*mu - BETA*s11**2
H[0,1]=-BETA*s11*s12
H[0,2]=lamb-BETA*s11*s12
H[1,0]=-BETA*s12*s11
H[1,1]=mu-BETA*s12**2
H[1,2]=-BETA*s12*s22
H[2,0]=lamb-BETA*s11*s22
H[2,1]=-BETA*s12*s22
H[2,2]=lamb+2.*mu-BETA*s22**2
return H
def acousticTensor(H,n):
n1=n[0] ; n2=n[1]
C11 = H[0,0]*n1**2 + H[1,1]*n2**2 + 2.*H[0,1]*n1*n2
C12 = H[0,1]*n1**2 + H[1,2]*n2**2 + (H[0,2]+H[1,1])*n1*n2
C22 = H[1,1]*n1**2 + H[2,2]*n2**2 + 2.*H[2,1]*n1*n2
return np.array([C11,C12,C22])
def acousticEigenStructure(C):
C11=C[0];C12=C[1];C22=C[2]
## omega1,w1 associated to cf
## omega2,w2 associated to cs
omega1=0.5*(C11+C22 + np.sqrt((C11-C22)**2+4.*C12**2))
omega2=0.5*(C11+C22 - np.sqrt((C11-C22)**2+4.*C12**2))
w1=np.array([-C12,C11-omega1])
w2=np.array([-C12,C11-omega2])
return [omega1,w1],[omega2,w2]
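# Reminder (assumption based on the cf/cs comments above): the eigenvalues of the
# acoustic tensor are rho*c^2, so the fast/slow wave speeds would follow as
#   cf = np.sqrt(omega1/rho) ; cs = np.sqrt(omega2/rho)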
def vonMisesYieldSurface(sigy):
radius=np.sqrt((2./3.)*sigy**2)
theta=np.linspace(0,2*np.pi,50)
s2 = radius*np.cos(theta)
s3 = radius*np.sin(theta)
s1=0.
c=np.sqrt(2.)/2.;
s=np.sqrt(2.)/2.;
P2=np.array([[c,-c,0.],[c,c,0.],[0.,0.,1.]])
P1=np.array([[c,0.,-c],[0.,1.,0.],[c,0.,c]])
c=np.cos(np.arctan(1./np.sqrt(2.0)))
s=np.sin(np.arctan(1./np.sqrt(2.0)))
P1=np.array([[c,0.,-s],[0.,1.,0.],[s,0.,c]])
cylindre=np.zeros((3,len(s2)))
for i in range(len(s2)):
cylindre[:,i] = np.dot(P2,np.dot(P1,np.array([s1,s2[i],s3[i]])))
return cylindre
def computeDeviatoricPart(T):
# T = [T11 T21 T22 T33]
Pdev=np.array([[1.-1/3.,0.,-1./3.,-1./3.],[0.,1.,0.,0.],[-1./3.,0.,1.-1./3.,-1./3.],[-1./3.,0.,-1./3.,1.-1./3.]])
Tdev=np.dot(Pdev,T)
return np.array([Tdev[0],np.sqrt(2.)*Tdev[1],Tdev[2],Tdev[3]])
def computeCriterion(sig11,sig22,sig12,sig33,sigy):
# deviatoric stress
sDev=computeDeviatoricPart(np.array([sig11,sig12,sig22,sig33]))
normSDev=np.sqrt(np.dot(sDev,sDev))
f=np.sqrt(3./2.)*normSDev - sigy
return f
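# Example (illustrative): for a uniaxial state sig11=sigy, sig22=sig12=sig33=0,
# computeCriterion(sigy, 0., 0., 0., sigy) returns 0 (up to round-off), i.e. the stress
# lies on the von Mises surface; negative values are elastic, positive values are inadmissible.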
def computePsiSlow(sig11,sigma,sig33,lamb,mu,beta,tangent,rho):
# sig11 driven
n1=1.;n2=0.
sig12=sigma[0];sig22=sigma[1]
H=tangentModulus(np.array([sig11,sig12,sig22,sig33]),lamb,mu,beta,tangent)
C=acousticTensor(H,np.array([n1,n2]))
eigenf,eigens=acousticEigenStructure(C)
alpha11=H[0,1]*H[1,2]- H[1,1]*H[0,2]
alpha12=-H[0,1]*H[0,2]-H[0,0]*H[2,1]
alpha22=H[0,0]*H[1,1]-H[0,1]**2
w1=eigenf[1][0];w2=eigenf[1][1]
psi12=-2.*w1/w2
psi22=(2.*w1*alpha12/w2-alpha11)/alpha22
"""
n1=1.;n2=0.
JN=-np.array([[0.,0.,n1/rho,n2/rho,0.],[0.,0.,0.,n1/rho,n2/rho],[H[0,0]*n1+H[0,1]*n2,H[0,1]*n1+H[0,2]*n2,0.,0.,0.],[H[0,1]*n1+H[1,1]*n2,H[1,1]*n1+H[1,2]*n2,0,0,0],[H[2,0]*n1+H[2,1]*n2,H[2,1]*n1+H[2,2]*n2,0,0,0]])
eigenStructure=np.linalg.eig(JN.T)
contact=np.where(eigenStructure[0]==0)[0][0]
cfplus=np.where(eigenStructure[0]==np.max(eigenStructure[0]))[0][0]
cfminus=np.where(eigenStructure[0]==np.min(eigenStructure[0]))[0][0]
index=np.ones(5);index[[contact,cfminus,cfplus]]-=1
cs=np.where(index!=0.)[0]
csminus=np.where(eigenStructure[0]==np.min(eigenStructure[0][cs]))[0][0]
csplus=np.where(eigenStructure[0]==np.max(eigenStructure[0][cs]))[0][0]
lcfminus=eigenStructure[1][:,cfminus];lcfplus=eigenStructure[1][:,cfplus]
lcontact=eigenStructure[1][:,contact]
dl=lcfminus-lcfplus
if not (dl[4]!=0. and dl[0]!=0. and dl[1]!=0.):
psi12=-dl[2]/dl[3]
if not (lcontact[0]>1.e-6 and lcontact[1]>1.e-6):
psi22=(lcontact[3]*(dl[2]/dl[3])-lcontact[2])/lcontact[4]
"""
return np.array([psi12,psi22])
def computeLodeAngle(sig11,sig22,sig12,sig33):
# deviatoric stress
sDev=computeDeviatoricPart(np.array([sig11,sig12,sig22,sig33]))
s11=sDev[0];s12=sDev[1]/np.sqrt(2.);s22=sDev[2];s33=sDev[3]
# deviator 2nd and 3rd invariants
J3=s33*(s11*s22-s12**2) ; sqrtJ2=np.sqrt(0.5*np.dot(sDev,sDev))
theta=np.arccos((3./2.)*np.sqrt(3.)*J3/(sqrtJ2**3))/3.
theta=theta*360./(2.*np.pi)
return theta
def updateEquivalentPlasticStrain(sig,sign,H):
# sig=[sig11^n , sqrt(2)*sig12^n , sig22 , sig33^n]
# sign=[sig11^n+1 , sqrt(2)*sig12^n+1 , sig22 , sig33^n+1]
sigDev=computeDeviatoricPart(np.array([sign[0],sign[1]/np.sqrt(2.),sign[2],sign[3]]))
norm=np.sqrt(np.dot(sigDev,sigDev))
flow=sigDev/norm
dSig=sign-sig
dp=(1./H)*np.sqrt(3./2.)*np.dot(flow,dSig)
return dp
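# Added comment: dp approximates the equivalent plastic strain increment
# dp = sqrt(3/2) * (n : dSigma) / H, with n the normalized deviatoric flow direction,
# consistent with the linear isotropic hardening law sigy_n+1 = sigy + H*p used below.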
def plasticResidual(sig,sign,p,pn,H):
# sig=[sig11^n , sqrt(2)*sig12^n , sig22 , sig33^n]
# sign=[sig11^n+1 , sqrt(2)*sig12^n+1 , sig22 , sig33^n+1]
sigDev=computeDeviatoricPart(np.array([sign[0],sign[1]/np.sqrt(2.),sign[2],sign[3]]))
norm=np.sqrt(np.dot(sigDev,sigDev))
flow=sigDev/norm
dSig=sign-sig
dp=(1./H)*np.sqrt(3./2.)*np.dot(flow,dSig)
res=pn-p-dp
return res
def computeEigenStresses(sig):
# | sig11 sig12 0 |
#sig=| sig12 sig22 0 |
# | 0 0 sig33 |
s3=sig[2,2]
delta=(sig[0,0]-sig[1,1])**2+4.*sig[0,1]**2
s1=0.5*(sig[0,0]+sig[1,1]-np.sqrt(delta))
s2=0.5*(sig[0,0]+sig[1,1]+np.sqrt(delta))
return np.array([s1,s2,s3])
from mpl_toolkits.mplot3d import proj3d
def orthogonal_proj(zfront, zback):
a = (zfront+zback)/(zfront-zback)
b = -2*(zfront*zback)/(zfront-zback)
return np.array([[1,0,0,0],
[0,1,0,0],
[0,0,a,b],
[0,0,0,zback]])
proj3d.persp_transformation = orthogonal_proj
Samples=5
# Sample constant stress component sig22
sig22=np.linspace(0.,sigy,Samples)
#sig22=np.linspace(-sigy/np.sqrt(1-nu+nu**2),sigy/np.sqrt(1-nu+nu**2),Samples)
Samples*=10
sig=np.zeros((Samples,Samples))
tau=np.zeros((Samples,Samples))
frames=[10,20,40]
frames=[5,10,15,20]
col=["r","g","b","y","c","m","k","p"]
tauM=1.5*sigy/np.sqrt(3.)
sigM=1.5*sigy/np.sqrt(1-nu+nu**2)
tauM=sigM
Niter=1000
TAU=np.zeros((Niter,len(frames),len(sig22)))
SIG11=np.zeros((Niter,len(frames),len(sig22)))
SIG22=np.zeros((Niter,len(frames),len(sig22)))
eigsigS=np.zeros((Niter,len(frames),len(sig22),3))
criterionS=np.zeros((Niter,len(frames)))
PsiS=np.zeros((Samples,len(sig22)))
plast_S=np.zeros((Niter,len(frames)))
LodeAngle_S=np.zeros((Niter,len(frames)))
# Boolean to plot the updated yield surface
updated_criterion=False
for k in range(len(sig22)-1):
s22=sig22[k]
Delta=(4.*sigy**2- 3.*s22**2)
sigMax=(s22+np.sqrt(Delta))/2.
sigMin=(s22-np.sqrt(Delta))/2.
# Sample stress component sig11
sig[:,k]=np.linspace(sigMin,sigMax,Samples)
sig[:,k]=np.linspace(0.,sigMax,Samples)
# Compute shear stress satisfying the criterion given sig11 and sig22
for i in range(Samples):
s11=sig[i,k]
delta=(s11*s22 -s11**2-s22**2 + sigy**2)/3.
if np.abs(delta)<10. : delta=np.abs(delta)
tauMax=np.sqrt(delta)
f_vm=lambda x:computeCriterion(s11,s22,x,0.,sigy)
tau[i,k]=np.sqrt(delta)
## LOADING PATHS PLOTS
for k in range(len(sig22)-1)[1:]:
s22=sig22[k]
sigM=1.25*np.max(sig[:,k])
tauM=1.25*np.max(tau[:,k])
## For each value of sig22 trace the loading paths given by psis from yield surface to an arbitrary shear stress level
approx=np.zeros((len(frames),2))
ordonnees=np.zeros((len(frames),Samples))
abscisses=np.zeros((len(frames),Samples))
radius_S=np.zeros(len(frames))
for s,i in enumerate(frames):
if i==0:
continue
sig0=sig[-1-i,k]
tau0=tau[-1-i,k]
dsig=(sigM-sig0)/Niter
SIG11[:,s,k]=np.linspace(sig0,sigM,Niter)
TAU[0,s,k]=tau0
SIG22[0,s,k]=s22
#rSlow = ode(computePsiSlow).set_integrator('vode',method='bdf')
rSlow = ode(computePsiSlow).set_integrator('vode',method='adams',order=12)
rSlow.set_initial_value(np.array([TAU[0,s,k],SIG22[0,s,k]]),SIG11[0,s,k]).set_f_params(0.,lamb,mu,beta,'planeStress',rho)
sigma = np.matrix([[SIG11[0,s,k],TAU[0,s,k],0.],[TAU[0,s,k],SIG22[0,s,k],0.],[0.,0.,0.]])
eigsig=np.linalg.eig(sigma)[0]
eigsigS[0,s,k,:]=eigsig
LodeAngle_S[0,s]=computeLodeAngle(sigma[0,0],SIG22[0,s,k],sigma[0,1],0.)
p=0.
epsp33=0.
for j in range(Niter-1):
rSlow.set_f_params(np.array([TAU[j,s,k],SIG22[j,s,k]]),0.,lamb,mu,beta,'planeStress',rho)
if not rSlow.successful():
print "Integration issues in slow wave path"
break
rSlow.integrate(rSlow.t+dsig)
TAU[j+1,s,k],SIG22[j+1,s,k]=rSlow.y
sigma = np.array([SIG11[j,s,k],np.sqrt(2.)*TAU[j,s,k],SIG22[j,s,k],0.])
sigman = np.array([SIG11[j+1,s,k],np.sqrt(2.)*TAU[j+1,s,k],SIG22[j+1,s,k],0.])
f_vm=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
#if f_vm>0. :
#p+=updateEquivalentPlasticStrain(sigma,sigman,H)
#residual=lambda x: plasticResidual(sigma,sigman,p,x,H)
residual=lambda x: computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*x)
p=scipy.optimize.root(residual,p,method='hybr',options={'xtol':1.e-12}).x[0]
criterionS[j+1,s]=computeCriterion(SIG11[j+1,s,k],SIG22[j+1,s,k],TAU[j+1,s,k],0.,sigy+H*p)
plast_S[j+1,s]=p
LodeAngle_S[j+1,s]=computeLodeAngle(sigman[0],sigman[2],sigman[1]/np.sqrt(2.),0.)
# Eigenvalues of sigma (for deviatoric plane plots)
sigma = np.matrix([[SIG11[j+1,s,k],TAU[j+1,s,k],0.],[TAU[j+1,s,k],SIG22[j+1,s,k],0.],[0.,0.,0.]])
eigsigS[j+1,s,k,:]=computeEigenStresses(sigma)
print "Final equivalent plastic strain after slow wave : ",p
radius_S[s]=sigy+H*p
TAU_MAX_S=np.max(ordonnees)
SIG_MAX_S=np.max(abscisses)
### SUBPLOTS SETTINGS
fig = plt.figure()
ax2=plt.subplot2grid((1,2),(0,1),projection='3d')
ax1d1=plt.subplot2grid((1,2),(0,0))
ax1d1.grid()
ax1d1.set_xlabel(r'$\Theta$', fontsize=24)
ax1d1.set_ylabel('p', fontsize=24)
fvm1=ax1d1.twinx()
fvm1.set_ylabel('f',fontsize=18.)
fvm1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
cylindre=vonMisesYieldSurface(sigy)
ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color="k")
elevation_Angle_radian=np.arctan(1./np.sqrt(2.0))
angle_degree= 180.*elevation_Angle_radian/np.pi
radius=1.*np.sqrt((2./3.)*sigy**2)
ax2.set_xlim(-1.*radius,1.*radius)
ax2.set_ylim(-1.*radius,1.*radius)
ax2.set_zlim(-1.*radius,1.*radius)
ax2.view_init(angle_degree,45.)
ax2.plot([0.,sigy],[0.,sigy],[0.,sigy],color="k")
ax2.set_xlabel(r'$\sigma_1$',size=24.)
ax2.set_ylabel(r'$\sigma_2$',size=24.)
ax2.set_zlabel(r'$\sigma_3$',size=24.)
for p in range(len(frames)):
if updated_criterion :
cylindre=vonMisesYieldSurface(radius_S[p])
ax2.plot_wireframe(cylindre[0,:],cylindre[1,:],cylindre[2,:], color=col[p],linestyle='--')
## 2D plot of equivalent plastic strain evolution
ax1d1.plot(LodeAngle_S[:Niter/5,p],plast_S[:Niter/5,p],col[p])
#ax1d1_2.plot(LodeAngle_S[:Niter/5,p],SIG33_S[:Niter/5,p,k],col[p],marker='o')
fvm1.plot(LodeAngle_S[:,p],criterionS[:,p],col[p],linestyle='--')
## 3D plots of loading paths (deviatoric plane)
ax2.plot(eigsigS[:,p,k,0],eigsigS[:,p,k,1],eigsigS[:,p,k,2],color=col[p],marker="o")
ax2.plot([-sigy,sigy],[0.,0.],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([0.,0.],[-sigy,sigy],[0.,0.],color="k",linestyle="--",lw=1.)
ax2.plot([-radius,radius],[radius,-radius],[0.,0.],color="k",linestyle="--",lw=1.)
#plt.show()
fig = plt.figure()
ax1=plt.subplot2grid((1,2),(0,0))
ax2=plt.subplot2grid((1,2),(0,1))
ax1.set_xlabel(r'$\sigma_{11}$',size=28.)
ax1.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax1.set_zlabel(r'$\sigma_{22}$',size=28.)
ax2.set_xlabel(r'$\sigma_{22}$',size=28.)
ax2.set_ylabel(r'$\sigma_{12}$',size=28.)
#ax2.set_zlabel(r'$\sigma_{11}$',size=28.)
ax1.grid()
ax2.grid()
#ax2.view_init(-90.,-0.)
#ax1.view_init(-90.,0.)
for s,i in enumerate(frames):
sig0=sig[-1-i,k]
s22max=(sig0+np.sqrt(4*sigy**2-3.*sig0**2))/2.
s22min=(sig0-np.sqrt(4*sigy**2-3.*sig0**2))/2.
s22=np.linspace(s22min,s22max,Samples)
s12=np.sqrt((sigy**2- sig0**2-s22**2+sig0*s22)/3.)
ax2.plot(s22,s12,color=col[s])
ax1.plot(sig[:,k],tau[:,k],'k')
#ax2.plot(sig[:,k],tau[:,k],sig22[k],'k')
for p in range(len(frames)):
ax1.plot(SIG11[:,p,k],TAU[:,p,k],color=col[p])
ax2.plot(SIG22[:,p,k],TAU[:,p,k],color=col[p])
plt.show()
| 1.992188 | 2 |
pyhsms/core/connectionstate.py | cherish-web/pyhsms | 2 | 7033 | <reponame>cherish-web/pyhsms
# _*_ coding: utf-8 _*_
#@Time : 2020/7/29 上午 09:49
#@Author : cherish_peng
#@Email : <EMAIL>
#@File : connectionstate.py
#@Software : PyCharm
from enum import Enum
class ConnectionState(Enum):
'''
ConnectionState enum
'''
DisConnected = 0
    Connecting = 1
    Connected = 2
    Selected = 3
    Retry = 4 | 1.757813 | 2 |
lifelines/fitters/coxph_fitter.py | msanpe/lifelines | 0 | 7034 | # -*- coding: utf-8 -*-
import time
from datetime import datetime
import warnings
from textwrap import dedent, fill
import numpy as np
import pandas as pd
from numpy.linalg import norm, inv
from scipy.linalg import solve as spsolve, LinAlgError
from scipy.integrate import trapz
from scipy import stats
from lifelines.fitters import BaseFitter, Printer
from lifelines.plotting import set_kwargs_drawstyle
from lifelines.statistics import chisq_test, proportional_hazard_test, TimeTransformers, StatisticalResult
from lifelines.utils.lowess import lowess
from lifelines.utils.concordance import _concordance_summary_statistics, _concordance_ratio
from lifelines.utils import (
_get_index,
_to_list,
_to_tuple,
_to_1d_array,
inv_normal_cdf,
normalize,
qth_survival_times,
coalesce,
check_for_numeric_dtypes_or_raise,
check_low_var,
check_complete_separation,
check_nans_or_infs,
StatError,
ConvergenceWarning,
StatisticalWarning,
StepSizer,
ConvergenceError,
string_justify,
interpolate_at_times_and_return_pandas,
CensoringType,
interpolate_at_times,
format_p_value,
)
__all__ = ["CoxPHFitter"]
class BatchVsSingle:
@staticmethod
def decide(batch_mode, n_unique, n_total, n_vars):
frac_dups = n_unique / n_total
if batch_mode or (
# https://github.com/CamDavidsonPilon/lifelines/issues/591 for original issue.
# new values from from perf/batch_vs_single script.
(batch_mode is None)
and (
(
6.876218e-01
+ -1.796993e-06 * n_total
+ -1.204271e-11 * n_total ** 2
+ 1.912500e00 * frac_dups
+ -8.121036e-01 * frac_dups ** 2
+ 4.916605e-06 * n_total * frac_dups
+ -5.888875e-03 * n_vars
+ 5.473434e-09 * n_vars * n_total
)
< 1
)
):
return "batch"
return "single"
class CoxPHFitter(BaseFitter):
r"""
This class implements fitting Cox's proportional hazard model:
.. math:: h(t|x) = h_0(t) \exp((x - \overline{x})' \beta)
Parameters
----------
alpha: float, optional (default=0.05)
the level in the confidence intervals.
tie_method: string, optional
specify how the fitter should deal with ties. Currently only
'Efron' is available.
penalizer: float, optional (default=0.0)
Attach an L2 penalizer to the size of the coefficients during regression. This improves
stability of the estimates and controls for high correlation between covariates.
For example, this shrinks the absolute value of :math:`\beta_i`.
The penalty is :math:`\frac{1}{2} \text{penalizer} ||\beta||^2`.
strata: list, optional
specify a list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
Examples
--------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter()
>>> cph.fit(rossi, 'week', 'arrest')
>>> cph.print_summary()
Attributes
----------
params_ : Series
The estimated coefficients. Changed in version 0.22.0: use to be ``.hazards_``
hazard_ratios_ : Series
The exp(coefficients)
confidence_intervals_ : DataFrame
The lower and upper confidence intervals for the hazard coefficients
durations: Series
The durations provided
event_observed: Series
The event_observed variable provided
weights: Series
The event_observed variable provided
variance_matrix_ : numpy array
The variance matrix of the coefficients
strata: list
the strata provided
standard_errors_: Series
the standard errors of the estimates
score_: float
the concordance index of the model.
baseline_hazard_: DataFrame
baseline_cumulative_hazard_: DataFrame
baseline_survival_: DataFrame
"""
_KNOWN_MODEL = True
def __init__(self, alpha=0.05, tie_method="Efron", penalizer=0.0, strata=None):
super(CoxPHFitter, self).__init__(alpha=alpha)
if penalizer < 0:
raise ValueError("penalizer parameter must be >= 0.")
if tie_method != "Efron":
raise NotImplementedError("Only Efron is available at the moment.")
self.alpha = alpha
self.tie_method = tie_method
self.penalizer = penalizer
self.strata = strata
@CensoringType.right_censoring
def fit(
self,
df,
duration_col=None,
event_col=None,
show_progress=False,
initial_point=None,
strata=None,
step_size=None,
weights_col=None,
cluster_col=None,
robust=False,
batch_mode=None,
):
"""
Fit the Cox proportional hazard model to a dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights, strata).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
            the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
weights_col: string, optional
an optional column in the DataFrame, df, that denotes the weight per subject.
This column is expelled and not used as a covariate, but as a weight in the
final regression. Default weight is 1.
This can be used for case-weights. For example, a weight of 2 means there were two subjects with
identical observations.
This can be used for sampling weights. In that case, use `robust=True` to get more accurate standard errors.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
strata: list or string, optional
            specify a column or list of columns to use in stratification. This is useful if a
categorical covariate does not obey the proportional hazard assumption. This
is used similar to the `strata` expression in R.
See http://courses.washington.edu/b515/l17.pdf.
step_size: float, optional
set an initial step size for the fitting algorithm. Setting to 1.0 may improve performance, but could also hurt convergence.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator, aka Wei-Lin estimate. This does not handle
ties, so if there are high number of ties, results may significantly differ. See
"The Robust Inference for the Cox Proportional Hazards Model", Journal of the American Statistical Association, Vol. 84, No. 408 (Dec., 1989), pp. 1074- 1078
cluster_col: string, optional
specifies what column has unique identifiers for clustering covariances. Using this forces the sandwich estimator (robust variance estimator) to
be used.
batch_mode: bool, optional
enabling batch_mode can be faster for datasets with a large number of ties. If left as None, lifelines will choose the best option.
Returns
-------
self: CoxPHFitter
self with additional new properties: ``print_summary``, ``hazards_``, ``confidence_intervals_``, ``baseline_survival_``, etc.
Note
----
Tied survival times are handled using Efron's tie-method.
Examples
--------
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E')
>>> cph.print_summary()
>>> cph.predict_median(df)
>>> from lifelines import CoxPHFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'weights': [1.1, 0.5, 2.0, 1.6, 1.2, 4.3, 1.4, 4.5, 3.0, 3.2, 0.4, 6.2],
>>> 'month': [10, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> cph = CoxPHFitter()
>>> cph.fit(df, 'T', 'E', strata=['month', 'age'], robust=True, weights_col='weights')
>>> cph.print_summary()
>>> cph.predict_median(df)
"""
if duration_col is None:
raise TypeError("duration_col cannot be None.")
self._time_fit_was_called = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + " UTC"
self.duration_col = duration_col
self.event_col = event_col
self.robust = robust
self.cluster_col = cluster_col
self.weights_col = weights_col
self._n_examples = df.shape[0]
self._batch_mode = batch_mode
self.strata = coalesce(strata, self.strata)
X, T, E, weights, original_index, self._clusters = self._preprocess_dataframe(df)
self.durations = T.copy()
self.event_observed = E.copy()
self.weights = weights.copy()
if self.strata is not None:
self.durations.index = original_index
self.event_observed.index = original_index
self.weights.index = original_index
self._norm_mean = X.mean(0)
self._norm_std = X.std(0)
X_norm = normalize(X, self._norm_mean, self._norm_std)
params_ = self._fit_model(
X_norm, T, E, weights=weights, initial_point=initial_point, show_progress=show_progress, step_size=step_size
)
self.params_ = pd.Series(params_, index=X.columns, name="coef") / self._norm_std
self.hazard_ratios_ = pd.Series(np.exp(self.params_), index=X.columns, name="exp(coef)")
self.variance_matrix_ = -inv(self._hessian_) / np.outer(self._norm_std, self._norm_std)
self.standard_errors_ = self._compute_standard_errors(X_norm, T, E, weights)
self.confidence_intervals_ = self._compute_confidence_intervals()
self._predicted_partial_hazards_ = (
self.predict_partial_hazard(X)
.rename(columns={0: "P"})
.assign(T=self.durations.values, E=self.event_observed.values, W=self.weights.values)
.set_index(X.index)
)
self.baseline_hazard_ = self._compute_baseline_hazards()
self.baseline_cumulative_hazard_ = self._compute_baseline_cumulative_hazard()
self.baseline_survival_ = self._compute_baseline_survival()
if hasattr(self, "_concordance_score_"):
# we have already fit the model.
del self._concordance_score_
return self
def _preprocess_dataframe(self, df):
# this should be a pure function
df = df.copy()
if self.strata is not None:
df = df.sort_values(by=_to_list(self.strata) + [self.duration_col])
original_index = df.index.copy()
df = df.set_index(self.strata)
else:
df = df.sort_values(by=self.duration_col)
original_index = df.index.copy()
# Extract time and event
T = df.pop(self.duration_col)
E = (
df.pop(self.event_col)
if (self.event_col is not None)
else pd.Series(np.ones(self._n_examples), index=df.index, name="E")
)
W = (
df.pop(self.weights_col)
if (self.weights_col is not None)
else pd.Series(np.ones((self._n_examples,)), index=df.index, name="weights")
)
_clusters = df.pop(self.cluster_col).values if self.cluster_col else None
X = df.astype(float)
T = T.astype(float)
# we check nans here because converting to bools maps NaNs to True..
check_nans_or_infs(E)
E = E.astype(bool)
self._check_values(X, T, E, W)
return X, T, E, W, original_index, _clusters
def _check_values(self, X, T, E, W):
check_for_numeric_dtypes_or_raise(X)
check_nans_or_infs(T)
check_nans_or_infs(X)
check_low_var(X)
check_complete_separation(X, E, T, self.event_col)
# check to make sure their weights are okay
if self.weights_col:
if (W.astype(int) != W).any() and not self.robust:
warnings.warn(
"""It appears your weights are not integers, possibly propensity or sampling scores then?
It's important to know that the naive variance estimates of the coefficients are biased. Instead a) set `robust=True` in the call to `fit`, or b) use Monte Carlo to
estimate the variances. See paper "Variance estimation when using inverse probability of treatment weighting (IPTW) with survival analysis"
""",
StatisticalWarning,
)
if (W <= 0).any():
raise ValueError("values in weight column %s must be positive." % self.weights_col)
def _fit_model(
self,
X,
T,
E,
weights=None,
initial_point=None,
step_size=None,
precision=1e-07,
show_progress=True,
max_steps=50,
): # pylint: disable=too-many-statements,too-many-branches
"""
Newton Rhaphson algorithm for fitting CPH model.
Note
----
The data is assumed to be sorted on T!
Parameters
----------
X: (n,d) Pandas DataFrame of observations.
T: (n) Pandas Series representing observed durations.
E: (n) Pandas Series representing death events.
weights: (n) an iterable representing weights per observation.
initial_point: (d,) numpy array of initial starting point for
NR algorithm. Default 0.
step_size: float, optional
> 0.001 to determine a starting step size in NR algorithm.
precision: float, optional
the convergence halts if the norm of delta between
successive positions is less than epsilon.
show_progress: boolean, optional
since the fitter is iterative, show convergence
diagnostics.
max_steps: int, optional
the maximum number of iterations of the Newton-Rhaphson algorithm.
Returns
-------
beta: (1,d) numpy array.
"""
self.path = []
assert precision <= 1.0, "precision must be less than or equal to 1."
_, d = X.shape
# make sure betas are correct size.
if initial_point is not None:
assert initial_point.shape == (d,)
beta = initial_point
else:
beta = np.zeros((d,))
step_sizer = StepSizer(step_size)
step_size = step_sizer.next()
# Method of choice is just efron right now
if self.tie_method == "Efron":
decision = BatchVsSingle.decide(self._batch_mode, T.nunique(), X.shape[0], X.shape[1])
get_gradients = getattr(self, "_get_efron_values_%s" % decision)
self._batch_mode = decision == "batch"
else:
raise NotImplementedError("Only Efron is available.")
i = 0
converging = True
ll, previous_ll = 0, 0
start = time.time()
while converging:
self.path.append(beta.copy())
i += 1
if self.strata is None:
h, g, ll = get_gradients(X.values, T.values, E.values, weights.values, beta)
else:
g = np.zeros_like(beta)
h = np.zeros((beta.shape[0], beta.shape[0]))
ll = 0
for _h, _g, _ll in self._partition_by_strata_and_apply(X, T, E, weights, get_gradients, beta):
g += _g
h += _h
ll += _ll
if i == 1 and np.all(beta == 0):
# this is a neat optimization, the null partial likelihood
# is the same as the full partial but evaluated at zero.
# if the user supplied a non-trivial initial point, we need to delay this.
self._ll_null_ = ll
if self.penalizer > 0:
# add the gradient and hessian of the l2 term
g -= self.penalizer * beta
h.flat[:: d + 1] -= self.penalizer
# reusing a piece to make g * inv(h) * g.T faster later
try:
inv_h_dot_g_T = spsolve(-h, g, assume_a="pos", check_finite=False)
except ValueError as e:
if "infs or NaNs" in str(e):
raise ConvergenceError(
"""Hessian or gradient contains nan or inf value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
else:
# something else?
raise e
except LinAlgError as e:
raise ConvergenceError(
"""Convergence halted due to matrix inversion problems. Suspicion is high collinearity. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
""",
e,
)
delta = inv_h_dot_g_T
if np.any(np.isnan(delta)):
raise ConvergenceError(
"""delta contains nan value(s). Convergence halted. Please see the following tips in the lifelines documentation:
https://lifelines.readthedocs.io/en/latest/Examples.html#problems-with-convergence-in-the-cox-proportional-hazard-model
"""
)
# Save these as pending result
hessian, gradient = h, g
norm_delta = norm(delta)
# reusing an above piece to make g * inv(h) * g.T faster.
newton_decrement = g.dot(inv_h_dot_g_T) / 2
if show_progress:
print(
"\rIteration %d: norm_delta = %.5f, step_size = %.4f, ll = %.5f, newton_decrement = %.5f, seconds_since_start = %.1f"
% (i, norm_delta, step_size, ll, newton_decrement, time.time() - start),
end="",
)
# convergence criteria
if norm_delta < precision:
converging, completed = False, True
elif previous_ll != 0 and abs(ll - previous_ll) / (-previous_ll) < 1e-09:
# this is what R uses by default
converging, completed = False, True
elif newton_decrement < precision:
converging, completed = False, True
elif i >= max_steps:
# 50 iterations steps with N-R is a lot.
# Expected convergence is ~10 steps
converging, completed = False, False
elif step_size <= 0.00001:
converging, completed = False, False
elif abs(ll) < 0.0001 and norm_delta > 1.0:
warnings.warn(
"The log-likelihood is getting suspiciously close to 0 and the delta is still large. There may be complete separation in the dataset. This may result in incorrect inference of coefficients. \
See https://stats.stackexchange.com/q/11109/11867 for more.\n",
ConvergenceWarning,
)
converging, completed = False, False
beta += step_size * delta
previous_ll = ll
step_size = step_sizer.update(norm_delta).next()
self._hessian_ = hessian
self._score_ = gradient
self.log_likelihood_ = ll
if show_progress and completed:
print("Convergence completed after %d iterations." % (i))
elif show_progress and not completed:
print("Convergence failed. See any warning messages.")
# report to the user problems that we detect.
if completed and norm_delta > 0.1:
warnings.warn(
"Newton-Rhaphson convergence completed but norm(delta) is still high, %.3f. This may imply non-unique solutions to the maximum likelihood. Perhaps there is collinearity or complete separation in the dataset?\n"
% norm_delta,
ConvergenceWarning,
)
elif not completed:
warnings.warn(
"Newton-Rhaphson failed to converge sufficiently in %d steps.\n" % max_steps, ConvergenceWarning
)
return beta
def _get_efron_values_single(self, X, T, E, weights, beta):
"""
Calculates the first and second order vector differentials, with respect to beta.
Note that X, T, E are assumed to be sorted on T!
        A good explanation of Efron's method: consider three of five subjects who fail at the same time.
        Since it is not known a priori which of them fails first, one-third of
        (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure. Similarly, two-thirds
        of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
From https://cran.r-project.org/web/packages/survival/survival.pdf:
"Setting all weights to 2 for instance will give the same coefficient estimate but halve the variance. When
the Efron approximation for ties (default) is employed replication of the data will not give exactly the same coefficients as the
weights option, and in this case the weighted fit is arguably the correct one."
Parameters
----------
X: array
(n,d) numpy array of observations.
T: array
(n) numpy array representing observed durations.
E: array
(n) numpy array representing death events.
weights: array
(n) an array representing weights per observation.
beta: array
(1, d) numpy array of coefficients.
Returns
-------
hessian:
(d, d) numpy array,
gradient:
(1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# Init risk and tie sums to zero
x_death_sum = np.zeros((d,))
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
scores = weights * np.exp(np.dot(X, beta))
phi_x_is = scores[:, None] * X
phi_x_x_i = np.empty((d, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i]
w = weights[i]
# Calculate phi values
phi_i = scores[i]
phi_x_i = phi_x_is[i]
# https://stackoverflow.com/a/51481295/1895939
phi_x_x_i = np.multiply.outer(xi, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate sums of Ties, if this is an event
if ei:
x_death_sum = x_death_sum + w * xi
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
tie_phi_x_x = tie_phi_x_x + phi_x_x_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
            # There was at least one event and no more ties remain. Time to sum.
# This code is near identical to the _batch algorithm below. In fact, see _batch for comments.
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
increasing_proportion = np.arange(tied_death_counts) / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
# reset tie values
tied_death_counts = 0
weight_count = 0.0
x_death_sum = np.zeros((d,))
tie_phi = 0
tie_phi_x = np.zeros((d,))
tie_phi_x_x = np.zeros((d, d))
return hessian, gradient, log_lik
@staticmethod
def _trivial_log_likelihood_batch(T, E, weights):
# used for log-likelihood test
n = T.shape[0]
log_lik = 0
_, counts = np.unique(-T, return_counts=True)
risk_phi = 0
pos = n
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
weights_at_t = weights[slice_]
phi_i = weights_at_t
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
# Calculate the sums of Tie set
deaths = E[slice_]
tied_death_counts = deaths.astype(int).sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
weights_deaths = weights_at_t[deaths]
weight_count = weights_deaths.sum()
if tied_death_counts > 1:
tie_phi = phi_i[deaths].sum()
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
pos -= count_of_removals
return log_lik
@staticmethod
def _trivial_log_likelihood_single(T, E, weights):
# assumes sorted on T!
log_lik = 0
n = T.shape[0]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
# Init number of ties and weights
weight_count = 0.0
tied_death_counts = 0
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
# Calculate phi values
phi_i = weights[i]
w = weights[i]
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
# Calculate sums of Ties, if this is an event
if ei:
tie_phi = tie_phi + phi_i
# Keep track of count
tied_death_counts += 1
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tied_death_counts == 0:
# Only censored with current time, move on
continue
if tied_death_counts > 1:
factor = np.log(risk_phi - np.arange(tied_death_counts) * tie_phi / tied_death_counts).sum()
else:
factor = np.log(risk_phi)
log_lik = log_lik - weight_count / tied_death_counts * factor
# reset tie values
tied_death_counts = 0
weight_count = 0.0
tie_phi = 0
return log_lik
def _get_efron_values_batch(self, X, T, E, weights, beta): # pylint: disable=too-many-locals
"""
Assumes sorted on ascending on T
Calculates the first and second order vector differentials, with respect to beta.
        A good explanation of how Efron handles ties: consider three of five subjects who fail at the same time.
        Since it is not known a priori which of them fails first, one-third of
        (φ1 + φ2 + φ3) is subtracted from sum_j^{5} φj after the first failure. Similarly, two-thirds
        of (φ1 + φ2 + φ3) is subtracted after the first two individuals fail, etc.
Returns
-------
hessian: (d, d) numpy array,
gradient: (1, d) numpy array
log_likelihood: float
"""
n, d = X.shape
hessian = np.zeros((d, d))
gradient = np.zeros((d,))
log_lik = 0
# weights = weights[:, None]
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((d,)), np.zeros((d,))
risk_phi_x_x, tie_phi_x_x = np.zeros((d, d)), np.zeros((d, d))
# counts are sorted by -T
_, counts = np.unique(-T, return_counts=True)
scores = weights * np.exp(np.dot(X, beta))
pos = n
ZERO_TO_N = np.arange(counts.max())
for count_of_removals in counts:
slice_ = slice(pos - count_of_removals, pos)
X_at_t = X[slice_]
weights_at_t = weights[slice_]
deaths = E[slice_]
phi_i = scores[slice_, None]
phi_x_i = phi_i * X_at_t
phi_x_x_i = np.dot(X_at_t.T, phi_x_i)
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i.sum()
risk_phi_x = risk_phi_x + (phi_x_i).sum(0)
risk_phi_x_x = risk_phi_x_x + phi_x_x_i
# Calculate the sums of Tie set
tied_death_counts = deaths.sum()
if tied_death_counts == 0:
# no deaths, can continue
pos -= count_of_removals
continue
"""
I think there is another optimization that can be made if we sort on
T and E. Using some accounting, we can skip all the [death] indexing below.
"""
xi_deaths = X_at_t[deaths]
weights_deaths = weights_at_t[deaths]
x_death_sum = np.einsum("a,ab->b", weights_deaths, xi_deaths)
weight_count = weights_deaths.sum()
weighted_average = weight_count / tied_death_counts
if tied_death_counts > 1:
# a lot of this is now in Einstein notation for performance, but see original "expanded" code here
# https://github.com/CamDavidsonPilon/lifelines/blob/e7056e7817272eb5dff5983556954f56c33301b1/lifelines/fitters/coxph_fitter.py#L755-L789
# it's faster if we can skip computing these when we don't need to.
phi_x_i_deaths = phi_x_i[deaths]
tie_phi = phi_i[deaths].sum()
tie_phi_x = (phi_x_i_deaths).sum(0)
tie_phi_x_x = np.dot(xi_deaths.T, phi_x_i_deaths)
increasing_proportion = ZERO_TO_N[:tied_death_counts] / tied_death_counts
denom = 1.0 / (risk_phi - increasing_proportion * tie_phi)
numer = risk_phi_x - np.outer(increasing_proportion, tie_phi_x)
# computes outer products and sums them together.
# Naive approach is to
# 1) broadcast tie_phi_x_x and increasing_proportion into a (tied_death_counts, d, d) matrix
# 2) broadcast risk_phi_x_x and denom into a (tied_death_counts, d, d) matrix
# 3) subtract them, and then sum to (d, d)
# Alternatively, we can sum earlier without having to explicitly create (_, d, d) matrices. This is used here.
#
a1 = np.einsum("ab,i->ab", risk_phi_x_x, denom) - np.einsum(
"ab,i->ab", tie_phi_x_x, increasing_proportion * denom
)
else:
# no tensors here, but do some casting to make it easier in the converging step next.
denom = 1.0 / np.array([risk_phi])
numer = risk_phi_x
a1 = risk_phi_x_x * denom
summand = numer * denom[:, None]
# This is a batch outer product.
# given a matrix t, for each row, m, compute it's outer product: m.dot(m.T), and stack these new matrices together.
# which would be: np.einsum("Bi, Bj->Bij", t, t)
a2 = summand.T.dot(summand)
gradient = gradient + x_death_sum - weighted_average * summand.sum(0)
log_lik = log_lik + np.dot(x_death_sum, beta) + weighted_average * np.log(denom).sum()
hessian = hessian + weighted_average * (a2 - a1)
pos -= count_of_removals
return hessian, gradient, log_lik
def _partition_by_strata(self, X, T, E, weights, as_dataframes=False):
for stratum, stratified_X in X.groupby(self.strata):
stratified_E, stratified_T, stratified_W = (E.loc[[stratum]], T.loc[[stratum]], weights.loc[[stratum]])
if not as_dataframes:
yield (stratified_X.values, stratified_T.values, stratified_E.values, stratified_W.values), stratum
else:
yield (stratified_X, stratified_T, stratified_E, stratified_W), stratum
def _partition_by_strata_and_apply(self, X, T, E, weights, function, *args):
for (stratified_X, stratified_T, stratified_E, stratified_W), _ in self._partition_by_strata(X, T, E, weights):
yield function(stratified_X, stratified_T, stratified_E, stratified_W, *args)
def _compute_martingale(self, X, T, E, _weights, index=None):
# TODO: _weights unused
partial_hazard = self.predict_partial_hazard(X)[0].values
if not self.strata:
baseline_at_T = self.baseline_cumulative_hazard_.loc[T, "baseline cumulative hazard"].values
else:
baseline_at_T = np.empty(0)
for name, T_ in T.groupby(by=self.strata):
baseline_at_T = np.append(baseline_at_T, self.baseline_cumulative_hazard_[name].loc[T_])
martingale = E - (partial_hazard * baseline_at_T)
return pd.DataFrame(
{self.duration_col: T.values, self.event_col: E.values, "martingale": martingale.values}, index=index
)
def _compute_deviance(self, X, T, E, weights, index=None):
df = self._compute_martingale(X, T, E, weights, index)
rmart = df.pop("martingale")
with np.warnings.catch_warnings():
np.warnings.filterwarnings("ignore")
log_term = np.where((E.values - rmart.values) <= 0, 0, E.values * np.log(E.values - rmart.values))
deviance = np.sign(rmart) * np.sqrt(-2 * (rmart + log_term))
df["deviance"] = deviance
return df
def _compute_scaled_schoenfeld(self, X, T, E, weights, index=None):
r"""
Let s_k be the kth schoenfeld residuals. Then E[s_k] = 0.
For tests of proportionality, we want to test if \beta_i(t) is \beta_i (constant) or not.
Let V_k be the contribution to the information matrix at time t_k. A main result from Grambsch and Therneau is that
\beta(t) = E[s_k*V_k^{-1} + \hat{beta}]
so define s_k^* = s_k*V_k^{-1} + \hat{beta} as the scaled schoenfeld residuals.
We can approximate V_k with Hessian/d, so the inverse of Hessian/d is (d * variance_matrix_)
Notes
-------
lifelines does not add the coefficients to the final results, but R does when you call residuals(c, "scaledsch")
"""
n_deaths = self.event_observed.sum()
scaled_schoenfeld_resids = n_deaths * self._compute_schoenfeld(X, T, E, weights, index).dot(
self.variance_matrix_
)
scaled_schoenfeld_resids.columns = self.params_.index
return scaled_schoenfeld_resids
def _compute_schoenfeld(self, X, T, E, weights, index=None):
# TODO: should the index by times, i.e. T[E]?
# Assumes sorted on T and on strata
# cluster does nothing to this, as expected.
_, d = X.shape
if self.strata is not None:
schoenfeld_residuals = np.empty((0, d))
for schoenfeld_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_schoenfeld_within_strata
):
schoenfeld_residuals = np.append(schoenfeld_residuals, schoenfeld_residuals_in_strata, axis=0)
else:
schoenfeld_residuals = self._compute_schoenfeld_within_strata(X.values, T.values, E.values, weights.values)
# schoenfeld residuals are only defined for subjects with a non-zero event.
df = pd.DataFrame(schoenfeld_residuals[E, :], columns=self.params_.index, index=index[E])
return df
def _compute_schoenfeld_within_strata(self, X, T, E, weights):
"""
A positive value of the residual shows an X value that is higher than expected at that death time.
"""
# TODO: the diff_against is gross
# This uses Efron ties.
n, d = X.shape
if not np.any(E):
# sometimes strata have no deaths. This means nothing is returned
# in the below code.
return np.zeros((n, d))
# Init risk and tie sums to zero
risk_phi, tie_phi = 0, 0
risk_phi_x, tie_phi_x = np.zeros((1, d)), np.zeros((1, d))
# Init number of ties and weights
weight_count = 0.0
tie_count = 0
scores = weights * np.exp(np.dot(X, self.params_))
diff_against = []
schoenfeld_residuals = np.empty((0, d))
# Iterate backwards to utilize recursive relationship
for i in range(n - 1, -1, -1):
# Doing it like this to preserve shape
ti = T[i]
ei = E[i]
xi = X[i : i + 1]
score = scores[i : i + 1]
w = weights[i]
# Calculate phi values
phi_i = score
phi_x_i = phi_i * xi
# Calculate sums of Risk set
risk_phi = risk_phi + phi_i
risk_phi_x = risk_phi_x + phi_x_i
# Calculate sums of Ties, if this is an event
diff_against.append((xi, ei))
if ei:
tie_phi = tie_phi + phi_i
tie_phi_x = tie_phi_x + phi_x_i
# Keep track of count
tie_count += 1 # aka death counts
weight_count += w
if i > 0 and T[i - 1] == ti:
# There are more ties/members of the risk set
continue
elif tie_count == 0:
for _ in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, np.zeros((1, d)), axis=0)
diff_against = []
continue
            # There was at least one event and no more ties remain. Time to sum.
weighted_mean = np.zeros((1, d))
for l in range(tie_count):
numer = risk_phi_x - l * tie_phi_x / tie_count
denom = risk_phi - l * tie_phi / tie_count
weighted_mean += numer / (denom * tie_count)
for xi, ei in diff_against:
schoenfeld_residuals = np.append(schoenfeld_residuals, ei * (xi - weighted_mean), axis=0)
# reset tie values
tie_count = 0
weight_count = 0.0
tie_phi = 0
tie_phi_x = np.zeros((1, d))
diff_against = []
return schoenfeld_residuals[::-1]
def _compute_delta_beta(self, X, T, E, weights, index=None):
"""
approximate change in betas as a result of excluding ith row. Good for finding outliers / specific
subjects that influence the model disproportionately. Good advice: don't drop these outliers, model them.
"""
score_residuals = self._compute_score(X, T, E, weights, index=index)
d = X.shape[1]
scaled_variance_matrix = self.variance_matrix_ * np.tile(self._norm_std.values, (d, 1)).T
delta_betas = score_residuals.dot(scaled_variance_matrix)
delta_betas.columns = self.params_.index
return delta_betas
def _compute_score(self, X, T, E, weights, index=None):
_, d = X.shape
if self.strata is not None:
score_residuals = np.empty((0, d))
for score_residuals_in_strata in self._partition_by_strata_and_apply(
X, T, E, weights, self._compute_score_within_strata
):
score_residuals = np.append(score_residuals, score_residuals_in_strata, axis=0)
else:
score_residuals = self._compute_score_within_strata(X.values, T, E.values, weights.values)
return pd.DataFrame(score_residuals, columns=self.params_.index, index=index)
def _compute_score_within_strata(self, X, _T, E, weights):
# https://www.stat.tamu.edu/~carroll/ftp/gk001.pdf
# lin1989
# https://www.ics.uci.edu/~dgillen/STAT255/Handouts/lecture10.pdf
# Assumes X already sorted by T with strata
# TODO: doesn't handle ties.
# TODO: _T unused
n, d = X.shape
# we already unnormalized the betas in `fit`, so we need normalize them again since X is
# normalized.
beta = self.params_.values * self._norm_std
E = E.astype(int)
score_residuals = np.zeros((n, d))
phi_s = np.exp(np.dot(X, beta))
# need to store these histories, as we access them often
# this is a reverse cumulative sum. See original code in https://github.com/CamDavidsonPilon/lifelines/pull/496/files#diff-81ee0759dbae0770e1a02cf17f4cfbb1R431
risk_phi_x_history = (X * (weights * phi_s)[:, None])[::-1].cumsum(0)[::-1]
risk_phi_history = (weights * phi_s)[::-1].cumsum()[::-1][:, None]
# Iterate forwards
for i in range(0, n):
xi = X[i : i + 1]
phi_i = phi_s[i]
score = -phi_i * (
(
E[: i + 1] * weights[: i + 1] / risk_phi_history[: i + 1].T
).T # this is constant-ish, and could be cached
* (xi - risk_phi_x_history[: i + 1] / risk_phi_history[: i + 1])
).sum(0)
if E[i]:
score = score + (xi - risk_phi_x_history[i] / risk_phi_history[i])
score_residuals[i, :] = score
return score_residuals * weights[:, None]
def compute_residuals(self, training_dataframe, kind):
"""
Parameters
----------
training_dataframe : pandas DataFrame
the same training DataFrame given in `fit`
kind : string
{'schoenfeld', 'score', 'delta_beta', 'deviance', 'martingale', 'scaled_schoenfeld'}
"""
ALLOWED_RESIDUALS = {"schoenfeld", "score", "delta_beta", "deviance", "martingale", "scaled_schoenfeld"}
assert kind in ALLOWED_RESIDUALS, "kind must be in %s" % ALLOWED_RESIDUALS
warnings.filterwarnings("ignore", category=ConvergenceWarning)
X, T, E, weights, shuffled_original_index, _ = self._preprocess_dataframe(training_dataframe)
resids = getattr(self, "_compute_%s" % kind)(X, T, E, weights, index=shuffled_original_index)
return resids
def _compute_confidence_intervals(self):
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
se = self.standard_errors_
hazards = self.params_.values
return pd.DataFrame(
np.c_[hazards - z * se, hazards + z * se],
columns=["%g%% lower-bound" % ci, "%g%% upper-bound" % ci],
index=self.params_.index,
)
def _compute_standard_errors(self, X, T, E, weights):
if self.robust or self.cluster_col:
se = np.sqrt(self._compute_sandwich_estimator(X, T, E, weights).diagonal())
else:
se = np.sqrt(self.variance_matrix_.diagonal())
return pd.Series(se, name="se", index=self.params_.index)
def _compute_sandwich_estimator(self, X, T, E, weights):
delta_betas = self._compute_delta_beta(X, T, E, weights)
if self.cluster_col:
delta_betas = delta_betas.groupby(self._clusters).sum()
sandwich_estimator = delta_betas.T.dot(delta_betas)
return sandwich_estimator.values
def _compute_z_values(self):
return self.params_ / self.standard_errors_
def _compute_p_values(self):
U = self._compute_z_values() ** 2
return stats.chi2.sf(U, 1)
@property
def summary(self):
"""Summary statistics describing the fit.
Set alpha property in the object before calling.
Returns
-------
df : DataFrame
Contains columns coef, np.exp(coef), se(coef), z, p, lower, upper"""
ci = 100 * (1 - self.alpha)
z = inv_normal_cdf(1 - self.alpha / 2)
with np.errstate(invalid="ignore", divide="ignore", over="ignore", under="ignore"):
df = pd.DataFrame(index=self.params_.index)
df["coef"] = self.params_
df["exp(coef)"] = self.hazard_ratios_
df["se(coef)"] = self.standard_errors_
df["coef lower %g%%" % ci] = self.confidence_intervals_["%g%% lower-bound" % ci]
df["coef upper %g%%" % ci] = self.confidence_intervals_["%g%% upper-bound" % ci]
df["exp(coef) lower %g%%" % ci] = self.hazard_ratios_ * np.exp(-z * self.standard_errors_)
df["exp(coef) upper %g%%" % ci] = self.hazard_ratios_ * np.exp(z * self.standard_errors_)
df["z"] = self._compute_z_values()
df["p"] = self._compute_p_values()
df["-log2(p)"] = -np.log2(df["p"])
return df
def print_summary(self, decimals=2, **kwargs):
"""
Print summary statistics describing the fit, the coefficients, and the error bounds.
Parameters
-----------
decimals: int, optional (default=2)
specify the number of decimal places to show
kwargs:
print additional metadata in the output (useful to provide model names, dataset names, etc.) when comparing
multiple outputs.
"""
# Print information about data first
justify = string_justify(25)
headers = []
headers.append(("duration col", "'%s'" % self.duration_col))
if self.event_col:
headers.append(("event col", "'%s'" % self.event_col))
if self.weights_col:
headers.append(("weights col", "'%s'" % self.weights_col))
if self.cluster_col:
headers.append(("cluster col", "'%s'" % self.cluster_col))
if self.penalizer > 0:
headers.append(("penalizer", self.penalizer))
if self.robust or self.cluster_col:
headers.append(("robust variance", True))
if self.strata:
headers.append(("strata", self.strata))
headers.extend(
[
("number of observations", "{:g}".format(self.weights.sum())),
("number of events observed", "{:g}".format(self.weights[self.event_observed > 0].sum())),
("partial log-likelihood", "{:.{prec}f}".format(self.log_likelihood_, prec=decimals)),
("time fit was run", self._time_fit_was_called),
]
)
p = Printer(headers, self, justify, decimals, kwargs)
p.print()
def log_likelihood_ratio_test(self):
"""
This function computes the likelihood ratio test for the Cox model. We
compare the existing model (with all the covariates) to the trivial model
of no covariates.
"""
if hasattr(self, "_ll_null_"):
ll_null = self._ll_null_
else:
if self._batch_mode:
ll_null = self._trivial_log_likelihood_batch(
self.durations.values, self.event_observed.values, self.weights.values
)
else:
ll_null = self._trivial_log_likelihood_single(
self.durations.values, self.event_observed.values, self.weights.values
)
ll_alt = self.log_likelihood_
test_stat = 2 * ll_alt - 2 * ll_null
degrees_freedom = self.params_.shape[0]
p_value = chisq_test(test_stat, degrees_freedom=degrees_freedom)
return StatisticalResult(
p_value,
test_stat,
name="log-likelihood ratio test",
null_distribution="chi squared",
degrees_freedom=degrees_freedom,
)
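    # Usage sketch (added; attribute names are assumed from how StatisticalResult is constructed above):
    #   result = cph.log_likelihood_ratio_test()
    #   result.p_value, result.test_statistic  # or result.print_summary()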
def predict_partial_hazard(self, X):
r"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
partial_hazard: DataFrame
Returns the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`\exp{(x - mean(x_{train}))'\beta}`
Notes
-----
If X is a DataFrame, the order of the columns do not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
"""
return np.exp(self.predict_log_partial_hazard(X))
def predict_log_partial_hazard(self, X):
r"""
This is equivalent to R's linear.predictors.
Returns the log of the partial hazard for the individuals, partial since the
baseline hazard is not included. Equal to :math:`(x - \text{mean}(x_{\text{train}})) \beta`
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
log_partial_hazard: DataFrame
Notes
-----
If X is a DataFrame, the order of the columns do not matter. But
if X is an array, then the column ordering is assumed to be the
same as the training dataset.
"""
hazard_names = self.params_.index
if isinstance(X, pd.Series) and ((X.shape[0] == len(hazard_names) + 2) or (X.shape[0] == len(hazard_names))):
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
elif isinstance(X, pd.Series):
assert len(hazard_names) == 1, "Series not the correct argument"
X = X.to_frame().T
return self.predict_log_partial_hazard(X)
index = _get_index(X)
if isinstance(X, pd.DataFrame):
order = hazard_names
X = X.reindex(order, axis="columns")
X = X.astype(float)
X = X.values
X = X.astype(float)
X = normalize(X, self._norm_mean.values, 1)
return pd.DataFrame(np.dot(X, self.params_), index=index)
def predict_cumulative_hazard(self, X, times=None, conditional_after=None):
"""
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
            Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
            subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
            :math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. reset back to starting at 0.
Returns
-------
cumulative_hazard_ : DataFrame
the cumulative hazard of individuals over the timeline
"""
if isinstance(X, pd.Series):
return self.predict_cumulative_hazard(X.to_frame().T, times=times, conditional_after=conditional_after)
n = X.shape[0]
if times is not None:
times = np.atleast_1d(times).astype(float)
if conditional_after is not None:
conditional_after = _to_1d_array(conditional_after).reshape(n, 1)
if self.strata:
cumulative_hazard_ = pd.DataFrame()
for stratum, stratified_X in X.groupby(self.strata):
try:
strata_c_0 = self.baseline_cumulative_hazard_[[stratum]]
except KeyError:
raise StatError(
dedent(
"""The stratum %s was not found in the original training data. For example, try
the following on the original dataset, df: `df.groupby(%s).size()`. Expected is that %s is not present in the output."""
% (stratum, self.strata, stratum)
)
)
col = _get_index(stratified_X)
v = self.predict_partial_hazard(stratified_X)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
n_ = stratified_X.shape[0]
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n_, 1)) + conditional_after
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(strata_c_0, conditional_after)
c_0_ = np.clip((c_0_ - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n_, 1))
c_0_ = interpolate_at_times(strata_c_0, times_to_evaluate_at).T
cumulative_hazard_ = cumulative_hazard_.merge(
pd.DataFrame(c_0_ * v.values[:, 0], columns=col, index=times_),
how="outer",
right_index=True,
left_index=True,
)
else:
v = self.predict_partial_hazard(X)
col = _get_index(v)
times_ = coalesce(times, self.baseline_cumulative_hazard_.index)
if conditional_after is not None:
times_to_evaluate_at = np.tile(times_, (n, 1)) + conditional_after
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at)
c_0_conditional_after = interpolate_at_times(self.baseline_cumulative_hazard_, conditional_after)
c_0 = np.clip((c_0 - c_0_conditional_after).T, 0, np.inf)
else:
times_to_evaluate_at = np.tile(times_, (n, 1))
c_0 = interpolate_at_times(self.baseline_cumulative_hazard_, times_to_evaluate_at).T
cumulative_hazard_ = pd.DataFrame(c_0 * v.values[:, 0], columns=col, index=times_)
return cumulative_hazard_
def predict_survival_function(self, X, times=None, conditional_after=None):
"""
Predict the survival function for individuals, given their covariates. This assumes that the individual
just entered the study (that is, we do not condition on how long they have already lived for.)
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
times: iterable, optional
an iterable of increasing times to predict the cumulative hazard at. Default
is the set of all durations (observed and unobserved). Uses a linear interpolation if
points in time are not in the index.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
survival_function : DataFrame
the survival probabilities of individuals over the timeline
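Examples
---------
A minimal sketch, assuming the ``rossi`` setup used in other examples in this file:
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_survival_function(rossi)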
"""
return np.exp(-self.predict_cumulative_hazard(X, times=times, conditional_after=conditional_after))
def predict_percentile(self, X, p=0.5, conditional_after=None):
"""
Returns the percentile lifetimes for the individuals; by default (p=0.5) this is the median. If the survival curve of an
individual does not cross ``p``, then the result is infinity.
http://stats.stackexchange.com/questions/102986/percentile-loss-functions
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
p: float, optional (default=0.5)
the percentile, must be between 0 and 1.
conditional_after: iterable, optional
Must be equal in size to X.shape[0] (denoted `n` above). An iterable (array, list, series) of possibly non-zero values that represent how long the
subject has already lived for. Ex: if :math:`T` is the unknown event time, then this represents
:math:`T | T > s`. This is useful for knowing the *remaining* hazard/survival of censored subjects.
The new timeline is the remaining duration of the subject, i.e. normalized back to starting at 0.
Returns
-------
percentiles: DataFrame
See Also
--------
predict_median
"""
subjects = _get_index(X)
return qth_survival_times(p, self.predict_survival_function(X, conditional_after=conditional_after)[subjects]).T
def predict_median(self, X, conditional_after=None):
"""
Predict the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
percentiles: DataFrame
the median lifetimes for the individuals. If the survival curve of an
individual does not cross 0.5, then the result is infinity.
See Also
--------
predict_percentile
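Examples
---------
A minimal sketch, assuming the ``rossi`` setup used in other examples in this file:
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.predict_median(rossi)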
"""
return self.predict_percentile(X, 0.5, conditional_after=conditional_after)
def predict_expectation(self, X):
r"""
Compute the expected lifetime, :math:`E[T]`, using covariates X. The expectation is computed
using the fact that :math:`E[T] = \int_0^\infty P(T > t) dt = \int_0^\infty S(t) dt`. The integral is approximated with the trapezoidal rule.
Caution
--------
However, if the survival function does not converge to 0, then the expectation is really infinity and the returned
values are meaningless/too large. In that case, using ``predict_median`` or ``predict_percentile`` would be better.
Parameters
----------
X: numpy array or DataFrame
a (n,d) covariate numpy array or DataFrame. If a DataFrame, columns
can be in any order. If a numpy array, columns must be in the
same order as the training data.
Returns
-------
expectations : DataFrame
Notes
-----
If X is a DataFrame, the order of the columns does not matter. But
if X is an array, then the column ordering is assumed to be the
same as in the training dataset.
See Also
--------
predict_median
predict_percentile
"""
subjects = _get_index(X)
v = self.predict_survival_function(X)[subjects]
return pd.DataFrame(trapz(v.values.T, v.index), index=subjects)
def _compute_baseline_hazard(self, partial_hazards, name):
# https://stats.stackexchange.com/questions/46532/cox-baseline-hazard
ind_hazards = partial_hazards.copy()
ind_hazards["P"] *= ind_hazards["W"]
ind_hazards["E"] *= ind_hazards["W"]
ind_hazards_summed_over_durations = ind_hazards.groupby("T")[["P", "E"]].sum()
ind_hazards_summed_over_durations["P"] = ind_hazards_summed_over_durations["P"].loc[::-1].cumsum()
baseline_hazard = pd.DataFrame(
ind_hazards_summed_over_durations["E"] / ind_hazards_summed_over_durations["P"], columns=[name]
)
baseline_hazard.index.name = None
return baseline_hazard
def _compute_baseline_hazards(self):
if self.strata:
index = self.durations.unique()
baseline_hazards_ = pd.DataFrame(index=index).sort_index()
for name, stratum_predicted_partial_hazards_ in self._predicted_partial_hazards_.groupby(self.strata):
baseline_hazards_ = baseline_hazards_.merge(
self._compute_baseline_hazard(stratum_predicted_partial_hazards_, name),
left_index=True,
right_index=True,
how="left",
)
return baseline_hazards_.fillna(0)
return self._compute_baseline_hazard(self._predicted_partial_hazards_, name="baseline hazard")
def _compute_baseline_cumulative_hazard(self):
cumulative = self.baseline_hazard_.cumsum()
if not self.strata:
cumulative = cumulative.rename(columns={"baseline hazard": "baseline cumulative hazard"})
return cumulative
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
"""
survival_df = np.exp(-self.baseline_cumulative_hazard_)
if not self.strata:
survival_df = survival_df.rename(columns={"baseline cumulative hazard": "baseline survival"})
return survival_df
def plot(self, columns=None, hazard_ratios=False, ax=None, **errorbar_kwargs):
"""
Produces a visual representation of the coefficients (i.e. log hazard ratios), including their standard errors and magnitudes.
Parameters
----------
columns : list, optional
specify a subset of the columns to plot
hazard_ratios: bool, optional
by default, `plot` will present the log-hazard ratios (the coefficients). However, by turning this flag to True, the hazard ratios are presented instead.
errorbar_kwargs:
pass in additional plotting commands to matplotlib errorbar command
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot(hazard_ratios=True)
Returns
-------
ax: matplotlib axis
the matplotlib axis that can be edited.
"""
from matplotlib import pyplot as plt
if ax is None:
ax = plt.gca()
errorbar_kwargs.setdefault("c", "k")
errorbar_kwargs.setdefault("fmt", "s")
errorbar_kwargs.setdefault("markerfacecolor", "white")
errorbar_kwargs.setdefault("markeredgewidth", 1.25)
errorbar_kwargs.setdefault("elinewidth", 1.25)
errorbar_kwargs.setdefault("capsize", 3)
z = inv_normal_cdf(1 - self.alpha / 2)
user_supplied_columns = True
if columns is None:
user_supplied_columns = False
columns = self.params_.index
yaxis_locations = list(range(len(columns)))
log_hazards = self.params_.loc[columns].values.copy()
order = list(range(len(columns) - 1, -1, -1)) if user_supplied_columns else np.argsort(log_hazards)
if hazard_ratios:
exp_log_hazards = np.exp(log_hazards)
upper_errors = exp_log_hazards * (np.exp(z * self.standard_errors_[columns].values) - 1)
lower_errors = exp_log_hazards * (1 - np.exp(-z * self.standard_errors_[columns].values))
ax.errorbar(
exp_log_hazards[order],
yaxis_locations,
xerr=np.vstack([lower_errors[order], upper_errors[order]]),
**errorbar_kwargs
)
ax.set_xlabel("HR (%g%% CI)" % ((1 - self.alpha) * 100))
else:
symmetric_errors = z * self.standard_errors_[columns].values
ax.errorbar(log_hazards[order], yaxis_locations, xerr=symmetric_errors[order], **errorbar_kwargs)
ax.set_xlabel("log(HR) (%g%% CI)" % ((1 - self.alpha) * 100))
best_ylim = ax.get_ylim()
ax.vlines(1 if hazard_ratios else 0, -2, len(columns) + 1, linestyles="dashed", linewidths=1, alpha=0.65)
ax.set_ylim(best_ylim)
tick_labels = [columns[i] for i in order]
ax.set_yticks(yaxis_locations)
ax.set_yticklabels(tick_labels)
return ax
def plot_covariate_groups(self, covariates, values, plot_baseline=True, **kwargs):
"""
Produces a plot comparing the baseline survival curve of the model versus
what happens when a covariate(s) is varied over values in a group. This is useful to compare
subjects' survival as we vary covariate(s), all else being held equal. The baseline survival
curve is the predicted survival curve at the average values of the covariates in the original dataset.
Parameters
----------
covariates: string or list
a string (or list of strings) of the covariate(s) in the original dataset that we wish to vary.
values: 1d or 2d iterable
an iterable of the specific values we wish the covariate(s) to take on.
plot_baseline: bool
also display the baseline survival, defined as the survival at the mean of the original dataset.
kwargs:
pass in additional plotting commands.
Returns
-------
ax: matplotlib axis, or list of axes
the matplotlib axis (or axes) that can be edited.
Examples
---------
>>> from lifelines import datasets, CoxPHFitter
>>> rossi = datasets.load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>> cph.plot_covariate_groups('prio', values=np.arange(0, 15, 3), cmap='coolwarm')
.. image:: images/plot_covariate_example1.png
>>> # multiple variables at once
>>> cph.plot_covariate_groups(['prio', 'paro'], values=[
>>> [0, 0],
>>> [5, 0],
>>> [10, 0],
>>> [0, 1],
>>> [5, 1],
>>> [10, 1]
>>> ], cmap='coolwarm')
.. image:: images/plot_covariate_example2.png
>>> # if you have categorical variables, you can do the following to see the
>>> # effect of all the categories on one plot.
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> # same as:
>>> cph.plot_covariate_groups(['dummy1', 'dummy2', 'dummy3'], values=np.eye(3))
"""
from matplotlib import pyplot as plt
covariates = _to_list(covariates)
n_covariates = len(covariates)
values = np.asarray(values)
if len(values.shape) == 1:
values = values[None, :].T
if n_covariates != values.shape[1]:
raise ValueError("The number of covariates must equal to second dimension of the values array.")
for covariate in covariates:
if covariate not in self.params_.index:
raise KeyError("covariate `%s` is not present in the original dataset" % covariate)
set_kwargs_drawstyle(kwargs, "steps-post")
if self.strata is None:
axes = kwargs.pop("ax", None) or plt.figure().add_subplot(111)
x_bar = self._norm_mean.to_frame().T
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(n_covariates), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=axes, **kwargs)
if plot_baseline:
self.baseline_survival_.plot(ax=axes, ls=":", color="k", drawstyle="steps-post")
else:
axes = []
for stratum, baseline_survival_ in self.baseline_survival_.iteritems():
ax = plt.figure().add_subplot(1, 1, 1)
x_bar = self._norm_mean.to_frame().T
for name, value in zip(_to_list(self.strata), _to_tuple(stratum)):
x_bar[name] = value
X = pd.concat([x_bar] * values.shape[0])
if np.array_equal(np.eye(len(covariates)), values):
X.index = ["%s=1" % c for c in covariates]
else:
X.index = [", ".join("%s=%g" % (c, v) for (c, v) in zip(covariates, row)) for row in values]
for covariate, value in zip(covariates, values.T):
X[covariate] = value
self.predict_survival_function(X).plot(ax=ax, **kwargs)
if plot_baseline:
baseline_survival_.plot(
ax=ax, ls=":", label="stratum %s baseline survival" % str(stratum), drawstyle="steps-post"
)
plt.legend()
axes.append(ax)
return axes
def check_assumptions(
self, training_df, advice=True, show_plots=False, p_value_threshold=0.01, plot_n_bootstraps=10, columns=None
):
"""
Use this function to test the proportional hazards assumption. See usage example at
https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
Parameters
-----------
training_df: DataFrame
the original DataFrame used in the call to ``fit(...)`` or a sub-sampled version.
advice: boolean, optional
display advice as output to the user's screen
show_plots: boolean, optional
display plots of the scaled schoenfeld residuals and loess curves. This is an eyeball test for violations.
This will slow down the function significantly.
p_value_threshold: float, optional
the threshold to use to alert the user of violations. See note below.
plot_n_bootstraps: int, optional
in the plots displayed, also display plot_n_bootstraps bootstrapped loess curves. This will slow down
the function significantly.
columns: list, optional
specify a subset of columns to test.
Examples
----------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter
>>>
>>> rossi = load_rossi()
>>> cph = CoxPHFitter().fit(rossi, 'week', 'arrest')
>>>
>>> cph.check_assumptions(rossi)
Notes
-------
The ``p_value_threshold`` is arbitrarily set at 0.01. Under the null, some covariates
will be below the threshold (i.e. by chance). This is compounded when there are many covariates.
Similarly, when there are lots of observations, even minor deviances from the proportional hazard
assumption will be flagged.
With that in mind, it's best to use a combination of statistical tests and eyeball tests to
determine the most serious violations.
References
-----------
section 5 in https://socialsciences.mcmaster.ca/jfox/Books/Companion/appendices/Appendix-Cox-Regression.pdf,
http://www.mwsug.org/proceedings/2006/stats/MWSUG-2006-SD08.pdf,
http://eprints.lse.ac.uk/84988/1/06_ParkHendry2015-ReassessingSchoenfeldTests_Final.pdf
"""
if not training_df.index.is_unique:
raise IndexError(
"`training_df` index should be unique for this exercise. Please make it unique or use `.reset_index(drop=True)` to force a unique index"
)
residuals = self.compute_residuals(training_df, kind="scaled_schoenfeld")
test_results = proportional_hazard_test(
self, training_df, time_transform=["rank", "km"], precomputed_residuals=residuals
)
residuals_and_duration = residuals.join(training_df[self.duration_col])
counter = 0
n = residuals_and_duration.shape[0]
for variable in self.params_.index.intersection(columns or self.params_.index):
minumum_observed_p_value = test_results.summary.loc[variable, "p"].min()
if np.round(minumum_observed_p_value, 2) > p_value_threshold:
continue
counter += 1
if counter == 1:
if advice:
print(
fill(
"""The ``p_value_threshold`` is set at %g. Even under the null hypothesis of no violations, some covariates will be below the threshold by chance. This is compounded when there are many covariates. Similarly, when there are lots of observations, even minor deviances from the proportional hazard assumption will be flagged."""
% p_value_threshold,
width=100,
)
)
print()
print(
fill(
"""With that in mind, it's best to use a combination of statistical tests and visual tests to determine the most serious violations. Produce visual plots using ``check_assumptions(..., show_plots=True)`` and looking for non-constant lines. See link [A] below for a full example.""",
width=100,
)
)
print()
test_results.print_summary()
print()
print()
print(
"%d. Variable '%s' failed the non-proportional test: p-value is %s."
% (counter, variable, format_p_value(4)(minumum_observed_p_value)),
end="\n\n",
)
if advice:
values = training_df[variable]
value_counts = values.value_counts()
n_uniques = value_counts.shape[0]
# Arbitrarily chosen thresholds (at most 10 unique values, each appearing at least 5 times) to check whether the column can be used as a strata col.
# This should capture dichotomous / low-cardinality values.
if n_uniques <= 10 and value_counts.min() >= 5:
print(
fill(
" Advice: with so few unique values (only {0}), you can include `strata=['{1}', ...]` in the call in `.fit`. See documentation in link [E] below.".format(
n_uniques, variable
),
width=100,
)
)
else:
print(
fill(
""" Advice 1: the functional form of the variable '{var}' might be incorrect. That is, there may be non-linear terms missing. The proportional hazard test used is very sensitive to incorrect functional forms. See documentation in link [D] below on how to specify a functional form.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 2: try binning the variable '{var}' using pd.cut, and then specify it in `strata=['{var}', ...]` in the call in `.fit`. See documentation in link [B] below.""".format(
var=variable
),
width=100,
),
end="\n\n",
)
print(
fill(
""" Advice 3: try adding an interaction term with your time variable. See documentation in link [C] below.""",
width=100,
),
end="\n\n",
)
if show_plots:
from matplotlib import pyplot as plt
fig = plt.figure()
# plot variable against all time transformations.
for i, (transform_name, transformer) in enumerate(TimeTransformers().iter(["rank", "km"]), start=1):
p_value = test_results.summary.loc[(variable, transform_name), "p"]
ax = fig.add_subplot(1, 2, i)
y = residuals_and_duration[variable]
tt = transformer(self.durations, self.event_observed, self.weights)[self.event_observed.values]
ax.scatter(tt, y, alpha=0.75)
y_lowess = lowess(tt.values, y.values)
ax.plot(tt, y_lowess, color="k", alpha=1.0, linewidth=2)
# bootstrap some possible other lowess lines. This is an approximation of the 100% confidence intervals
for _ in range(plot_n_bootstraps):
ix = sorted(np.random.choice(n, n))
tt_ = tt.values[ix]
y_lowess = lowess(tt_, y.values[ix])
ax.plot(tt_, y_lowess, color="k", alpha=0.30)
best_xlim = ax.get_xlim()
ax.hlines(0, 0, tt.max(), linestyles="dashed", linewidths=1)
ax.set_xlim(best_xlim)
ax.set_xlabel("%s-transformed time\n(p=%.4f)" % (transform_name, p_value), fontsize=10)
fig.suptitle("Scaled Schoenfeld residuals of '%s'" % variable, fontsize=14)
plt.tight_layout()
plt.subplots_adjust(top=0.90)
if advice and counter > 0:
print(
dedent(
r"""
---
[A] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html
[B] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Bin-variable-and-stratify-on-it
[C] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Introduce-time-varying-covariates
[D] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Modify-the-functional-form
[E] https://lifelines.readthedocs.io/en/latest/jupyter_notebooks/Proportional%20hazard%20assumption.html#Stratification
"""
)
)
if counter == 0:
print("Proportional hazard assumption looks okay.")
@property
def score_(self):
"""
The concordance score (also known as the c-index) of the fit. The c-index is a generalization of the ROC AUC
to survival data, including censoring.
For this purpose, the ``score_`` is a measure of the predictive accuracy of the fitted model
onto the training dataset.
References
----------
https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
"""
# pylint: disable=access-member-before-definition
if not hasattr(self, "_concordance_score_"):
if self.strata:
# https://stats.stackexchange.com/questions/133817/stratified-concordance-index-survivalsurvconcordance
num_correct, num_tied, num_pairs = 0, 0, 0
for _, _df in self._predicted_partial_hazards_.groupby(self.strata):
if _df.shape[0] == 1:
continue
_num_correct, _num_tied, _num_pairs = _concordance_summary_statistics(
_df["T"].values, -_df["P"].values, _df["E"].values
)
num_correct += _num_correct
num_tied += _num_tied
num_pairs += _num_pairs
else:
df = self._predicted_partial_hazards_
num_correct, num_tied, num_pairs = _concordance_summary_statistics(
df["T"].values, -df["P"].values, df["E"].values
)
self._concordance_score_ = _concordance_ratio(num_correct, num_tied, num_pairs)
return self._concordance_score_
return self._concordance_score_
| 1.726563 | 2 |
nlp_server/config/test/test_config.py | asevans48/NLPServer | 0 | 7035 | """
Test configuration loading
@author aevans
"""
import os
from nlp_server.config import load_config
def test_load_config():
"""
Test loading a configuration
"""
current_dir = os.path.curdir
test_path = os.path.sep.join([current_dir, 'data', 'test_config.json'])
cfg = load_config.load_config(test_path)
assert cfg is not None
assert cfg.use_gpu is False
| 2.484375 | 2 |
frappe/utils/safe_exec.py | ektai/frappe3 | 0 | 7036 |
import os, json, inspect
import mimetypes
from html2text import html2text
from RestrictedPython import compile_restricted, safe_globals
import RestrictedPython.Guards
import frappe
import frappe.utils
import frappe.utils.data
from frappe.website.utils import (get_shade, get_toc, get_next_link)
from frappe.modules import scrub
from frappe.www.printview import get_visible_columns
import frappe.exceptions
class ServerScriptNotEnabled(frappe.PermissionError): pass
def safe_exec(script, _globals=None, _locals=None):
# server scripts must be enabled via site_config.json (server_script_enabled)
if not frappe.conf.server_script_enabled:
frappe.msgprint('Please Enable Server Scripts')
raise ServerScriptNotEnabled
# build globals
exec_globals = get_safe_globals()
if _globals:
exec_globals.update(_globals)
# execute script compiled by RestrictedPython
exec(compile_restricted(script), exec_globals, _locals) # pylint: disable=exec-used
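# A minimal, hedged sketch (not part of frappe's API): it shows only the bare RestrictedPython
# flow that safe_exec() above builds on, reusing the compile_restricted/safe_globals imports at
# the top of this module. The script string and variable names are invented for illustration.
def _restricted_exec_demo():
    demo_globals = dict(safe_globals)
    byte_code = compile_restricted("x = 1 + 2")
    exec(byte_code, demo_globals)  # pylint: disable=exec-used
    return demo_globals["x"]  # -> 3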
def get_safe_globals():
datautils = frappe._dict()
if frappe.db:
date_format = frappe.db.get_default("date_format") or "yyyy-mm-dd"
time_format = frappe.db.get_default("time_format") or "HH:mm:ss"
else:
date_format = "yyyy-mm-dd"
time_format = "HH:mm:ss"
add_module_properties(frappe.utils.data, datautils, lambda obj: hasattr(obj, "__call__"))
if "_" in getattr(frappe.local, 'form_dict', {}):
del frappe.local.form_dict["_"]
user = getattr(frappe.local, "session", None) and frappe.local.session.user or "Guest"
out = frappe._dict(
# make available limited methods of frappe
json=json,
dict=dict,
frappe=frappe._dict(
_=frappe._,
_dict=frappe._dict,
flags=frappe.flags,
format=frappe.format_value,
format_value=frappe.format_value,
date_format=date_format,
time_format=time_format,
format_date=frappe.utils.data.global_date_format,
form_dict=getattr(frappe.local, 'form_dict', {}),
get_meta=frappe.get_meta,
get_doc=frappe.get_doc,
get_cached_doc=frappe.get_cached_doc,
get_list=frappe.get_list,
get_all=frappe.get_all,
get_system_settings=frappe.get_system_settings,
utils=datautils,
get_url=frappe.utils.get_url,
render_template=frappe.render_template,
msgprint=frappe.msgprint,
user=user,
get_fullname=frappe.utils.get_fullname,
get_gravatar=frappe.utils.get_gravatar_url,
full_name=frappe.local.session.data.full_name if getattr(frappe.local, "session", None) else "Guest",
request=getattr(frappe.local, 'request', {}),
session=frappe._dict(
user=user,
csrf_token=frappe.local.session.data.csrf_token if getattr(frappe.local, "session", None) else ''
),
socketio_port=frappe.conf.socketio_port,
get_hooks=frappe.get_hooks,
),
style=frappe._dict(
border_color='#d1d8dd'
),
get_toc=get_toc,
get_next_link=get_next_link,
_=frappe._,
get_shade=get_shade,
scrub=scrub,
guess_mimetype=mimetypes.guess_type,
html2text=html2text,
dev_server=1 if os.environ.get('DEV_SERVER', False) else 0
)
add_module_properties(frappe.exceptions, out.frappe, lambda obj: inspect.isclass(obj) and issubclass(obj, Exception))
if not frappe.flags.in_setup_help:
out.get_visible_columns = get_visible_columns
out.frappe.date_format = date_format
out.frappe.time_format = time_format
out.frappe.db = frappe._dict(
get_list = frappe.get_list,
get_all = frappe.get_all,
get_value = frappe.db.get_value,
set_value = frappe.db.set_value,
get_single_value = frappe.db.get_single_value,
get_default = frappe.db.get_default,
escape = frappe.db.escape,
)
if frappe.response:
out.frappe.response = frappe.response
out.update(safe_globals)
# default writer allows write access
out._write_ = _write
out._getitem_ = _getitem
# allow iterators and list comprehension
out._getiter_ = iter
out._iter_unpack_sequence_ = RestrictedPython.Guards.guarded_iter_unpack_sequence
out.sorted = sorted
return out
def _getitem(obj, key):
# guard function for RestrictedPython
# allow any key to be accessed as long as it does not start with underscore
if isinstance(key, str) and key.startswith('_'):
raise SyntaxError('Key starts with _')
return obj[key]
def _write(obj):
# guard function for RestrictedPython
# allow writing to any object
return obj
def add_module_properties(module, data, filter_method):
for key, obj in module.__dict__.items():
if key.startswith("_"):
# ignore
continue
if filter_method(obj):
# only allow functions
data[key] = obj | 2.09375 | 2 |
simplejson/ordered_dict.py | BarracudaPff/code-golf-data-pythpn | 0 | 7037 | <gh_stars>0
"""Drop-in replacement for collections.OrderedDict by <NAME>
http://code.activestate.com/recipes/576693/
"""
# DictMixin supplies the derived mapping methods used by the class below (Python 2's UserDict module).
from UserDict import DictMixin

try:
all
except NameError:
def all(seq):
for elem in seq:
if not elem:
return False
return True
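# A minimal usage sketch (illustration only, not part of simplejson): it shows the
# insertion-order guarantee provided by the OrderedDict class defined below. The keys and
# values here are invented for the example.
def _ordered_dict_demo():
    d = OrderedDict()  # resolved at call time, after the class definition below
    d["b"] = 1
    d["a"] = 2
    return list(d)  # -> ["b", "a"], i.e. insertion order is preserved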
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError("expected at most 1 arguments, got %d" % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end]
self.__map = {}
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError("dictionary is empty")
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return "%s()" % (self.__class__.__name__,)
return "%s(%r)" % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
return len(self) == len(other) and all(p == q for p, q in zip(self.items(), other.items()))
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other | 2.59375 | 3 |
Water.py | KRHS-GameProgramming-2015/Adlez | 0 | 7038 | <reponame>KRHS-GameProgramming-2015/Adlez
from HardBlock import *
class Water(HardBlock):
def __init__(self, pos=[0,0], blockSize = 25):
image = "Block/Block Images/water.png"
HardBlock.__init__(self, image, pos, blockSize)
def update(*args):
pass
| 2.40625 | 2 |
baselines/bc.py | bgalbraith/minerl-haiku-baselines | 2 | 7039 | <filename>baselines/bc.py<gh_stars>1-10
import dill
import haiku as hk
import jax
from jax.experimental import optix
import jax.numpy as jnp
from dataset import load_data
MINERL_ENV = 'MineRLTreechopVectorObf-v0'
PARAMS_FILENAME = 'bc_params_treechop.pkl'
class PovStack(hk.Module):
""" PovStack is a module for processing the point-of-view image data that
comes from the agent's viewport. This input is in NHWC format for a shape
of (N, 64, 64, 3).
This model is inspired from
https://github.com/minerllabs/baselines/blob/master/general/chainerrl/baselines/behavioral_cloning.py
"""
def __init__(self, name=None):
super().__init__(name=name)
conv_0 = hk.Conv2D(output_channels=32,
kernel_shape=(8, 8),
stride=4,
padding='SAME',
name='conv_0')
layer_0 = (conv_0, jax.nn.relu)
conv_1 = hk.Conv2D(output_channels=64,
kernel_shape=(4, 4),
stride=2,
padding='SAME',
name='conv_1')
layer_1 = (conv_1, jax.nn.relu)
conv_2 = hk.Conv2D(output_channels=64,
kernel_shape=(3, 3),
stride=1,
padding='SAME',
name='conv_2')
layer_2 = (conv_2, jax.nn.relu)
layer_3 = (hk.Flatten(),
hk.Linear(512, name='fc_0'), jax.nn.relu,
hk.Linear(128, name='fc_1'), jax.nn.relu)
self.layers = layer_0 + layer_1 + layer_2 + layer_3
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
class VectorStack(hk.Module):
""" VectorStack is a module for processing the obfuscated "vector" data that
is included in the agent's observation. This is a densely encoded form of
the discrete information regarding the state of the agent other than the
viewport, e.g. current inventory. The input is of shape (N, 64)
"""
def __init__(self, name=None):
super().__init__(name=name)
layer_0 = (hk.Linear(32, name='fc_0'), jax.nn.relu)
self.layers = layer_0
def __call__(self, x):
for layer in self.layers:
x = layer(x)
return x
def behavioral_cloning(batch):
""" The full forward model definition """
x_0 = PovStack(name='pov_stack')(batch[0])
x_1 = VectorStack(name='vector_stack')(batch[1])
x = jnp.concatenate((x_0, x_1), axis=1)
return jnp.tanh(hk.Linear(64)(x))
@jax.jit
def mse_loss(logits, labels):
""" Mean Squared Error loss """
return jnp.mean(jnp.power(logits - labels, 2))
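# A hedged shape-check sketch (not part of the original baseline): it runs the transformed
# forward pass on zero tensors to document the expected batch layout, inferred from how
# behavioral_cloning() and mse_loss() index the batch: (pov NHWC images, obfuscated 64-d
# vectors, 64-d action targets). The batch size of 4 is invented for illustration.
def _forward_shape_check():
    net = hk.transform(behavioral_cloning)
    rng = jax.random.PRNGKey(0)
    batch = (jnp.zeros((4, 64, 64, 3)), jnp.zeros((4, 64)), jnp.zeros((4, 64)))
    params = net.init(rng, batch)
    out = net.apply(params, rng, batch)
    return out.shape  # expected: (4, 64)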
def main():
net = hk.transform(behavioral_cloning)
opt = optix.adam(0.001)
@jax.jit
def loss(params, batch):
""" The loss criterion for our model """
logits = net.apply(params, None, batch)
return mse_loss(logits, batch[2])
@jax.jit
def update(opt_state, params, batch):
grads = jax.grad(loss)(params, batch)
updates, opt_state = opt.update(grads, opt_state)
params = optix.apply_updates(params, updates)
return params, opt_state
@jax.jit
def accuracy(params, batch):
""" Simply report the loss for the current batch """
logits = net.apply(params, None, batch)
return mse_loss(logits, batch[2])
train_dataset, val_dataset = load_data(MINERL_ENV,
batch_size=32, epochs=100)
rng = jax.random.PRNGKey(2020)
batch = next(train_dataset)
params = net.init(rng, batch)
opt_state = opt.init(params)
for i, batch in enumerate(train_dataset):
params, opt_state = update(opt_state, params, batch)
if i % 1000 == 0:
print(accuracy(params, val_dataset))
if i % 10000 == 0:
with open(PARAMS_FILENAME, 'wb') as fh:
dill.dump(params, fh)
with open(PARAMS_FILENAME, 'wb') as fh:
dill.dump(params, fh)
if __name__ == '__main__':
main()
| 2.5 | 2 |
qiskit/circuit/library/templates/__init__.py | ajavadia/qiskit-sdk-py | 15 | 7040 | <reponame>ajavadia/qiskit-sdk-py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
A library of template circuits.
Templates are circuits that compute the identity. They find use
in circuit optimization where matching part of the template allows the compiler
to replace the match with the inverse of the remainder from the template.
"""
from .nct.template_nct_2a_1 import template_nct_2a_1
from .nct.template_nct_2a_2 import template_nct_2a_2
from .nct.template_nct_2a_3 import template_nct_2a_3
from .nct.template_nct_4a_1 import template_nct_4a_1
from .nct.template_nct_4a_2 import template_nct_4a_2
from .nct.template_nct_4a_3 import template_nct_4a_3
from .nct.template_nct_4b_1 import template_nct_4b_1
from .nct.template_nct_4b_2 import template_nct_4b_2
from .nct.template_nct_5a_1 import template_nct_5a_1
from .nct.template_nct_5a_2 import template_nct_5a_2
from .nct.template_nct_5a_3 import template_nct_5a_3
from .nct.template_nct_5a_4 import template_nct_5a_4
from .nct.template_nct_6a_1 import template_nct_6a_1
from .nct.template_nct_6a_2 import template_nct_6a_2
from .nct.template_nct_6a_3 import template_nct_6a_3
from .nct.template_nct_6a_4 import template_nct_6a_4
from .nct.template_nct_6b_1 import template_nct_6b_1
from .nct.template_nct_6b_2 import template_nct_6b_2
from .nct.template_nct_6c_1 import template_nct_6c_1
from .nct.template_nct_7a_1 import template_nct_7a_1
from .nct.template_nct_7b_1 import template_nct_7b_1
from .nct.template_nct_7c_1 import template_nct_7c_1
from .nct.template_nct_7d_1 import template_nct_7d_1
from .nct.template_nct_7e_1 import template_nct_7e_1
from .nct.template_nct_9a_1 import template_nct_9a_1
from .nct.template_nct_9c_1 import template_nct_9c_1
from .nct.template_nct_9c_2 import template_nct_9c_2
from .nct.template_nct_9c_3 import template_nct_9c_3
from .nct.template_nct_9c_4 import template_nct_9c_4
from .nct.template_nct_9c_5 import template_nct_9c_5
from .nct.template_nct_9c_6 import template_nct_9c_6
from .nct.template_nct_9c_7 import template_nct_9c_7
from .nct.template_nct_9c_8 import template_nct_9c_8
from .nct.template_nct_9c_9 import template_nct_9c_9
from .nct.template_nct_9c_10 import template_nct_9c_10
from .nct.template_nct_9c_11 import template_nct_9c_11
from .nct.template_nct_9c_12 import template_nct_9c_12
from .nct.template_nct_9d_1 import template_nct_9d_1
from .nct.template_nct_9d_2 import template_nct_9d_2
from .nct.template_nct_9d_3 import template_nct_9d_3
from .nct.template_nct_9d_4 import template_nct_9d_4
from .nct.template_nct_9d_5 import template_nct_9d_5
from .nct.template_nct_9d_6 import template_nct_9d_6
from .nct.template_nct_9d_7 import template_nct_9d_7
from .nct.template_nct_9d_8 import template_nct_9d_8
from .nct.template_nct_9d_9 import template_nct_9d_9
from .nct.template_nct_9d_10 import template_nct_9d_10
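# A hedged, illustrative helper (not part of the qiskit API surface): every template imported
# above is a circuit that composes to the identity, which can be spot-checked as below.
# Operator is assumed to be importable from qiskit.quantum_info.
def _template_is_identity(template_circuit):
    import numpy as np
    from qiskit.quantum_info import Operator
    return np.allclose(Operator(template_circuit).data, np.eye(2 ** template_circuit.num_qubits))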
| 1.570313 | 2 |
Tests/test_ironmath.py | btddg28/ironpython | 0 | 7041 | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# <EMAIL>. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
#
# test Microsoft.Scripting.Math
#
from iptest.assert_util import *
skiptest("win32")
from System import *
import clr
#silverlight already has this
if is_cli:
math_assembly = (1).GetType().Assembly
clr.AddReference(math_assembly)
load_iron_python_test()
import IronPythonTest
if is_net40:
from System.Numerics import BigInteger, Complex
else:
from Microsoft.Scripting.Math import BigInteger
from Microsoft.Scripting.Math import Complex64 as Complex
class myFormatProvider(IFormatProvider):
def ToString():pass
p = myFormatProvider()
def test_bigint():
AreEqual(BigInteger.Add(1,99999999999999999999999999999999999999999999999999999999999) ,BigInteger.Subtract(100000000000000000000000000000000000000000000000000000000001,1))
AreEqual(BigInteger.Multiply(400,500) , BigInteger.Divide(1000000,5))
AreEqual(BigInteger.Multiply(400,8) , BigInteger.LeftShift(400,3))
AreEqual(BigInteger.Divide(400,8) , BigInteger.RightShift(400,3))
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(400,100),100) , 400)
AreEqual(BigInteger.RightShift(BigInteger.LeftShift(-12345678987654321,100),100) , -12345678987654321)
if is_net40:
AssertError(ValueError, BigInteger.RightShift, 400, -100)
AssertError(ValueError, BigInteger.LeftShift, 400, -100)
AssertError(ValueError, BigInteger.RightShift, -12345678987654321, -100)
AssertError(ValueError, BigInteger.LeftShift, -12345678987654321, -100)
else:
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(400,-100),-100) , 400)
AreEqual(BigInteger.LeftShift(BigInteger.RightShift(-12345678987654321,-100),-100) , -12345678987654321)
AreEqual(BigInteger(-123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement().OnesComplement() , -123456781234567812345678123456781234567812345678123456781234567812345678)
AreEqual(BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement() , -(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678 + 1 ))
Assert(BigInteger.Xor(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678,BigInteger(-1234567812345678123456781234567812345678123456781234567812345678123456781234567812345678).OnesComplement()) , -1)
AreEqual(BigInteger.BitwiseAnd(0xff00ff00,BigInteger.BitwiseOr(0x00ff00ff,0xaabbaabb)) , BigInteger(0xaa00aa00))
AreEqual(BigInteger.Mod(BigInteger(-9999999999999999999999999999999999999999),1000000000000000000) , -BigInteger.Mod(9999999999999999999999999999999999999999,BigInteger(-1000000000000000000)))
AreEqual(BigInteger.ToInt64(0x7fffffffffffffff) , 9223372036854775807)
AssertError(OverflowError, BigInteger.ToInt64, 0x8000000000000000)
AreEqual(BigInteger(-0).ToBoolean(p) , False )
AreEqual(BigInteger(-1212321.3213).ToBoolean(p) , True )
AreEqual(BigInteger(1212321384892342394723947).ToBoolean(p) , True )
AreEqual(BigInteger(0).ToChar(p) , Char.MinValue)
AreEqual(BigInteger(65).ToChar(p) , IConvertible.ToChar('A', p))
AreEqual(BigInteger(0xffff).ToChar(p) , Char.MaxValue)
AssertError(OverflowError, BigInteger(-1).ToChar, p)
AreEqual(BigInteger(100).ToDouble(p) , 100.0)
AreEqual(BigInteger(BigInteger(100).ToDouble(p)).ToSingle(p) , BigInteger(100.1213123).ToFloat())
Assert(BigInteger(100) != 100.32)
AreEqual(BigInteger(100) , 100.0)
Assert( 100.32 != BigInteger(100))
AreEqual(100.0 , BigInteger(100) )
def test_big_1():
for (a, m, t,x) in [
(7, "ToSByte", SByte,2),
(8, "ToByte", Byte, 0),
(15, "ToInt16", Int16,2),
(16, "ToUInt16", UInt16,0),
(31, "ToInt32", Int32,2),
(32, "ToUInt32", UInt32,0),
(63, "ToInt64", Int64,2),
(64, "ToUInt64", UInt64,0)
]:
b = BigInteger(-x ** a )
left = getattr(b, m)(p)
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)(p)
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)(p)
right = t.MaxValue - t.MaxValue
AreEqual(left, 0)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m),p)
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m),p)
def test_big_2():
for (a, m, t,x) in [
(31, "ToInt32",Int32,2),
(32, "ToUInt32",UInt32,0),
(63, "ToInt64",Int64,2),
(64, "ToUInt64",UInt64,0)
]:
b = BigInteger(-x ** a )
left = getattr(b, m)()
right = t.MinValue
AreEqual(left, right)
b = BigInteger(2 ** a -1)
left = getattr(b, m)()
right = t.MaxValue
AreEqual(left, right)
b = BigInteger(0)
left = getattr(b, m)()
right = t.MaxValue - t.MaxValue
AreEqual(left, right)
AssertError(OverflowError,getattr(BigInteger(2 ** a ), m))
AssertError(OverflowError,getattr(BigInteger(-1 - x ** a ), m))
#complex
def test_complex():
AreEqual(
Complex.Add(
Complex(BigInteger(9999), -1234),
Complex.Conjugate(Complex(9999, -1234)) ),
Complex.Multiply(BigInteger(9999), 2) )
AreEqual(
Complex.Add(
Complex(99999.99e-200, 12345.88e+100),
Complex.Negate(Complex(99999.99e-200, 12345.88e+100)) ),
Complex.Subtract(
Complex(99999.99e-200, 12345.88e+100),
Complex(99999.99e-200, 12345.88e+100) ))
AreEqual(
Complex.Divide(4+2j,2),
(2 + 1j) )
Assert(not hasattr(Complex, "Mod")) #IP 1.x had limited support for modulo which has been removed
def test_bool_misc():
if is_net40:
def is_zero(bigint):
return bigint.IsZero
else:
def is_zero(bigint):
return bigint.IsZero()
AreEqual(BigInteger(-1234).Sign, -1)
AreEqual(is_zero(BigInteger(-1234)), False)
AreEqual(BigInteger(-1234).IsNegative(), True)
AreEqual(BigInteger(-1234).IsPositive(), False)
AreEqual(BigInteger(0).Sign, 0)
AreEqual(is_zero(BigInteger(0)), True)
AreEqual(BigInteger(0).IsNegative(), False)
AreEqual(BigInteger(0).IsPositive(), False)
AreEqual(BigInteger(1234).Sign, 1)
AreEqual(is_zero(BigInteger(1234)), False)
AreEqual(BigInteger(1234).IsNegative(), False)
AreEqual(BigInteger(1234).IsPositive(), True)
def test_byte_conversions():
def CheckByteConversions(bigint, bytes):
SequencesAreEqual(bigint.ToByteArray(), bytes)
AreEqual(BigInteger.Create(Array[Byte](bytes)), bigint)
CheckByteConversions(BigInteger(0x00), [0x00])
CheckByteConversions(BigInteger(-0x01), [0xff])
CheckByteConversions(BigInteger(-0x81), [0x7f, 0xff])
CheckByteConversions(BigInteger(-0x100), [0x00, 0xff])
CheckByteConversions(BigInteger(-0x1000), [0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000), [0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(-0x100000), [0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x10000000), [0x00, 0x00, 0x00, 0xf0])
CheckByteConversions(BigInteger(-0x100000000), [0x00, 0x00, 0x00, 0x00, 0xff])
CheckByteConversions(BigInteger(0x7f), [0x7f])
CheckByteConversions(BigInteger(0xff), [0xff, 0x00])
CheckByteConversions(BigInteger(0x0201), [0x01, 0x02])
CheckByteConversions(BigInteger(0xf2f1), [0xf1, 0xf2, 0x00])
CheckByteConversions(BigInteger(0x03020100), [0x00, 0x01, 0x02, 0x03])
CheckByteConversions(BigInteger(0x0403020100), [0x00, 0x01, 0x02, 0x03, 0x04])
CheckByteConversions(BigInteger(0x0706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07])
CheckByteConversions(BigInteger(0x080706050403020100), [0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08])
def test_dword_conversions():
def CheckDwordConversions(bigint, dwords):
SequencesAreEqual(bigint.GetWords(), dwords)
if bigint == BigInteger.Zero:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
0,
Array[UInt32](dwords),),
bigint)
else:
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
1,
Array[UInt32](dwords)),
bigint)
AreEqual(
IronPythonTest.System_Scripting_Math.CreateBigInteger(
-1,
Array[UInt32](dwords)),
BigInteger.Negate(bigint))
CheckDwordConversions(BigInteger(0), [0x00000000])
CheckDwordConversions(BigInteger(1), [0x00000001])
CheckDwordConversions(BigInteger((1<<31)), [0x80000000])
CheckDwordConversions(BigInteger(((1<<31) + 9)), [0x80000009])
CheckDwordConversions(BigInteger((1<<32)), [0x00000000, 0x00000001])
def test_misc():
AssertError(ArgumentException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, (1, 2, 3))
AssertError(ArgumentNullException, IronPythonTest.System_Scripting_Math.CreateBigInteger, 0, None)
AreEqual(BigInteger(1).CompareTo(None), 1)
if is_net40:
AreEqual(BigInteger(1).CompareTo(True), 0)
else:
AssertError(ArgumentException, BigInteger(1).CompareTo, True)
run_test(__name__)
| 2 | 2 |
python/lib/viewer/gener_q_vs_w_for_df.py | timtyree/bgmc | 0 | 7042 | <reponame>timtyree/bgmc
import matplotlib.pyplot as plt, numpy as np, pandas as pd,os
from ..model import recall_powerlaw_fits_to_full_models
from .. import compute_power_rmse
from .bluf import *
from ..measure.powerlaw import *
from .gener_q_vs_w_for_result_folder import *
def q_vs_w_plotter_function_from_df(ax,df):
# npartitions=os.cpu_count()
fontsize=16
printing=False
alpha=0.5
markersize=50#5
xlabel=r'q (cm$^{-2}$)'
ylabel=r'w (Hz cm$^{-2}$)'
c='C3'
xlim=[.1,1.05]
ylim=[0.,20]
# xlim=[-0.05,1.05]
# ylim=[1e-1,20]#[1e-5,1e4]
legend_fontsize=fontsize-6
title_fontsize=fontsize-8
x_values=df.q.values
y_values=df.w.values
#extract column values
r_values=np.array(sorted(set(df.r.values)))#cm
D_values=np.array(sorted(set(df.D.values)))#cm^2/s
L_values=np.array(sorted(set(df.L.values)))#cm
A_values=L_values**2#cm^2
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa_values=np.array(sorted(set(df.varkappa.values)))#cm^2/s
x0_values=np.array(sorted(set(df.x0.values)))#cm
set_second_values=np.array(sorted(set(df.set_second.values)))
reflect_values=np.array(sorted(set(df.reflect.values)))
no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
no_attraction_values=np.array(sorted(set(df.no_attraction.values)))
neighbor_values=np.array(sorted(set(df.neighbor.values)))
force_code_values=np.array(sorted(set(df.force_code.values)))
if printing:
print(f"input parameters:")
print(f"r~{r_values}")
print(f"D~{D_values}")
print(f"L~{L_values}")
print(f"kappa~{kappa_values}")
print(f"a~{varkappa_values}")
print(f"x0~{x0_values}")
print(f"set_second~{set_second_values}")
print(f"reflect~{reflect_values}")
print(f"no_repulsion~{no_repulsion_values}")
print(f"no_attraction~{no_attraction_values}")
print(f"neighbor~{neighbor_values}")
print(f"force_code~{force_code_values}")
#TDOO: compute xy values
#compute title=
# title=r"$\nu$="+f"{m:.3f}, "+f"M={M:.3f}"+r" cm$^2$/s\n"
# additional parameters optional/uncommentable...
title=f"force_code={int(force_code_values[0])}, neighbors={int(neighbor_values[0])}, reflect={int(reflect_values[0])}\n"
title=title+r'$r=$'+f'{r_values[0]:.5f} cm, '
title=title+r'$\kappa=$'+f'{kappa_values[0]:.5f} Hz\n'
title=title+r'$D=$'+f'{D_values[0]:.5f} cm'+r'$^2$/s, '
title=title+r'$a=$'+f'{varkappa_values[0]:.5f} cm'+r'$^2$/s, '
title=title+r'$x_0=$'+f'{x0_values[0]:.0f} cm\n'
#DONE: plot the data
PlotFullModels(ax,xlim=[0.1,1])
FormatAxes(ax,xlim,ylim,xlabel,ylabel,title,fontsize=fontsize,use_loglog=False)#,**kwargs)
PlotTrial(ax, x_values,y_values,title,title_fontsize)
ax.legend(fontsize=legend_fontsize,ncol=1,loc='upper left')
return True
def q_vs_Delta_w_plotter_function_from_df(ax,df):
fontsize=16
use_Delta_thresh=True
use_error_bars=True
percent_uncertainty=1.
printing=False
alpha=0.5
markersize=50#5
xlabel=r'q (cm$^{-2}$)'
ylabel=r'w (Hz cm$^{-2}$)'
c='C3'
xlim=[.1,1.05]
ylim=[-1,1]
legend_fontsize=fontsize-6
title_fontsize=fontsize-8
use_error_bars=True
percent_uncertainty=1.
x_values=df.q.values
y_values=df.w.values
if use_error_bars:
yerr_values=percent_uncertainty/100*y_values
#compute the error
model_name,m,M=compute_nearest_powerlaw_fit(x_values,y_values)
yhat_values=M*x_values**m
Delta_y_values=y_values-yhat_values
y_values=Delta_y_values
# TODO: compute rmse between
# the particle model and the full model
rmse_particle_vs_full=np.sqrt(np.mean(Delta_y_values**2))
Delta_thresh=rmse_particle_vs_full
#TODO: compute the apparent powerlaw fit of the particle model
x_values=df.q.values
y_values=df.w.values
B,Delta_B,m,Delta_m,Rsq=fit_power_law(x_values,y_values)
rmse_particle_vs_powerlawfit=compute_power_rmse(x_values,y_values, m, B)
M, Delta_M= comp_power_scale(B,Delta_B,m,Delta_m)
Delta_y_values=y_values-yhat_values
y_values=Delta_y_values
#extract column values
r_values=np.array(sorted(set(df.r.values)))#cm
D_values=np.array(sorted(set(df.D.values)))#cm^2/s
L_values=np.array(sorted(set(df.L.values)))#cm
A_values=L_values**2#cm^2
kappa_values=np.array(sorted(set(df.kappa.values)))#1/s
varkappa_values=np.array(sorted(set(df.varkappa.values)))#cm^2/s
x0_values=np.array(sorted(set(df.x0.values)))#cm
set_second_values=np.array(sorted(set(df.set_second.values)))
reflect_values=np.array(sorted(set(df.reflect.values)))
no_repulsion_values=np.array(sorted(set(df.no_repulsion.values)))
no_attraction_values=np.array(sorted(set(df.no_attraction.values)))
neighbor_values=np.array(sorted(set(df.neighbor.values)))
force_code_values=np.array(sorted(set(df.force_code.values)))
if printing:
print(f"input parameters:")
print(f"r~{r_values}")
print(f"D~{D_values}")
print(f"L~{L_values}")
print(f"kappa~{kappa_values}")
print(f"a~{varkappa_values}")
print(f"x0~{x0_values}")
print(f"set_second~{set_second_values}")
print(f"reflect~{reflect_values}")
print(f"no_repulsion~{no_repulsion_values}")
print(f"no_attraction~{no_attraction_values}")
print(f"neighbor~{neighbor_values}")
print(f"force_code~{force_code_values}")
#TODO: compute the powerlaw fit for the x and y values and set them equal to m,M,Delta_m,Delta_M
#TODO: modify title to take m,M,Delta_m,Delta_M
#compute title= string
title=r"$\nu$="+f"{m:.3f}"+r"$\pm$"+f"{Delta_m:.3f}"
title=title+f", M={M:.3f}"+r"$\pm$"+f"{Delta_M:.3f} "+r"cm$^{2(\nu-1)}$/s"
title=title+f"\n"+r"RMSE$_{particle\;vs\;full}=$"+f"{rmse_particle_vs_full:.3f} Hz/cm"+r"^2"+f"\n"
#additional parameters optional/uncommentable...
# title=f"force_code={int(force_code_values[0])}, neighbors={int(neighbor_values[0])}, reflect={int(reflect_values[0])}\n"
# title=title+r'$r=$'+f'{r_values[0]:.2f} cm, '
# title=title+r'$\kappa=$'+f'{kappa_values[0]:.2f} Hz\n'
# title=title+r'$D=$'+f'{D_values[0]:.2f} cm'+r'$^2$/s, '
# title=title+r'$a=$'+f'{varkappa_values[0]:.2f} cm'+r'$^2$/s, '
# title=title+r'$x_0=$'+f'{x0_values[0]:.0f} cm\n'
# plot_horizontal solid & dashed
plot_horizontal(ax,xlim,Delta_thresh=Delta_thresh,use_Delta_thresh=use_Delta_thresh)
FormatAxes(ax,xlim,ylim,xlabel,ylabel,title,fontsize=fontsize,use_loglog=False)#,**kwargs)
#plot the data
if not use_error_bars:
PlotTrial(ax, x_values,y_values,title,title_fontsize)
else:
PlotErrorBarScatter(ax, x_values,y_values,yerr_values,title,title_fontsize)
# ax.legend(fontsize=legend_fontsize,ncol=1,loc='upper left')
return True
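# A hedged usage sketch (not part of the library): it mainly documents the DataFrame columns
# the plotters above expect. The numeric values are invented, and the call assumes the
# package's own helpers imported above (PlotFullModels, FormatAxes, PlotTrial) are available.
def _example_usage():
    df = pd.DataFrame({
        "q": [0.2, 0.4, 0.8], "w": [1.0, 3.5, 12.0],
        "r": 0.1, "D": 2.0, "L": 5.0, "kappa": 500.0, "varkappa": 5.0, "x0": 0.0,
        "set_second": 0, "reflect": 0, "no_repulsion": 0, "no_attraction": 0,
        "neighbor": 1, "force_code": 1,
    })
    fig, ax = plt.subplots()
    q_vs_w_plotter_function_from_df(ax, df)
    return fig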
| 2.046875 | 2 |
decatt/model.py | achyudh/castor | 132 | 7043 | import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class DecAtt(nn.Module):
def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
training=True, project_input=True,
use_intra_attention=False, distance_biases=10, max_sentence_length=30):
"""
Create the model based on MLP networks.
:param num_units: size of the networks
:param num_classes: number of classes in the problem
:param embedding_size: size of each word embedding
:param use_intra_attention: whether to use intra-attention model
:param training: whether to create training tensors (optimizer)
:param project_input: whether to project input embeddings to a
different dimensionality
:param distance_biases: number of different distances with biases used
in the intra-attention model
"""
super().__init__()
self.arch = "DecAtt"
self.num_units = num_units
self.num_classes = num_classes
self.project_input = project_input
self.embedding_size = embedding_size
self.distance_biases = distance_biases
self.intra_attention = False
self.max_sentence_length = max_sentence_length
self.device = device
self.bias_embedding = nn.Embedding(max_sentence_length,1)
self.linear_layer_project = nn.Linear(embedding_size, num_units, bias=False)
#self.linear_layer_intra = nn.Sequential(nn.Linear(num_units, num_units), nn.ReLU(), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_attend = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_compare = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU())
self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_units), nn.ReLU(),
nn.Linear(num_units, num_classes), nn.LogSoftmax())
self.init_weight()
def init_weight(self):
self.linear_layer_project.weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
self.linear_layer_attend[1].bias.data.fill_(0)
self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
self.linear_layer_attend[4].bias.data.fill_(0)
self.linear_layer_compare[1].weight.data.normal_(0, 0.01)
self.linear_layer_compare[1].bias.data.fill_(0)
self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
self.linear_layer_compare[4].bias.data.fill_(0)
self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[1].bias.data.fill_(0)
self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[4].bias.data.fill_(0)
#self.word_embedding.weight.data.copy_(torch.from_numpy(self.pretrained_emb))
def attention_softmax3d(self, raw_attentions):
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out = nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
def _transformation_input(self, embed_sent):
embed_sent = self.linear_layer_project(embed_sent)
result = embed_sent
if self.intra_attention:
f_intra = self.linear_layer_intra(embed_sent)
f_intra_t = torch.transpose(f_intra, 1, 2)
raw_attentions = torch.matmul(f_intra, f_intra_t)
time_steps = embed_sent.size(1)
r = torch.arange(0, time_steps)
r_matrix = r.view(1,-1).expand(time_steps,time_steps)
raw_index = r_matrix-r.view(-1,1)
clipped_index = torch.clamp(raw_index,0,self.distance_biases-1)
clipped_index = Variable(clipped_index.long())
if torch.cuda.is_available():
clipped_index = clipped_index.to(self.device)
bias = self.bias_embedding(clipped_index)
bias = torch.squeeze(bias)
raw_attentions += bias
attentions = self.attention_softmax3d(raw_attentions)
attended = torch.matmul(attentions, embed_sent)
result = torch.cat([embed_sent,attended],2)
return result
def attend(self, sent1, sent2, lsize_list, rsize_list):
"""
Compute inter-sentence attention. This is step 1 (attend) in the paper
:param sent1: tensor in shape (batch, time_steps, num_units),
the projected sentence 1
:param sent2: tensor in shape (batch, time_steps, num_units)
:return: a tuple of 3-d tensors, alfa and beta.
"""
repr1 = self.linear_layer_attend(sent1)
repr2 = self.linear_layer_attend(sent2)
repr2 = torch.transpose(repr2,1,2)
raw_attentions = torch.matmul(repr1, repr2)
#self.mask = generate_mask(lsize_list, rsize_list)
# masked = mask(self.raw_attentions, rsize_list)
#masked = raw_attentions * self.mask
att_sent1 = self.attention_softmax3d(raw_attentions)
beta = torch.matmul(att_sent1, sent2) #input2_soft
raw_attentions_t = torch.transpose(raw_attentions,1,2).contiguous()
#self.mask_t = torch.transpose(self.mask, 1, 2).contiguous()
# masked = mask(raw_attentions_t, lsize_list)
#masked = raw_attentions_t * self.mask_t
att_sent2 = self.attention_softmax3d(raw_attentions_t)
alpha = torch.matmul(att_sent2,sent1) #input1_soft
return alpha, beta
def compare(self, sentence, soft_alignment):
"""
        Apply a feed forward network to compare one sentence to its
soft alignment with the other.
:param sentence: embedded and projected sentence,
shape (batch, time_steps, num_units)
:param soft_alignment: tensor with shape (batch, time_steps, num_units)
:return: a tensor (batch, time_steps, num_units)
"""
sent_alignment = torch.cat([sentence, soft_alignment],2)
out = self.linear_layer_compare(sent_alignment)
#out, (state, _) = self.lstm_compare(out)
return out
def aggregate(self, v1, v2):
"""
Aggregate the representations induced from both sentences and their
representations
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_sum = torch.sum(v1,1)
v2_sum = torch.sum(v2,1)
out = self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None):
lsize_list = [len(s.split(" ")) for s in raw_sent1]
rsize_list = [len(s.split(" ")) for s in raw_sent2]
sent1 = sent1.permute(0, 2, 1)
sent2 = sent2.permute(0, 2, 1)
sent1 = self._transformation_input(sent1)
sent2 = self._transformation_input(sent2)
alpha, beta = self.attend(sent1, sent2, lsize_list, rsize_list)
v1 = self.compare(sent1, beta)
v2 = self.compare(sent2, alpha)
logits = self.aggregate(v1, v2)
return logits
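# Illustrative sketch (added). The helper below is not part of the original
# model; it reproduces, in isolation, the relative-distance clipping used by
# the intra-attention bias in `_transformation_input`, so the bucketing of
# token distances can be inspected directly. Numbers are arbitrary examples.
def _demo_distance_bias_indices(time_steps=5, distance_biases=3):
    r = torch.arange(0, time_steps)
    r_matrix = r.view(1, -1).expand(time_steps, time_steps)
    raw_index = r_matrix - r.view(-1, 1)
    # Any pair of tokens further apart than `distance_biases - 1` positions
    # shares the last bias bucket, exactly as in the model above.
    return torch.clamp(raw_index, 0, distance_biases - 1)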
| 3.03125 | 3 |
basic_code/networks.py | J-asy/Emotion-FAN | 275 | 7044 | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F
import torch
import numpy as np
import cv2
import pdb
def sigmoid(x):
return 1 / (1 + math.exp(-x))
def norm_angle(angle):
norm_angle = sigmoid(10 * (abs(angle) / 0.7853975 - 1))
return norm_angle
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU()
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU()
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
###''' self-attention; relation-attention '''
class ResNet_AT(nn.Module):
def __init__(self, block, layers, num_classes=1000, end2end=True, at_type=''):
self.inplanes = 64
self.end2end = end2end
super(ResNet_AT, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.dropout = nn.Dropout(0.5)
self.dropout2 = nn.Dropout(0.6)
self.alpha = nn.Sequential(nn.Linear(512, 1),
nn.Sigmoid())
self.beta = nn.Sequential(nn.Linear(1024, 1),
nn.Sigmoid())
self.pred_fc1 = nn.Linear(512, 7)
self.pred_fc2 = nn.Linear(1024, 7)
self.at_type = at_type
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x='', phrase='train', AT_level='first_level',vectors='',vm='',alphas_from1='',index_matrix=''):
vs = []
alphas = []
assert phrase == 'train' or phrase == 'eval'
assert AT_level == 'first_level' or AT_level == 'second_level' or AT_level == 'pred'
if phrase == 'train':
num_pair = 3
for i in range(num_pair):
f = x[:, :, :, :, i] # x[128,3,224,224]
f = self.conv1(f)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
vs.append(f)
alphas.append(self.alpha(self.dropout(f)))
vs_stack = torch.stack(vs, dim=2)
alphas_stack = torch.stack(alphas, dim=2)
if self.at_type == 'self-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
if self.at_type == 'self_relation-attention':
vm1 = vs_stack.mul(alphas_stack).sum(2).div(alphas_stack.sum(2))
betas = []
for i in range(len(vs)):
vs[i] = torch.cat([vs[i], vm1], dim=1)
betas.append(self.beta(self.dropout(vs[i])))
cascadeVs_stack = torch.stack(vs, dim=2)
betas_stack = torch.stack(betas, dim=2)
output = cascadeVs_stack.mul(betas_stack * alphas_stack).sum(2).div((betas_stack * alphas_stack).sum(2))
if self.at_type == 'self-attention':
vm1 = self.dropout(vm1)
pred_score = self.pred_fc1(vm1)
if self.at_type == 'self_relation-attention':
output = self.dropout2(output)
pred_score = self.pred_fc2(output)
return pred_score
if phrase == 'eval':
if AT_level == 'first_level':
f = self.conv1(x)
f = self.bn1(f)
f = self.relu(f)
f = self.maxpool(f)
f = self.layer1(f)
f = self.layer2(f)
f = self.layer3(f)
f = self.layer4(f)
f = self.avgpool(f)
f = f.squeeze(3).squeeze(2) # f[1, 512, 1, 1] ---> f[1, 512]
# MN_MODEL(first Level)
alphas = self.alpha(self.dropout(f))
return f, alphas
if AT_level == 'second_level':
assert self.at_type == 'self_relation-attention'
vms = index_matrix.permute(1, 0).mm(vm) # [381, 21783] -> [21783,381] * [381,512] --> [21783, 512]
vs_cate = torch.cat([vectors, vms], dim=1)
betas = self.beta(self.dropout(vs_cate))
''' keywords: mean_fc ; weight_sourcefc; sum_alpha; weightmean_sourcefc '''
''' alpha * beta '''
weight_catefc = vs_cate.mul(alphas_from1) # [21570,512] * [21570,1] --->[21570,512]
alpha_beta = alphas_from1.mul(betas)
sum_alphabetas = index_matrix.mm(alpha_beta) # [380,21570] * [21570,1] -> [380,1]
weightmean_catefc = index_matrix.mm(weight_catefc).div(sum_alphabetas)
weightmean_catefc = self.dropout2(weightmean_catefc)
pred_score = self.pred_fc2(weightmean_catefc)
return pred_score
if AT_level == 'pred':
if self.at_type == 'self-attention':
pred_score = self.pred_fc1(self.dropout(vm))
return pred_score
''' self-attention; relation-attention '''
def resnet18_at(pretrained=False, **kwargs):
    # Constructs a base ResNet-18 model.
model = ResNet_AT(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
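# Illustrative usage sketch (added). The 5-D input shape below is an
# assumption inferred from the slicing in `forward` (x[:, :, :, :, i] for
# three frames) and the 224x224 comment above; the batch size is arbitrary.
def _demo_forward_pass():
    model = resnet18_at(at_type='self_relation-attention')
    frames = torch.randn(2, 3, 224, 224, 3)  # (batch, channels, H, W, num_frames)
    scores = model(frames, phrase='train')   # (batch, 7) emotion scores
    return scores.shape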
| 2.828125 | 3 |
ndscheduler/server/handlers/index.py | symphonyrm/ndscheduler | 0 | 7045 | """Serves the single page app web ui."""
import json
import tornado.gen
from ndscheduler import settings
from ndscheduler import utils
from ndscheduler.server.handlers import base
class Handler(base.BaseHandler):
"""Index page request handler."""
@tornado.gen.coroutine
def get(self):
"""Serve up the single page app for scheduler dashboard."""
meta_info = utils.get_all_available_jobs()
self.render(settings.APP_INDEX_PAGE, jobs_meta_info=json.dumps(meta_info))
| 2.125 | 2 |
Scripts/xbbtools/xbb_io.py | eoc21/biopython | 3 | 7046 | #!/usr/bin/env python
# Created: Wed Jun 21 13:46:35 2000
# Last changed: Time-stamp: <00/12/02 14:18:23 thomas>
# <EMAIL>, http://evolution.bmc.uu.se/~thomas
# File: xbb_io.py
import os, sys # os.system, sys.argv
sys.path.insert(0, '.')
sys.path.insert(0, os.path.expanduser('~thomas/cbs/python/biopython'))
from Bio.ParserSupport import *
from Bio import Fasta
class xbb_io:
def __init__(self):
""
def error(self, str):
print str
def read_fasta_file(self, file):
genes = []
iter = Fasta.Iterator(handle = open(file), parser = Fasta.RecordParser())
while 1:
rec = iter.next()
if not rec: break
genes.append((rec.sequence, rec.title))
return genes
| 2.578125 | 3 |
HW6/Andrii_Haponov/cw_4.py | kolyasalubov/Lv-677.PythonCore | 0 | 7047 | <reponame>kolyasalubov/Lv-677.PythonCore
# Convert a Number to a String!
# We need a function that can transform a number into a string.
# What ways of achieving this do you know?
def number_to_string(num: int) -> str:
str_num = str(num)
return str_num
print(number_to_string(123))
print(type(number_to_string(123))) | 4.03125 | 4 |
project/scripts/clausecat/evaluate_clausecat.py | explosion/healthsea | 60 | 7048 | import spacy
from spacy.scorer import PRFScore
import typer
from pathlib import Path
from wasabi import Printer, table
import operator
import benepar
import clausecat_component
import clausecat_model
import clausecat_reader
import clause_segmentation
import clause_aggregation
msg = Printer()
def main(model_path: Path, eval_path: Path):
"""This script is used to evaluate the clausecat component"""
nlp = spacy.load(model_path)
reader = clausecat_reader.ClausecatCorpus(eval_path)
examples = reader(nlp)
clausecat = nlp.get_pipe("clausecat")
scorer = {
"POSITIVE": PRFScore(),
"NEGATIVE": PRFScore(),
"NEUTRAL": PRFScore(),
"ANAMNESIS": PRFScore(),
}
for i, example in enumerate(examples):
prediction = example.predicted
reference = example.reference
# Prediction
prediction = clausecat(prediction)
# Iterate through prediction and references
for pred_clause, ref_clause in zip(prediction._.clauses, reference._.clauses):
prediction_cats = pred_clause["cats"]
reference_cats = ref_clause["cats"]
prediction_class = max(prediction_cats.items(), key=operator.itemgetter(1))[
0
]
            # Update the per-label confusion counts; use a separate variable for
            # the binarised prediction so the predicted Doc is not overwritten.
            for label in prediction_cats:
                pred_value = 1 if label == prediction_class else 0
                if pred_value == 0 and reference_cats[label] != 0:
                    scorer[label].fn += 1
                elif pred_value == 1 and reference_cats[label] != 1:
                    scorer[label].fp += 1
                elif pred_value == 1 and reference_cats[label] == 1:
                    scorer[label].tp += 1
# Printing
textcat_data = []
avg_fscore = 0
avg_recall = 0
avg_precision = 0
for label in scorer:
textcat_data.append(
(
label,
round(scorer[label].fscore, 2),
round(scorer[label].recall, 2),
round(scorer[label].precision, 2),
)
)
avg_fscore += scorer[label].fscore
avg_recall += scorer[label].recall
avg_precision += scorer[label].precision
textcat_data.append(
(
"AVERAGE",
round(avg_fscore / len(scorer), 2),
round(avg_recall / len(scorer), 2),
round(avg_precision / len(scorer), 2),
)
)
header = ("Label", "F-Score", "Recall", "Precision")
print(table(textcat_data, header=header, divider=True))
if __name__ == "__main__":
typer.run(main)
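# Example invocation (illustrative only; both arguments are placeholders for a
# trained spaCy pipeline directory and an evaluation corpus file):
#   python evaluate_clausecat.py <trained_pipeline_dir> <eval_corpus_file>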
| 2.609375 | 3 |
utils/test.py | david-waugh/network-automation | 0 | 7049 | import pathlib
print(pathlib.Path(__file__).parent.resolve())
# Minimal read-eval-print loop; eval() executes arbitrary user input, so this
# is only suitable for local experimentation.
while True:
    next_cmd = input("> ")
    print(eval(next_cmd))
| 2.3125 | 2 |
nlp_annotator_api/server/app.py | IBM/deepsearch-nlp-annotator-api-example | 3 | 7050 | import logging
import os
import aiohttp.web
from connexion import AioHttpApp
from nlp_annotator_api.config.config import conf
from nlp_annotator_api.config.logging import setup_logging
from nlp_annotator_api.server.middleware.statsd_middleware import StatsdMiddleware
from nlp_annotator_api.server.signals.statsd_client import statsd_client_factory
setup_logging()
access_log = logging.getLogger("nlp_annotator_api.access")
_file_dir = os.path.dirname(__file__)
app = AioHttpApp(
__name__, specification_dir=os.path.join(_file_dir, "..", "resources", "schemas"),
server_args=dict(
client_max_size=8 * 1024**2
)
)
app.add_api("openapi.yaml", pass_context_arg_name="request")
aiohttp_app: aiohttp.web.Application = app.app
aiohttp_app.cleanup_ctx.append(statsd_client_factory(conf.statsd))
aiohttp_app.middlewares.append(StatsdMiddleware())
if __name__ == "__main__":
app.run(access_log=access_log)
| 2.03125 | 2 |
keras_cv_attention_models/resnest/resnest.py | dcleres/keras_cv_attention_models | 140 | 7051 | <reponame>dcleres/keras_cv_attention_models
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from keras_cv_attention_models.aotnet import AotNet
from keras_cv_attention_models.download_and_load import reload_model_weights
from keras_cv_attention_models.attention_layers import batchnorm_with_activation, conv2d_no_bias
PRETRAINED_DICT = {
"resnest101": {"imagenet": "63f9ebdcd32529cbc4b4fbbec3d1bb2f"},
"resnest200": {"imagenet": "8e211dcb089b588e18d36ba7cdf92ef0"},
"resnest269": {"imagenet": "4309ed1b0a8ae92f2b1143dc3512c5c7"},
"resnest50": {"imagenet": "eee7b20a229821f730ab205b6afeb369"},
}
def rsoftmax(inputs, groups):
if groups > 1:
nn = tf.reshape(inputs, [-1, 1, groups, inputs.shape[-1] // groups])
# nn = tf.transpose(nn, [0, 2, 1, 3])
nn = tf.nn.softmax(nn, axis=2)
nn = tf.reshape(nn, [-1, 1, 1, inputs.shape[-1]])
else:
nn = keras.layers.Activation("sigmoid")(inputs)
return nn
def split_attention_conv2d(inputs, filters, kernel_size=3, strides=1, downsample_first=False, groups=2, activation="relu", name=""):
h_axis, w_axis = [2, 3] if K.image_data_format() == "channels_first" else [1, 2]
in_channels = inputs.shape[-1]
conv_strides = strides if downsample_first else 1
if groups == 1:
logits = conv2d_no_bias(inputs, filters, kernel_size, strides=conv_strides, padding="same", name=name and name + "1_")
else:
# Using groups=2 is slow in `mixed_float16` policy
# logits = conv2d_no_bias(inputs, filters * groups, kernel_size, padding="same", groups=groups, name=name and name + "1_")
logits = []
splitted_inputs = tf.split(inputs, groups, axis=-1)
for ii in range(groups):
conv_name = name and name + "1_g{}_".format(ii + 1)
logits.append(conv2d_no_bias(splitted_inputs[ii], filters, kernel_size, strides=conv_strides, padding="same", name=conv_name))
logits = tf.concat(logits, axis=-1)
logits = batchnorm_with_activation(logits, activation=activation, name=name and name + "1_")
if groups > 1:
splited = tf.split(logits, groups, axis=-1)
gap = tf.reduce_sum(splited, axis=0)
else:
gap = logits
gap = tf.reduce_mean(gap, [h_axis, w_axis], keepdims=True)
reduction_factor = 4
inter_channels = max(in_channels * groups // reduction_factor, 32)
atten = keras.layers.Conv2D(inter_channels, kernel_size=1, name=name and name + "2_conv")(gap)
atten = batchnorm_with_activation(atten, activation=activation, name=name and name + "2_")
atten = keras.layers.Conv2D(filters * groups, kernel_size=1, name=name and name + "3_conv")(atten)
atten = rsoftmax(atten, groups)
out = keras.layers.Multiply()([atten, logits])
if groups > 1:
out = tf.split(out, groups, axis=-1)
out = tf.reduce_sum(out, axis=0)
if not downsample_first and strides > 1:
out = keras.layers.ZeroPadding2D(padding=1, name=name and name + "pool_pad")(out)
out = keras.layers.AveragePooling2D(3, strides=2, name=name and name + "pool")(out)
return out
def ResNest(input_shape=(224, 224, 3), stem_type="deep", attn_types="sa", bn_after_attn=False, shortcut_type="avg", pretrained="imagenet", **kwargs):
kwargs.pop("kwargs", None)
model = AotNet(**locals(), **kwargs)
reload_model_weights(model, pretrained_dict=PRETRAINED_DICT, sub_release="resnest", pretrained=pretrained)
return model
def ResNest50(input_shape=(224, 224, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 6, 3], stem_width=64, model_name="resnest50", **locals(), **kwargs)
def ResNest101(input_shape=(256, 256, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 4, 23, 3], stem_width=128, model_name="resnest101", **locals(), **kwargs)
def ResNest200(input_shape=(320, 320, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 24, 36, 3], stem_width=128, model_name="resnest200", **locals(), **kwargs)
def ResNest269(input_shape=(416, 416, 3), num_classes=1000, activation="relu", classifier_activation="softmax", pretrained="imagenet", groups=2, **kwargs):
return ResNest(num_blocks=[3, 30, 48, 8], stem_width=128, model_name="resnest269", **locals(), **kwargs)
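# Illustrative usage sketch (added). Passing pretrained=None is assumed here to
# skip downloading the released weights; the random input only demonstrates the
# expected tensor shapes.
def _demo_resnest50_inference():
    model = ResNest50(pretrained=None)
    images = tf.random.uniform((1, 224, 224, 3))
    preds = model(images, training=False)  # (1, 1000) class probabilities
    return preds.shape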
| 2.53125 | 3 |
dataactcore/migrations/versions/8692ab1298e1_replace_filerequest_with_filegeneration.py | brianherman/data-act-broker-backend | 1 | 7052 | <reponame>brianherman/data-act-broker-backend
"""replace FileRequest with FileGeneration
Revision ID: 8692ab1298e1
Revises: <KEY>
Create Date: 2018-10-24 14:54:39.278159
"""
# revision identifiers, used by Alembic.
revision = '8692ab1298e1'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('file_generation',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('file_generation_id', sa.Integer(), nullable=False),
sa.Column('request_date', sa.Date(), nullable=False),
sa.Column('start_date', sa.Date(), nullable=False),
sa.Column('end_date', sa.Date(), nullable=False),
sa.Column('agency_code', sa.Text(), nullable=False),
sa.Column('agency_type', sa.Enum('awarding', 'funding', name='generation_agency_types'), server_default='awarding', nullable=False),
sa.Column('file_type', sa.Enum('D1', 'D2', name='generation_file_types'), server_default='D1', nullable=False),
sa.Column('file_path', sa.Text(), nullable=True),
sa.Column('is_cached_file', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('file_generation_id')
)
op.create_index(op.f('ix_file_generation_agency_code'), 'file_generation', ['agency_code'], unique=False)
op.create_index(op.f('ix_file_generation_agency_type'), 'file_generation', ['agency_type'], unique=False)
op.create_index(op.f('ix_file_generation_end_date'), 'file_generation', ['end_date'], unique=False)
op.create_index(op.f('ix_file_generation_file_type'), 'file_generation', ['file_type'], unique=False)
op.create_index(op.f('ix_file_generation_request_date'), 'file_generation', ['request_date'], unique=False)
op.create_index(op.f('ix_file_generation_start_date'), 'file_generation', ['start_date'], unique=False)
op.add_column('job', sa.Column('file_generation_id', sa.Integer(), nullable=True))
op.create_foreign_key('fk_file_request_file_generation_id', 'job', 'file_generation', ['file_generation_id'], ['file_generation_id'], ondelete='SET NULL')
op.drop_column('job', 'from_cached')
# ### end Alembic commands ###
def downgrade_data_broker():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('job', sa.Column('from_cached', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.drop_constraint('fk_file_request_file_generation_id', 'job', type_='foreignkey')
op.drop_column('job', 'file_generation_id')
op.drop_index(op.f('ix_file_generation_start_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_request_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_file_type'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_end_date'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_agency_type'), table_name='file_generation')
op.drop_index(op.f('ix_file_generation_agency_code'), table_name='file_generation')
op.drop_table('file_generation')
op.execute("""
DROP TYPE generation_agency_types
""")
op.execute("""
DROP TYPE generation_file_types
""")
# ### end Alembic commands ###
| 1.445313 | 1 |
cluster.py | Birfy/Endlinking | 1 | 7053 | import numpy as np
import random
import sys
chainlength = int(sys.argv[1])
dfname = sys.argv[2]
outfl = 'result.data'
cluster_size = int(sys.argv[3])
def readsize(dfname):
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if content and content[-1] == 'xhi':
return 2*float(content[1])
def readdata(dfname, chainlen):
X=[]
Xi=[]
with open(dfname, 'r') as df:
lines = df.readlines()
for line in lines:
content = line.split()
if len(content) == 9:
# print(content)
if (int(content[0]) % chainlen == 0 or int(content[0]) % chainlen == 1) and int(content[2]) != 3 and int(content[2]) != 4 :
X.append([float(content[i]) for i in range(3,6)])
Xi.append(int(content[0]))
return np.array(X), np.array(Xi)
def initmeans(n):
M=[]
for i in range(n):
M.append([size*(random.random()-0.5),size*(random.random()-0.5),size*(random.random()-0.5)])
return np.array(M)
def SetDistMat(X, means):
distmat_dtype = [('key',int), ('dist',float)]
distmat = np.empty((n,k),dtype=distmat_dtype)
for i in range(n):
distmat[i,:] = [(c[0], GetDist(X[i], c[1])) for c in enumerate(means)]
distmat[i,:] = np.sort(distmat[i,:], order='dist')
return distmat
def GetDist(x, c):
dist = np.linalg.norm(x-c-boxl*np.around((x-c)/boxl))
return dist
def Get_plst(assigned, distmat, full):
plst = []
for i in range(n):
if (i not in assigned):
j = 0
while j<k:
if (not full[distmat[i,j][0]]):
bestkey = distmat[i,j][0]
mindist = distmat[i,j][1]
break
else:
j += 1
for j in range(k-1,-1,-1):
if (not full[distmat[i,j][0]]):
maxdist = distmat[i,j][1]
break
plst.append((i, bestkey, maxdist-mindist))
plst.sort(key=lambda t:t[2])
return plst
def InitialAssignment(distmat):
clusters = {}
full = np.zeros(k,dtype=bool) # a boolean array that records which clusters are full
assigned = [] # a list of objects who has been assigned to a cluster
plst = Get_plst(assigned, distmat, full)
while (len(plst)):
temp = plst.pop()
try:
if (len(clusters[temp[1]])<cluster_size):
clusters[temp[1]].append(temp[0])
assigned.append(temp[0])
else:
full[temp[1]] = True
plst = Get_plst(assigned, distmat, full)
except KeyError:
clusters[temp[1]] = [temp[0]]
assigned.append(temp[0])
return clusters
def CalcMeans(X, oldmeans, clusters):
means = np.zeros((k,3))
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
means[key] += X[i]-boxl*np.around((X[i]-oldmeans[key])/boxl)
means[key] /= len(clusters[key])
means[key] -= boxl*np.around(means[key]/boxl)
return means
def SortObj(X, clusters, means, distmat):
    objlst = [] # list of objects ordered in ascending delta of the current
# assignment and the best possible alternate assignment
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
currdist = GetDist(X[i],means[key])
mindist = distmat[i,0][1]
objlst.append((i, key, currdist-mindist))
objlst.sort(key=lambda t:t[2], reverse=True)
return objlst
def Transfer(obj, clufrom, cluto, clusters):
clusters[clufrom].remove(obj)
clusters[cluto].append(obj)
return clusters
def WriteResult(file, X, means, clusters):
with open(file, 'w') as fl:
# keys = sorted(clusters.keys())
# i = 1
# for key in keys:
# for obj in clusters[key]:
# fl.write("%d\t%d\t%f\t%f\t%f\t%d\n"\
# %(obj,Xi[obj], X[obj][0], X[obj][1], X[obj][2], key))
# i = i + 1
for c in enumerate(means):
fl.write("%d\t%f\t%f\t%f"%(c[0], c[1][0], c[1][1], c[1][2]))
for obj in clusters[c[0]]:
fl.write("\t%d"%(Xi[obj]))
fl.write('\n')
# i = i + 1
return
# This function will perform statistical analysis to the clustering results
def ClusterStat(X, means, clusters):
# Average distance between means
means_avg = 0.
for i in range(k-1):
for j in range(i+1,k):
means_avg += GetDist(means[i], means[j])
means_avg /= (k*(k-1)/2.)
# Average distance between obj and mean in a cluster
obj2mean_avg = np.zeros(k)
# Variance of the distances between obj and mean in a cluster
obj2mean_var = np.zeros(k)
keys = sorted(clusters.keys())
for key in keys:
for i in clusters[key]:
obj2mean = GetDist(X[i], means[key])
obj2mean_avg[key] += obj2mean
obj2mean_var[key] += obj2mean*obj2mean
obj2mean_avg[key] /= len(clusters[key])
obj2mean_var[key] /= len(clusters[key])
obj2mean_var[key] = np.sqrt(obj2mean_var[key])
# Average within cluster distances between objects
winclu_avg = np.zeros(k)
# Average of within cluster distances of all clusters
winclu_grandavg = 0.
for key in keys:
for i in clusters[key]:
x = X[i]
for j in clusters[key]:
if j>i:
winclu_avg[key] += GetDist(x, X[j])
s = len(clusters[key])
winclu_avg[key] /= (s*(s-1)/2)
winclu_grandavg += winclu_avg[key]
winclu_grandavg /= k
# write the summary
print("average distance among means: %f"%means_avg)
#print("average distance from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_avg[i]))
#print("variance of distances from objects to the mean of a cluster:")
#for i in range(k):
# print("cluster %i: %f"%(i, obj2mean_var[i]))
#print("within-cluster average distances:")
#for i in range(k):
# print("cluster %i: %f"%(i, winclu_avg[i]))
print("grand average of within-cluster average distances: %f"%winclu_grandavg)
return
X, Xi = readdata(dfname, chainlength)
size = readsize(dfname)
boxl = np.array([size, size, size])
n = len(X)
k = int(len(X)/cluster_size)
# Set up the database of objects
# X = readdata(dfname, chainlength)
# Choose initial means with K-means
means = initmeans(k)
# Set up initial clusters
distmat = SetDistMat(X, means)
clusters = InitialAssignment(distmat)
## debug code
#keys = sorted(clusters.keys())
#for key in keys:
# print("cluster %i:"%key)
# print(clusters[key])
## end of debug
# Iteration step
for iter in range(100):
active = 0 # indicate the number of transfers in the current iteration
tranlst = (-1)*np.ones(k, dtype='int') # set up transfer list for each cluster
# Compute the cluster means
oldmeans = means.copy()
means = CalcMeans(X, oldmeans, clusters)
# Get statistics about the clustering
#ClusterStat(X, means, clusters)
## debug code
#print("old means:")
#print(oldmeans)
#print("new means:")
#print(means)
## end of debug
# For each object, compute the distances to the cluster means
distmat = SetDistMat(X, means)
# Sort objects based on the delta of the current assignment and the best
# possible alternate assignment
objlst = SortObj(X, clusters, means, distmat)
##debug code
#print(objlst)
##return
#end of debug
    # For each element by priority:
    while (len(objlst)):
        (i, key, temp) = objlst.pop()
        obj2key = GetDist(X[i], means[key])
        transferred = False # record if any transferring has occurred to i
if (key == distmat[i,0][0]):
##debug
#print("%i is already the opt cluster for obj %i. no transfer"%(clu, i))
##end of debug
continue
        # For each of the other clusters, by element gain:
else:
for j in range(k):
clu = distmat[i,j][0] # the key of another cluster
                objgain = obj2key - distmat[i,j][1] # gain by transferring i from cluster key to clu
if (clu==key): # already in the cluster
continue
if (len(clusters[clu]) < cluster_size):
active += 1
transferred = True
clusters = Transfer(i, key, clu, clusters)
##debug
#print("cluster %i not full. transfer obj %i from cluster %i to it."%(clu, i, key))
##end of debug
break
elif (tranlst[clu] != -1): # if the tranlst of another cluster is not empty
# distance between the obj in the tranlst and the current cluster
tran2key = GetDist(X[tranlst[clu]], means[key])
tran2clu = GetDist(X[tranlst[clu]], means[clu])
                    # gain by transferring the obj in tranlst from cluster clu to key
trangain = tran2clu - tran2key
if (objgain + trangain > 0): # transfer if the sum of gains are positive, ie net gain
active += 2
transferred = True
clusters = Transfer(i, key, clu, clusters)
clusters = Transfer(tranlst[clu], clu, key, clusters)
##debug
#print("obj %i is transfered from cluster %i to %i"%(i, key, clu))
#print("obj %i is transfered from cluster %i to %i"%(tranlst[clu], clu, key))
#print("objgain: %f, trangain: %f"%(objgain, trangain))
##end of debug
tranlst[clu] = -1 # reset the tranlst to empty
break
if (not transferred):
tranlst[key] = i
##debug
#print("add obj %i in cluster %i to the transfer list"%(i, key))
##end of debug
# nothing is transferred during this iteration, return the clustering result
if (not active):
break
#debug code
print("number of transfers in iter %i: %i\n"%(iter+1, active))
#end of debug
print("K-means clustering converged in %d iterations!\n"%(iter+1))
# Output the clustering results
WriteResult(outfl, X, means, clusters)
ClusterStat(X, means, clusters)
# print(X)
| 2.640625 | 3 |
ituro/accounts/tests.py | kayduemre/ituro | 9 | 7054 | from django.test import TestCase
from django.utils import timezone
from accounts.models import CustomUser, CustomUserManager
class UserCreateTestCase(TestCase):
def test_create_user_correctly(self):
"Creating users correctly"
new_user = CustomUser.objects.create(
email="<EMAIL>",
name="<NAME>",
phone="09876543210",
school="Some University",
is_staff="False",
is_active="True",
date_joined=timezone.now())
self.assertTrue(isinstance(new_user, CustomUser))
self.assertEqual(new_user.get_full_name(), "<NAME>")
self.assertEqual(new_user.get_short_name(), "<NAME>")
| 2.890625 | 3 |
continuum/datasets/dtd.py | oleksost/continuum | 282 | 7055 | <filename>continuum/datasets/dtd.py
import os
from typing import List
import numpy as np
from torchvision import datasets as torchdata
from continuum.datasets import ImageFolderDataset
from continuum import download
from continuum.tasks import TaskType
class DTD(ImageFolderDataset):
"""Describable Textures Dataset (DTD)
Reference:
* Describing Textures in the Wild
<NAME> and <NAME> and <NAME> and <NAME> and and <NAME>
CVPR 2014
"""
url = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz"
def __init__(self, data_path: str, train: bool = True, download: bool = True, split: int = 1):
super().__init__(data_path=data_path, train=train, download=download, data_type=TaskType.IMAGE_PATH)
if not (1 <= int(split) <= 10):
raise ValueError(f"Available splits are [1, ..., 10], not {split}")
self.split = split
def _download(self):
archive_path = os.path.join(self.data_path, "dtd-r1.0.1.tar.gz")
if not os.path.exists(archive_path):
print("Downloading DTD dataset...")
download.download(self.url, self.data_path)
if not os.path.exists(os.path.join(self.data_path, "dtd")):
print("Uncompressing images...")
download.untar(archive_path)
def get_data(self):
x, y, t = self._format(torchdata.ImageFolder(os.path.join(self.data_path, "dtd", "images")).imgs)
if self.train:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"train{str(self.split)}.txt"),
os.path.join(self.data_path, "dtd", "labels", f"val{str(self.split)}.txt")
]
else:
index_files = [
os.path.join(self.data_path, "dtd", "labels", f"test{str(self.split)}.txt")
]
valid_paths = set()
for index_file in index_files:
with open(index_file) as f:
valid_paths.update(
map(lambda p: os.path.join(self.data_path, "dtd", "images", p.strip()),
f.readlines()
)
)
valid_paths = np.array(list(valid_paths))
indexes = np.isin(x, valid_paths)
return x[indexes], y[indexes], None
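# Illustrative usage sketch (added). Instantiating the class triggers the
# archive download/extraction in `_download` when the data is missing, so the
# example is left as a comment; the data path is a placeholder.
#
#   dataset = DTD("/path/to/data", train=True, split=1)
#   x, y, _ = dataset.get_data()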
| 2.5625 | 3 |
src/tests/testdata.py | Doometnick/MaxiMin-2048 | 0 | 7056 | <filename>src/tests/testdata.py<gh_stars>0
from board import Direction
# Tuples of input, action, expected output.
moving_tests = [
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.UP,
[[8,0,2,0],
[0,0,0,0],
[0,0,0,0],
[0,0,0,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.DOWN,
[[0,0,0,0],
[0,0,0,0],
[0,0,0,0],
[8,0,2,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.LEFT,
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,2,0,0]]
),
(
[[0,0,0,0],
[4,0,0,0],
[0,0,0,0],
[4,0,2,0]],
Direction.RIGHT,
[[0,0,0,0],
[0,0,0,4],
[0,0,0,0],
[0,0,4,2]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.RIGHT,
[[0,0,8,8],
[0,0,16,4],
[0,0,32,32],
[16,8,2,4]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.LEFT,
[[8,8,0,0],
[16,4,0,0],
[32,32,0,0],
[16,8,2,4]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.UP,
[[4,4,4,8],
[8,16,8,16],
[32,8,2,4],
[16,0,0,0]]
),
(
[[4,4,4,4],
[8,0,8,4],
[32,16,0,16],
[16,8,2,4]],
Direction.DOWN,
[[4,0,0,0],
[8,4,4,8],
[32,16,8,16],
[16,8,2,4]]
)
] | 2.5625 | 3 |
test/utils/test_value.py | HansBug/pji | 0 | 7057 | import pytest
from pji.utils import ValueProxy
@pytest.mark.unittest
class TestUtilsValue:
def test_value_proxy_init(self):
value = ValueProxy()
assert value.value is None
value = ValueProxy(233)
assert value.value == 233
def test_value_proxy_set(self):
value = ValueProxy()
value.value = 233
assert value.value == 233
value.value = -27
assert value.value == -27
| 2.453125 | 2 |
intro.py | Ebenazer-2002/library-management | 0 | 7058 | #Intro Page
from tkinter import *
from PIL import Image, ImageTk
import cv2
#----------------------------Start Function--------------------------#
def start(event):
label1.destroy()
import log
win.destroy()
log.main()
#------------------------Main Window---------------------------------#
def main_window():
global win
global label1
win = Tk()
win.title('Library Management System')
win.iconbitmap("images/main_icon.ico")
win.bind('<Key>', start) # start function on pressing any key
win.state('zoomed')
# opens video
cap = cv2.VideoCapture("images/vid.MP4")
global n
n = 0
#-----------------------------------------------------------------
# defining show function
def show():
global n # frame count
n = n+1
if n <= 30:
rest, frame = cap.read()
cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
img = Image.fromarray(cv2image).resize((1600, 850))
imgtk = ImageTk.PhotoImage(image=img)
label1.imgtk = imgtk
label1.configure(image=imgtk)
win.after(10, show)
else:
label1.destroy()
frm = Frame(win, bg='black')
frm.place(relx=0, rely=0, relwidth=1, relheight=1)
label = Label(frm, text='Press any Key to continue',
bg='black', fg='white')
label.place(relx=0.45, rely=0.5)
#-----------------------------------------------------------------
label1 = Label(win)
label1.place(relx=0, rely=0, relheight=1, relwidth=1)
show()
win.mainloop()
#-----------------------------------------------------------------
main_window()
| 3.15625 | 3 |
notebooks/week4_help.py | hugh9876/04-multivariate-analysis | 0 | 7059 | """
This module provides helper functions to support exercises during AM1
with outliers, robust regression and template regression in the CORE
data analytics workshop series, week 4.
"""
import numpy as np
import pandas as pd
import math
from collections import namedtuple
def recovery_sulphur_dataframe_with_outliers(outlier_probability):
"""Return dataframe representing recovery as a function of sulphur.
Parameters:
----------
outlier_probability:
This floating point parameter should range between 0 and 1
and is probability of an observation being an outlier.
Returns:
-------
Pandas dataframe:
A dataframe is returned with two series, the first being observed
recovery, and the second being sulphur %. The data may be sampled
from the true underlying relationship, plus gaussian noise, or
may be an outlier value taken from a non-gaussian distribution.
The proportion of outliers to non-outliers will depend on
the outlier_probability parameter.
"""
# Check that the outlier_probability is an ordinary number.
assert isinstance(outlier_probability, (float, int))
# As it's a probability, ensure that it ranges between 0 and 1.
assert outlier_probability >= 0.0
assert outlier_probability <= 1.0
# If no exceptions have been thrown then we likely have a valid input.
# Get 50 pairs of sulphur features and recovery labels
sulphur_percent = _draw_sulphur_observations(50)
recovery_percent = _observe_recovery(sulphur_percent,
outlier_probability)
return pd.DataFrame({'metal_recovery_percent': recovery_percent,
'feed_sulphur_percent': sulphur_percent})
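# Illustrative usage sketch (added): draw the 50 observation pairs with roughly
# one in five samples taken from the outlier model, and inspect the first rows.
def _demo_recovery_dataframe():
    df = recovery_sulphur_dataframe_with_outliers(outlier_probability=0.2)
    return df.head()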
def _initialise_randomstate(seed):
""" Use RandomState object with seed set."""
return np.random.RandomState(seed)
def _draw_sulphur_observations(count):
rs = _initialise_randomstate(7)
# draw "count" sulphur observations from a uniform distribution of
# sulphur percentages between 0.15% and 1.35%
sulphur_percent = rs.uniform(0.15, 1.35, count)
return sulphur_percent
def _draw_dilithium_observations(count):
rs = _initialise_randomstate(8)
return rs.uniform(25, 35, count)
def _draw_kryptonite_observations(count):
rs = _initialise_randomstate(9)
return rs.uniform(20, 25, count)
def _draw_unobtainium_observations(count):
rs = _initialise_randomstate(10)
return rs.uniform(0, 7, count)
def _draw_quartz_observations(count):
rs = _initialise_randomstate(11)
return rs.uniform(25, 35, count)
def _observe_recovery(sulphur_percent, outlier_probability):
"""Returns an array of metal recoveries.
This method returns an array of metal recoveries given both
an array of sulphur percentages and the probability of an
outlier being observed.
"""
recovery_percent = np.zeros_like(sulphur_percent)
is_outlier = _is_outlier(outlier_probability, len(sulphur_percent))
for index in range(0, len(recovery_percent)):
if is_outlier[index]:
recovery_percent [index]= _return_outlier_model_of_recovery(sulphur_percent[index])
else:
recovery_percent [index]=_noise_free_model_of_recovery(sulphur_percent[index])
return recovery_percent
def _noise_free_model_of_recovery(sulphur):
"""This method returns a metal recovery for a given sulphur %."""
return 74.81 - 6.81/sulphur
def _return_outlier_model_of_recovery(sulphur):
return (74.81 - 6.81/sulphur)/3
def _is_outlier(outlier_probability, how_many):
"""Return true/false numpy array
"""
rs = _initialise_randomstate(5)
uniformly_distributed = rs.uniform(0, 1, how_many)
is_outlier = np.zeros_like(uniformly_distributed)
for index in range(0, len(is_outlier)):
is_outlier[index]=uniformly_distributed[index]>(1-outlier_probability)
return is_outlier
def add_gaussian_noise(noise_free_input, mean, sigma):
"""Adds gaussian noise to vector, given mean and sigma
"""
bins = len(noise_free_input)
noise = np.random.normal(mean, sigma, bins)
return noise_free_input + noise
def gaussian_fwhm_pdf(X, height, x_position, fwhm):
"""Returns guassian probability distribution function, given FWHM
This computes a gaussian probability density function (pdf) given a
Full Width at Half Maximum (FWHM) instead of standard deviation, and
scales it by the height parameters. If the height is one, then the
area of the guassian will also be unity, as required for a pdf, and
for preserving area when used as an impulse response function in
convolution operations.
Note, this returns the function, it does not sample from the
distribution.
"""
return gaussian_pdf(X, height, x_position, fwhm / (2 * math.sqrt(2 * math.log(2))))
def gaussian_pdf(X, area, x_position, standard_deviation):
"""Returns gaussian probability distribution function multiplied by area.
This computes a gaussian with unit area and multiplies it
by the area parameter. It is translated to be centered
on x_position and has the width specified by standard_deviation.
    Unit area gaussians are used as probability distribution functions,
and are also important in convolutions, as area of the convolution
of two functions is the product of their areas. If it is important
for the convolution to preserve area of a function when convolved
with a gaussian then that gaussian needs to have unit area. Preserving
area also implies conservation of energy in many physical models.
It can be shown that the integral of the gaussian function is unity
    when the gaussian's height is scaled as a function of standard_deviation
    as:
        height_scaling = 1/(standard_deviation*sqrt(2*pi))
    So this function multiplies the height of the gaussian by this factor and
    then multiplies this result by the area parameter that is passed in.
    If the area parameter is 1, then the area of this gaussian will also
    be 1 for all standard deviations, otherwise the area will be set by the
    area parameter. The relationship between height and area, and the scaling
    of height by the second parameter below, will be made clearer by
    also studying the gaussian function.
"""
return gaussian(X, area / (standard_deviation * math.sqrt(2 * math.pi)), x_position,
standard_deviation)
def gaussian(X, height, x_position, standard_deviation):
"""Return standard gaussian function
This is the unnormalised gaussian function
f(x)=height*exp(-(x-x_position)^2/(2*standard_deviation^2))
Parameters
----------
height:
This is the maximum of the gaussian peak.
This function does not normalise to constant area, the caller
must do this if this is what they want.
x_position:
This is the x position of the centre of the gaussian. If the
        gaussian is being used to apply the impulse response of an
instrument applied to an XRD reflection, then this will be the
two-theta position of the peak.
standard_deviation:
The standard deviation of the guassian curve.
If this function is being applied in spectroscopy, optics or
electrical engineering, it is common for gaussians to be
defined in terms of Full Width at Half Maximum (FWHM), which
is the width of the peak when the height drops to half
of the peak height, specified by the height parameter. If
the x-axis represents frequency, and the function height
is proportional to energy or power, then this will be the
gaussian's bandwidth, that is, the width between the -3db points.
To convert from FWHM to standard deviation use the relationship:
FWHM = 2*sqrt(2*log(2)) * standard_deviation
Returns
-------
double:
Evaluated gaussian function.
"""
return height * math.e**(-(X - x_position)**2 / 2 / standard_deviation**2)
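# Worked example (added): a numerical check of the relationships described in
# the docstrings above. With area=1, gaussian_pdf should integrate to ~1, and
# gaussian_fwhm_pdf evaluated at FWHM = 2*sqrt(2*ln(2))*sigma should match it.
def _demo_gaussian_area_check():
    x = np.linspace(-10, 10, 2001)
    sigma = 1.5
    fwhm = 2 * math.sqrt(2 * math.log(2)) * sigma
    pdf_from_sigma = gaussian_pdf(x, 1.0, 0.0, sigma)
    pdf_from_fwhm = gaussian_fwhm_pdf(x, 1.0, 0.0, fwhm)
    area = np.trapz(pdf_from_sigma, x)                          # ~1.0
    max_diff = np.max(np.abs(pdf_from_sigma - pdf_from_fwhm))   # ~0.0
    return area, max_diff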
class MultichannelXAxis:
"""Set up an X axis for isntrument
This object is set up with three inputs, min_x is the minimum value
on the axis. In the example I've chosen 5. The max_x
value is the highest value on the x axis, and spacing is
the x spacing between channels. In the example I've chosen
a max_x of 90 and spacing of 0.2. The unit is two-theta
degrees, and this unit (and the axis values) come from the
world of x-ray diffraction (XRD). We're describing the x-axis
of a low resolution XRD instrument.
The object's as_vector method can return the x_axis as an array
of numbers using numpy's linspace method, which we've already used
for plotting and other purposes.
"""
def __init__(self, min_x, max_x, spacing):
self._min = min_x
self._max = max_x
self._spacing = spacing
self._channel_count = \
round((self.max - self.min) / self.spacing + 1)
        self._label = r"$2\theta$ (degrees)"
@property
def min(self):
"""Return minimum two-theta for diffractogram x-axis."""
return self._min
@property
def max(self):
"""Return maximum two-theta for diffractogram x-axis."""
return self._max
@property
def spacing(self):
"""Return channel spacing in two-theta for diffractogram x-axis."""
return self._spacing
@property
def channel_count(self):
"""Return the count of channels in this diffractogram."""
return self._channel_count
@property
def label(self):
"""Return the x-axis label, for use with plot and report generation."""
return self._label
@property
def as_vector(self):
"""Return a numpy vector containing two-theta values for each channel."""
x_axis_vector = np.linspace(self.min, self.max, self.channel_count)
return x_axis_vector
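# Illustrative usage sketch (added): the axis described in the class docstring
# (5 to 90 degrees two-theta in 0.2 degree steps) resolves to 426 channels.
def _demo_multichannel_axis():
    axis = MultichannelXAxis(5, 90, 0.2)
    return axis.channel_count, axis.as_vector[:3]  # 426, array([5.0, 5.2, 5.4])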
def _apply_convolution_kernals(x_axis_vector, intensity, two_theta_angle,
instrument_broadening_fwhm,
reflection_broadening_fwhm):
"""Apply gaussian kernel for instrument broadening only."""
def _add_gaussian_fwhms(fwhm1, fwhm2):
sigma_fwhm_conversion_constant = 2*math.sqrt(2*math.log(2))
sigma_1 = fwhm1/sigma_fwhm_conversion_constant
sigma_2 = fwhm2/sigma_fwhm_conversion_constant
#squares of std_dev (ie sigma^2 which is variance) are additive
sigma_summed = math.sqrt(sigma_1*sigma_1 + sigma_2*sigma_2)
return sigma_summed*sigma_fwhm_conversion_constant
fwhm = _add_gaussian_fwhms (instrument_broadening_fwhm,
reflection_broadening_fwhm)
return gaussian_fwhm_pdf(x_axis_vector, intensity, two_theta_angle,
fwhm)
def create_templates_matrix():
"""Create templates for four test pure components.
This creates templates for quartz, dilithium, kryptonite and
unobtainium, in that order. The templates are returned
in an array where the first column is quartz, and the last is
unobtainium. If you plot them, you'll see gently varying
squiggly lines.
"""
# Create a templates matrix containing space for four templates, plus
# a column of ones.
x_axis = MultichannelXAxis(5, 90, 0.2)
template_count = 4
templates_matrix = np.zeros((x_axis.channel_count, template_count+1))
# set 4 two-theta units of instrument broadening
instrument_broadening = 4
# create a tuple for each reflection, and add it to a list. The loop
# then grabs each reflection from the list and then adds it to the
# template. The first value in the tuple is intensity, the second
# two-theta angle and the third is how much broadening to apply.
Reflection = namedtuple('Reflection', ('intensity', 'two_theta', 'broadening'))
quartz_reflections = []
quartz_reflections.append (Reflection(intensity=10.0, two_theta=25.0, broadening=3.0))
quartz_reflections.append (Reflection(13.0, 38.0, 6.0))
quartz_reflections.append (Reflection(10.0, 43.0, 2.0))
quartz_reflections.append (Reflection(25.0, 60, 2.0))
dilithium_reflections = []
dilithium_reflections.append (Reflection(25.0, 80, 1.0))
kryptonite_reflections = []
#kryptonite_reflections.append (Reflection(intensity=12.0, two_theta=25.0, broadening=9.0))
kryptonite_reflections.append (Reflection(17.0, 12.0, 1.0))
kryptonite_reflections.append (Reflection(19.0, 43.0, 12.0))
#kryptonite_reflections.append (Reflection(4.0, 70, 2.0))
#kryptonite_reflections.append (Reflection(32.0, 74, 2.0))
unobtainium_reflections = []
#unobtainium_reflections.append (Reflection(intensity=4.0, two_theta=25.0, broadening=12.0))
unobtainium_reflections.append (Reflection(5.0, 18.0, 6.0))
unobtainium_reflections.append (Reflection(1.0, 23.0, 1.0))
unobtainium_reflections.append (Reflection(5.0, 31.0, 2.0))
unobtainium_reflections.append (Reflection(3.0, 55.0, 6.0))
unobtainium_reflections.append (Reflection(7.0, 58.0, 1.0))
#unobtainium_reflections.append (Reflection(5.0, 80, 2.0))
phases=[]
# create four phases
phases.append(quartz_reflections)
phases.append(dilithium_reflections)
phases.append(kryptonite_reflections)
phases.append(unobtainium_reflections)
for phase_idx in range(0, template_count):
for a_reflection in phases[phase_idx]:
contribution_of_this_reflection = \
_apply_convolution_kernals(
x_axis.as_vector,
a_reflection.intensity,
a_reflection.two_theta,
instrument_broadening,
a_reflection.broadening)
templates_matrix[:, phase_idx] += \
contribution_of_this_reflection
# set the last column to be all ones
templates_matrix[:, template_count] = \
np.ones(x_axis.channel_count)
return templates_matrix
def create_composition_dataframe(observations_count):
"""Create a dataframe of observations of drilling samples
Returns:
Pandas DataFrame with observations_count observations.
The dataframe has four columns representing the amount
of quartz, dilithium, kryptonite and unobtainium present.
These values are drawn from uniform distributions."""
unobtainium = _draw_unobtainium_observations (observations_count)
dilithium = _draw_dilithium_observations(observations_count)
kryptonite = _draw_kryptonite_observations(observations_count)
quartz = _draw_quartz_observations(observations_count)
# Create clusters by imposing a relationship between quartz
# and dilithium.
for observation_idx in range(0, observations_count):
if quartz[observation_idx] > 30:
dilithium[observation_idx] = 5
if dilithium[observation_idx] > 30:
quartz[observation_idx] = 5
return pd.DataFrame({'Quartz': quartz,
'Dilithium': dilithium,
'Kryptonite': kryptonite,
'Unobtainium': unobtainium})
def create_observations(compositions_dataframe, templates):
"""Create a new array containing synthetic observations"""
observations_count = len(compositions_dataframe)
channels_count = len(templates[:,0])
observations_matrix = np.zeros((channels_count, observations_count))
for observation_idx in range (0, observations_count):
observations_matrix[:, observation_idx] = \
templates[:,0]*compositions_dataframe['Quartz'][observation_idx] + \
templates[:,1]*compositions_dataframe['Dilithium'][observation_idx] + \
templates[:,2]*compositions_dataframe['Kryptonite'][observation_idx] + \
templates[:,3]*compositions_dataframe['Unobtainium'][observation_idx]
# add gaussian noise. If you have time, try increasing this and watch
# prediction performance fall over.
observations_matrix[:, observation_idx] = \
add_gaussian_noise(observations_matrix[:, observation_idx], 10, 3)
return observations_matrix
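# Illustrative end-to-end sketch (added): build the four pure-component
# templates, draw synthetic drill-hole compositions and generate the matching
# noisy observation matrix. The observation count is arbitrary.
def _demo_synthetic_observations(observations_count=10):
    templates = create_templates_matrix()                # (channels, 5)
    compositions = create_composition_dataframe(observations_count)
    observations = create_observations(compositions, templates)
    return observations.shape                            # (channels, observations_count)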
| 3.015625 | 3 |
tests/actions/test_mutable_token_action.py | 0xOmarA/RadixLib | 32 | 7060 | <filename>tests/actions/test_mutable_token_action.py
from radixlib.actions import CreateTokenDefinition
from typing import Dict, Any
import unittest
class TestMutableTokenAction(unittest.TestCase):
""" Unit tests for the CreateTokenDefinition action of mutable tokens """
ActionDict: Dict[str, Any] = {
"token_properties": {
"name": "MutableTest",
"description": "An amazing new token with great utility!",
"icon_url": "https://www.google.com/",
"url": "https://www.google.com/",
"symbol": "mutable",
"is_supply_mutable": True,
"granularity": "1",
"owner": {
"address": "tdx1qspqqecwh3tgsgz92l4d4f0e4egmfe86049dj75pgq347fkkfmg84pgx9um0v"
}
},
"token_supply": {
"value": "0",
"token_identifier": {
"rri": "mutable_tr1q06dd0ut3qmyp4pqkvmeu2dvkwg5f7vm8yeslwvpkt9qcl5vqu"
}
},
"type": "CreateTokenDefinition"
}
def test_from_dict(self):
""" Tests the derivation of the mainnet wallet addresses from the public key """
# The action loaded from the dictionary
creation: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
# Asserting that the CreateTokenDefinition object understood the content of the dictionary
self.assertEqual(creation.name, self.ActionDict['token_properties']['name'])
self.assertEqual(creation.description, self.ActionDict['token_properties']['description'])
self.assertEqual(creation.icon_url, self.ActionDict['token_properties']['icon_url'])
self.assertEqual(creation.url, self.ActionDict['token_properties']['url'])
self.assertEqual(creation.symbol, self.ActionDict['token_properties']['symbol'])
self.assertEqual(creation.is_supply_mutable, self.ActionDict['token_properties']['is_supply_mutable'])
self.assertEqual(creation.granularity, int(self.ActionDict['token_properties']['granularity']))
self.assertEqual(creation.owner.address, self.ActionDict['token_properties']['owner']['address'])
self.assertEqual(creation.token_supply, int(self.ActionDict['token_supply']['value']))
self.assertEqual(creation.token_rri, self.ActionDict['token_supply']['token_identifier']['rri'])
self.assertEqual(creation.to_account, None)
def test_to_dict(self):
""" Tests the conversion of the token account to a dictionary """
# The account loaded from the dictionary
account: CreateTokenDefinition = CreateTokenDefinition.from_dict(self.ActionDict)
self.assertEqual(account.to_dict(), self.ActionDict) | 2.71875 | 3 |
src/tests/testModules/loadCfg_typeCasting/allowsCastFailKeeping/primativeTypes.py | Trimatix/carica | 5 | 7061 | floatVar = 1.0
listVar = [3, "hello"]
dictVar = {
"myField": "value"
}
aotVar = [dictVar, dictVar]
intVar = 1 | 2.203125 | 2 |
quacc/recipes/xtb/__init__.py | arosen93/HT-ASE | 9 | 7062 | <filename>quacc/recipes/xtb/__init__.py
"""Recipes for xTB"""
| 0.910156 | 1 |
src/python/pants/backend/native/subsystems/xcode_cli_tools.py | StephanErb/pants | 0 | 7063 | <filename>src/python/pants/backend/native/subsystems/xcode_cli_tools.py
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from pants.backend.native.config.environment import Assembler, CCompiler, CppCompiler, Linker
from pants.engine.rules import rule
from pants.engine.selectors import Select
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import is_readable_dir
from pants.util.memo import memoized_method, memoized_property
MIN_OSX_SUPPORTED_VERSION = '10.11'
MIN_OSX_VERSION_ARG = '-mmacosx-version-min={}'.format(MIN_OSX_SUPPORTED_VERSION)
class XCodeCLITools(Subsystem):
"""Subsystem to detect and provide the XCode command line developer tools.
This subsystem exists to give a useful error message if the tools aren't
installed, and because the install location may not be on the PATH when Pants
is invoked.
"""
options_scope = 'xcode-cli-tools'
_REQUIRED_FILES = {
'bin': [
'as',
'cc',
'c++',
'clang',
'clang++',
'ld',
'lipo',
],
# Any of the entries that would be here are not directly below the 'include' or 'lib' dirs, and
# we haven't yet encountered an invalid XCode/CLI tools installation which has the include dirs,
# but incorrect files. These would need to be updated if such an issue arises.
'include': [],
'lib': [],
}
INSTALL_PREFIXES_DEFAULT = [
# Prefer files from this installation directory, if available. This doesn't appear to be
# populated with e.g. header files on travis.
'/usr',
# Populated by the XCode CLI tools.
'/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr',
# Populated by the XCode app. These are derived from using the -v or -H switches invoking the
# osx clang compiler.
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr',
'/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/9.1.0',
'/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr',
]
class XCodeToolsUnavailable(Exception):
"""Thrown if the XCode CLI tools could not be located."""
class XCodeToolsInvalid(Exception):
"""Thrown if a method within this subsystem requests a nonexistent tool."""
@classmethod
def register_options(cls, register):
super(XCodeCLITools, cls).register_options(register)
register('--install-prefixes', type=list, default=cls.INSTALL_PREFIXES_DEFAULT,
fingerprint=True, advanced=True,
help='Locations to search for resources from the XCode CLI tools, including a '
'compiler, linker, header files, and some libraries. '
'Under this directory should be some selection of these subdirectories: {}.'
.format(cls._REQUIRED_FILES.keys()))
@memoized_property
def _all_existing_install_prefixes(self):
return [pfx for pfx in self.get_options().install_prefixes if is_readable_dir(pfx)]
# NB: We use @memoized_method in this file for methods which may raise.
@memoized_method
def _get_existing_subdirs(self, subdir_name):
maybe_subdirs = [os.path.join(pfx, subdir_name) for pfx in self._all_existing_install_prefixes]
existing_dirs = [existing_dir for existing_dir in maybe_subdirs if is_readable_dir(existing_dir)]
required_files_for_dir = self._REQUIRED_FILES.get(subdir_name)
if required_files_for_dir:
for fname in required_files_for_dir:
found = False
for subdir in existing_dirs:
full_path = os.path.join(subdir, fname)
if os.path.isfile(full_path):
found = True
continue
if not found:
raise self.XCodeToolsUnavailable(
"File '{fname}' in subdirectory '{subdir_name}' does not exist at any of the specified "
"prefixes. This file is required to build native code on this platform. You may need "
"to install the XCode command line developer tools from the Mac App Store.\n\n"
"If the XCode tools are installed and you are still seeing this message, please file "
"an issue at https://github.com/pantsbuild/pants/issues/new describing your "
"OSX environment and which file could not be found.\n"
"The existing install prefixes were: {pfxs}. These can be extended with "
"--{scope}-install-prefixes."
.format(fname=fname,
subdir_name=subdir_name,
pfxs=self._all_existing_install_prefixes,
scope=self.get_options_scope_equivalent_flag_component()))
return existing_dirs
@memoized_method
def path_entries(self):
return self._get_existing_subdirs('bin')
@memoized_method
def lib_dirs(self):
return self._get_existing_subdirs('lib')
@memoized_method
def include_dirs(self):
base_inc_dirs = self._get_existing_subdirs('include')
all_inc_dirs = base_inc_dirs
for d in base_inc_dirs:
# TODO: figure out what this directory does and why it's not already found by this compiler.
secure_inc_dir = os.path.join(d, 'secure')
if is_readable_dir(secure_inc_dir):
all_inc_dirs.append(secure_inc_dir)
return all_inc_dirs
@memoized_method
def assembler(self):
return Assembler(
path_entries=self.path_entries(),
exe_filename='as',
library_dirs=[])
@memoized_method
def linker(self):
return Linker(
path_entries=self.path_entries(),
exe_filename='ld',
library_dirs=[],
linking_library_dirs=[],
extra_args=[MIN_OSX_VERSION_ARG])
@memoized_method
def c_compiler(self):
return CCompiler(
path_entries=self.path_entries(),
exe_filename='clang',
library_dirs=self.lib_dirs(),
include_dirs=self.include_dirs(),
extra_args=[MIN_OSX_VERSION_ARG])
@memoized_method
def cpp_compiler(self):
return CppCompiler(
path_entries=self.path_entries(),
exe_filename='clang++',
library_dirs=self.lib_dirs(),
include_dirs=self.include_dirs(),
extra_args=[MIN_OSX_VERSION_ARG])
@rule(Assembler, [Select(XCodeCLITools)])
def get_assembler(xcode_cli_tools):
return xcode_cli_tools.assembler()
@rule(Linker, [Select(XCodeCLITools)])
def get_ld(xcode_cli_tools):
return xcode_cli_tools.linker()
@rule(CCompiler, [Select(XCodeCLITools)])
def get_clang(xcode_cli_tools):
return xcode_cli_tools.c_compiler()
@rule(CppCompiler, [Select(XCodeCLITools)])
def get_clang_plusplus(xcode_cli_tools):
return xcode_cli_tools.cpp_compiler()
def create_xcode_cli_tools_rules():
return [
get_assembler,
get_ld,
get_clang,
get_clang_plusplus,
]
| 1.765625 | 2 |
improver_tests/regrid/test_RegridWithLandSeaMask.py | yzhaobom/improver | 77 | 7064 | <gh_stars>10-100
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Unit tests for the RegridWithLandSeaMask class"""
# set up a special data set and corresponding land-sea mask info
# set up target grid and its land-sea mask info
# it is designed to cover different scenarios for regridding with land-sea
# the regridding reference results are manually checked for different methods
# not using "set_up_variable_cube" because of different spacing at lat/lon
import numpy as np
from improver.regrid.bilinear import basic_indexes
from improver.regrid.grid import calculate_input_grid_spacing, latlon_from_cube
from improver.regrid.landsea import RegridLandSea
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
def modify_cube_coordinate_value(cube, coord_x, coord_y):
"""modify x(longitude) & y(latitude) andcoordinates for a cube"""
cube.coord(axis="x").points = coord_x
cube.coord(axis="x").bounds = None
cube.coord(axis="x").guess_bounds()
cube.coord(axis="y").points = coord_y
cube.coord(axis="y").bounds = None
cube.coord(axis="y").guess_bounds()
return cube
def define_source_target_grid_data():
""" define cube_in, cube_in_mask,cube_out_mask using assumed data """
# source (input) grid
in_lats = np.linspace(0, 15, 4)
in_lons = np.linspace(0, 40, 5)
# target (output) grid
out_lats = np.linspace(0, 14, 8)
out_lons = np.linspace(5, 35, 11)
# assume a set of nwp data
data = np.arange(20).reshape(4, 5).astype(np.float32)
# input grid mask info
in_mask = np.empty((4, 5), dtype=np.int)
in_mask[:, :] = 1
in_mask[0, 2] = 0
in_mask[2, 2:4] = 0
in_mask[3, 2:4] = 0
# output grid mask info
out_mask = np.empty((8, 11), dtype=np.int)
out_mask[:, :] = 1
out_mask[0, 4:7] = 0
out_mask[1, 5] = 0
out_mask[5:9, 4:10] = 0
out_mask[6, 6] = 1
out_mask[7, 6] = 1
out_mask[1, 0] = 0
# create cube with default spacing
cube_in = set_up_variable_cube(data, "air_temperature", "Celsius")
cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", "1")
cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1")
# modify cube coordinates to the designed value
cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats)
cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats)
cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats)
return cube_in, cube_out_mask, cube_in_mask
def define_source_target_grid_data_same_domain():
""" define cube_in, cube_in_mask,cube_out_mask, assume the same domain """
# source (input) grid
in_lats = np.linspace(0, 15, 4)
in_lons = np.linspace(0, 40, 5)
# target (output) grid
out_lats = np.linspace(0, 15, 7)
out_lons = np.linspace(5, 40, 9)
# assume a set of nwp data
data = np.arange(20).reshape(4, 5).astype(np.float32)
# input grid mask info
in_mask = np.empty((4, 5), dtype=np.int)
in_mask[:, :] = 1
in_mask[0, 2] = 0
in_mask[2, 2:4] = 0
in_mask[3, 2:4] = 0
# output grid mask info
out_mask = np.empty((7, 9), dtype=np.int)
out_mask[:, :] = 1
out_mask[0, 3:6] = 0
out_mask[1, 4] = 0
out_mask[4:9, 4:8] = 0
out_mask[6, 6] = 1
out_mask[1, 0] = 0
# create cube with default spacing
cube_in = set_up_variable_cube(data, "air_temperature", "Celsius")
cube_in_mask = set_up_variable_cube(in_mask, "Land_Binary_Mask", "1")
cube_out_mask = set_up_variable_cube(out_mask, "Land_Binary_Mask", "1")
# modify cube coordinates to the designed value
cube_in = modify_cube_coordinate_value(cube_in, in_lons, in_lats)
cube_in_mask = modify_cube_coordinate_value(cube_in_mask, in_lons, in_lats)
cube_out_mask = modify_cube_coordinate_value(cube_out_mask, out_lons, out_lats)
return cube_in, cube_out_mask, cube_in_mask
def test_basic_indexes():
"""Test basic_indexes for identical source and target domain case """
cube_in, cube_out_mask, _ = define_source_target_grid_data_same_domain()
in_latlons = latlon_from_cube(cube_in)
out_latlons = latlon_from_cube(cube_out_mask)
in_lons_size = cube_in.coord(axis="x").shape[0]
lat_spacing, lon_spacing = calculate_input_grid_spacing(cube_in)
indexes = basic_indexes(
out_latlons, in_latlons, in_lons_size, lat_spacing, lon_spacing
)
test_results = indexes[58:63, :]
expected_results = np.array(
[
[12, 17, 18, 13],
[12, 17, 18, 13],
[13, 18, 19, 14],
[13, 18, 19, 14],
[13, 18, 19, 14],
]
)
np.testing.assert_array_equal(test_results, expected_results)
def test_regrid_nearest_2():
"""Test nearest neighbour regridding option 'nearest-2'"""
cube_in, cube_out_mask, _ = define_source_target_grid_data()
regrid_nearest = RegridLandSea(regrid_mode="nearest-2",)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13],
[15, 16, 16, 16, 17, 17, 17, 18, 18, 18, 18],
]
)
np.testing.assert_allclose(regrid_nearest.data, expected_results, atol=1e-3)
def test_regrid_bilinear_2():
"""Test bilinear regridding option 'bilinear-2'"""
cube_in, cube_out_mask, _ = define_source_target_grid_data()
regrid_bilinear = RegridLandSea(regrid_mode="bilinear-2",)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0.5, 0.8, 1.1, 1.4, 1.7, 2.0, 2.3, 2.6, 2.9, 3.2, 3.5],
[2.5, 2.8, 3.1, 3.4, 3.7, 4.0, 4.3, 4.6, 4.9, 5.2, 5.5],
[4.5, 4.8, 5.1, 5.4, 5.7, 6.0, 6.3, 6.6, 6.9, 7.2, 7.5],
[6.5, 6.8, 7.1, 7.4, 7.7, 8.0, 8.3, 8.6, 8.9, 9.2, 9.5],
[8.5, 8.8, 9.1, 9.4, 9.7, 10.0, 10.3, 10.6, 10.9, 11.2, 11.5],
[10.5, 10.8, 11.1, 11.4, 11.7, 12.0, 12.3, 12.6, 12.9, 13.2, 13.5],
[12.5, 12.8, 13.1, 13.4, 13.7, 14.0, 14.3, 14.6, 14.9, 15.2, 15.5],
[14.5, 14.8, 15.1, 15.4, 15.7, 16.0, 16.3, 16.6, 16.9, 17.2, 17.5],
]
)
np.testing.assert_allclose(regrid_bilinear.data, expected_results, atol=1e-3)
def test_regrid_nearest_with_mask_2():
"""Test nearest-with-mask-2 regridding"""
cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data()
regrid_nearest_with_mask = RegridLandSea(
regrid_mode="nearest-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3],
[0, 1, 1, 1, 7, 2, 7, 3, 3, 3, 3],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 8],
[5, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9],
[10, 11, 11, 11, 7, 7, 7, 8, 8, 8, 14],
[10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 14],
[10, 11, 11, 11, 12, 12, 7, 13, 13, 13, 14],
[15, 16, 16, 16, 17, 17, 7, 18, 18, 18, 19],
]
)
np.testing.assert_allclose(
regrid_nearest_with_mask.data, expected_results, atol=1e-3
)
# consider constant field
cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32)
regrid_nearest_with_mask = RegridLandSea(
regrid_mode="nearest-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32)
np.testing.assert_allclose(
regrid_nearest_with_mask.data, expected_results, atol=1e-3
)
def test_regrid_bilinear_with_mask_2():
"""Test bilinear-with-mask-2 regridding """
cube_in, cube_out_mask, cube_in_mask = define_source_target_grid_data()
regrid_bilinear_with_mask = RegridLandSea(
regrid_mode="bilinear-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.array(
[
[0.5, 0.8, 1.40096, 3.2916, 2.0, 2.0, 2.0, 4.94333, 3.25586, 3.2, 3.5],
[2.5, 2.8, 3.1, 3.4, 5.48911, 2.76267, 6.32926, 4.6, 4.9, 5.2, 5.5],
[4.5, 4.8, 5.1, 5.4, 5.7, 7.0154, 6.3, 6.6, 6.9, 7.2, 7.5],
[6.5, 6.8, 7.1, 7.4, 7.7, 7.0, 7.19033, 7.6681, 7.6618, 9.2, 9.5],
[
8.5,
8.8,
9.1,
9.4,
8.10633,
7.0,
7.0,
7.62915,
7.21672,
9.11434,
10.52363,
],
[
10.5,
10.8,
11.00012,
11.01183,
13.15439,
12.0,
12.3,
12.6,
12.9,
13.71286,
15.74504,
],
[
12.5,
12.8,
12.23411,
13.25881,
14.14155,
14.0,
8.07328,
14.6,
14.9,
14.96332,
16.3334,
],
[
14.5,
14.8,
15.0997,
14.22659,
15.50905,
16.0,
9.8733,
16.6,
16.9,
16.91114,
17.03773,
],
]
)
np.testing.assert_allclose(
regrid_bilinear_with_mask.data, expected_results, atol=1e-3
)
# consider constant field
cube_in.data = np.repeat(1.0, 20).reshape(4, 5).astype(np.float32)
regrid_bilinear_with_mask = RegridLandSea(
regrid_mode="bilinear-with-mask-2",
landmask=cube_in_mask,
landmask_vicinity=250000000,
)(cube_in, cube_out_mask)
expected_results = np.repeat(1.0, 88).reshape(8, 11).astype(np.float32)
np.testing.assert_allclose(
regrid_bilinear_with_mask.data, expected_results, atol=1e-3
)
| 1.320313 | 1 |
setup.py | garnaat/details | 27 | 7065 | <reponame>garnaat/details<filename>setup.py
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
requires = [
]
setup(
name='details',
version=open(os.path.join('details', '_version')).read(),
description='Tools for processing AWS detailed billing reports',
long_description=open('README.md').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/scopely-devops/details',
packages=find_packages(exclude=['tests*']),
package_dir={'details': 'details'},
install_requires=requires,
license=open("LICENSE").read(),
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4'
),
)
| 1.351563 | 1 |
beam_telescope_analysis/testing/test_kalman.py | YannickDieter/beam_telescope_analysis | 3 | 7066 | <filename>beam_telescope_analysis/testing/test_kalman.py
''' Script to check the correctness of the analysis. The analysis is done on raw data and all results are compared to a recorded analysis.
'''
import os
import unittest
import numpy as np
from beam_telescope_analysis import track_analysis
from beam_telescope_analysis.tools import test_tools
class TestTrackAnalysis(unittest.TestCase):
@classmethod
def setUpClass(cls):
# virtual X server for plots under headless LINUX travis testing is needed
if os.getenv('TRAVIS', False) and os.getenv('TRAVIS_OS_NAME', False) == 'linux':
from xvfbwrapper import Xvfb # virtual X server for plots under headless LINUX travis testing is needed
cls.vdisplay = Xvfb()
cls.vdisplay.start()
@classmethod
def tearDownClass(cls): # Remove created files
pass
# os.remove(os.path.join(cls.output_folder, 'Tracks_merged.pdf'))
def test_kalman(self):
# pixel size of sensor
pixel_size = np.array([(18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (18.5, 18.5), (250., 50.)])
pixel_resolution = pixel_size / np.sqrt(12)
material_budget = np.array([100., 100., 100., 100., 100., 100., 250.]) / np.array([125390., 125390., 125390., 125390., 125390., 125390., 93700.])
prealignment = {'z': [0., 29900., 60300., 82100., 118700., 160700., 197800.]}
kwargs = {'track_hits': np.array([[[-1229.22372954, 2828.19616302, 0., pixel_resolution[0][0], pixel_resolution[0][1], 0.],
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], # [-1254.51224282, 2827.4291421, 29900.],
[-1285.6117892, 2822.34536687, 60300., pixel_resolution[2][0], pixel_resolution[2][1], 0.],
[-1311.31083616, 2823.56121414, 82100., pixel_resolution[3][0], pixel_resolution[3][1], 0.],
[-1335.8529645, 2828.43359043, 118700., pixel_resolution[4][0], pixel_resolution[4][1], 0.],
[-1357.81872222, 2840.86947964, 160700., pixel_resolution[5][0], pixel_resolution[5][1], 0.],
[-1396.35698339, 2843.76799577, 197800., pixel_resolution[6][0], pixel_resolution[6][1], 0.]]]),
'dut_fit_selection': 61,
'z_positions': [[0., 29900, 60300, 82100, 118700, 160700, 197800]],
'alignment': [prealignment],
'use_prealignment': True,
'pixel_size': pixel_size,
'n_pixels': ((576, 1152), (576, 1152), (576, 1152), (576, 1152), (576, 1152), (576, 1152), (80, 336)),
'beam_energy': 2500.,
'material_budget': material_budget,
'add_scattering_plane': False}
# expected result array: (state estimates, chi, x error, y errors)
result = [[[-1.23045812e+03, 2.82684464e+03, 0.00000000e+00, -9.54188957e-04, -5.78722777e-05, 9.99999543e-01],
[-1.25900270e+03, 2.82511339e+03, 2.99000000e+04, -9.54667558e-04, -5.79013065e-05, 9.99999543e-01],
[-1.28705254e+03, 2.82443254e+03, 6.03000000e+04, -9.22691847e-04, -2.23966180e-05, 9.99999574e-01],
[-1.30575083e+03, 2.82550588e+03, 8.21000000e+04, -8.57719095e-04, 4.92360053e-05, 9.99999631e-01],
[-1.33339390e+03, 2.83014572e+03, 1.18700000e+05, -7.55274948e-04, 1.26771487e-04, 9.99999707e-01],
[-1.36192826e+03, 2.83782855e+03, 1.60700000e+05, -6.79389377e-04, 1.82924497e-04, 9.99999752e-01],
[-1.38713361e+03, 2.84461505e+03, 1.97800000e+05, -6.79389377e-04, 1.82924497e-04, 9.99999752e-01]],
[79.59176738400244],
[3.62429044, 3.2884327, 3.21655702, 3.1539946, 3.23671172, 4.66501707, 8.62909928],
[3.62429044, 3.2884327, 3.21655702, 3.1539946, 3.23671172, 4.66501707, 8.62909928]]
for i in range(4): # test each return (state estimates, chi, x error, y errors) seperatly
test = test_tools._call_function_with_args(function=track_analysis._fit_tracks_kalman_loop,
**kwargs)[0][i]
data_equal = np.allclose(test, result[i])
self.assertTrue(data_equal)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)-8s] (%(threadName)-10s) %(message)s")
suite = unittest.TestLoader().loadTestsFromTestCase(TestTrackAnalysis)
unittest.TextTestRunner(verbosity=2).run(suite)
| 2.53125 | 3 |
test/test_workflow.py | asnramos/asv | 0 | 7067 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import json
from os.path import join, isfile
import pytest
from asv import util
from . import tools
def test_run_publish(capfd, basic_conf_2):
tmpdir, local, conf, machine_file = basic_conf_2
tmpdir = util.long_path(tmpdir)
conf.matrix = {
"req": dict(conf.matrix),
"env": {"SOME_TEST_VAR": ["1"]},
}
# Tests a typical complete run/publish workflow
ret = tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--show-stderr', '--profile',
'-a', 'warmup_time=0',
'--durations=5',
_machine_file=machine_file)
assert ret is None
text, err = capfd.readouterr()
assert len(os.listdir(join(tmpdir, 'results_workflow', 'orangutan'))) == 5
assert len(os.listdir(join(tmpdir, 'results_workflow'))) == 2
assert 'asv: benchmark timed out (timeout 0.1s)' in text
assert 'total duration' in text
tools.run_asv_with_conf(conf, 'publish')
assert isfile(join(tmpdir, 'html', 'index.html'))
assert isfile(join(tmpdir, 'html', 'index.json'))
assert isfile(join(tmpdir, 'html', 'asv.js'))
assert isfile(join(tmpdir, 'html', 'asv.css'))
# Check parameterized test json data format
filename = glob.glob(join(tmpdir, 'html', 'graphs', 'arch-x86_64',
'asv_dummy_test_package_1',
'asv_dummy_test_package_2-' + tools.DUMMY2_VERSIONS[1],
'branch-master',
'cpu-Blazingly fast',
'env-SOME_TEST_VAR-1',
'machine-orangutan',
'os-GNU_Linux', 'python-*', 'ram-128GB',
'params_examples.time_skip.json'))[0]
with open(filename, 'r') as fp:
data = json.load(fp)
assert len(data) == 2
assert isinstance(data[0][0], int) # revision
assert len(data[0][1]) == 3
assert len(data[1][1]) == 3
assert isinstance(data[0][1][0], float)
assert isinstance(data[0][1][1], float)
assert data[0][1][2] is None
# Check that the skip options work
capfd.readouterr()
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--quick', '--skip-existing-successful',
'--bench=time_secondary.track_value',
'--skip-existing-failed',
_machine_file=join(tmpdir, 'asv-machine.json'))
tools.run_asv_with_conf(conf, 'run', "master", '--steps=2',
'--bench=time_secondary.track_value',
'--quick', '--skip-existing-commits',
_machine_file=join(tmpdir, 'asv-machine.json'))
text, err = capfd.readouterr()
assert 'Running benchmarks.' not in text
# Check EXISTING and --environment work
python = "{0[0]}.{0[1]}".format(sys.version_info)
env_type = tools.get_default_environment_type(conf, python)
env_spec = ("-E", env_type + ":" + python)
tools.run_asv_with_conf(conf, 'run', "EXISTING", '--quick',
'--bench=time_secondary.track_value',
*env_spec,
_machine_file=machine_file)
# Remove the benchmarks.json file and check publish fails
os.remove(join(tmpdir, "results_workflow", "benchmarks.json"))
with pytest.raises(util.UserError):
tools.run_asv_with_conf(conf, 'publish')
| 1.921875 | 2 |
trainer.py | Metro1998/P-DQN | 5 | 7068 | # @author Metro
# @time 2021/11/24
import os.path
import gym
from agents.pdqn import P_DQN
from utilities.memory import ReplayBuffer
from utilities.utilities import *
from utilities.route_generator import generate_routefile
class Train_and_Evaluate(object):
def __init__(self, config):
# Environment
generate_routefile(seed=config.seed, demand=config.demand)
self.env = gym.make(config.environment)
# Agent
self.agent = P_DQN(config, self.env)
# Memory
self.replay_memory_size = config.hyperparameters['replay_memory_size']
self.batch_size = config.hyperparameters['batch_size']
self.updates_per_step = config.hyperparameters['updates_per_step']
self.memory = ReplayBuffer(self.replay_memory_size)
self.total_steps = 0
self.total_updates = 0
self.save_freq = config.save_freq
self.file_to_save = config.file_to_save
self.maximum_episodes = config.hyperparameters['maximum_episodes']
self.train = config.train
self.evaluate = config.evaluate
self.evaluate_internal = config.evaluate_internal
self.agent_to_color_dictionary = config.agent_to_color_dictionary
self.standard_deviation_results = config.standard_deviation_results
self.colors = ['red', 'blue', 'green', 'orange', 'yellow', 'purple']
self.color_idx = 0
self.rolling_score_window = config.rolling_score_window
self.runs_per_agent = config.runs_per_agent
self.agent_name = config.agent_name
self.ceil = config.ceil
# Training Loop
def train_agent(self):
"""
:return:
"""
rolling_scores_for_diff_runs = []
file_to_save_actor = os.path.join(self.file_to_save, 'actor/')
file_to_save_actor_param = os.path.join(self.file_to_save, 'actor_param/')
file_to_save_runs = os.path.join(self.file_to_save, 'runs_1/')
file_to_save_rolling_scores = os.path.join(self.file_to_save, 'rolling_scores/')
os.makedirs(file_to_save_actor, exist_ok=True)
os.makedirs(file_to_save_actor_param, exist_ok=True)
os.makedirs(file_to_save_runs, exist_ok=True)
os.makedirs(file_to_save_rolling_scores, exist_ok=True)
for run in range(self.runs_per_agent):
game_full_episodes_scores = []
game_full_episodes_rolling_scores = []
for i_episode in range(self.maximum_episodes):
if self.save_freq > 0 and i_episode % self.save_freq == 0:
actor_path = os.path.join(file_to_save_actor, 'episode{}'.format(i_episode))
actor_param_path = os.path.join(file_to_save_actor_param, 'episode{}'.format(i_episode))
self.agent.save_models(actor_path, actor_param_path)
episode_score = []
episode_steps = 0
done = 0
state = self.env.reset() # n_steps
while not done:
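                    # Warm-up: while the replay buffer holds no more than one batch of
                    # transitions, act randomly; afterwards query the agent for an
                    # (action, parameter) pair and run `updates_per_step` learning updates.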
if len(self.memory) > self.batch_size:
action, action_params = self.agent.select_action(state, self.train)
if self.ceil:
action_params = np.ceil(action_params).squeeze(0)
action_for_env = [action, int(action_params[action])]
for i in range(self.updates_per_step):
self.agent.update(self.memory)
self.total_updates += 1
else:
action_params = np.random.randint(low=10, high=31, size=8)
action = np.random.randint(7, size=1)[0]
action_for_env = [action, action_params[action]]
next_state, reward, done, info = self.env.step(action_for_env)
print(reward)
episode_steps += 1
episode_score.append(info)
self.total_steps += 1
self.memory.push(state, action, action_params, reward, next_state, done)
state = next_state
episode_score_so_far = np.mean(episode_score)
game_full_episodes_scores.append(episode_score_so_far)
game_full_episodes_rolling_scores.append(
np.mean(game_full_episodes_scores[-1 * self.rolling_score_window:]))
print("Episode: {}, total steps:{}, episode steps:{}, scores:{}".format(
i_episode, self.total_steps, episode_steps, episode_score_so_far))
self.env.close()
file_path_for_pic = os.path.join(file_to_save_runs, 'episode{}_run{}.jpg'.format(i_episode, run))
visualize_results_per_run(agent_results=game_full_episodes_scores,
agent_name=self.agent_name,
save_freq=1,
file_path_for_pic=file_path_for_pic)
rolling_scores_for_diff_runs.append(game_full_episodes_rolling_scores)
file_path_for_pic = os.path.join(file_to_save_rolling_scores, 'rolling_scores.jpg')
visualize_overall_agent_results(agent_results=rolling_scores_for_diff_runs,
agent_name=self.agent_name,
show_mean_and_std_range=True,
agent_to_color_dictionary=self.agent_to_color_dictionary,
standard_deviation_results=1,
file_path_for_pic=file_path_for_pic
)
| 2.1875 | 2 |
server/splunkdj/views.py | splunk/splunk-webframework | 31 | 7069 | import sys
import pprint
import json
import datetime
import uuid
import urllib
import types
import traceback
from django.core.urlresolvers import reverse, resolve
from django.http import HttpResponseRedirect, Http404, HttpResponseServerError, HttpResponseNotFound
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import never_cache
from django.views.debug import ExceptionReporter, get_safe_settings
from django.template import TemplateDoesNotExist, Context
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.shortcuts import render
from splunkdj.decorators.render import render_to
from splunkdj.utility import make_splunkweb_url
from urlparse import urlparse
import logging
logger = logging.getLogger('spl.django.service')
error_logger = logging.getLogger('spl.django.request_error')
def format(value):
"""
Format values appropriately for json.dumps:
- Basic types will remain the same
- Unicode will be converted to str
- Everything else will be formatted using pprint
"""
if value is None:
return value
if isinstance(value, (int, long, str, float, list, dict, tuple, bool, unicode)):
return value
return str(pprint.pformat(value))
def get_exception_info(request):
# We use Django's debug reporter, even though we are doing our own template.
# This is because it has a great way of collecting all the useful info we
# need, so no reason not to leverage it
exc_info = sys.exc_info()
reporter = ExceptionReporter(request, *exc_info)
ctx = reporter.get_traceback_data()
# This is a refactor of what the technical_500_template contains, just
# doing the logic in Python rather than in a template. We collect all this
# information so that we can log it.
exception_type = ctx['exception_type'] if 'exception_type' in ctx else "No exception supplied"
exception_value = ctx['exception_value'] if 'exception_value' in ctx else "No exception supplied"
django_version = ctx["django_version_info"]
python_executable = ctx['sys_executable']
python_version = ctx['sys_version_info']
python_path = ctx['sys_path']
server_time = str(ctx['server_time'])
    unicode_hint = None
    if 'unicode_hint' in ctx:
        unicode_hint = ctx['unicode_hint']
last_frame = None
if 'lastframe' in ctx:
frame_info = ctx['lastframe']
last_frame = "%s in %s, line %s" % (frame_info['filename'], frame_info['function'], frame_info['lineno'])
loaders = []
if 'template_does_not_exist' in ctx and 'loader_debug_info' in ctx and ctx['loader_debug_info']:
for loader in ctx['loader_debug_info']:
loader_info = {"name": loader['loader'], "templates": []}
for tmpl in loader['templates']:
loader_info['templates'].append({"file": tmpl['name'], "exists": tmpl['exists']})
loaders.append(loader_info)
template_errors = None
if 'template_info' in ctx and ctx['template_info']:
template_info = ctx['template_info']
template_errors = {
"name": template_info['name'],
"line": template_info['line'],
"message": template_info['message']
}
exception_info = []
if 'frames' in ctx:
frames = ctx['frames']
for frame in frames:
frame_info = {
"filename": frame['filename'],
"function": frame['function'],
"line": frame['lineno'],
"context_line": frame['context_line'],
"vars": []
}
if 'vars' in frame:
for var in frame['vars']:
frame_info['vars'].append({
"variable": str(var[0]),
"value": format(var[1])
})
exception_info.append(frame_info)
request_info = {
"path_info": request.path_info,
"method": request.META['REQUEST_METHOD'],
"url": request.build_absolute_uri(),
"GET": {},
"POST": {},
"FILES": {},
"COOKIES": {},
"META": {}
}
if hasattr(request, "GET"):
for key, value in request.GET.iteritems():
request_info['GET'][key] = format(value)
if "filtered_POST" in ctx:
for key, value in ctx['filtered_POST'].iteritems():
request_info['POST'][key] = format(value)
if hasattr(request, "FILES"):
for key, value in request.FILES.iteritems():
request_info['FILES'][key] = format(value)
if hasattr(request, "COOKIES"):
for key, value in request.COOKIES.iteritems():
request_info['COOKIES'][key] = format(value)
if hasattr(request, "META"):
for key, value in request.META.iteritems():
request_info['META'][key] = format(value)
settings_info = {}
for key, value in ctx['settings'].iteritems():
settings_info[key] = format(value)
ctx['errorid'] = errorid = uuid.uuid4().hex
full_info = dict(
__time=datetime.datetime.now().isoformat(),
__uuid=errorid,
settings=settings_info,
request=request_info,
traceback=exception_info,
stack=traceback.format_exc(exc_info[2]),
last_frame=last_frame,
template_loaders=loaders,
template_errors=template_errors,
        unicode_hint=unicode_hint,
exception_type=exception_type,
exception_value=exception_value,
django_version=django_version,
python_version=python_version,
python_executable=python_executable,
python_path=python_path,
server_time=server_time
)
return (errorid, ctx, full_info)
def redirector(request, app, view):
params = {}
for (key, val) in request.GET.iteritems():
params[key] = val
full_name = "%s:%s" % (app, view)
if not view or not app:
logger.error("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
raise Error("Redirector requires both 'app' and 'view' to be set, received: app='%s' view='%s'" % (app, view))
return HttpResponseRedirect(reverse(full_name, kwargs=params))
def default_search(request):
app = request.app_name
lang_code = request.LANGUAGE_CODE
return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/search" % (lang_code, app)))
def default_flashtimeline(request):
app = request.app_name
lang_code = request.LANGUAGE_CODE
return HttpResponseRedirect(make_splunkweb_url("/%s/app/%s/flashtimeline" % (lang_code, app)))
@render_to()
@login_required
def default_template_render(request, template_name):
app = request.app_name
template_path = "%s:%s.html" % (app, template_name)
return {
"TEMPLATE": template_path
}
@never_cache
def handle404(request):
# This code is modified from views/debug.py in Django, as we want to display
# a debug style view, just modified slightly.
exc_info = sys.exc_info()
exception = exc_info[1]
try:
tried = exception.args[0]['tried']
except (IndexError, TypeError, KeyError):
tried = []
urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF)
if isinstance(urlconf, types.ModuleType):
urlconf = urlconf.__name__
c = Context({
'urlconf': urlconf,
'root_urlconf': settings.ROOT_URLCONF,
'request_path': request.path_info[1:], # Trim leading slash
'urlpatterns': tried,
'reason': force_bytes(exception, errors='replace'),
'request': request,
'settings': get_safe_settings(),
})
return HttpResponseNotFound(render_to_string('splunkdj:404.html', context_instance=c))
@never_cache
def handle500(request):
# Let's attempt to render a more useful error message
errorid, ctx, exception = get_exception_info(request)
# We log the raw error to the log file, so that splunk can pick it up as
# JSON.
error_logger.error(json.dumps(exception, sort_keys=True))
# Build up the URL for making the query
lang_code = request.LANGUAGE_CODE
query_args = {
"q": 'search index=_internal sourcetype=django_error "%s" | head 1 | spath' % errorid,
"display.events.maxlines": 0,
"display.general.type": "events",
"earliest": 0,
"latest": ""
}
query_string = urllib.urlencode(query_args)
ctx['search_url'] = make_splunkweb_url("/%s/app/search/search?%s" % (lang_code, query_string))
return HttpResponseServerError(render_to_string('splunkdj:500.html', context_instance=Context(ctx)))
@never_cache
@render_to('splunkdj:page_config.html', mimetype="application/javascript")
@login_required
def get_page_config(request):
referer = request.META.get("HTTP_REFERER", "")
app = ""
app_label = ""
if referer:
try:
parsed = urlparse(referer)
parsed_path = parsed.path.replace("/%s/" % settings.MOUNT, "/")
resolved = resolve(parsed_path)
app = resolved.app_name
if app:
app_label = request.service.apps[app]["label"]
except Exception, e:
# If there was an error here, don't kill the entire page
# just return some default info
app = app or ""
app_label = app_label or app
zone_info = request.service.get('/services/search/timeparser/tz').body.read()
return {
"autoload": "1" == request.GET.get("autoload", "0"),
"config": json.dumps({
"SPLUNKD_FREE_LICENSE": request.user.is_free,
"MRSPARKLE_ROOT_PATH": "/%s" % str(settings.SPLUNK_WEB_MOUNT).strip("/"),
"DJANGO_ROOT_PATH": "/%s" % str(settings.RAW_MOUNT),
"MRSPARKLE_PORT_NUMBER": str(settings.SPLUNK_WEB_PORT),
"DJANGO_PORT_NUMBER": str(settings.DJANGO_PORT),
"LOCALE": str(request.LANGUAGE_CODE),
"JS_LOGGER_MODE": "None",
"USERNAME": str(request.user.username),
"USER_DISPLAYNAME": str(request.user.realname),
"APP": str(app),
"APP_DISPLAYNAME": str(app_label),
"SERVER_ZONEINFO": str(zone_info),
})
}
| 1.96875 | 2 |
wbia/plottool/interact_keypoints.py | mmulich/wildbook-ia | 0 | 7070 | <filename>wbia/plottool/interact_keypoints.py
# -*- coding: utf-8 -*-
import logging
import utool as ut
import six
from . import draw_func2 as df2
from wbia.plottool import plot_helpers as ph
from wbia.plottool import interact_helpers as ih
from wbia.plottool.viz_featrow import draw_feat_row
from wbia.plottool.viz_keypoints import show_keypoints
from wbia.plottool import abstract_interaction
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
class KeypointInteraction(abstract_interaction.AbstractInteraction):
r"""
CommandLine:
python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show
python -m wbia.plottool.interact_keypoints --exec-KeypointInteraction --show --fname=lena.png
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.interact_keypoints import * # NOQA
>>> import numpy as np
>>> import wbia.plottool as pt
>>> import utool as ut
>>> import pyhesaff
>>> import vtool as vt
>>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
>>> ut.quit_if_noshow()
>>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
>>> pt.interact_keypoints.KeypointInteraction(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, autostart=True)
>>> pt.show_if_requested()
"""
def __init__(self, chip, kpts, vecs, fnum=0, figtitle=None, **kwargs):
self.chip = chip
self.kpts = kpts
self.vecs = vecs
self.figtitle = figtitle
self.mode = 0
super(KeypointInteraction, self).__init__(**kwargs)
def plot(self, fnum=None, pnum=(1, 1, 1), **kwargs):
import wbia.plottool as pt
fnum = pt.ensure_fnum(fnum)
pt.figure(fnum=fnum, docla=True, doclf=True)
show_keypoints(self.chip, self.kpts, fnum=fnum, pnum=pnum, **kwargs)
if self.figtitle is not None:
pt.set_figtitle(self.figtitle)
def _select_ith_kpt(self, fx):
logger.info('[interact] viewing ith=%r keypoint' % fx)
        # Get the fx-th keypoint
kp, sift = self.kpts[fx], self.vecs[fx]
# Draw the image with keypoint fx highlighted
self.plot(self.fnum, (2, 1, 1), sel_fx=fx)
# Draw the selected feature
nRows, nCols, px = (2, 3, 3)
draw_feat_row(self.chip, fx, kp, sift, self.fnum, nRows, nCols, px, None)
def on_click_outside(self, event):
self.mode = (self.mode + 1) % 3
ell = self.mode == 1
pts = self.mode == 2
logger.info('... default kpts view mode=%r' % self.mode)
self.plot(self.fnum, ell=ell, pts=pts)
self.draw()
def on_click_inside(self, event, ax):
import wbia.plottool as pt
viztype = ph.get_plotdat(ax, 'viztype', None)
logger.info('[ik] viztype=%r' % viztype)
if viztype is None:
pass
elif viztype == 'keypoints':
kpts = ph.get_plotdat(ax, 'kpts', [])
if len(kpts) == 0:
logger.info('...nokpts')
else:
logger.info('...nearest')
x, y = event.xdata, event.ydata
import vtool as vt
fx = vt.nearest_point(x, y, kpts)[0]
self._select_ith_kpt(fx)
elif viztype == 'warped':
hs_fx = ph.get_plotdat(ax, 'fx', None)
if hs_fx is not None:
kp = self.kpts[hs_fx] # FIXME
sift = self.vecs[hs_fx]
df2.draw_keypoint_gradient_orientations(
self.chip, kp, sift=sift, mode='vec', fnum=pt.next_fnum()
)
pt.draw()
elif viztype.startswith('colorbar'):
pass
else:
logger.info('...unhandled')
self.draw()
def ishow_keypoints(chip, kpts, desc, fnum=0, figtitle=None, nodraw=False, **kwargs):
"""
TODO: Depricate in favor of the class
CommandLine:
python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show
python -m wbia.plottool.interact_keypoints --test-ishow_keypoints --show --fname zebra.png
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.plottool.interact_keypoints import * # NOQA
>>> import numpy as np
>>> import wbia.plottool as pt
>>> import utool as ut
>>> import pyhesaff
>>> import vtool as vt
>>> kpts, vecs, imgBGR = pt.viz_keypoints.testdata_kpts()
>>> ut.quit_if_noshow()
>>> #pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4, color='distinct')
>>> pt.interact_keypoints.ishow_keypoints(imgBGR, kpts, vecs, ori=True, ell_alpha=.4)
>>> pt.show_if_requested()
"""
if isinstance(chip, six.string_types):
import vtool as vt
chip = vt.imread(chip)
fig = ih.begin_interaction('keypoint', fnum)
annote_ptr = [1]
self = ut.DynStruct() # MOVE TO A CLASS INTERACTION
self.kpts = kpts
vecs = desc
self.vecs = vecs
def _select_ith_kpt(fx):
logger.info('[interact] viewing ith=%r keypoint' % fx)
# Get the fx-th keypiont
kp, sift = kpts[fx], vecs[fx]
# Draw the image with keypoint fx highlighted
_viz_keypoints(fnum, (2, 1, 1), sel_fx=fx, **kwargs) # MAYBE: remove kwargs
# Draw the selected feature
nRows, nCols, px = (2, 3, 3)
draw_feat_row(chip, fx, kp, sift, fnum, nRows, nCols, px, None)
def _viz_keypoints(fnum, pnum=(1, 1, 1), **kwargs):
df2.figure(fnum=fnum, docla=True, doclf=True)
show_keypoints(chip, kpts, fnum=fnum, pnum=pnum, **kwargs)
if figtitle is not None:
df2.set_figtitle(figtitle)
def _on_keypoints_click(event):
logger.info('[viz] clicked keypoint view')
if event is None or event.xdata is None or event.inaxes is None:
annote_ptr[0] = (annote_ptr[0] + 1) % 3
mode = annote_ptr[0]
ell = mode == 1
pts = mode == 2
logger.info('... default kpts view mode=%r' % mode)
_viz_keypoints(fnum, ell=ell, pts=pts, **kwargs) # MAYBE: remove kwargs
else:
ax = event.inaxes
viztype = ph.get_plotdat(ax, 'viztype', None)
logger.info('[ik] viztype=%r' % viztype)
if viztype == 'keypoints':
kpts = ph.get_plotdat(ax, 'kpts', [])
if len(kpts) == 0:
logger.info('...nokpts')
else:
logger.info('...nearest')
x, y = event.xdata, event.ydata
import vtool as vt
fx = vt.nearest_point(x, y, kpts)[0]
_select_ith_kpt(fx)
elif viztype == 'warped':
hs_fx = ph.get_plotdat(ax, 'fx', None)
# kpts = ph.get_plotdat(ax, 'kpts', [])
if hs_fx is not None:
# Ugly. Interactions should be changed to classes.
kp = self.kpts[hs_fx] # FIXME
sift = self.vecs[hs_fx]
df2.draw_keypoint_gradient_orientations(
chip, kp, sift=sift, mode='vec', fnum=df2.next_fnum()
)
elif viztype.startswith('colorbar'):
pass
# Hack to get a specific scoring feature
# sortx = self.fs.argsort()
# idx = np.clip(int(np.round(y * len(sortx))), 0, len(sortx) - 1)
# mx = sortx[idx]
# (fx1, fx2) = self.fm[mx]
# (fx1, fx2) = self.fm[mx]
# logger.info('... selected score at rank idx=%r' % (idx,))
# logger.info('... selected score with fs=%r' % (self.fs[mx],))
# logger.info('... resolved to mx=%r' % mx)
# logger.info('... fx1, fx2 = %r, %r' % (fx1, fx2,))
# self.select_ith_match(mx)
else:
logger.info('...unhandled')
ph.draw()
# Draw without keypoints the first time
_viz_keypoints(fnum, **kwargs) # MAYBE: remove kwargs
ih.connect_callback(fig, 'button_press_event', _on_keypoints_click)
if not nodraw:
ph.draw()
| 2.234375 | 2 |
tb/storage/__init__.py | DronMDF/manabot | 1 | 7071 | from .database import StDatabase
from .telegram import StTelegram
from .tinydb import TinyDataBase, TinySelect
from .utility import StDispatch
| 1.09375 | 1 |
algorithms/maths/chinese_remainder_theorem.py | hbqdev/algorithms | 22,426 | 7072 | <reponame>hbqdev/algorithms<filename>algorithms/maths/chinese_remainder_theorem.py
from algorithms.maths.gcd import gcd
from typing import List
def solve_chinese_remainder(num : List[int], rem : List[int]):
"""
Computes the smallest x that satisfies the chinese remainder theorem
for a system of equations.
The system of equations has the form:
x % num[0] = rem[0]
x % num[1] = rem[1]
...
x % num[k - 1] = rem[k - 1]
Where k is the number of elements in num and rem, k > 0.
    All numbers in num need to be pairwise coprime, otherwise an exception is raised.
returns x: the smallest value for x that satisfies the system of equations
"""
if not len(num) == len(rem):
raise Exception("num and rem should have equal length")
if not len(num) > 0:
raise Exception("Lists num and rem need to contain at least one element")
for n in num:
if not n > 1:
raise Exception("All numbers in num needs to be > 1")
if not _check_coprime(num):
raise Exception("All pairs of numbers in num are not coprime")
k = len(num)
x = 1
while True:
i = 0
while i < k:
if x % num[i] != rem[i]:
break
i += 1
if i == k:
return x
else:
x += 1
def _check_coprime(l : List[int]):
for i in range(len(l)):
for j in range(len(l)):
if i == j:
continue
if gcd(l[i], l[j]) != 1:
return False
return True
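# Illustrative usage (not part of the original module; assumes the `algorithms`
# package is importable so that the gcd import above resolves):
#
#   solve_chinese_remainder([3, 5, 7], [2, 3, 2])  # -> 23, since 23 % 3 == 2,
#                                                  #    23 % 5 == 3 and 23 % 7 == 2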
| 3.84375 | 4 |
exercises/ja/exc_03_16_01.py | Jette16/spacy-course | 2,085 | 7073 | <reponame>Jette16/spacy-course<gh_stars>1000+
import spacy
nlp = spacy.load("ja_core_news_sm")
text = (
"チックフィレイはジョージア州カレッジパークに本社を置く、"
"チキンサンドを専門とするアメリカのファストフードレストランチェーンです。"
)
# Tokenize only
doc = nlp(text)
print([token.text for token in doc])
| 2.71875 | 3 |
apphelper/image.py | caiyueliang/chineseocr | 0 | 7074 | <filename>apphelper/image.py
# -*- coding: utf-8 -*-
"""
## image-related helper functions
@author: lywen
"""
import sys
import six
import os
import base64
import requests
import numpy as np
import cv2
from PIL import Image
import traceback
import uuid
from glob import glob
from bs4 import BeautifulSoup
def sort_box_(box):
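    """Return the 8 corner coordinates of a quadrilateral re-ordered as
    top-left, top-right, bottom-right, bottom-left (via _order_points)."""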
x1,y1,x2,y2,x3,y3,x4,y4 = box[:8]
pts = (x1,y1),(x2,y2),(x3,y3),(x4,y4)
pts = np.array(pts, dtype="float32")
(x1,y1),(x2,y2),(x3,y3),(x4,y4) = _order_points(pts)
"""
newBox = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]]
## sort x
newBox = sorted(newBox,key=lambda x:x[0])
x1,y1 = sorted(newBox[:2],key=lambda x:x[1])[0]
index = newBox.index([x1,y1])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[1])
x4,y4 = sorted(newBox[:2],key=lambda x:x[0])[0]
index = newBox.index([x4,y4])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[0])
x2,y2 = sorted(newBox[:2],key=lambda x:x[1])[0]
index = newBox.index([x2,y2])
newBox.pop(index)
newBox = sorted(newBox,key=lambda x:-x[1])
x3,y3 = sorted(newBox[:2],key=lambda x:x[0])[0]
"""
return x1,y1,x2,y2,x3,y3,x4,y4
import numpy as np
from scipy.spatial import distance as dist
def _order_points(pts):
    # sort the points by their x coordinates
    """
    ---------------------
    Author: Tong_T
    Source: CSDN
    Original post: https://blog.csdn.net/Tong_T/article/details/81907132
    Copyright notice: original article by the author; please keep the link when reposting.
    """
x_sorted = pts[np.argsort(pts[:, 0]), :]
    # grab the two left-most and the two right-most points
    # from the x-sorted array
left_most = x_sorted[:2, :]
right_most = x_sorted[2:, :]
    # now sort the left-most points by their y coordinate so we can pick out the top-left and bottom-left corners
left_most = left_most[np.argsort(left_most[:, 1]), :]
(tl, bl) = left_most
    # with the top-left point as an anchor, compute the Euclidean distance to the two right-most points;
    # by the Pythagorean theorem, the point with the largest distance is the bottom-right corner
distance = dist.cdist(tl[np.newaxis], right_most, "euclidean")[0]
(br, tr) = right_most[np.argsort(distance)[::-1], :]
    # return the coordinates ordered top-left, top-right, bottom-right, bottom-left
return np.array([tl, tr, br, bl], dtype="float32")
def solve(box):
"""
    Recover (angle, w, h, cx, cy) of a w x h box rotated by `angle` about its centre (cx, cy),
    given its four corner points. Derivation, using the first corner (x, y) = (cx - w/2, cy - h/2):
    x1 - cx = -w/2*cos(angle) + h/2*sin(angle)
    y1 - cy = -w/2*sin(angle) - h/2*cos(angle)
    h*(x1 - cx) = -w*h/2*cos(angle) + h*h/2*sin(angle)
    w*(y1 - cy) = -w*w/2*sin(angle) - w*h/2*cos(angle)
    (h*h + w*w)/2 * sin(angle) = h*(x1 - cx) - w*(y1 - cy)
"""
x1,y1,x2,y2,x3,y3,x4,y4= box[:8]
cx = (x1+x3+x2+x4)/4.0
cy = (y1+y3+y4+y2)/4.0
w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2
h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2
#x = cx-w/2
#y = cy-h/2
sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2
if abs(sinA)>1:
angle = None
else:
angle = np.arcsin(sinA)
return angle,w,h,cx,cy
def read_singLine_for_yolo(p):
"""
    Single-line text: the whole image is treated as one box.
"""
im = Image.open(p).convert('RGB')
w,h = im.size
boxes = [{'cx':w/2,'cy':h/2,'w':w,'h':h,'angle':0.0}]
return im,boxes
def read_voc_xml(p):
    ## read a VOC xml annotation file
boxes = []
if os.path.exists(p):
with open(p) as f:
xmlString = f.read()
xmlString = BeautifulSoup(xmlString,'lxml')
objList = xmlString.findAll('object')
for obj in objList:
robndbox = obj.find('robndbox')
bndbox = obj.find('bndbox')
if robndbox is not None and bndbox is None:
cx = np.float(robndbox.find('cx').text)
cy = np.float(robndbox.find('cy').text)
w = np.float(robndbox.find('w').text)
h = np.float(robndbox.find('h').text)
angle = robndbox.find('angle').text
if angle=='nan' or h==0 or w==0:
#boxes = []
continue
angle = np.float(angle)
if abs(angle)>np.pi/2:
w,h = h,w
angle = abs(angle)%(np.pi/2)*np.sign(angle)
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle)
x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4])
"""
if abs(angle)>np.pi/2:
##lableImg bug
x1,y1,x2,y2,x3,y3,x4,y4 = sort_box_([x1,y1,x2,y2,x3,y3,x4,y4])
"""
angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4])
else:
xmin = np.float(bndbox.find('xmin').text)
xmax = np.float(bndbox.find('xmax').text)
ymin = np.float(bndbox.find('ymin').text)
ymax = np.float(bndbox.find('ymax').text)
cx = (xmin+xmax)/2.0
cy = (ymin+ymax)/2.0
w = (-xmin+xmax)#/2.0
h = (-ymin+ymax)#/2.0
angle =0.0
boxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle})
return boxes
def xy_rotate_box(cx,cy,w,h,angle):
"""
    Corner coordinates of a w x h box rotated by `angle` about the point (cx, cy)
    x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle) + cx
    y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle) + cy
"""
cx = float(cx)
cy = float(cy)
w = float(w)
h = float(h)
angle = float(angle)
x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy)
x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy)
x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy)
x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy)
return x1,y1,x2,y2,x3,y3,x4,y4
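# Illustrative round trip (an assumed usage sketch, not part of the original module):
#   corners = xy_rotate_box(100, 50, 40, 20, 0.1)
#   angle, w, h, cx, cy = solve(corners)   # recovers approximately (0.1, 40, 20, 100, 50)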
from numpy import cos,sin,pi,tan
def rotate(x,y,angle,cx,cy):
"""
    Rotate the point (x, y) about the point (cx, cy)
"""
#angle = angle*pi/180
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
return x_new,y_new
def resize_box(boxes,scale):
newBoxes = []
for box in boxes:
cx = box['cx']*scale
cy = box['cy']*scale
w = box['w']*scale
h = box['h']*scale
angle = box['angle']
newBoxes.append({'cx':cx,'cy':cy,'w':w,'h':h,'angle':angle})
return newBoxes
def resize_im(w,h, scale=416, max_scale=608):
f=float(scale)/min(h, w)
if max_scale is not None:
if f*max(h, w)>max_scale:
f=float(max_scale)/max(h, w)
newW,newH = int(w*f),int(h*f)
return newW-(newW%32),newH-(newH%32)
def get_rorate(boxes,im,degree=0):
"""
    Return the image and boxes after rotating by the given degree
"""
imgW,imgH = im.size
newBoxes = []
for line in boxes:
cx0,cy0 = imgW/2.0,imgH/2.0
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(**line)
x1,y1 = rotate(x1,y1,-degree/180*np.pi,cx0,cy0)
x2,y2 = rotate(x2,y2,-degree/180*np.pi,cx0,cy0)
x3,y3 = rotate(x3,y3,-degree/180*np.pi,cx0,cy0)
x4,y4 = rotate(x4,y4,-degree/180*np.pi,cx0,cy0)
box = (x1,y1,x2,y2,x3,y3,x4,y4)
degree_,w_,h_,cx_,cy_ = solve(box)
newLine = {'angle':degree_,'w':w_,'h':h_,'cx':cx_,'cy':cy_}
newBoxes.append(newLine)
return im.rotate(degree,center=(imgW/2.0,imgH/2.0 )),newBoxes
def letterbox_image(image, size,fillValue=[128,128,128]):
'''
resize image with unchanged aspect ratio using padding
'''
image_w, image_h = image.size
w, h = size
new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
resized_image = image.resize((new_w,new_h), Image.BICUBIC)
if fillValue is None:
        fillValue = [int(x.mean()) for x in cv2.split(np.array(image))]
boxed_image = Image.new('RGB', size, tuple(fillValue))
    boxed_image.paste(resized_image, (0, 0))
return boxed_image,new_w/image_w
def box_split(boxes,splitW = 15):
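    """Split each (possibly rotated) box into axis-aligned slices of width `splitW`
    along the text direction; returns one list of [xmin, ymin, xmax, ymax] per box."""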
newBoxes = []
for box in boxes:
w = box['w']
h = box['h']
cx = box['cx']
cy=box['cy']
angle = box['angle']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx,cy,w,h,angle)
splitBoxes =[]
i = 1
tanAngle = tan(-angle)
while True:
flag = 0 if i==1 else 1
xmin = x1+(i-1)*splitW
ymin = y1-tanAngle*splitW*i
xmax = x1+i*splitW
ymax = y4-(i-1)*tanAngle*splitW +flag*tanAngle*(x4-x1)
if xmax>max(x2,x3) and xmin>max(x2,x3):
break
splitBoxes.append([int(xmin),int(ymin),int(xmax),int(ymax)])
i+=1
newBoxes.append(splitBoxes)
return newBoxes
def get_box_spilt(boxes,im,sizeW,SizeH,splitW=8,isRoate=False,rorateDegree=0):
"""
    isRoate: whether to rotate the boxes (and image) before splitting
"""
size = sizeW,SizeH
if isRoate:
        ## rotate the boxes
im,boxes = get_rorate(boxes,im,degree=rorateDegree)
newIm,f = letterbox_image(im, size)
newBoxes = resize_box(boxes,f)
newBoxes = sum(box_split(newBoxes,splitW),[])
newBoxes = [box+[1] for box in newBoxes]
return newBoxes,newIm
def box_rotate(box,angle=0,imgH=0,imgW=0):
"""
    Rotate the box coordinates counter-clockwise by 0/90/180/270 degrees
"""
x1,y1,x2,y2,x3,y3,x4,y4 = box[:8]
if angle==90:
x1_,y1_ = y2,imgW-x2
x2_,y2_ = y3,imgW-x3
x3_,y3_ = y4,imgW-x4
x4_,y4_ = y1,imgW-x1
elif angle==180:
x1_,y1_ = imgW-x3,imgH-y3
x2_,y2_ = imgW-x4,imgH-y4
x3_,y3_ = imgW-x1,imgH-y1
x4_,y4_ = imgW-x2,imgH-y2
elif angle==270:
x1_,y1_ = imgH-y4,x4
x2_,y2_ = imgH-y1,x1
x3_,y3_ = imgH-y2,x2
x4_,y4_ = imgH-y3,x3
else:
x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_ = x1,y1,x2,y2,x3,y3,x4,y4
return (x1_,y1_,x2_,y2_,x3_,y3_,x4_,y4_)
def solve(box):
"""
    Recover (angle, w, h, cx, cy) of a w x h box rotated by `angle` about its centre (cx, cy),
    given its four corner points. Derivation, using the first corner (x, y) = (cx - w/2, cy - h/2):
    x1 - cx = -w/2*cos(angle) + h/2*sin(angle)
    y1 - cy = -w/2*sin(angle) - h/2*cos(angle)
    h*(x1 - cx) = -w*h/2*cos(angle) + h*h/2*sin(angle)
    w*(y1 - cy) = -w*w/2*sin(angle) - w*h/2*cos(angle)
    (h*h + w*w)/2 * sin(angle) = h*(x1 - cx) - w*(y1 - cy)
"""
x1,y1,x2,y2,x3,y3,x4,y4= box[:8]
cx = (x1+x3+x2+x4)/4.0
cy = (y1+y3+y4+y2)/4.0
w = (np.sqrt((x2-x1)**2+(y2-y1)**2)+np.sqrt((x3-x4)**2+(y3-y4)**2))/2
h = (np.sqrt((x2-x3)**2+(y2-y3)**2)+np.sqrt((x1-x4)**2+(y1-y4)**2))/2
sinA = (h*(x1-cx)-w*(y1 -cy))*1.0/(h*h+w*w)*2
angle = np.arcsin(sinA)
return angle,w,h,cx,cy
from numpy import cos,sin,pi
def rotate(x,y,angle,cx,cy):
angle = angle#*pi/180
x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle)+cx
y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle)+cy
return x_new,y_new
def xy_rotate_box(cx,cy,w,h,angle):
"""
    Corner coordinates of a w x h box rotated by `angle` about the point (cx, cy)
    x_new = (x-cx)*cos(angle) - (y-cy)*sin(angle) + cx
    y_new = (x-cx)*sin(angle) + (y-cy)*cos(angle) + cy
"""
cx = float(cx)
cy = float(cy)
w = float(w)
h = float(h)
angle = float(angle)
x1,y1 = rotate(cx-w/2,cy-h/2,angle,cx,cy)
x2,y2 = rotate(cx+w/2,cy-h/2,angle,cx,cy)
x3,y3 = rotate(cx+w/2,cy+h/2,angle,cx,cy)
x4,y4 = rotate(cx-w/2,cy+h/2,angle,cx,cy)
return x1,y1,x2,y2,x3,y3,x4,y4
# def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2):
# x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
# # print('rotate_cut_img', x1, y1, x2, y2, x3, y3, x4, y4)
#
# x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4])
# right = 0
# left = 0
# if rightAdjust:
# right = 1
# if leftAdjust:
# left = 1
#
# # print(im.shape)
# box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin
# y_center - h / 2, # ymin
# min(x_center + w / 2 + right * alph * (w / 2), im.shape[1] - 1), # xmax
# y_center + h / 2) # ymax
# # print('box', box)
#
# newW = int(box[2] - box[0])
# newH = int(box[3] - box[1])
#
# # =====================================================
# # remap_points = np.array([[0, 0], [164, 0], [164, 48], [0, 48]], dtype=np.float32)
# remap_points = np.array([[0, 0], [newW, 0], [newW, newH], [0, newH]], dtype=np.float32)
# old_points = np.array([[x1, y1], [x2, y2], [x3, y3], [x4, y4]], dtype=np.float32)
#     # perspective transform using the OpenCV function
# M = cv2.getPerspectiveTransform(old_points, remap_points)
# tmpImg = cv2.warpPerspective(im, M, (newW, newH))
# # cv2.imshow('rotate_cut_img', tmpImg)
# # cv2.waitKey(0)
#
# return tmpImg, newW, newH
def rotate_cut_img(im, degree, box, w, h, leftAdjust=False, rightAdjust=False, alph=0.2):
x1, y1, x2, y2, x3, y3, x4, y4 = box[:8]
x_center, y_center = np.mean([x1, x2, x3, x4]), np.mean([y1, y2, y3, y4])
degree_ = degree * 180.0 / np.pi
right = 0
left = 0
if rightAdjust:
right = 1
if leftAdjust:
left = 1
box = (max(1, x_center - w / 2 - left * alph * (w / 2)), # xmin
y_center - h / 2, # ymin
min(x_center + w / 2 + right * alph * (w / 2), im.size[0] - 1), # xmax
y_center + h / 2) # ymax
newW = box[2] - box[0]
newH = box[3] - box[1]
tmpImg = im.rotate(degree_, center=(x_center, y_center)).crop(box)
return tmpImg, newW, newH
def letterbox_image(image, size, fillValue=[128, 128, 128]):
'''resize image with unchanged aspect ratio using padding'''
image_w, image_h = image.size
w, h = size
new_w = int(image_w * min(w*1.0/image_w, h*1.0/image_h))
new_h = int(image_h * min(w*1.0/image_w, h*1.0/image_h))
resized_image = image.resize((new_w,new_h), Image.BICUBIC)
if fillValue is None:
        fillValue = [int(x.mean()) for x in cv2.split(np.array(image))]
boxed_image = Image.new('RGB', size, tuple(fillValue))
boxed_image.paste(resized_image, (0,0))
return boxed_image,new_w/image_w
from scipy.ndimage import filters,interpolation,morphology,measurements,minimum
#from pylab import amin, amax
from numpy import amin, amax
def estimate_skew_angle(raw):
"""
    Estimate the skew angle of the text in the image
"""
def resize_im(im, scale, max_scale=None):
f=float(scale)/min(im.shape[0], im.shape[1])
if max_scale!=None and f*max(im.shape[0], im.shape[1])>max_scale:
f=float(max_scale)/max(im.shape[0], im.shape[1])
return cv2.resize(im, (0, 0), fx=f, fy=f)
raw = resize_im(raw, scale=600, max_scale=900)
image = raw-amin(raw)
image = image/amax(image)
m = interpolation.zoom(image,0.5)
m = filters.percentile_filter(m,80,size=(20,2))
m = filters.percentile_filter(m,80,size=(2,20))
m = interpolation.zoom(m,1.0/0.5)
w,h = min(image.shape[1],m.shape[1]),min(image.shape[0],m.shape[0])
flat = np.clip(image[:h,:w]-m[:h,:w]+1,0,1)
d0,d1 = flat.shape
o0,o1 = int(0.1*d0),int(0.1*d1)
flat = amax(flat)-flat
flat -= amin(flat)
est = flat[o0:d0-o0,o1:d1-o1]
angles = range(-15,15)
estimates = []
for a in angles:
roest =interpolation.rotate(est,a,order=0,mode='constant')
v = np.mean(roest,axis=1)
v = np.var(v)
estimates.append((v,a))
_,a = max(estimates)
return a
def sort_box(box):
"""
    Sort boxes from top to bottom to lay out the page
box[index, 0] = x1
box[index, 1] = y1
box[index, 2] = x2
box[index, 3] = y2
box[index, 4] = x3
box[index, 5] = y3
box[index, 6] = x4
box[index, 7] = y4
"""
box = sorted(box,key=lambda x:sum([x[1],x[3],x[5],x[7]]))
return list(box)
def get_boxes( bboxes):
"""
boxes: bounding boxes
"""
text_recs=np.zeros((len(bboxes), 8), np.int)
index = 0
for box in bboxes:
b1 = box[6] - box[7] / 2
b2 = box[6] + box[7] / 2
x1 = box[0]
y1 = box[5] * box[0] + b1
x2 = box[2]
y2 = box[5] * box[2] + b1
x3 = box[0]
y3 = box[5] * box[0] + b2
x4 = box[2]
y4 = box[5] * box[2] + b2
disX = x2 - x1
disY = y2 - y1
width = np.sqrt(disX*disX + disY*disY)
fTmp0 = y3 - y1
fTmp1 = fTmp0 * disY / width
x = np.fabs(fTmp1*disX / width)
y = np.fabs(fTmp1*disY / width)
if box[5] < 0:
x1 -= x
y1 += y
x4 += x
y4 -= y
else:
x2 += x
y2 += y
x3 -= x
y3 -= y
text_recs[index, 0] = x1
text_recs[index, 1] = y1
text_recs[index, 2] = x2
text_recs[index, 3] = y2
text_recs[index, 4] = x3
text_recs[index, 5] = y3
text_recs[index, 6] = x4
text_recs[index, 7] = y4
index = index + 1
return text_recs
def union_rbox(result,alpha=0.1):
"""
    Merge boxes that belong to the same text line
"""
def diff(box1,box2):
"""
        Compute the normalised vertical distance between box1 and box2
"""
cy1 = box1['cy']
cy2 = box2['cy']
h1 = box1['h']
h2 = box2['h']
return abs(cy1-cy2)/max(0.01,min(h1/2,h2/2))
def sort_group_box(boxes):
"""
        Sort the boxes from left to right and merge them into a single box
"""
N = len(boxes)
boxes = sorted(boxes,key=lambda x:x['cx'])
text = ' '.join([bx['text'] for bx in boxes])
box4 = np.zeros((N,8))
for i in range(N):
cx =boxes[i]['cx']
cy = boxes[i]['cy']
degree =boxes[i]['degree']
w = boxes[i]['w']
h = boxes[i]['h']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi)
box4[i] = [x1,y1,x2,y2,x3,y3,x4,y4]
x1 = box4[:,0].min()
y1 = box4[:,1].min()
x2 = box4[:,2].max()
y2 = box4[:,3].min()
x3 = box4[:,4].max()
y3 = box4[:,5].max()
x4 = box4[:,6].min()
y4 = box4[:,7].max()
angle,w,h,cx,cy = solve([x1,y1,x2,y2,x3,y3,x4,y4])
return {'text':text,'cx':cx,'cy':cy,'w':w,'h':h,'degree':angle/np.pi*180}
newBox = []
for line in result:
if len(newBox)==0:
newBox.append([line])
else:
check=False
for box in newBox[-1]:
if diff(line,box)>alpha:
check = True
if not check:
newBox[-1].append(line)
else:
newBox.append([line])
newBox = [sort_group_box(bx) for bx in newBox]
return newBox
def adjust_box_to_origin(img,angle, result):
"""
    Map boxes back to the coordinate frame of the original image
"""
h,w = img.shape[:2]
if angle in [90,270]:
imgW,imgH = img.shape[:2]
else:
imgH,imgW= img.shape[:2]
newresult = []
for line in result:
cx =line['box']['cx']
cy = line['box']['cy']
degree =line['box']['angle']
w = line['box']['w']
h = line['box']['h']
x1,y1,x2,y2,x3,y3,x4,y4 = xy_rotate_box(cx, cy, w, h, degree/180*np.pi)
x1,y1,x2,y2,x3,y3,x4,y4 = box_rotate([x1,y1,x2,y2,x3,y3,x4,y4],angle=(360-angle)%360,imgH=imgH,imgW=imgW)
box = x1,y1,x2,y2,x3,y3,x4,y4
newresult.append({'name':line['name'],'text':line['text'],'box':box})
return newresult | 2.640625 | 3 |
opendatatools/common/ui_util.py | harveywwu/OpenData | 0 | 7075 | # -*- coding: UTF-8 -*-
import sys, time
class ShowProcess():
"""
    A class that displays processing progress.
    Call its methods to render a text progress bar in the terminal.
    """
    i = 0  # current step
    max_steps = 0  # total number of steps to process
    max_arrow = 50  # length of the progress bar
infoDone = 'done'
    # The constructor needs to know the total number of steps to process
def __init__(self, max_steps, infoDone = 'Done'):
self.max_steps = max_steps
self.i = 0
self.infoDone = infoDone
    # Render the bar according to the current progress i.
    # Output looks like: [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>]100.00%
def show_process(self, i=None):
if i is not None:
self.i = i
else:
self.i += 1
        num_arrow = int(self.i * self.max_arrow / self.max_steps)  # how many '>' to draw
        num_line = self.max_arrow - num_arrow  # how many '-' to draw
        percent = self.i * 100.0 / self.max_steps  # completion percentage, formatted as xx.xx%
        process_bar = '[' + '>' * num_arrow + '-' * num_line + ']'\
                      + '%.2f' % percent + '%' + '\r'  # '\r' moves the cursor back to the line start without a newline
        sys.stdout.write(process_bar)  # write without a trailing newline so the bar redraws in place
        sys.stdout.flush()
if self.i >= self.max_steps:
self.close()
def close(self):
print('')
print(self.infoDone)
self.i = 0
if __name__=='__main__':
max_steps = 100
process_bar = ShowProcess(max_steps, 'OK')
for i in range(max_steps):
process_bar.show_process()
time.sleep(0.1) | 3.484375 | 3 |
data_structure/stack_and_queue/494. Target Sum_ Medium.py | JunzhongLin/leetcode_practice | 0 | 7076 | <filename>data_structure/stack_and_queue/494. Target Sum_ Medium.py
'''
You are given an integer array nums and an integer target.
You want to build an expression out of nums by adding one of the symbols '+' and '-' before each integer in nums and then concatenate all the integers.
For example, if nums = [2, 1], you can add a '+' before 2 and a '-' before 1 and concatenate them to build the expression "+2-1".
Return the number of different expressions that you can build, which evaluates to target.
'''
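# Worked example: for nums = [1, 1, 1, 1, 1] and target = 3 there are 5 valid expressions
# (-1+1+1+1+1, +1-1+1+1+1, +1+1-1+1+1, +1+1+1-1+1, +1+1+1+1-1),
# so findTargetSumWays([1, 1, 1, 1, 1], 3) should return 5, matching the driver code below.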
from collections import defaultdict
class Solution:
def findTargetSumWays(self, nums, target) -> int:
count = 0
target_depth = len(nums) - 1
stack = [(0, -1, 0)]
cache = defaultdict(int)
while stack:
# print(stack)
# count += 1
# if count == 10:
# break
curr_sum, depth, visited = stack.pop()
if visited:
if depth == target_depth:
if curr_sum == target:
cache[(curr_sum, depth, visited)] = 1
else:
l = cache[(curr_sum + nums[depth + 1], depth + 1, 1)]
r = cache[(curr_sum - nums[depth + 1], depth + 1, 1)]
cache[(curr_sum, depth, visited)] = l + r
continue
else:
if (curr_sum, depth, 1) in cache:
continue
stack.append((curr_sum, depth, 1))
if depth < target_depth:
stack.append((curr_sum + nums[depth + 1], depth + 1, 0))
stack.append((curr_sum - nums[depth + 1], depth + 1, 0))
return cache[(0, -1, 1)]
input_val, target = [1,1,1,1,1], 3
res = Solution().findTargetSumWays(input_val, target) | 3.703125 | 4 |
fixtrack/frontend/pickable_markers.py | os-gabe/fixtrack | 0 | 7077 | import numpy as np
from fixtrack.frontend.pickable_base import PickableBase
from vispy import scene
class PickableMarkers(PickableBase):
"""
Markers that can highlight on hover and be selected
"""
class State(PickableBase.State):
def __init__(self, **kwargs):
super(PickableMarkers.State, self).__init__(**kwargs)
self.sizes_raw = None
self.sizes = None
class Config(PickableBase.Config):
def __init__(self, select_scale=1.0, hover_scale=1.0, **kwargs):
super(PickableMarkers.Config, self).__init__(**kwargs)
self.select_scale = select_scale
self.hover_scale = hover_scale
_kwargs_ignore = ["size", "color_select", "color_hover"]
def __init__(self, parent=None, data=np.zeros((0, 3)), select_scale=2.0, **kwargs):
super(PickableMarkers, self).__init__(
scene.visuals.Markers(pos=data, parent=parent), data=data, parent=parent, **kwargs
)
self.visual.set_gl_state("translucent", depth_test=False, blend=True)
self._cfg.select_scale = select_scale
self._cfg.hover_scale = select_scale * 1.15
self.multi_sel = None
@property
def marker_size(self):
return self._cfg.vis_args["size"]
@marker_size.setter
def marker_size(self, s):
self._cfg.vis_args["size"] = max(1, s)
self._init_data()
self.set_data()
def _selected_idxs(self):
sel = []
if self.multi_sel is None:
if self._state.idx_selected >= 0:
sel = [self._state.idx_selected]
else:
sel = self.multi_sel
return sel
def _init_data(self):
super(PickableMarkers, self)._init_data()
n = len(self._state.data)
self._state.sizes_raw = np.full((n, ), self._cfg.vis_args["size"])
self._state.sizes = self._state.sizes_raw.copy()
def _highlight(self):
self._state.sizes = self._state.sizes_raw.copy()
super(PickableMarkers, self)._highlight()
def _highlight_selected(self):
super(PickableMarkers, self)._highlight_selected()
cfg = self._cfg
state = self._state
if (state.idx_selected >= 0) and cfg.pickable:
state.sizes[self._selected_idxs()] = cfg.vis_args["size"] * cfg.select_scale
def _highlight_hovered(self):
super(PickableMarkers, self)._highlight_hovered()
cfg = self._cfg
state = self._state
if (state.idx_hover >= 0) and cfg.hoverable:
state.sizes[self._hover_idxs()] = cfg.vis_args["size"] * cfg.hover_scale
def _set_data(self):
if len(self._state.data) > 0:
kwargs = {
k: v
for k, v in self._cfg.vis_args.items() if k not in self._kwargs_ignore
}
self._state.edge_colors[:, 3] = self._state.colors[:, 3]
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=self._state.colors,
edge_color=self._state.edge_colors,
edge_width=3,
**kwargs
)
else:
self.visual.set_data(np.zeros((0, 3)))
def _set_data_false(self):
if len(self._state.data) > 0:
colors = self._pa.unique_colors(id(self)) / 255.0
colors[self._state.colors[:, 3] < 1.0e-3] = 0.0
self.visual.set_data(
pos=self._state.data,
size=self._state.sizes,
face_color=colors,
edge_color=colors,
edge_width=0,
)
else:
self.visual.set_data(np.zeros((0, 3)))
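# Minimal usage sketch (illustrative only; assumes a vispy SceneCanvas and the
# PickableBase machinery imported above):
#
#     canvas = scene.SceneCanvas(keys="interactive", show=True)
#     view = canvas.central_widget.add_view()
#     markers = PickableMarkers(parent=view.scene, data=np.random.rand(20, 3))
#     markers.marker_size = 12  # re-initialises sizes via _init_data()/set_data()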
| 2.390625 | 2 |
examples/blocking_subscribe.py | FFY00/jeepney | 0 | 7078 | <reponame>FFY00/jeepney
"""
Example of subscribing to a D-Bus signal using blocking I/O.
This subscribes to the signal for a desktop notification being closed.
To try it, start this script, then trigger a desktop notification, and close it
somehow to trigger the signal. Use Ctrl-C to stop the script.
This example relies on the ``org.freedesktop.Notifications.NotificationClosed``
signal; some desktops may not support it. See the notification spec for more
details:
https://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest.html
Match rules are defined in the D-Bus specification:
https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules
"""
from jeepney.bus_messages import MatchRule, message_bus
from jeepney.integrate.blocking import connect_and_authenticate, Proxy
from jeepney.wrappers import DBusAddress
noti = DBusAddress('/org/freedesktop/Notifications',
bus_name='org.freedesktop.Notifications',
interface='org.freedesktop.Notifications')
connection = connect_and_authenticate(bus="SESSION")
match_rule = MatchRule(
type="signal",
sender=noti.bus_name,
interface=noti.interface,
member="NotificationClosed",
path=noti.object_path,
)
# This defines messages for talking to the D-Bus bus daemon itself:
session_bus = Proxy(message_bus, connection)
# Tell the session bus to pass us matching signal messages:
print("Match added?", session_bus.AddMatch(match_rule) == ())
reasons = {1: 'expiry', 2: 'dismissal', 3: 'dbus', 4: 'undefined'}
def notification_closed(data):
"""Callback for when we receive a notification closed signal"""
nid, reason_no = data
reason = reasons.get(reason_no, 'unknown')
print('Notification {} closed by: {}'.format(nid, reason))
# Connect the callback to the relevant signal
connection.router.subscribe_signal(
callback=notification_closed,
path=noti.object_path,
interface=noti.interface,
member="NotificationClosed"
)
# Using dbus-send or d-feet or blocking_notify.py, send a notification and
# manually close it or call ``.CloseNotification`` after a beat.
try:
while True:
connection.recv_messages()
except KeyboardInterrupt:
pass
connection.close()
| 2.609375 | 3 |
test.py | league3236/shholiday | 0 | 7079 | <reponame>league3236/shholiday<gh_stars>0
from shholiday import holiday2020 as hd
daytuple = (1,1)
nowholiday = hd.holiday2020()
print(nowholiday.is_holiday(daytuple)) | 2.390625 | 2 |
blog/migrations/0005_title_null.py | encukou/Zpetnovazebnik | 1 | 7080 | # Generated by Django 2.1.7 on 2019-02-27 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_longer_password'),
]
operations = [
migrations.AlterField(
model_name='session',
name='title',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| 1.5625 | 2 |
setup.py | Kuba77/Xian-DB | 1 | 7081 | from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='xiandb',
version='0.2.0',
description='A database model for Xian',
long_description=long_description,
url='https://github.com/Kuba77/Xian-DB',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: XIAN Collaborators',
'Topic :: Software Development :: Database',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7'
],
keywords='xian database db',
packages=['xiandb', 'xiandb.models'],
install_requires=['mongokat', 'pyyaml', 'bcrypt'],
extras_require={}
)
| 1.359375 | 1 |
yolo3/focal_loss.py | ashishpatel26/tf2-yolo3 | 43 | 7082 | from functools import partial
import tensorflow as tf
_EPSILON = tf.keras.backend.epsilon()
def register_keras_custom_object(cls):
tf.keras.utils.get_custom_objects()[cls.__name__] = cls
return cls
def binary_focal_loss(y_true, y_pred, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None):
y_pred = tf.convert_to_tensor(y_pred)
if not y_pred.dtype.is_floating:
y_pred = tf.dtypes.cast(y_pred, dtype=tf.float32)
if from_logits:
return _binary_focal_loss_from_logits(labels=y_true,
logits=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
else:
return _binary_focal_loss_from_probs(labels=y_true,
p=y_pred,
gamma=gamma,
pos_weight=pos_weight,
label_smoothing=label_smoothing)
@register_keras_custom_object
class BinaryFocalLoss(tf.keras.losses.Loss):
def __init__(self, gamma, *, pos_weight=None, from_logits=False, label_smoothing=None, **kwargs):
super().__init__(**kwargs)
self.gamma = gamma
self.pos_weight = pos_weight
self.from_logits = from_logits
self.label_smoothing = label_smoothing
def get_config(self):
config = super().get_config()
config.update(gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
return config
def call(self, y_true, y_pred):
return binary_focal_loss(y_true=y_true,
y_pred=y_pred,
gamma=self.gamma,
pos_weight=self.pos_weight,
from_logits=self.from_logits,
label_smoothing=self.label_smoothing)
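# Minimal usage sketch: BinaryFocalLoss behaves like any other tf.keras loss, so it can be
# passed to model.compile(loss=BinaryFocalLoss(gamma=2.0)) or called directly; the tensors
# below are illustrative only.
def _binary_focal_loss_example():
    y_true = tf.constant([[1.0], [0.0], [1.0]])
    y_pred = tf.constant([[0.9], [0.2], [0.4]])
    loss_fn = BinaryFocalLoss(gamma=2.0, from_logits=False)
    return loss_fn(y_true, y_pred)  # scalar tensor (mean reduction by default)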
# Helper functions below
def _process_labels(labels, label_smoothing, dtype):
labels = tf.dtypes.cast(labels, dtype=dtype)
if label_smoothing is not None:
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels
def _binary_focal_loss_from_logits(labels, logits, gamma, pos_weight, label_smoothing):
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=logits.dtype)
# Compute probabilities for the positive class
p = tf.math.sigmoid(logits)
if label_smoothing is None:
labels_shape = labels.shape
logits_shape = logits.shape
if not labels_shape.is_fully_defined() or labels_shape != logits_shape:
labels_shape = tf.shape(labels)
logits_shape = tf.shape(logits)
shape = tf.broadcast_dynamic_shape(labels_shape, logits_shape)
labels = tf.broadcast_to(labels, shape)
logits = tf.broadcast_to(logits, shape)
if pos_weight is None:
loss_func = tf.nn.sigmoid_cross_entropy_with_logits
else:
loss_func = partial(tf.nn.weighted_cross_entropy_with_logits, pos_weight=pos_weight)
loss = loss_func(labels=labels, logits=logits)
modulation_pos = (1 - p)**gamma
modulation_neg = p**gamma
mask = tf.dtypes.cast(labels, dtype=tf.bool)
modulation = tf.where(mask, modulation_pos, modulation_neg)
return modulation * loss
# Terms for the positive and negative class components of the loss
pos_term = labels * ((1 - p)**gamma)
neg_term = (1 - labels) * (p**gamma)
# Term involving the log and ReLU
log_weight = pos_term
if pos_weight is not None:
log_weight *= pos_weight
log_weight += neg_term
log_term = tf.math.log1p(tf.math.exp(-tf.math.abs(logits)))
log_term += tf.nn.relu(-logits)
log_term *= log_weight
# Combine all the terms into the loss
loss = neg_term * logits + log_term
return loss
def _binary_focal_loss_from_probs(labels, p, gamma, pos_weight, label_smoothing):
q = 1 - p
# For numerical stability (so we don't inadvertently take the log of 0)
p = tf.math.maximum(p, _EPSILON)
q = tf.math.maximum(q, _EPSILON)
# Loss for the positive examples
pos_loss = -(q**gamma) * tf.math.log(p)
if pos_weight is not None:
pos_loss *= pos_weight
# Loss for the negative examples
neg_loss = -(p**gamma) * tf.math.log(q)
# Combine loss terms
if label_smoothing is None:
labels = tf.dtypes.cast(labels, dtype=tf.bool)
loss = tf.where(labels, pos_loss, neg_loss)
else:
labels = _process_labels(labels=labels, label_smoothing=label_smoothing, dtype=p.dtype)
loss = labels * pos_loss + (1 - labels) * neg_loss
return loss | 2.21875 | 2 |
characters/models/characters.py | Sult/evetool | 0 | 7083 | import time
from collections import OrderedDict
from datetime import datetime, timedelta
from django.db import models
from django.conf import settings
from django.utils.timezone import utc
from .skills import Skill, SkillGroup
from metrics.models import Corporation
from tasks.models import EveApiCache, Task
from evetool.storage import OverwriteStorage
import utils
class CharacterApi(models.Model):
""" charactertype apis """
api = models.ForeignKey("apis.Api")
characterid = models.BigIntegerField()
charactername = models.CharField(max_length=254)
corporationid = models.BigIntegerField()
corporationname = models.CharField(max_length=254)
def __unicode__(self):
return self.charactername
#get right icon for characters view
def view_icon(self):
try:
icon = self.characterapiicon_set.get(size=128, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
#def character sheet image
def sheet_icon(self):
try:
icon = self.characterapiicon_set.get(size=200, relation=self)
return icon.icon
except CharacterApiIcon.DoesNotExist:
return None
def current_balance(self):
if self.api.access_to("CharacterInfo"):
sheet = utils.connection.api_request(
"CharacterInfoAuth", obj=self
)
if sheet.accountBalance:
return round(float(sheet.accountBalance), 2)
return 0
def sheet_cache_key(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
return utils.connection.generate_cache_key(
category, key, api=self.api, **kwargs
)
else:
return utils.connection.generate_cache_key(category, key)
def sheet_set_cache_job(self):
key = "CharacterInfo"
category = EveApiCache.EVE
kwargs = {"characterID": self.characterid}
if self.api.access_to("CharacterInfo"):
api = self.api
else:
api = None
EveApiCache.objects.create(
priority=Task.VERY_HIGH,
api=api,
category=category,
key=key,
kwargs=kwargs,
)
#get the data for landing page after character selection
def character_sheet(self):
sheet = utils.connection.get_cache(self.sheet_cache_key())
employment = self.employment_history(sheet)
return sheet, employment
#employment history of a player
@staticmethod
def employment_history(sheet):
cache_key = "employment_history_%d" % int(sheet.characterID)
#result = utils.connection.get_cache(cache_key)
result = None
if not result:
cache_timer = 60 * 60
result = []
for corp_data in sheet.employmentHistory:
result.append({
"corporation": Corporation.find_corporation(
corp_data.corporationID
),
"startdate": utils.common.convert_timestamp(
corp_data.startDate
)
})
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skill in training
def skill_in_training(self):
training_skill = None
if self.api.access_to("SkillInTraining"):
in_training = utils.connection.api_request(
"SkillInTraining", obj=self
)
try:
training_skill = {
"skill": Skill.objects.get(
typeid=int(in_training.trainingTypeID)
).typename,
"to_level": int(in_training.trainingToLevel),
"finnished": utils.common.convert_timestamp(
in_training.trainingEndTime
)
}
except AttributeError:
training_skill = {"skill": "No skill in training"}
return training_skill
#characters trained skills
def trained_skills(self):
cache_key = "trained_skills_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
cache_timer = 60 * 5
sheet = utils.connection.api_request("CharacterSheet", obj=self)
groups = SkillGroup.objects.exclude(
groupname="Fake Skills"
).order_by("groupname")
skills = Skill.objects.order_by("typename")
all_skills = OrderedDict()
skillpoints = {}
for group in groups:
all_skills[group.groupname] = list()
skillpoints[group.groupname] = 0
for skill in skills:
trained = sheet.skills.Get(skill.typeid, False)
if trained:
all_skills[skill.skillgroup.groupname].append(
{
"skill": skill,
"level": int(trained.level)
}
)
skillpoints[skill.skillgroup.groupname] += \
trained.skillpoints
result = {
"all_skills": all_skills,
"skillpoints": skillpoints,
}
utils.connection.set_cache(cache_key, result, cache_timer)
return result
#get skillqueue
def skill_queue(self):
queue = None
if self.api.access_to("SkillQueue"):
queue = {}
skills = utils.connection.api_request(
"SkillQueue", obj=self
).skillqueue
queue["skills"] = skills
queue["total"] = self.total_skillpoints(skills)
now = datetime.now().replace(tzinfo=utc)
try:
trainingtime = utils.common.convert_timestamp(
skills[-1].endTime
) - now
trainingtime -= timedelta(
microseconds=trainingtime.microseconds
)
queue["trainingtime"] = trainingtime
except TypeError:
pass
return queue
#get total skillpoints for skills in queue
@staticmethod
def total_skillpoints(skills):
total = 0
for skill in skills:
total += int(skill.endSP - skill.startSP)
return total
#walletjournal
def wallet_journal(self):
cache_key = "walletjournal_character_%d" % self.pk
result = utils.connection.get_cache(cache_key)
if not result:
self.update_journal()
cache_timer = 60 * 10
utils.connection.set_cache(cache_key, True, cache_timer)
return CharacterJournal.objects.filter(characterapi=self)
#updates journal to current moment
def update_journal(self):
fromid = 0
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500
).transactions
while True:
for trans in transactions:
date = utils.common.convert_timestamp(trans.date)
#check for duplicate
if CharacterJournal.objects.filter(
characterapi=self,
balance=trans.balance,
date=date,
).exists():
continue
else:
CharacterJournal.create_entry(self, trans)
if int(trans.refID) < fromid or fromid == 0:
fromid = int(trans.refID)
if len(transactions) < 2500:
break
else:
time.sleep(1)
transactions = utils.connection.api_request(
"WalletJournal", obj=self, rowcount=2500, fromid=fromid
).transactions
class CharacterApiIcon(models.Model):
""" images related to characters """
relation = models.ForeignKey("characters.CharacterApi")
size = models.IntegerField(choices=settings.IMAGE_SIZES)
typeid = models.IntegerField()
icon = models.ImageField(
upload_to="images/characters/",
storage=OverwriteStorage(),
blank=True,
null=True
)
class Meta:
unique_together = ["size", "relation"]
def __unicode__(self):
return "Character Image %s" % self.relation.charactername
# def save(self, *args, **kwargs):
# try:
# temp = CharacterApiIcon.objects.get(pk=self.pk)
# if temp.icon != self.icon:
# temp.icon.delete()
# except ObjectDoesNotExist:
# pass
# super(CharacterApiIcon, self).save(*args, **kwargs)
#get list of wanted character icon sizes
@staticmethod
def icon_sizes():
return [128, 200]
class Transaction(models.Model):
reftypeid = models.SmallIntegerField()
ownername1 = models.CharField(max_length=254)
ownerid1 = models.IntegerField()
ownername2 = models.CharField(max_length=254)
ownerid2 = models.IntegerField()
argname1 = models.CharField(max_length=254)
argid1 = models.IntegerField()
amount = models.FloatField(null=True)
reason = models.TextField(blank=True)
taxreceiverid = models.IntegerField(null=True)
taxamount = models.FloatField(null=True)
class Meta:
abstract = True
class CharacterJournal(Transaction):
"""
Wallet transcations of a player. Saved to database so data can
be filtered, and metadata can be created.
Like balance graphs, see how much you paid in taxes and more.
"""
characterapi = models.ForeignKey(CharacterApi)
date = models.DateTimeField()
balance = models.FloatField()
class Meta:
unique_together = ["characterapi", "date", "balance"]
ordering = ["-date", "-reftypeid"]
def __unicode__(self):
return "%s's transaction" % self.characterapi.charactername
@staticmethod
def create_entry(characterapi, transaction):
if transaction.taxReceiverID == "":
taxreceiverid = None
else:
taxreceiverid = int(transaction.taxReceiverID)
if transaction.taxAmount == "":
taxamount = None
else:
taxamount = round(float(transaction.taxAmount), 2)
date = utils.common.convert_timestamp(transaction.date)
CharacterJournal.objects.create(
characterapi=characterapi,
date=date,
balance=round(float(transaction.balance), 2),
reftypeid=int(transaction.refTypeID),
ownername1=str(transaction.ownerName1),
ownerid1=int(transaction.ownerID1),
ownername2=str(transaction.ownerName2),
ownerid2=int(transaction.ownerID2),
argname1=str(transaction.argName1),
argid1=int(transaction.argID1),
amount=round(float(transaction.amount), 2),
reason=str(transaction.reason),
taxreceiverid=taxreceiverid,
taxamount=taxamount,
)
@staticmethod
def monthly_balance(characterapi):
last_restart = utils.common.last_server_restart()
days = last_restart - timedelta(days=31)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[days, last_restart]
)
balance = []
for days in range(31):
first = entries.first()
date = (last_restart - timedelta(days=days))
#make timestamp in miliseconds
timestamp = int(time.mktime(date.timetuple()) * 1000)
if first:
isk = first.balance
else:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
balance.append([timestamp, isk])
entries = entries.filter(date__lt=(date - timedelta(days=1)))
#return reversed list
return balance[::-1]
@staticmethod
def weekly_balance(characterapi):
now = datetime.now().replace(tzinfo=utc)
entries = CharacterJournal.objects.filter(
characterapi=characterapi,
date__range=[
now.replace(hour=23, minute=59, second=0) - timedelta(days=9),
now
]
)
balance = []
for days in range(8):
date = now.replace(
hour=0, minute=0, second=0
) - timedelta(days=days)
day_entries = entries.filter(
date__lt=now.replace(
hour=23, minute=59, second=59
) - timedelta(days=days),
date__gt=date
)
if not day_entries.count() > 0:
try:
isk = balance[-1][1]
except IndexError:
isk = characterapi.current_balance()
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
else:
for entry in day_entries:
timestamp = int(time.mktime(entry.date.timetuple()) * 1000)
balance.append([timestamp, entry.balance])
#add last value for date on xaxis
date = now.replace(hour=23, minute=59, second=59) - timedelta(days=8)
isk = balance[-1][1]
timestamp = int(time.mktime(date.timetuple()) * 1000)
balance.append([timestamp, isk])
return balance[::-1]
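    # Note: monthly_balance() and weekly_balance() both return a list of
    # [timestamp_in_milliseconds, isk_balance] pairs ordered oldest-first, a shape that
    # JavaScript charting libraries typically expect; the exact front-end consumer is
    # assumed here, not shown.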
| 2.03125 | 2 |
webex_assistant_sdk/templates/mindmeld_template/{{cookiecutter.skill_name}}/{{cookiecutter.skill_name}}/__init__.py | sachanacar/webex-assistant-sdk | 0 | 7084 | # -*- coding: utf-8 -*-
from {{cookiecutter.skill_name}}.root import app
__all__ = ['app']
| 1.007813 | 1 |
backend/api/v1/dialogs/urls.py | donicrazy/ChatApp | 0 | 7085 | from django.urls import path
from backend.api.v1.dialogs.views import (
DialogListCreateView,
DialogRetrieveUpdateDestroyAPIView,
DialogMembershipListCreateView,
DialogMessageListCreateView,
DialogMessageRetrieveUpdateDestroyAPIView,
)
urlpatterns = [
path('', DialogListCreateView.as_view()),
path('<int:pk>', DialogRetrieveUpdateDestroyAPIView.as_view()),
path('membership/', DialogMembershipListCreateView.as_view()),
path('messages/', DialogMessageListCreateView.as_view()),
path('messages/<int:pk>', DialogMessageRetrieveUpdateDestroyAPIView.as_view()),
]
| 1.804688 | 2 |
biomaj2galaxy/commands/init.py | genouest/biomaj2galaxy | 1 | 7086 | <filename>biomaj2galaxy/commands/init.py
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from bioblend import galaxy
from biomaj2galaxy import config, pass_context
from biomaj2galaxy.io import info, warn
import click
CONFIG_TEMPLATE = """## BioMAJ2Galaxy: Global Configuration File.
# Each stanza should contain a single Galaxy server to interact with.
#
# You can set the key __default to the name of a default instance
__default: local
local:
url: "%(url)s"
apikey: "%(apikey)s"
"""
SUCCESS_MESSAGE = (
"Ready to go! Type `biomaj2galaxy` to get a list of commands you can execute."
)
@click.command()
@pass_context
def init(ctx, url=None, api_key=None, admin=False, **kwds):
"""Help initialize global configuration (in home directory)
"""
click.echo("""Welcome to BioMAJ2Galaxy""")
if os.path.exists(config.global_config_path()):
info("Your biomaj2galaxy configuration already exists. Please edit it instead: %s" % config.global_config_path())
return 0
while True:
# Check environment
url = click.prompt("url")
apikey = click.prompt("apikey")
info("Testing connection...")
try:
instance = galaxy.GalaxyInstance(url=url, key=apikey)
instance.libraries.get_libraries()
# We do a connection test during startup.
info("Ok! Everything looks good.")
break
except Exception as e:
warn("Error, we could not access the configuration data for your instance: %s", e)
should_break = click.prompt("Continue despite inability to contact this instance? [y/n]")
if should_break in ('Y', 'y'):
break
config_path = config.global_config_path()
if os.path.exists(config_path):
warn("File %s already exists, refusing to overwrite." % config_path)
return -1
with open(config_path, "w") as f:
f.write(CONFIG_TEMPLATE % {
'url': url,
'apikey': apikey,
})
info(SUCCESS_MESSAGE)
| 2.4375 | 2 |
datamart/tests/test_Dimension.py | josemrsantos/zoopla_datamart | 1 | 7087 | <filename>datamart/tests/test_Dimension.py
from ..datamart import *
def test_create_dimension():
dimension = Dimension("test_dimension")
assert dimension.is_degenerate == False
def test_create_dimension_insert_2_identical_lines():
''' with 2 identical lines, only one gets stored
'''
dimension = Dimension("test_dimension")
dimension.addDimensionLine('test')
dimension.addDimensionLine('test')
assert dimension.id_value == 1
assert len(list(dimension.values)) == 1
def test_create_dimension_insert_2_identical_lines_and_1_different():
''' with 2 identical lines and one different, only 2 get stored
'''
dimension = Dimension("test_dimension")
dimension.addDimensionLine('test')
dimension.addDimensionLine('test2')
dimension.addDimensionLine('test')
assert dimension.id_value == 2
assert len(list(dimension.values)) == 2
| 2.484375 | 2 |
preprocessing/convert_formats/msmarco_doc_create_train_input.py | PranjaliJain/matchmaker | 97 | 7088 | #
# msmarco doc: create the train.tsv triples
# -------------------------------
import random
random.seed(42)
import argparse
import os
import sys
from tqdm import tqdm
sys.path.append(os.getcwd())
from matchmaker.evaluation.msmarco_eval import *
from collections import defaultdict
from matchmaker.dataloaders.bling_fire_tokenizer import BlingFireTokenizer
#
# config
#
parser = argparse.ArgumentParser()
parser.add_argument('--out-file', action='store', dest='out_file',
help='training output text file location', required=True)
parser.add_argument('--out-file-ids', action='store', dest='out_file_ids',
help='training output ids file location', required=True)
parser.add_argument('--candidate-file', action='store', dest='candidate_file',
help='trec ranking file location (lucene output)', required=True)
parser.add_argument('--collection-file', action='store', dest='collection_file',
help='collection.tsv location', required=True)
parser.add_argument('--query-file', action='store', dest='query_file',
help='query.tsv location', required=True)
parser.add_argument('--qrel', action='store', dest='qrel_file',
help='qrel location', required=True)
args = parser.parse_args()
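# Example invocation (all paths are placeholders for wherever the MS MARCO doc files live):
#
#   python msmarco_doc_create_train_input.py \
#       --candidate-file runs/msmarco-doctrain.bm25.trec \
#       --collection-file data/msmarco-docs.tsv \
#       --query-file data/msmarco-doctrain-queries.tsv \
#       --qrel data/msmarco-doctrain-qrels.tsv \
#       --out-file train.triples.tsv \
#       --out-file-ids train.triples.ids.tsv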
max_triples = 10_000_000
max_doc_char_length = 150_000
max_doc_token_length = 10000
#
# load data
# -------------------------------
#
collection = {}
#collection_length = {}
tokenizer = BlingFireTokenizer()
with open(args.collection_file,"r",encoding="utf8") as collection_file:
for line in tqdm(collection_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
max_char_doc = ls[1].rstrip()[:max_doc_char_length]
collection[_id] = max_char_doc
#collection_length[_id] = len(tokenizer.tokenize(max_char_doc))
queries = {}
with open(args.query_file,"r",encoding="utf8") as query_file:
for line in tqdm(query_file):
ls = line.split("\t") # id<\t>text ....
_id = ls[0]
queries[_id] = ls[1].rstrip()
qrels = load_reference(args.qrel_file)
#
# produce output
# -------------------------------
#
triples = []
stats = defaultdict(int)
with open(args.candidate_file,"r",encoding="utf8") as candidate_file:
for line in tqdm(candidate_file):
#if random.random() <= 0.5: continue #skip some entries for faster processing
[topicid, _ , unjudged_docid, rank, _ , _ ] = line.split()
#if int(rank) <= 100:
# #if random.random() < 0.7: continue # skip 70% of candidates to speed up things...
# #else:
# stats['< 100 sampling count'] += 1
#else:
# if random.random() <= 0.9: continue # skip 90% of candidates assumong top1k -> same number of samples from 0-100 as 101 - 1000
# else:
# stats['> 100 sampling count'] += 1
if topicid not in queries or topicid not in qrels: # added: because we carved out the validation qrels from the train -> so there are some missing
stats['skipped'] += 1
continue
#assert topicid in qrels
assert unjudged_docid in collection
# Use topicid to get our positive_docid
positive_docid = random.choice(qrels[topicid])
assert positive_docid in collection
if unjudged_docid in qrels[topicid]:
stats['docid_collision'] += 1
continue
stats['kept'] += 1
#if collection_length[positive_docid] > max_doc_token_length and collection_length[unjudged_docid] > max_doc_token_length:
# stats['both_to_long'] += 1
# continue
#if collection_length[positive_docid] > max_doc_token_length:
# stats['pos_to_long'] += 1
# continue
#if collection_length[unjudged_docid] > max_doc_token_length:
# stats['unjuged_to_long'] += 1
# continue
triples.append((topicid,positive_docid,unjudged_docid))
# important: shuffle the train data
random.shuffle(triples)
with open(args.out_file,"w",encoding="utf8") as out_file_text ,\
open(args.out_file_ids,"w",encoding="utf8") as out_file_ids:
for i,(topicid, positive_docid, unjudged_docid) in tqdm(enumerate(triples)):
if i == max_triples:
break
if collection[positive_docid].strip() != "" and collection[unjudged_docid].strip() != "":
out_file_ids.write(str(topicid)+"\t"+positive_docid+"\t"+unjudged_docid+"\n")
out_file_text.write(queries[topicid]+"\t"+collection[positive_docid]+"\t"+collection[unjudged_docid]+"\n")
for key, val in stats.items():
print(f"{key}\t{val}") | 1.773438 | 2 |
tests/communities/test_reply.py | powerblossom/workcloud | 1 | 7089 | <reponame>powerblossom/workcloud
from core.response import Response
from communities.tests import TestCase
class ReplyPermissionTest(TestCase):
def setUp(self):
self.create_user(is_staff=True)
def test_permission_reply_all(self):
self.create_forum()
self.create_thread()
thread_id = self.thread.id
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
}
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('thread').get('id') == thread_id and
self.data.get('reply_id') == 0 and
not self.data.get('user') and
self.data.get('name') == 'tester' and
self.data.get('content') == 'test' and
not self.data.get('is_deleted')
)
reply_id = self.data.get('id')
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert (
response.status_code == Response.HTTP_200 and
len(self.data) == 1 and
self.data[0].get('name') == 'tester' and
self.data[0].get('content') == 'test'
)
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit'
},
)
assert response.status_code == Response.HTTP_401
response = self.delete(
'/api/communities/r/%d/' % reply_id
)
assert response.status_code == Response.HTTP_401
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert response.status_code == Response.HTTP_200
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='<EMAIL>')
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert response.status_code == Response.HTTP_404
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_404
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('thread').get('id') == thread_id and
self.data.get('reply_id') == 0 and
self.data.get('user').get('id') == self.user.id and
self.data.get('content') == 'test' and
not self.data.get('is_deleted')
)
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert (
response.status_code == Response.HTTP_200 and
len(self.data) == 2
)
def test_permission_reply_member(self):
option = self.create_option(
permission_reply='member'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
}
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_200
self.create_user(username='<EMAIL>')
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
reply_id = self.data.get('id')
assert (
response.status_code == Response.HTTP_201 and
self.data.get('content') == 'test' and
self.data.get('user').get('username') == self.user.username
)
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'edit'
)
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_200
def test_permission_reply_staff(self):
option = self.create_option(
permission_reply='staff'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'name': 'tester',
'content': 'test'
}
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_200
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_201
reply_id = self.data.get('id')
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'edit'
)
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='<EMAIL>')
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_403
response = self.patch(
'/api/communities/r/%d/' % reply_id,
{
'content': 'edit',
},
auth=True
)
assert response.status_code == Response.HTTP_404
response = self.delete(
'/api/communities/r/%d/' % reply_id,
auth=True
)
assert response.status_code == Response.HTTP_404
def test_permission_thread_read_member(self):
option = self.create_option(
permission_read='member',
permission_reply='member'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='<EMAIL>')
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_200
def test_permission_thread_read_staff(self):
option = self.create_option(
permission_read='staff',
permission_reply='staff'
)
self.create_forum(option=option)
self.create_thread()
thread_id = self.thread.id
response = self.get(
'/api/communities/f/%d/replies/' % thread_id
)
assert response.status_code == Response.HTTP_401
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.create_user(username='<EMAIL>')
response = self.get(
'/api/communities/f/%d/replies/' % thread_id,
auth=True
)
assert response.status_code == Response.HTTP_403
class ReplyModelTest(TestCase):
def setUp(self):
self.create_user(is_staff=True)
self.create_forum()
self.create_thread()
self.create_reply()
def test_nested_reply(self):
response = self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('reply_id') == 0
)
reply_id = self.data.get('id')
response = self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': reply_id,
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('reply_id') == reply_id
)
response = self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': self.data.get('id'),
'content': 'test'
},
auth=True
)
assert (
response.status_code == Response.HTTP_201 and
self.data.get('reply_id') == reply_id
)
def test_reply_edit_delete(self):
response = self.patch(
'/api/communities/r/%d/' % self.reply.id,
{
'content': 'bow wow'
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'bow wow' and
self.data.get('reply_id') == 0 and
not self.data.get('name')
)
response = self.patch(
'/api/communities/r/%d/' % self.reply.id,
{
'reply_id': self.reply.id,
'name': 'dog',
'content': 'meow'
},
auth=True
)
assert (
response.status_code == Response.HTTP_200 and
self.data.get('content') == 'meow' and
self.data.get('reply_id') == 0 and
not self.data.get('name')
)
response = self.delete(
'/api/communities/r/%d/' % self.reply.id,
auth=True
)
assert response.status_code == Response.HTTP_200
self.get(
'/api/communities/f/%d/replies/' % self.thread.id,
auth=True
)
assert (
len(self.data) == 1 and
self.data[0].get('is_deleted')
)
def test_reply_to_invalid_id(self):
thread_id = int(self.thread.id) + 1
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_404
reply_id = int(self.reply.id) + 1
response = self.post(
'/api/communities/f/%d/reply/' % thread_id,
{
'reply_id': reply_id,
'content': 'test'
},
auth=True
)
assert response.status_code == Response.HTTP_404
class ReplyListTest(TestCase):
def setUp(self):
self.create_user(is_staff=True)
self.create_forum()
self.create_thread()
def test_reply_list(self):
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': '1'
},
auth=True
)
reply_id = self.data.get('id')
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': '4'
},
auth=True
)
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': reply_id,
'content': '2'
},
auth=True
)
nested_reply_id = self.data.get('id')
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'content': '5'
},
auth=True
)
self.post(
'/api/communities/f/%d/reply/' % self.thread.id,
{
'reply_id': nested_reply_id,
'content': '3'
},
auth=True
)
self.get(
'/api/communities/f/%d/replies/' % self.thread.id,
auth=True
)
assert (
len(self.data) == 5 and
self.data[0].get('content') == '1' and
self.data[0].get('reply_id') == 0 and
self.data[1].get('content') == '2' and
self.data[1].get('reply_id') == reply_id and
self.data[2].get('content') == '3' and
self.data[2].get('reply_id') == reply_id and
self.data[3].get('content') == '4' and
self.data[3].get('reply_id') == 0 and
self.data[4].get('content') == '5' and
self.data[4].get('reply_id') == 0
)
| 2.28125 | 2 |
examples/Word2Vec_AverageVectorsTuto.py | noiseux1523/Deep-Belief-Network | 1 | 7090 | <reponame>noiseux1523/Deep-Belief-Network
# Author: <NAME>
# Date: 8/6/2014
#
# This file contains code to accompany the Kaggle tutorial
# "Deep learning goes to the movies". The code in this file
# is for Parts 2 and 3 of the tutorial, which cover how to
# train a model using Word2Vec.
#
# *************************************** #
# ****** Read the two training sets and the test set
#
import pandas as pd
import os
from nltk.corpus import stopwords
import nltk.data
import logging
import numpy as np # Make sure that numpy is imported
from gensim.models import Word2Vec
from sklearn.ensemble import RandomForestClassifier
from KaggleWord2VecUtility import KaggleWord2VecUtility
# ****** Define functions to create average word vectors
#
def makeFeatureVec(words, model, num_features):
# Function to average all of the word vectors in a given
# paragraph
#
# Pre-initialize an empty numpy array (for speed)
featureVec = np.zeros((num_features,), dtype="float32")
#
nwords = 0.
#
# Index2word is a list that contains the names of the words in
# the model's vocabulary. Convert it to a set, for speed
index2word_set = set(model.wv.index2word)
#
# Loop over each word in the review and, if it is in the model's
# vocaublary, add its feature vector to the total
for word in words:
if word in index2word_set:
nwords = nwords + 1.
featureVec = np.add(featureVec, model[word])
#
# Divide the result by the number of words to get the average
featureVec = np.divide(featureVec, nwords)
return featureVec
def getAvgFeatureVecs(reviews, model, num_features):
# Given a set of reviews (each one a list of words), calculate
# the average feature vector for each one and return a 2D numpy array
#
# Initialize a counter
counter = 0.
#
# Preallocate a 2D numpy array, for speed
reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype="float32")
#
# Loop through the reviews
for review in reviews:
#
# Print a status message every 1000th review
if counter % 1000. == 0.:
print "Review %d of %d" % (counter, len(reviews))
#
# Call the function (defined above) that makes average feature vectors
reviewFeatureVecs[int(counter)] = makeFeatureVec(review, model, \
num_features)
#
# Increment the counter
counter = counter + 1.
return reviewFeatureVecs
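# The array returned above has shape (len(reviews), num_features): one averaged word
# vector per review, i.e. an N x 300 float32 matrix with the settings used below.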
def getCleanReviews(reviews):
clean_reviews = []
for review in reviews["review"]:
clean_reviews.append(KaggleWord2VecUtility.review_to_wordlist(review, remove_stopwords=True))
return clean_reviews
if __name__ == '__main__':
# Read data from files
train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'labeledTrainData.tsv'), header=0,
delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', 'testData.tsv'), header=0, delimiter="\t",
quoting=3)
unlabeled_train = pd.read_csv(os.path.join(os.path.dirname(__file__), 'data', "unlabeledTrainData.tsv"), header=0,
delimiter="\t", quoting=3)
# Verify the number of reviews that were read (100,000 in total)
print "Read %d labeled train reviews, %d labeled test reviews, " \
"and %d unlabeled reviews\n" % (train["review"].size,
test["review"].size, unlabeled_train["review"].size)
# Load the punkt tokenizer
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# ****** Split the labeled and unlabeled training sets into clean sentences
#
sentences = [] # Initialize an empty list of sentences
print "Parsing sentences from training set"
for review in train["review"]:
sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)
print "Parsing sentences from unlabeled set"
for review in unlabeled_train["review"]:
sentences += KaggleWord2VecUtility.review_to_sentences(review, tokenizer)
# ****** Set parameters and train the word2vec model
#
# Import the built-in logging module and configure it so that Word2Vec
# creates nice output messages
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', \
level=logging.INFO)
# Set values for various parameters
num_features = 300 # Word vector dimensionality
min_word_count = 40 # Minimum word count
num_workers = 4 # Number of threads to run in parallel
context = 10 # Context window size
downsampling = 1e-3 # Downsample setting for frequent words
# Initialize and train the model (this will take some time)
print "Training Word2Vec model..."
model = Word2Vec(sentences, workers=num_workers, \
size=num_features, min_count=min_word_count, \
window=context, sample=downsampling, seed=1)
# If you don't plan to train the model any further, calling
# init_sims will make the model much more memory-efficient.
model.init_sims(replace=True)
# It can be helpful to create a meaningful model name and
# save the model for later use. You can load it later using Word2Vec.load()
model_name = "300features_40minwords_10context"
model.save(model_name)
model.doesnt_match("man woman child kitchen".split())
model.doesnt_match("france england germany berlin".split())
model.doesnt_match("paris berlin london austria".split())
model.most_similar("man")
model.most_similar("queen")
model.most_similar("awful")
# ****** Create average vectors for the training and test sets
#
print "Creating average feature vecs for training reviews"
trainDataVecs = getAvgFeatureVecs(getCleanReviews(train), model, num_features)
print "Creating average feature vecs for test reviews"
testDataVecs = getAvgFeatureVecs(getCleanReviews(test), model, num_features)
# ****** Fit a random forest to the training set, then make predictions
#
# Fit a random forest to the training data, using 100 trees
forest = RandomForestClassifier(n_estimators=100)
print "Fitting a random forest to labeled training data..."
forest = forest.fit(trainDataVecs, train["sentiment"])
# Test & extract results
result = forest.predict(testDataVecs)
# Write the test results
output = pd.DataFrame(data={"id": test["id"], "sentiment": result})
output.to_csv("Word2Vec_AverageVectors.csv", index=False, quoting=3)
print "Wrote Word2Vec_AverageVectors.csv" | 3.5 | 4 |
src/phl_budget_data/etl/qcmr/positions/__init__.py | PhiladelphiaController/phl-budget-data | 0 | 7091 | from .core import FullTimePositions
| 0.972656 | 1 |
sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/models/search_get_schema_response.py | tzhanl/azure-sdk-for-python | 1 | 7092 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SearchGetSchemaResponse(Model):
"""The get schema operation response.
:param metadata: The metadata from search results.
:type metadata: ~azure.mgmt.loganalytics.models.SearchMetadata
:param value: The array of result values.
:type value: list[~azure.mgmt.loganalytics.models.SearchSchemaValue]
"""
_attribute_map = {
'metadata': {'key': 'metadata', 'type': 'SearchMetadata'},
'value': {'key': 'value', 'type': '[SearchSchemaValue]'},
}
def __init__(self, **kwargs):
super(SearchGetSchemaResponse, self).__init__(**kwargs)
self.metadata = kwargs.get('metadata', None)
self.value = kwargs.get('value', None)
| 1.992188 | 2 |
python/ds/spiralprint.py | unhingedporter/DataStructureMustKnow | 3 | 7093 | # Python3 program to print
# given matrix in spiral form
def spiralPrint(m, n, a):
start_row_index = 0
start_col_index = 0
l = 0
''' start_row_index - starting row index
m - ending row index
start_col_index - starting column index
n - ending column index
i - iterator '''
while (start_row_index < m and start_col_index < n):
# Print the first row from
# the remaining rows
for i in range(start_col_index, n):
print(a[start_row_index][i], end=" ")
start_row_index += 1
# Print the last column from
# the remaining columns
for i in range(start_row_index, m):
print(a[i][n - 1], end=" ")
n -= 1
# Print the last row from
# the remaining rows
if (start_row_index < m):
for i in range(n - 1, (start_col_index - 1), -1):
print(a[m - 1][i], end=" ")
m -= 1
# Print the first column from
# the remaining columns
if (start_col_index < n):
for i in range(m - 1, start_row_index - 1, -1):
print(a[i][start_col_index], end=" ")
start_col_index += 1
# Driver Code
a = [[1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12],
[13, 14, 15, 16, 17, 18]]
R = 3
C = 6
spiralPrint(R, C, a)
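# Expected output for the 3 x 6 matrix above:
# 1 2 3 4 5 6 12 18 17 16 15 14 13 7 8 9 10 11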
| 4.59375 | 5 |
rest-api/routers/authorization.py | marintrace/backend | 2 | 7094 | <gh_stars>1-10
"""
Authorization Utilities
"""
from shared.models.user_entities import User
from shared.service.jwt_auth_wrapper import JWTAuthManager
manager = JWTAuthManager(oidc_vault_secret="oidc/rest",
object_creator=lambda claims, assumed_role, user_roles: User(
first_name=claims["given_name"],
last_name=claims["family_name"],
school=assumed_role,
email=claims['email']
))
AUTH_USER = manager.auth_header()
| 1.882813 | 2 |
photonpy/tests/psf_g2d_sigma.py | qnano/photonpy | 5 | 7095 | import matplotlib.pyplot as plt
import numpy as np
from photonpy.cpp.context import Context
import photonpy.cpp.gaussian as gaussian
from photonpy.smlm.util import imshow_hstack
from photonpy.cpp.estimator import Estimator
def CheckDeriv(psf:Estimator, theta):
nderiv,ev=psf.NumDeriv(theta,eps=1e-6)
deriv,ev=psf.Derivatives(theta)
maxerr = np.max( np.abs(deriv-nderiv), (-1,-2) )
print(f"PSF {psf.ParamFormat()}, max {np.max(deriv)}, min: {np.min(deriv)}: Deriv-NumDeriv: {maxerr}")
plt.figure()
imshow_hstack(deriv[0] - nderiv[0])
with Context() as ctx:
g = gaussian.Gaussian(ctx)
for cuda in [False]:
print(f"CUDA = {cuda}")
sigma=2
roisize=12
psf = g.CreatePSF_XYIBg(roisize, sigma, cuda)
theta = [[4, 4, 1000, 3]]
img = psf.ExpectedValue(theta)
plt.figure()
plt.set_cmap('inferno')
smp = np.random.poisson(img)
plt.imshow(smp[0])
psf_sigma = g.CreatePSF_XYIBgSigma(roisize, sigma, cuda)
theta_s = [[4,4,1000,3,sigma]]
img2 = psf_sigma.ExpectedValue(theta_s)
CheckDeriv(psf, theta)
# CheckDeriv(psf_sigma)
print(f"PSF Sigma crlb: {psf_sigma.CRLB(theta_s)}")
theta = psf_sigma.Estimate(smp)[0]
print(theta)
| 2.1875 | 2 |
tests/tools_tests/helpers_tests.py | Gautierhyp/tespy | 0 | 7096 | # -*- coding: utf-8
"""Module for testing helper functions.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location
tests/tools_tests/helpers_tests.py
SPDX-License-Identifier: MIT
"""
from nose.tools import eq_
from tespy.tools.helpers import newton
def func(params, x):
return x ** 2 + x - 20
def deriv(params, x):
return 2 * x + 1
def test_newton_bounds():
"""
Test newton algorithm value limit handling.
    Try to calculate a zero crossing of a quadratic function in four
    tries.
- zero crossing within limits, starting value near 4
- zero crossing within limits, starting value near -5
- zero crossing below minimum
- zero crossing above maximum
    The function is x^2 + x - 20; its zero crossings are -5 and 4.
"""
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=0)
msg = ('The newton algorithm should find the zero crossing at 4.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-10, valmax=10, val0=-10)
msg = ('The newton algorithm should find the zero crossing at -5.0. ' +
str(round(result, 1)) + ' was found instead.')
eq_(-5.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-4, valmax=-2, val0=-3)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the lower boundary of -4.0.')
eq_(-4.0, result, msg)
result = newton(func, deriv, [], 0, valmin=-20, valmax=-10, val0=-10)
msg = ('The newton algorithm should not be able to find a zero crossing. '
'The value ' + str(round(result, 1)) + ' was found, but the '
'algorithm should have found the upper boundary of -10.0.')
eq_(-10.0, result, msg)
| 2.84375 | 3 |
theory/model/form.py | ralfonso/theory | 4 | 7097 | import formencode
import pylons
from pylons import app_globals as g
class OutputSchema(formencode.Schema):
allow_extra_fields = False
enabled = formencode.validators.Int()
class ConfigForm(formencode.Schema):
allow_extra_fields = True
filter_extra_fields = True
#pre_validators = [formencode.NestedVariables()]
action = formencode.validators.String(not_empty=False,if_missing=None)
cancel = formencode.validators.String(not_empty=False,if_missing=None)
firsttime = formencode.validators.Int(not_empty=False, if_missing=0)
server = formencode.validators.String(strip=True,not_empty=True,messages={'empty':'please enter a server host name'})
port = formencode.validators.Int(strip=True,not_empty=True,messages={'empty':'please enter a port, MPD default is 6600',
'integer':'please enter an integer value for port, MPD default is 6600'
})
password = formencode.validators.String(not_empty=False,if_missing=None)
webpassword = formencode.validators.String(not_empty=False,if_missing=None)
timeout = formencode.validators.Bool()
default_search = formencode.validators.String(not_empty=True)
awskey = formencode.validators.String(strip=True,not_empty=False,if_missing=None)
aws_secret = formencode.validators.String(strip=True,not_empty=False,if_missing=None)
outputs = formencode.ForEach(OutputSchema(), if_missing=[])
class StreamNameInUse(formencode.validators.FancyValidator):
def validate_python(self, values, state):
# if old name is set, don't do this check
if values['oldname']:
return
if values['name'] in [name[0] for name in g.tc.streams]:
raise formencode.Invalid({'stream_name_taken':"that stream name has already been used"}, values, state)
class StreamForm(formencode.Schema):
allow_extra_fields = False
name = formencode.validators.String(not_empty=True,strip=True,messages={'empty':'please enter a name for this stream'})
url = formencode.validators.URL(not_empty=True,require_tld=False,strip=True,check_exists=False,messages={'empty':'please enter a URL'})
oldname = formencode.validators.String(not_empty=False)
chained_validators = [StreamNameInUse()]
class State(object):
"""Trivial class to be used as State objects to transport information to formencode validators"""
def __init__(self, **kw):
for key in kw:
setattr(self, key, kw[key])
def __repr__(self):
atts = []
for key in self.__dict__:
atts.append( (key, getattr(self, key)) )
return self.__class__.__name__ + '(' + ', '.join(x[0] + '=' + repr(x[1]) for x in atts) + ')'
def validate_custom(schema, **state_kwargs):
"""Validate a formencode schema.
Works similar to the @validate decorator. On success return a dictionary
of parameters from request.params. On failure throws a formencode.Invalid
exception."""
# Create a state object if requested
if state_kwargs:
state = State(**state_kwargs)
else:
state = None
# In case of validation errors an exception is thrown. This needs to
# be caught elsewhere.
if state_kwargs.get('variable_decode', False):
params = formencode.variabledecode.variable_decode(pylons.request.params)
print pylons.request.params
print params
else:
params = pylons.request.params
return schema.to_python(params, state)
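# Minimal usage sketch inside a Pylons controller action (template name is illustrative):
#
#     try:
#         params = validate_custom(ConfigForm(), variable_decode=True)
#     except formencode.Invalid, error:
#         return htmlfill(render('/config.mako'), exception_error=error)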
def htmlfill(html, exception_error=None):
"""Add formencode error messages to an HTML string.
'html' contains the HTML page with the form (e.g. created with render()).
'exception_error' is the formencode.Invalid-Exception from formencode."""
return formencode.htmlfill.render(
form=html,
defaults=pylons.request.params,
errors=(exception_error and exception_error.unpack_errors()),
encoding=pylons.response.determine_charset()
)
| 2.25 | 2 |
utils/dynamo.py | OnRails-IN/backend | 0 | 7098 | """
Dynamo Utils
============
All utility functions for interactions with DynamoDB
Functions
- ensure_json
- create_user_table
- create_train_table
- create_or_update_record
- list_tables
- list_records
- get_record
- delete_table
- delete_record
- check_active
"""
import boto3
from decimal import Decimal
from constants import AWS_ACCESS_KEY, AWS_SECRET_KEY, AWS_REGION, DYNAMO_URL
ddb = boto3.resource(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
client = boto3.client(
'dynamodb',
aws_access_key_id = AWS_ACCESS_KEY,
aws_secret_access_key = AWS_SECRET_KEY,
endpoint_url = DYNAMO_URL,
region_name = AWS_REGION
)
def ensure_json(obj):
"""
Function to ensure that a python object is JSON serializable
Params:
obj::dict|[dict]
Object to be JSON serializable
Returns:
obj::dict|[dict]
Returns the JSON serializable object
"""
if isinstance(obj, list):
for i in range(len(obj)):
obj[i] = ensure_json(obj[i])
return obj
elif isinstance(obj, dict):
for k in obj.keys():
obj[k] = ensure_json(obj[k])
return obj
elif isinstance(obj, Decimal):
if obj % 1 == 0:
return int(obj)
else:
return float(obj)
else:
return obj
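# Example (sketch): boto3 returns DynamoDB numeric attributes as decimal.Decimal, so a
# scanned item such as {"count": Decimal("3"), "ratio": Decimal("0.5")} comes back from
# ensure_json() as {"count": 3, "ratio": 0.5}, which json.dumps() can serialize.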
def create_user_table():
"""
Function to create the "users" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "users",
KeySchema = [
{
"AttributeName": "username",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "index",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "username",
"AttributeType": "S"
},
{
"AttributeName": "index",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_user_table\n{}".format(e))
return None
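# Note (sketch): DynamoDB table creation is asynchronous; callers can poll
# check_active("users") (defined below) until the table reports ACTIVE before writing.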
def create_train_table():
"""
Function to create the "trains" table in DynamoDB
Returns:
bool
If the table was created or not
"""
try:
table = ddb.create_table(
TableName = "trains",
KeySchema = [
{
"AttributeName": "train_name",
"KeyType": "HASH" # Partition key
},
{
"AttributeName": "train_type",
"KeyType": "RANGE" # Sort key
}
],
AttributeDefinitions = [
{
"AttributeName": "train_name",
"AttributeType": "N"
},
{
"AttributeName": "train_type",
"AttributeType": "S"
}
],
ProvisionedThroughput = {
"ReadCapacityUnits": 10,
"WriteCapacityUnits": 10
}
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_user_table\n{}".format(e))
return None
def create_or_update_record(tableName, record):
"""
Function to create or update a record in DynamoDB
Params:
        tableName::str
            The table name to store the record in
        record::dict
            The object to store; must contain the "username" and "index" key attributes
Returns:
bool
If the record was inserted or not
"""
if not tableName or not record:
return False
if not {'username', 'index'}.issubset(record):
return False
try:
res = ddb.Table(tableName).get_item(
Key = {
"username": record['username'],
"index": record['index']
}
)
record = { **res['Item'], **record } if 'Item' in res else record
ddb.Table(tableName).put_item(
Item = record
)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ create_or_update_record\n{}".format(e))
return None
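# Example (sketch, hypothetical values): upsert a user record keyed on username + index;
# an existing item with the same key is merged with the new fields rather than replaced.
# create_or_update_record("users", {"username": "alice", "index": "2021-01-01", "train_no": 12658})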
def list_tables():
"""
Function to list all tables in DynamoDB
Returns:
tables::[str]
The list of tables
"""
try:
return client.list_tables()['TableNames']
except client.exceptions.ResourceNotFoundException:
print("Tables do not exist")
return False
except Exception as e:
print("Exception @ list_tables\n{}".format(e))
return None
def list_records(tableName):
"""
Function to list all records from a DynamoDB table
Params:
tableName::str
The table name to get the records
Returns:
records::[dict]
The list of records stored in the table
"""
if not tableName:
return False
try:
table = ddb.Table(tableName)
res = table.scan()
docs = ensure_json(res['Items'])
while 'LastEvaluatedKey' in res:
res = table.scan(ExclusiveStartKey = res['LastEvaluatedKey'])
docs.extend(ensure_json(res['Items']))
return docs
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ list_records\n{}".format(e))
return None
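# Example (sketch): scan an entire table, following LastEvaluatedKey pagination,
# and get back a JSON-serializable list of items.
# all_users = list_records("users")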
def get_record(tableName, query):
"""
Function to retrieve one record from DynamoDB table
Params:
tableName::str
The table name to get the record
query::dict
The query to fetch the record
Returns:
doc::dict
The record retrieved from the table
"""
if not tableName or not query or not isinstance(query, dict):
return False
try:
res = ddb.Table(tableName).get_item(
Key = query
)
doc = ensure_json(res['Item']) if 'Item' in res else None
return doc
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ get_record\n{}".format(e))
return None
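# Example (sketch, hypothetical key values): fetch one item by its full primary key.
# user = get_record("users", {"username": "alice", "index": "2021-01-01"})
# Returns None when no matching item exists and False when the query is missing/invalid.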
def delete_table(tableName):
"""
Function to delete a DynamoDB table
Params:
tableName::str
The table name to delete
Returns:
bool
If the table was deleted or not
"""
if not tableName:
return False
try:
ddb.Table(tableName).delete()
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_table\n{}".format(e))
return None
def delete_record(tableName, query):
"""
    Function to delete a record from a DynamoDB table
    Params:
        tableName::str
            The table name to delete the record from
        query::dict
            The primary key of the record to delete
Returns:
bool
If the record was deleted or not
"""
    if not tableName or not query or not isinstance(query, dict):
return False
try:
res = ddb.Table(tableName).delete_item(
Key = query
)
print(res)
return True
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ delete_record\n{}".format(e))
return None
def check_active(tableName):
"""
Function to check if a table is ACTIVE
Params:
tableName::str
The table name to check
Returns:
bool
If the table is active or not
"""
if not tableName:
return False
try:
if ddb.Table(tableName).table_status == "ACTIVE":
return True
return False
except client.exceptions.ResourceNotFoundException:
print("Table does not exist")
return False
except Exception as e:
print("Exception @ check_status\n{}".format(e))
return None | 2.6875 | 3 |
cloudcms/branch/__init__.py | gitana/cloudcms-python-driver | 0 | 7099 | from .branch import Branch | 1.03125 | 1 |